Linux-libre 5.7.6-gnu
[librecmc/linux-libre.git] / drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
/*
 * SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018, The Linux Foundation
 */

#include <linux/iopoll.h>

#include "dsi_phy.h"
#include "dsi.xml.h"

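/*
 * Check whether the PLL is already running by sampling bit 0 of the
 * PLL_CNTRL register in the PHY common block.
 */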
static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->base;
	u32 data = 0;

	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
	mb(); /* make sure read happened */

	return (data & BIT(0));
}

static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
{
	void __iomem *lane_base = phy->lane_base;
	int phy_lane_0 = 0;	/* TODO: Support all lane swap configs */

	/*
	 * LPRX and CDRX need to be enabled only for the physical data lane
	 * corresponding to the logical data lane 0.
	 */
	if (enable)
		dsi_phy_write(lane_base +
			      REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
	else
		dsi_phy_write(lane_base +
			      REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
}

static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
{
	int i;
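	/*
	 * Per-lane TX_DCTRL values (index 4 is presumably the clock lane).
	 * The lane 3 value is toggled at the end of this function to release
	 * the freeze I/O, unless the old-timings quirk is set.
	 */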
	u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
	void __iomem *lane_base = phy->lane_base;

	if (phy->cfg->quirks & V3_0_0_10NM_OLD_TIMINGS_QUIRK)
		tx_dctrl[3] = 0x02;

	/* Strength ctrl settings */
	for (i = 0; i < 5; i++) {
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
			      0x55);
		/*
		 * Disable LPRX and CDRX for all lanes. Later on, they will
		 * only be enabled for the physical data lane corresponding
		 * to the logical data lane 0.
		 */
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
			      0x88);
	}

	dsi_phy_hw_v3_0_config_lpcdrx(phy, true);

	/* other settings */
	for (i = 0; i < 5; i++) {
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
			      i == 4 ? 0x80 : 0x0);
		dsi_phy_write(lane_base +
			      REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i), 0x0);
		dsi_phy_write(lane_base +
			      REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
			      tx_dctrl[i]);
	}

	if (!(phy->cfg->quirks & V3_0_0_10NM_OLD_TIMINGS_QUIRK)) {
		/* Toggle BIT 0 to release freeze I/O */
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
	}
}

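/*
 * Power up and configure the PHY: calculate the D-PHY timings for the
 * requested clock, wait for the reference clock generator to be ready,
 * take the digital and PLL blocks out of power-down, program the timing
 * and lane-control registers, and finally apply the per-lane settings.
 */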
static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
			       struct msm_dsi_phy_clk_request *clk_req)
{
	int ret;
	u32 status;
	u32 const delay_us = 5;
	u32 const timeout_us = 1000;
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	void __iomem *base = phy->base;
	u32 data;

	DBG("");

	if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			"%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		pr_warn("PLL turned on before configuring PHY\n");

	/* wait for REFGEN READY */
	ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
					status, (status & BIT(0)),
					delay_us, timeout_us);
	if (ret) {
		pr_err("Ref gen not ready. Aborting\n");
		return -EINVAL;
	}

	/* de-assert digital and pll power down */
	data = BIT(6) | BIT(5);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);

	/* Assert PLL core reset */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);

	/* turn off resync FIFO */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);

	/* Select MS1 byte-clk */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);

	/* Enable LDO */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL, 0x59);

	/* Configure PHY lane swap (TODO: we need to calculate this) */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);

	/* DSI PHY timings */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
		      timing->hs_halfbyte_en);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
		      timing->clk_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
		      timing->clk_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
		      timing->clk_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
		      timing->hs_exit);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
		      timing->hs_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
		      timing->hs_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
		      timing->hs_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
		      timing->hs_rqst);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
		      timing->ta_go | (timing->ta_sure << 3));
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
		      timing->ta_get);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
		      0x00);

	/* Remove power down from all blocks */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);

	/* power up lanes */
	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* TODO: only power up lanes that are used */
	data |= 0x1F;
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);

	/* Select full-rate mode */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);

	ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
	if (ret) {
		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
			__func__, ret);
		return ret;
	}

	/* DSI lane settings */
	dsi_phy_hw_v3_0_lane_settings(phy);

	DBG("DSI%d PHY enabled", phy->id);

	return 0;
}

static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
{
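	/*
	 * No PHY-specific teardown is done here; dsi_10nm_phy_enable()
	 * reprograms everything from scratch on the next enable.
	 */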
}

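/*
 * Map the per-lane register region (the "dsi_phy_lane" resource) in
 * addition to the common PHY base mapped elsewhere in the driver.
 */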
static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;

	phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
				     "DSI_PHY_LANE");
	if (IS_ERR(phy->lane_base)) {
		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
			__func__);
		return -ENOMEM;
	}

	return 0;
}

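/*
 * Default 10nm PHY configuration (presumably SDM845-class SoCs, judging by
 * the 0x0ae94400/0x0ae96400 I/O ranges).
 */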
const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
	.type = MSM_DSI_PHY_10NM,
	.src_pll_truthtable = { {false, false}, {true, false} },
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.init = dsi_10nm_phy_init,
	},
	.io_start = { 0xae94400, 0xae96400 },
	.num_dsi_phy = 2,
};

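/*
 * MSM8998 variant: the same 10nm PHY block, but at different I/O addresses
 * and with the old-timings quirk applied.
 */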
const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
	.type = MSM_DSI_PHY_10NM,
	.src_pll_truthtable = { {false, false}, {true, false} },
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.init = dsi_10nm_phy_init,
	},
	.io_start = { 0xc994400, 0xc996400 },
	.num_dsi_phy = 2,
	.quirks = V3_0_0_10NM_OLD_TIMINGS_QUIRK,
};