kernel: bump 4.14 to 4.14.125 (FS#2305 FS#2297)
[oweals/openwrt.git] / target / linux / layerscape / patches-4.14 / 709-mdio-phy-support-layerscape.patch
1 From 83fe1ecb8ac6e0544ae74bf5a63806dcac768201 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Wed, 17 Apr 2019 18:58:45 +0800
4 Subject: [PATCH] mdio-phy: support layerscape
5
6 This is an integrated patch of mdio-phy for layerscape
7
8 Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@nxp.com>
9 Signed-off-by: Biwen Li <biwen.li@nxp.com>
10 Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
11 Signed-off-by: Constantin Tudor <constantin.tudor@nxp.com>
12 Signed-off-by: costi <constantin.tudor@freescale.com>
13 Signed-off-by: Florin Chiculita <florinlaurentiu.chiculita@nxp.com>
14 Signed-off-by: Florinel Iordache <florinel.iordache@nxp.com>
15 Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
16 Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
17 Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
18 Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
19 Signed-off-by: Shaohui Xie <Shaohui.Xie@freescale.com>
20 Signed-off-by: Valentin Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
21 Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
22 ---
23  drivers/net/phy/Kconfig                    |   33 +
24  drivers/net/phy/Makefile                   |    5 +
25  drivers/net/phy/aquantia.c                 |  328 +++-
26  drivers/net/phy/at803x.c                   |   21 +
27  drivers/net/phy/fsl_backplane.c            | 1780 ++++++++++++++++++++
28  drivers/net/phy/fsl_backplane.h            |   41 +
29  drivers/net/phy/fsl_backplane_serdes_10g.c |  281 +++
30  drivers/net/phy/fsl_backplane_serdes_28g.c |  336 ++++
31  drivers/net/phy/inphi.c                    |  594 +++++++
32  drivers/net/phy/mdio-mux-multiplexer.c     |  122 ++
33  drivers/net/phy/swphy.c                    |    1 +
34  include/linux/phy.h                        |    3 +
35  12 files changed, 3526 insertions(+), 19 deletions(-)
36  create mode 100644 drivers/net/phy/fsl_backplane.c
37  create mode 100644 drivers/net/phy/fsl_backplane.h
38  create mode 100644 drivers/net/phy/fsl_backplane_serdes_10g.c
39  create mode 100644 drivers/net/phy/fsl_backplane_serdes_28g.c
40  create mode 100644 drivers/net/phy/inphi.c
41  create mode 100644 drivers/net/phy/mdio-mux-multiplexer.c
42
43 --- a/drivers/net/phy/Kconfig
44 +++ b/drivers/net/phy/Kconfig
45 @@ -87,9 +87,27 @@ config MDIO_BUS_MUX_MMIOREG
46  
47           Currently, only 8/16/32 bits registers are supported.
48  
49 +config MDIO_BUS_MUX_MULTIPLEXER
50 +       tristate "MDIO bus multiplexer using kernel multiplexer subsystem"
51 +       depends on OF
52 +       select MULTIPLEXER
53 +       select MDIO_BUS_MUX
54 +       help
55 +         This module provides a driver for MDIO bus multiplexer
56 +         that is controlled via the kernel multiplexer subsystem. The
57 +         bus multiplexer connects one of several child MDIO busses to
58 +         a parent bus.  Child bus selection is under the control of
59 +         the kernel multiplexer subsystem.
60 +
61  config MDIO_CAVIUM
62         tristate
63  
64 +config MDIO_FSL_BACKPLANE
65 +       tristate "Support for backplane on Freescale XFI interface"
66 +       depends on OF_MDIO
67 +       help
68 +         This module provides a driver for Freescale XFI's backplane.
69 +
70  config MDIO_GPIO
71         tristate "GPIO lib-based bitbanged MDIO buses"
72         depends on MDIO_BITBANG && GPIOLIB
73 @@ -303,6 +321,16 @@ config AT803X_PHY
74         ---help---
75           Currently supports the AT8030 and AT8035 model
76  
77 +config AT803X_PHY_SMART_EEE
78 +       depends on AT803X_PHY
79 +       default n
80 +       tristate "SmartEEE feature for AT803X PHYs"
81 +       ---help---
82 +         Enables the Atheros SmartEEE feature (not IEEE 802.3az). When 2 PHYs
83 +         which support this feature are connected back-to-back, they may
84 +         negotiate a low-power sleep mode autonomously, without the Ethernet
85 +         controller's knowledge.  May cause packet loss.
86 +
87  config BCM63XX_PHY
88         tristate "Broadcom 63xx SOCs internal PHY"
89         depends on BCM63XX
90 @@ -385,6 +413,11 @@ config ICPLUS_PHY
91         ---help---
92           Currently supports the IP175C and IP1001 PHYs.
93  
94 +config INPHI_PHY
95 +       tristate "Inphi CDR 10G/25G Ethernet PHY"
96 +       ---help---
97 +         Currently supports the IN112525_S03 part @ 25G
98 +
99  config INTEL_XWAY_PHY
100         tristate "Intel XWAY PHYs"
101         ---help---
102 --- a/drivers/net/phy/Makefile
103 +++ b/drivers/net/phy/Makefile
104 @@ -44,7 +44,11 @@ obj-$(CONFIG_MDIO_BUS_MUX)   += mdio-mux.o
105  obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC)   += mdio-mux-bcm-iproc.o
106  obj-$(CONFIG_MDIO_BUS_MUX_GPIO)        += mdio-mux-gpio.o
107  obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
108 +obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
109  obj-$(CONFIG_MDIO_CAVIUM)      += mdio-cavium.o
110 +obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane.o
111 +obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane_serdes_10g.o
112 +obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane_serdes_28g.o
113  obj-$(CONFIG_MDIO_GPIO)                += mdio-gpio.o
114  obj-$(CONFIG_MDIO_HISI_FEMAC)  += mdio-hisi-femac.o
115  obj-$(CONFIG_MDIO_I2C)         += mdio-i2c.o
116 @@ -75,6 +79,7 @@ obj-$(CONFIG_DP83848_PHY)     += dp83848.o
117  obj-$(CONFIG_DP83867_PHY)      += dp83867.o
118  obj-$(CONFIG_FIXED_PHY)                += fixed_phy.o
119  obj-$(CONFIG_ICPLUS_PHY)       += icplus.o
120 +obj-$(CONFIG_INPHI_PHY)        += inphi.o
121  obj-$(CONFIG_INTEL_XWAY_PHY)   += intel-xway.o
122  obj-$(CONFIG_LSI_ET1011C_PHY)  += et1011c.o
123  obj-$(CONFIG_LXT_PHY)          += lxt.o
124 --- a/drivers/net/phy/aquantia.c
125 +++ b/drivers/net/phy/aquantia.c
126 @@ -4,6 +4,7 @@
127   * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
128   *
129   * Copyright 2015 Freescale Semiconductor, Inc.
130 + * Copyright 2018 NXP
131   *
132   * This file is licensed under the terms of the GNU General Public License
133   * version 2.  This program is licensed "as is" without any warranty of any
134 @@ -27,15 +28,200 @@
135  
136  #define PHY_AQUANTIA_FEATURES  (SUPPORTED_10000baseT_Full | \
137                                  SUPPORTED_1000baseT_Full | \
138 +                                SUPPORTED_2500baseX_Full | \
139                                  SUPPORTED_100baseT_Full | \
140 +                                SUPPORTED_Pause | \
141 +                                SUPPORTED_Asym_Pause | \
142                                  PHY_DEFAULT_FEATURES)
143  
144 +#define MDIO_PMA_CTRL1_AQ_SPEED10      0
145 +#define MDIO_PMA_CTRL1_AQ_SPEED2500    0x2058
146 +#define MDIO_PMA_CTRL1_AQ_SPEED5000    0x205c
147 +#define MDIO_PMA_CTRL2_AQ_2500BT       0x30
148 +#define MDIO_PMA_CTRL2_AQ_5000BT       0x31
149 +#define MDIO_PMA_CTRL2_AQ_TYPE_MASK    0x3F
150 +
151 +#define MDIO_AN_VENDOR_PROV_CTRL       0xc400
152 +#define MDIO_AN_RECV_LP_STATUS         0xe820
153 +
154 +#define MDIO_AN_LPA_PAUSE              0x20
155 +#define MDIO_AN_LPA_ASYM_PAUSE         0x10
156 +#define MDIO_AN_ADV_PAUSE              0x20
157 +#define MDIO_AN_ADV_ASYM_PAUSE         0x10
158 +
159 +static int aquantia_write_reg(struct phy_device *phydev, int devad,
160 +                             u32 regnum, u16 val)
161 +{
162 +       u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
163 +
164 +       return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, addr, val);
165 +}
166 +
167 +static int aquantia_read_reg(struct phy_device *phydev, int devad, u32 regnum)
168 +{
169 +       u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
170 +
171 +       return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr);
172 +}
173 +
174 +static int aquantia_pma_setup_forced(struct phy_device *phydev)
175 +{
176 +       int ctrl1, ctrl2, ret;
177 +
178 +       /* Half duplex is not supported */
179 +       if (phydev->duplex != DUPLEX_FULL)
180 +               return -EINVAL;
181 +
182 +       ctrl1 = aquantia_read_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
183 +       if (ctrl1 < 0)
184 +               return ctrl1;
185 +
186 +       ctrl2 = aquantia_read_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2);
187 +       if (ctrl2 < 0)
188 +               return ctrl2;
189 +
190 +       ctrl1 &= ~MDIO_CTRL1_SPEEDSEL;
191 +       ctrl2 &= ~(MDIO_PMA_CTRL2_AQ_TYPE_MASK);
192 +
193 +       switch (phydev->speed) {
194 +       case SPEED_10:
195 +               ctrl2 |= MDIO_PMA_CTRL2_10BT;
196 +               break;
197 +       case SPEED_100:
198 +               ctrl1 |= MDIO_PMA_CTRL1_SPEED100;
199 +               ctrl2 |= MDIO_PMA_CTRL2_100BTX;
200 +               break;
201 +       case SPEED_1000:
202 +               ctrl1 |= MDIO_PMA_CTRL1_SPEED1000;
203 +               /* Assume 1000base-T */
204 +               ctrl2 |= MDIO_PMA_CTRL2_1000BT;
205 +               break;
206 +       case SPEED_10000:
207 +               ctrl1 |= MDIO_CTRL1_SPEED10G;
208 +               /* Assume 10Gbase-T */
209 +               ctrl2 |= MDIO_PMA_CTRL2_10GBT;
210 +               break;
211 +       case SPEED_2500:
212 +               ctrl1 |= MDIO_PMA_CTRL1_AQ_SPEED2500;
213 +               ctrl2 |= MDIO_PMA_CTRL2_AQ_2500BT;
214 +               break;
215 +       case SPEED_5000:
216 +               ctrl1 |= MDIO_PMA_CTRL1_AQ_SPEED5000;
217 +               ctrl2 |= MDIO_PMA_CTRL2_AQ_5000BT;
218 +               break;
219 +       default:
220 +               return -EINVAL;
221 +       }
222 +
223 +       ret = aquantia_write_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, ctrl1);
224 +       if (ret < 0)
225 +               return ret;
226 +
227 +       return aquantia_write_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2, ctrl2);
228 +}
229 +
230 +static int aquantia_aneg(struct phy_device *phydev, bool control)
231 +{
232 +       int reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_CTRL1);
233 +
234 +       if (reg < 0)
235 +               return reg;
236 +
237 +       if (control)
238 +               reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART;
239 +       else
240 +               reg &= ~(MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
241 +
242 +       return aquantia_write_reg(phydev, MDIO_MMD_AN, MDIO_CTRL1, reg);
243 +}
244 +
245 +static int aquantia_config_advert(struct phy_device *phydev)
246 +{
247 +       u32 advertise;
248 +       int oldadv, adv, oldadv1, adv1;
249 +       int err, changed = 0;
250 +
251 +       /* Only allow advertising what this PHY supports */
252 +       phydev->advertising &= phydev->supported;
253 +       advertise = phydev->advertising;
254 +
255 +       /* Setup standard advertisement */
256 +       oldadv = aquantia_read_reg(phydev, MDIO_MMD_AN,
257 +                                  MDIO_AN_10GBT_CTRL);
258 +       if (oldadv < 0)
259 +               return oldadv;
260 +
261 +       /* Aquantia vendor specific advertisements */
262 +       oldadv1 = aquantia_read_reg(phydev, MDIO_MMD_AN,
263 +                                   MDIO_AN_VENDOR_PROV_CTRL);
264 +       if (oldadv1 < 0)
265 +               return oldadv1;
266 +
267 +       adv  = 0;
268 +       adv1 = 0;
269 +
270 +       /*100BaseT_full is supported by default*/
271 +
272 +       if (advertise & ADVERTISED_1000baseT_Full)
273 +               adv1 |= 0x8000;
274 +       if (advertise & ADVERTISED_10000baseT_Full)
275 +               adv |= 0x1000;
276 +       if (advertise &  ADVERTISED_2500baseX_Full)
277 +               adv1 |= 0x400;
278 +
279 +       if (adv != oldadv) {
280 +               err = aquantia_write_reg(phydev, MDIO_MMD_AN,
281 +                                        MDIO_AN_10GBT_CTRL, adv);
282 +               if (err < 0)
283 +                       return err;
284 +               changed = 1;
285 +       }
286 +       if (adv1 != oldadv1) {
287 +               err = aquantia_write_reg(phydev, MDIO_MMD_AN,
288 +                                        MDIO_AN_VENDOR_PROV_CTRL, adv1);
289 +               if (err < 0)
290 +                       return err;
291 +               changed = 1;
292 +       }
293 +
294 +       /* advertise flow control */
295 +       oldadv = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
296 +       if (oldadv < 0)
297 +               return oldadv;
298 +
299 +       adv = oldadv & ~(MDIO_AN_ADV_PAUSE | MDIO_AN_ADV_ASYM_PAUSE);
300 +       if (advertise & ADVERTISED_Pause)
301 +               adv |= MDIO_AN_ADV_PAUSE;
302 +       if (advertise & ADVERTISED_Asym_Pause)
303 +               adv |= MDIO_AN_ADV_ASYM_PAUSE;
304 +
305 +       if (adv != oldadv) {
306 +               err = aquantia_write_reg(phydev, MDIO_MMD_AN,
307 +                                        MDIO_AN_ADVERTISE, adv);
308 +               if (err < 0)
309 +                       return err;
310 +               changed = 1;
311 +       }
312 +
313 +       return changed;
314 +}
315 +
316  static int aquantia_config_aneg(struct phy_device *phydev)
317  {
318 +       int ret = 0;
319 +
320         phydev->supported = PHY_AQUANTIA_FEATURES;
321 -       phydev->advertising = phydev->supported;
322 +       if (phydev->autoneg == AUTONEG_DISABLE) {
323 +               aquantia_pma_setup_forced(phydev);
324 +               return aquantia_aneg(phydev, false);
325 +       }
326  
327 -       return 0;
328 +       ret = aquantia_config_advert(phydev);
329 +       if (ret > 0)
330 +               /* restart autoneg */
331 +               return aquantia_aneg(phydev, true);
332 +
333 +       return ret;
334  }
335  
336  static int aquantia_aneg_done(struct phy_device *phydev)
337 @@ -51,25 +237,26 @@ static int aquantia_config_intr(struct p
338         int err;
339  
340         if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
341 -               err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1);
342 +               err = aquantia_write_reg(phydev, MDIO_MMD_AN, 0xd401, 1);
343                 if (err < 0)
344                         return err;
345  
346 -               err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1);
347 +               err = aquantia_write_reg(phydev, MDIO_MMD_VEND1, 0xff00, 1);
348                 if (err < 0)
349                         return err;
350  
351 -               err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001);
352 +               err = aquantia_write_reg(phydev, MDIO_MMD_VEND1,
353 +                                        0xff01, 0x1001);
354         } else {
355 -               err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0);
356 +               err = aquantia_write_reg(phydev, MDIO_MMD_AN, 0xd401, 0);
357                 if (err < 0)
358                         return err;
359  
360 -               err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0);
361 +               err = aquantia_write_reg(phydev, MDIO_MMD_VEND1, 0xff00, 0);
362                 if (err < 0)
363                         return err;
364  
365 -               err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0);
366 +               err = aquantia_write_reg(phydev, MDIO_MMD_VEND1, 0xff01, 0);
367         }
368  
369         return err;
370 @@ -79,42 +266,145 @@ static int aquantia_ack_interrupt(struct
371  {
372         int reg;
373  
374 -       reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01);
375 +       reg = aquantia_read_reg(phydev, MDIO_MMD_AN, 0xcc01);
376         return (reg < 0) ? reg : 0;
377  }
378  
379 +static int aquantia_read_advert(struct phy_device *phydev)
380 +{
381 +       int adv, adv1;
382 +
383 +       /* Setup standard advertisement */
384 +       adv = aquantia_read_reg(phydev, MDIO_MMD_AN,
385 +                               MDIO_AN_10GBT_CTRL);
386 +
387 +       /* Aquantia vendor specific advertisements */
388 +       adv1 = aquantia_read_reg(phydev, MDIO_MMD_AN,
389 +                                MDIO_AN_VENDOR_PROV_CTRL);
390 +
391 +       /*100BaseT_full is supported by default*/
392 +       phydev->advertising |= ADVERTISED_100baseT_Full;
393 +
394 +       if (adv & 0x1000)
395 +               phydev->advertising |= ADVERTISED_10000baseT_Full;
396 +       else
397 +               phydev->advertising &= ~ADVERTISED_10000baseT_Full;
398 +       if (adv1 & 0x8000)
399 +               phydev->advertising |= ADVERTISED_1000baseT_Full;
400 +       else
401 +               phydev->advertising &= ~ADVERTISED_1000baseT_Full;
402 +       if (adv1 & 0x400)
403 +               phydev->advertising |= ADVERTISED_2500baseX_Full;
404 +       else
405 +               phydev->advertising &= ~ADVERTISED_2500baseX_Full;
406 +
407 +       /* flow control advertisement */
408 +       adv = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
409 +       if (adv & MDIO_AN_ADV_PAUSE)
410 +               phydev->advertising |= ADVERTISED_Pause;
411 +       else
412 +               phydev->advertising &= ~ADVERTISED_Pause;
413 +       if (adv & MDIO_AN_ADV_ASYM_PAUSE)
414 +               phydev->advertising |= ADVERTISED_Asym_Pause;
415 +       else
416 +               phydev->advertising &= ~ADVERTISED_Asym_Pause;
417 +
418 +       return 0;
419 +}
420 +
421 +static int aquantia_read_lp_advert(struct phy_device *phydev)
422 +{
423 +       int adv, adv1;
424 +
425 +       /* Read standard link partner advertisement */
426 +       adv = aquantia_read_reg(phydev, MDIO_MMD_AN,
427 +                               MDIO_STAT1);
428 +
429 +       if (adv & 0x1)
430 +               phydev->lp_advertising |= ADVERTISED_Autoneg |
431 +                                         ADVERTISED_100baseT_Full;
432 +       else
433 +               phydev->lp_advertising &= ~(ADVERTISED_Autoneg |
434 +                                           ADVERTISED_100baseT_Full);
435 +
436 +       /* Read standard link partner advertisement */
437 +       adv = aquantia_read_reg(phydev, MDIO_MMD_AN,
438 +                               MDIO_AN_10GBT_STAT);
439 +
440 +       /* Aquantia link partner advertisements */
441 +       adv1 = aquantia_read_reg(phydev, MDIO_MMD_AN,
442 +                                MDIO_AN_RECV_LP_STATUS);
443 +
444 +       if (adv & 0x800)
445 +               phydev->lp_advertising |= ADVERTISED_10000baseT_Full;
446 +       else
447 +               phydev->lp_advertising &= ~ADVERTISED_10000baseT_Full;
448 +       if (adv1 & 0x8000)
449 +               phydev->lp_advertising |= ADVERTISED_1000baseT_Full;
450 +       else
451 +               phydev->lp_advertising &= ~ADVERTISED_1000baseT_Full;
452 +       if (adv1 & 0x400)
453 +               phydev->lp_advertising |= ADVERTISED_2500baseX_Full;
454 +       else
455 +               phydev->lp_advertising &= ~ADVERTISED_2500baseX_Full;
456 +
457 +       return 0;
458 +}
459 +
460  static int aquantia_read_status(struct phy_device *phydev)
461  {
462         int reg;
463  
464 -       reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
465 -       reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
466 +       /* Read the link status twice; the bit is latching low */
467 +       reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_STAT1);
468 +       reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_STAT1);
469 +
470         if (reg & MDIO_STAT1_LSTATUS)
471                 phydev->link = 1;
472         else
473                 phydev->link = 0;
474  
475 -       reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
476         mdelay(10);
477 -       reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
478 +       reg = aquantia_read_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
479 +
480 +       if ((reg & MDIO_CTRL1_SPEEDSELEXT) == MDIO_CTRL1_SPEEDSELEXT)
481 +               reg &= MDIO_CTRL1_SPEEDSEL;
482 +       else
483 +               reg &= MDIO_CTRL1_SPEEDSELEXT;
484  
485         switch (reg) {
486 -       case 0x9:
487 +       case MDIO_PMA_CTRL1_AQ_SPEED5000:
488 +               phydev->speed = SPEED_5000;
489 +               break;
490 +       case MDIO_PMA_CTRL1_AQ_SPEED2500:
491                 phydev->speed = SPEED_2500;
492                 break;
493 -       case 0x5:
494 -               phydev->speed = SPEED_1000;
495 +       case MDIO_PMA_CTRL1_AQ_SPEED10:
496 +               phydev->speed = SPEED_10;
497                 break;
498 -       case 0x3:
499 +       case MDIO_PMA_CTRL1_SPEED100:
500                 phydev->speed = SPEED_100;
501                 break;
502 -       case 0x7:
503 -       default:
504 +       case MDIO_PMA_CTRL1_SPEED1000:
505 +               phydev->speed = SPEED_1000;
506 +               break;
507 +       case MDIO_CTRL1_SPEED10G:
508                 phydev->speed = SPEED_10000;
509                 break;
510 +       default:
511 +               phydev->speed = SPEED_UNKNOWN;
512 +               break;
513         }
514 +
515         phydev->duplex = DUPLEX_FULL;
516  
517 +       reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
518 +       phydev->pause = reg & MDIO_AN_LPA_PAUSE ? 1 : 0;
519 +       phydev->asym_pause = reg & MDIO_AN_LPA_ASYM_PAUSE ? 1 : 0;
520 +
521 +       aquantia_read_advert(phydev);
522 +       aquantia_read_lp_advert(phydev);
523 +
524         return 0;
525  }
526  
527 --- a/drivers/net/phy/at803x.c
528 +++ b/drivers/net/phy/at803x.c
529 @@ -68,6 +68,8 @@
530  #define AT803X_DEBUG_REG_5                     0x05
531  #define AT803X_DEBUG_TX_CLK_DLY_EN             BIT(8)
532  
533 +#define AT803X_LPI_EN                          BIT(8)
534 +
535  #define ATH8030_PHY_ID 0x004dd076
536  #define ATH8031_PHY_ID 0x004dd074
537  #define ATH8032_PHY_ID 0x004dd023
538 @@ -290,6 +292,19 @@ static void at803x_disable_smarteee(stru
539         phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0);
540  }
541  
542 +static void at803x_enable_smart_eee(struct phy_device *phydev, int on)
543 +{
544 +       int value;
545 +
546 +       /* 5.1.11 Smart_eee control3 */
547 +       value = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x805D);
548 +       if (on)
549 +               value |= AT803X_LPI_EN;
550 +       else
551 +               value &= ~AT803X_LPI_EN;
552 +       phy_write_mmd(phydev, MDIO_MMD_PCS, 0x805D, value);
553 +}
554 +
555  static int at803x_config_init(struct phy_device *phydev)
556  {
557         struct at803x_platform_data *pdata;
558 @@ -320,6 +335,12 @@ static int at803x_config_init(struct phy
559         if (ret < 0)
560                 return ret;
561  
562 +#ifdef CONFIG_AT803X_PHY_SMART_EEE
563 +       at803x_enable_smart_eee(phydev, 1);
564 +#else
565 +       at803x_enable_smart_eee(phydev, 0);
566 +#endif
567 +
568         if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
569                         phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
570                 ret = at803x_enable_rx_delay(phydev);
571 --- /dev/null
572 +++ b/drivers/net/phy/fsl_backplane.c
573 @@ -0,0 +1,1780 @@
574 +// SPDX-License-Identifier: GPL-2.0+
575 +/*
576 + *  DPAA backplane driver.
577 + *   Author: Shaohui Xie <Shaohui.Xie@freescale.com>
578 + *           Florinel Iordache <florinel.iordache@nxp.com>
579 + *
580 + * Copyright 2015 Freescale Semiconductor, Inc.
581 + * Copyright 2018 NXP
582 + *
583 + * Licensed under the GPL-2 or later.
584 + */
585 +
586 +#include <linux/kernel.h>
587 +#include <linux/module.h>
588 +#include <linux/mii.h>
589 +#include <linux/mdio.h>
590 +#include <linux/ethtool.h>
591 +#include <linux/phy.h>
592 +#include <linux/io.h>
593 +#include <linux/of.h>
594 +#include <linux/of_net.h>
595 +#include <linux/of_address.h>
596 +#include <linux/of_platform.h>
597 +#include <linux/timer.h>
598 +#include <linux/delay.h>
599 +#include <linux/workqueue.h>
600 +#include <linux/netdevice.h>
601 +
602 +#include "fsl_backplane.h"
603 +
604 +
605 +/* PCS Device Identifier */
606 +#define PCS_PHY_DEVICE_ID                      0x0083e400
607 +#define PCS_PHY_DEVICE_ID_MASK         0xffffffff
608 +
609 +/* 10G Long cables setup: 1 m to 2 m cables */
610 +#define RATIO_PREQ_10G                         0x3
611 +#define RATIO_PST1Q_10G                                0xd
612 +#define RATIO_EQ_10G                           0x20
613 +
614 +/* 10G Short cables setup: up to 30 cm cable */
615 +//#define RATIO_PREQ_10G                               0x3
616 +//#define RATIO_PST1Q_10G                              0xa
617 +//#define RATIO_EQ_10G                         0x29
618 +
619 +/* 40G Long cables setup: 1 m to 2 m cables */
620 +#define RATIO_PREQ_40G                         0x2
621 +#define RATIO_PST1Q_40G                                0xd
622 +#define RATIO_EQ_40G                           0x20
623 +
624 +/* 40G Short cables setup: up to 30 cm cable */
625 +//#define RATIO_PREQ_40G                               0x1
626 +//#define RATIO_PST1Q_40G                              0x3
627 +//#define RATIO_EQ_40G                         0x29
628 +
629 +/* LX2 2x40G default RCW setup */
630 +//#define RATIO_PREQ_40G                               0x0
631 +//#define RATIO_PST1Q_40G                              0x3
632 +//#define RATIO_EQ_40G                         0x30
633 +
634 +/* Max/Min coefficient values */
635 +#define PRE_COE_MAX                                    0x0
636 +#define PRE_COE_MIN                                    0x8
637 +#define POST_COE_MAX                           0x0
638 +#define POST_COE_MIN                           0x10
639 +#define ZERO_COE_MAX                           0x30
640 +#define ZERO_COE_MIN                           0x0
641 +
642 +/* KR PMD defines */
643 +#define PMD_RESET                                      0x1
644 +#define PMD_STATUS_SUP_STAT                    0x4
645 +#define PMD_STATUS_FRAME_LOCK          0x2
646 +#define TRAIN_EN                                       0x3
647 +#define TRAIN_DISABLE                          0x1
648 +#define RX_STAT                                                0x1
649 +
650 +/* PCS Link up */
651 +#define XFI_PCS_SR1                    0x20
652 +#define KR_RX_LINK_STAT_MASK           0x1000
653 +
654 +/* KX PCS mode register */
655 +#define KX_PCS_IF_MODE                         0x8014
656 +
657 +/* KX PCS mode register init value */
658 +#define KX_IF_MODE_INIT                                0x8
659 +
660 +/* KX/KR AN registers */
661 +#define AN_CTRL_INIT                           0x1200
662 +#define KX_AN_AD1_INIT                         0x25
663 +#define KR_AN_AD1_INIT_10G                     0x85
664 +#define KR_AN_AD1_INIT_40G                     0x105
665 +#define AN_LNK_UP_MASK                         0x4
666 +#define KR_AN_MASK_10G                         0x8
667 +#define KR_AN_MASK_40G                         0x20
668 +#define TRAIN_FAIL                                     0x8
669 +#define KR_AN_40G_MDIO_OFFSET          4
670 +
671 +/* XGKR Timeouts */
672 +#define XGKR_TIMEOUT                           1050
673 +#define XGKR_DENY_RT_INTERVAL          3000
674 +#define XGKR_AN_WAIT_ITERATIONS        5
675 +
676 +/* XGKR Increment/Decrement Requests */
677 +#define INCREMENT                                      1
678 +#define DECREMENT                                      2
679 +#define TIMEOUT_LONG                           3
680 +#define TIMEOUT_M1                                     3
681 +
682 +/* XGKR Masks */
683 +#define RX_READY_MASK                          0x8000
684 +#define PRESET_MASK                                    0x2000
685 +#define INIT_MASK                                      0x1000
686 +#define COP1_MASK                                      0x30
687 +#define COP1_SHIFT                                     4
688 +#define COZ_MASK                                       0xc
689 +#define COZ_SHIFT                                      2
690 +#define COM1_MASK                                      0x3
691 +#define COM1_SHIFT                                     0
692 +#define REQUEST_MASK                           0x3f
693 +#define LD_ALL_MASK                    (PRESET_MASK | INIT_MASK | \
694 +                                       COP1_MASK | COZ_MASK | COM1_MASK)
695 +
696 +/* Lanes definitions */
697 +#define MASTER_LANE                                    0
698 +#define SINGLE_LANE                                    0
699 +#define MAX_PHY_LANES_NO                       4
700 +
701 +/* Invalid value */
702 +#define VAL_INVALID                            0xff
703 +
704 +/* New XGKR Training Algorithm */
705 +#define NEW_ALGORITHM_TRAIN_TX
706 +
707 +#ifdef NEW_ALGORITHM_TRAIN_TX
708 +#define        FORCE_INC_COP1_NUMBER           0
709 +#define        FORCE_INC_COM1_NUMBER           1
710 +#endif
711 +
712 +/* Link_Training_Registers offsets */
713 +static int lt_MDIO_MMD = 0;
714 +static u32 lt_KR_PMD_CTRL = 0;
715 +static u32 lt_KR_PMD_STATUS = 0;
716 +static u32 lt_KR_LP_CU = 0;
717 +static u32 lt_KR_LP_STATUS = 0;
718 +static u32 lt_KR_LD_CU = 0;
719 +static u32 lt_KR_LD_STATUS = 0;
720 +
721 +/* KX/KR AN registers offsets */
722 +static u32 g_an_AD1 = 0;
723 +static u32 g_an_BP_STAT = 0;
724 +
725 +static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5,
726 +                                0x7, 0x9, 0xb, 0xc, VAL_INVALID};
727 +static const u32 pst1q_table[] = {0x0, 0x1, 0x3, 0x5, 0x7,
728 +                                 0x9, 0xb, 0xd, 0xf, 0x10, VAL_INVALID};
729 +
730 +enum backplane_mode {
731 +       PHY_BACKPLANE_1000BASE_KX,
732 +       PHY_BACKPLANE_10GBASE_KR,
733 +       PHY_BACKPLANE_40GBASE_KR,
734 +       PHY_BACKPLANE_INVAL
735 +};
736 +
737 +enum serdes_type {
738 +       SERDES_10G,
739 +       SERDES_28G,
740 +       SERDES_INVAL
741 +};
742 +
743 +enum coe_filed {
744 +       COE_COP1,
745 +       COE_COZ,
746 +       COE_COM
747 +};
748 +
749 +enum coe_update {
750 +       COE_NOTUPDATED,
751 +       COE_UPDATED,
752 +       COE_MIN,
753 +       COE_MAX,
754 +       COE_INV
755 +};
756 +
757 +enum train_state {
758 +       DETECTING_LP,
759 +       TRAINED,
760 +};
761 +
762 +struct tx_condition {
763 +       bool bin_m1_late_early;
764 +       bool bin_long_late_early;
765 +       bool bin_m1_stop;
766 +       bool bin_long_stop;
767 +       bool tx_complete;
768 +       bool sent_init;
769 +       int m1_min_max_cnt;
770 +       int long_min_max_cnt;
771 +#ifdef NEW_ALGORITHM_TRAIN_TX
772 +       int pre_inc;
773 +       int post_inc;
774 +#endif
775 +};
776 +
777 +struct xgkr_params {
778 +       void *reg_base;         /* lane memory map: registers base address */
779 +       int idx;                        /* lane relative index inside a multi-lane PHY */
780 +       struct phy_device *phydev;
781 +       struct serdes_access *srds;
782 +       struct tx_condition tx_c;
783 +       struct delayed_work xgkr_wk;
784 +       enum train_state state;
785 +       int an_wait_count;
786 +       unsigned long rt_time;
787 +       u32 ld_update;
788 +       u32 ld_status;
789 +       u32 ratio_preq;
790 +       u32 ratio_pst1q;
791 +       u32 adpt_eq;
792 +       u32 tuned_ratio_preq;
793 +       u32 tuned_ratio_pst1q;
794 +       u32 tuned_adpt_eq;
795 +};
796 +
797 +struct xgkr_phy_data {
798 +       int bp_mode;
799 +       u32 phy_lanes;
800 +       struct mutex phy_lock;
801 +       bool aneg_done;
802 +       struct xgkr_params xgkr[MAX_PHY_LANES_NO];
803 +};
804 +
805 +static void setup_an_lt_ls(void)
806 +{
807 +       /* KR PMD registers */
808 +       lt_MDIO_MMD = MDIO_MMD_PMAPMD;
809 +       lt_KR_PMD_CTRL = 0x96;
810 +       lt_KR_PMD_STATUS = 0x97;
811 +       lt_KR_LP_CU = 0x98;
812 +       lt_KR_LP_STATUS = 0x99;
813 +       lt_KR_LD_CU = 0x9a;
814 +       lt_KR_LD_STATUS = 0x9b;
815 +
816 +       /* KX/KR AN registers */
817 +       g_an_AD1 = 0x11;
818 +       g_an_BP_STAT = 0x30;
819 +}
820 +
821 +static void setup_an_lt_lx(void)
822 +{
823 +       /* Auto-Negotiation and Link Training Core Registers page 1: 256 = 0x100 */
824 +       lt_MDIO_MMD = MDIO_MMD_AN;
825 +       lt_KR_PMD_CTRL = 0x100;
826 +       lt_KR_PMD_STATUS = 0x101;
827 +       lt_KR_LP_CU = 0x102;
828 +       lt_KR_LP_STATUS = 0x103;
829 +       lt_KR_LD_CU = 0x104;
830 +       lt_KR_LD_STATUS = 0x105;
831 +
832 +       /* KX/KR AN registers */
833 +       g_an_AD1 = 0x03;
834 +       g_an_BP_STAT = 0x0F;
835 +}
836 +
837 +static u32 le_ioread32(u32 *reg)
838 +{
839 +       return ioread32(reg);
840 +}
841 +
842 +static void le_iowrite32(u32 value, u32 *reg)
843 +{
844 +       iowrite32(value, reg);
845 +}
846 +
847 +static u32 be_ioread32(u32 *reg)
848 +{
849 +       return ioread32be(reg);
850 +}
851 +
852 +static void be_iowrite32(u32 value, u32 *reg)
853 +{
854 +       iowrite32be(value, reg);
855 +}
856 +
857 +/**
858 + * xgkr_phy_write_mmd - Wrapper function for phy_write_mmd
859 + * for writing a register on an MMD on a given PHY.
860 + *
861 + * Same rules as for phy_write_mmd();
862 + */
863 +static int xgkr_phy_write_mmd(struct xgkr_params *xgkr, int devad, u32 regnum, u16 val)
864 +{
865 +       struct phy_device *phydev = xgkr->phydev;
866 +       struct xgkr_phy_data *xgkr_inst = phydev->priv;
867 +       int mdio_addr = phydev->mdio.addr;
868 +       int err;
869 +
870 +       mutex_lock(&xgkr_inst->phy_lock);
871 +
872 +       if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
873 +               //40G AN: prepare mdio address for writing phydev AN registers for 40G on respective lane
874 +               phydev->mdio.addr = KR_AN_40G_MDIO_OFFSET + xgkr->idx;
875 +       }
876 +
877 +       err = phy_write_mmd(phydev, devad, regnum, val);
878 +       if (err)
879 +               dev_err(&phydev->mdio.dev, "Writing PHY (%p) MMD = 0x%02x register = 0x%02x failed with error code: 0x%08x \n", phydev, devad, regnum, err);
880 +
881 +       if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
882 +               //40G AN: restore mdio address
883 +               phydev->mdio.addr = mdio_addr;
884 +       }
885 +
886 +       mutex_unlock(&xgkr_inst->phy_lock);
887 +
888 +       return err;
889 +}
890 +
891 +/**
892 + * xgkr_phy_read_mmd - Wrapper function for phy_read_mmd
893 + * for reading a register from an MMD on a given PHY.
894 + *
895 + * Same rules as for phy_read_mmd();
896 + */
897 +static int xgkr_phy_read_mmd(struct xgkr_params *xgkr, int devad, u32 regnum)
898 +{
899 +       struct phy_device *phydev = xgkr->phydev;
900 +       struct xgkr_phy_data *xgkr_inst = phydev->priv;
901 +       int mdio_addr = phydev->mdio.addr;
902 +       int ret;
903 +
904 +       mutex_lock(&xgkr_inst->phy_lock);
905 +
906 +       if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
907 +               //40G AN: prepare mdio address for reading phydev AN registers for 40G on respective lane
908 +               phydev->mdio.addr = KR_AN_40G_MDIO_OFFSET + xgkr->idx;
909 +       }
910 +
911 +       ret = phy_read_mmd(phydev, devad, regnum);
912 +
913 +       if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
914 +               //40G AN: restore mdio address
915 +               phydev->mdio.addr = mdio_addr;
916 +       }
917 +
918 +       mutex_unlock(&xgkr_inst->phy_lock);
919 +
920 +       return ret;
921 +}
922 +
923 +static void tx_condition_init(struct tx_condition *tx_c)
924 +{
925 +       tx_c->bin_m1_late_early = true;
926 +       tx_c->bin_long_late_early = false;
927 +       tx_c->bin_m1_stop = false;
928 +       tx_c->bin_long_stop = false;
929 +       tx_c->tx_complete = false;
930 +       tx_c->sent_init = false;
931 +       tx_c->m1_min_max_cnt = 0;
932 +       tx_c->long_min_max_cnt = 0;
933 +#ifdef NEW_ALGORITHM_TRAIN_TX
934 +       tx_c->pre_inc = FORCE_INC_COM1_NUMBER;
935 +       tx_c->post_inc = FORCE_INC_COP1_NUMBER;
936 +#endif
937 +}
938 +
939 +void tune_tecr(struct xgkr_params *xgkr)
940 +{
941 +       struct phy_device *phydev = xgkr->phydev;
942 +       struct xgkr_phy_data *xgkr_inst = phydev->priv;
943 +       bool reset = false;
944 +       
945 +       if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
946 +               /* Reset only the Master Lane */
947 +               reset = (xgkr->idx == MASTER_LANE);
948 +       } else {
949 +               reset = true;
950 +       }
951 +       
952 +       xgkr->srds->tune_tecr(xgkr->reg_base, xgkr->ratio_preq, xgkr->ratio_pst1q, xgkr->adpt_eq, reset);
953 +
954 +       xgkr->tuned_ratio_preq = xgkr->ratio_preq;
955 +       xgkr->tuned_ratio_pst1q = xgkr->ratio_pst1q;
956 +       xgkr->tuned_adpt_eq = xgkr->adpt_eq;
957 +}
958 +
959 +static void start_lt(struct xgkr_params *xgkr)
960 +{
961 +       xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_CTRL, TRAIN_EN);
962 +}
963 +
964 +static void stop_lt(struct xgkr_params *xgkr)
965 +{
966 +       xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_CTRL, TRAIN_DISABLE);
967 +}
968 +
969 +static void reset_lt(struct xgkr_params *xgkr)
970 +{
971 +       xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, MDIO_CTRL1, PMD_RESET);
972 +       xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_CTRL, TRAIN_DISABLE);
973 +       xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LD_CU, 0);
974 +       xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LD_STATUS, 0);
975 +       xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_STATUS, 0);
976 +       xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_CU, 0);
977 +       xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_STATUS, 0);
978 +       
979 +}
980 +
981 +static void ld_coe_status(struct xgkr_params *xgkr)
982 +{
983 +       xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD,
984 +                     lt_KR_LD_STATUS, xgkr->ld_status);
985 +}
986 +
987 +static void ld_coe_update(struct xgkr_params *xgkr)
988 +{
989 +       dev_dbg(&xgkr->phydev->mdio.dev, "sending request: %x\n", xgkr->ld_update);
990 +       xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD,
991 +                     lt_KR_LD_CU, xgkr->ld_update);
992 +}
993 +
994 +static void start_xgkr_state_machine(struct delayed_work *work)
995 +{
996 +       queue_delayed_work(system_power_efficient_wq, work,
997 +                          msecs_to_jiffies(XGKR_TIMEOUT));
998 +}
999 +
1000 +static void start_xgkr_an(struct xgkr_params *xgkr)
1001 +{
1002 +       struct phy_device *phydev = xgkr->phydev;
1003 +       struct xgkr_phy_data *xgkr_inst = phydev->priv;
1004 +       int i;
1005 +       int err;
1006 +
1007 +       switch (xgkr_inst->bp_mode)
1008 +       {
1009 +       case PHY_BACKPLANE_1000BASE_KX:
1010 +               dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
1011 +               break;
1012 +
1013 +       case PHY_BACKPLANE_10GBASE_KR:
1014 +               err = xgkr_phy_write_mmd(xgkr, MDIO_MMD_AN, g_an_AD1, KR_AN_AD1_INIT_10G);
1015 +               if (err)
1016 +                       dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x failed with error code: 0x%08x \n", g_an_AD1, err);
1017 +               udelay(1);
1018 +               err = xgkr_phy_write_mmd(xgkr, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
1019 +               if (err)
1020 +                       dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x failed with error code: 0x%08x \n", MDIO_CTRL1, err);
1021 +               break;
1022 +
1023 +       case PHY_BACKPLANE_40GBASE_KR:
1024 +               if (xgkr->idx == MASTER_LANE) {
1025 +                       for (i = 0; i < xgkr_inst->phy_lanes; i++) {
1026 +                               err = xgkr_phy_write_mmd(&xgkr_inst->xgkr[i], MDIO_MMD_AN, g_an_AD1, KR_AN_AD1_INIT_40G);
1027 +                               if (err)
1028 +                                       dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x on lane %d failed with error code: 0x%08x \n", g_an_AD1, xgkr_inst->xgkr[i].idx, err);
1029 +                       }
1030 +                       udelay(1);
1031 +                       err = xgkr_phy_write_mmd(xgkr, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
1032 +                       if (err)
1033 +                               dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x on Master Lane failed with error code: 0x%08x \n", MDIO_CTRL1, err);
1034 +               }
1035 +               break;
1036 +       }
1037 +}
1038 +
1039 +static void start_1gkx_an(struct phy_device *phydev)
1040 +{
1041 +       phy_write_mmd(phydev, MDIO_MMD_PCS, KX_PCS_IF_MODE, KX_IF_MODE_INIT);
1042 +       phy_write_mmd(phydev, MDIO_MMD_AN, g_an_AD1, KX_AN_AD1_INIT);
1043 +       phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
1044 +       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
1045 +}
1046 +
1047 +static void reset_tecr(struct xgkr_params *xgkr)
1048 +{
1049 +       struct phy_device *phydev = xgkr->phydev;
1050 +       struct xgkr_phy_data *xgkr_inst = phydev->priv;
1051 +
1052 +       switch (xgkr_inst->bp_mode)
1053 +       {
1054 +       case PHY_BACKPLANE_1000BASE_KX:
1055 +               dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
1056 +               break;
1057 +
1058 +       case PHY_BACKPLANE_10GBASE_KR:
1059 +               xgkr->ratio_preq = RATIO_PREQ_10G;
1060 +               xgkr->ratio_pst1q = RATIO_PST1Q_10G;
1061 +               xgkr->adpt_eq = RATIO_EQ_10G;
1062 +               break;
1063 +
1064 +       case PHY_BACKPLANE_40GBASE_KR:
1065 +               xgkr->ratio_preq = RATIO_PREQ_40G;
1066 +               xgkr->ratio_pst1q = RATIO_PST1Q_40G;
1067 +               xgkr->adpt_eq = RATIO_EQ_40G;
1068 +               break;
1069 +       }
1070 +
1071 +       tune_tecr(xgkr);
1072 +}
1073 +
1074 +static void init_xgkr(struct xgkr_params *xgkr, int reset)
1075 +{
1076 +       if (reset)
1077 +               reset_tecr(xgkr);
1078 +
1079 +       tx_condition_init(&xgkr->tx_c);
1080 +       xgkr->state = DETECTING_LP;
1081 +
1082 +       xgkr->ld_status &= RX_READY_MASK;
1083 +       ld_coe_status(xgkr);
1084 +       xgkr->ld_update = 0;
1085 +       xgkr->ld_status &= ~RX_READY_MASK;
1086 +       ld_coe_status(xgkr);
1087 +
1088 +}
1089 +
1090 +static void initialize(struct xgkr_params *xgkr)
1091 +{
1092 +       reset_tecr(xgkr);
1093 +
1094 +       xgkr->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1095 +       xgkr->ld_status |= COE_UPDATED << COP1_SHIFT |
1096 +                          COE_UPDATED << COZ_SHIFT |
1097 +                          COE_UPDATED << COM1_SHIFT;
1098 +       ld_coe_status(xgkr);
1099 +}
1100 +
1101 +static void train_remote_tx(struct xgkr_params *xgkr)
1102 +{
1103 +       struct tx_condition *tx_c = &xgkr->tx_c;
1104 +       bool bin_m1_early, bin_long_early;
1105 +       u32 lp_status, old_ld_update;
1106 +       u32 status_cop1, status_coz, status_com1;
1107 +       u32 req_cop1, req_coz, req_com1, req_preset, req_init;
1108 +       u32 temp;
1109 +#ifdef NEW_ALGORITHM_TRAIN_TX
1110 +       u32 median_gaink2;
1111 +#endif
1112 +
1113 +recheck:
1114 +       if (tx_c->bin_long_stop && tx_c->bin_m1_stop) {
1115 +               tx_c->tx_complete = true;
1116 +               xgkr->ld_status |= RX_READY_MASK;
1117 +               ld_coe_status(xgkr);
1118 +
1119 +               /* tell LP we are ready */
1120 +               xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD,
1121 +                             lt_KR_PMD_STATUS, RX_STAT);
1122 +
1123 +               return;
1124 +       }
1125 +
1126 +       /* We start by checking the current LP status. If we got any responses,
1127 +        * we can clear up the appropriate update request so that the
1128 +        * subsequent code may easily issue new update requests if needed.
1129 +        */
1130 +       lp_status = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_STATUS) &
1131 +                                REQUEST_MASK;
1132 +
1133 +       status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT;
1134 +       status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT;
1135 +       status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT;
1136 +
1137 +       old_ld_update = xgkr->ld_update;
1138 +       req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT;
1139 +       req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT;
1140 +       req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT;
1141 +       req_preset = old_ld_update & PRESET_MASK;
1142 +       req_init = old_ld_update & INIT_MASK;
1143 +
1144 +       /* IEEE802.3-2008, 72.6.10.2.3.1
1145 +        * We may clear PRESET when all coefficients show UPDATED or MAX.
1146 +        */
1147 +       if (req_preset) {
1148 +               if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) &&
1149 +                   (status_coz == COE_UPDATED || status_coz == COE_MAX) &&
1150 +                   (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) {
1151 +                       xgkr->ld_update &= ~PRESET_MASK;
1152 +               }
1153 +       }
1154 +
1155 +       /* IEEE802.3-2008, 72.6.10.2.3.2
1156 +        * We may clear INITIALIZE when no coefficients show NOT UPDATED.
1157 +        */
1158 +       if (req_init) {
1159 +               if (status_cop1 != COE_NOTUPDATED &&
1160 +                   status_coz != COE_NOTUPDATED &&
1161 +                   status_com1 != COE_NOTUPDATED) {
1162 +                       xgkr->ld_update &= ~INIT_MASK;
1163 +               }
1164 +       }
1165 +
1166 +       /* IEEE802.3-2008, 72.6.10.2.3.2
1167 +        * we send initialize to the other side to ensure default settings
1168 +        * for the LP. Naturally, we should do this only once.
1169 +        */
1170 +       if (!tx_c->sent_init) {
1171 +               if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) {
1172 +                       xgkr->ld_update = INIT_MASK;
1173 +                       tx_c->sent_init = true;
1174 +               }
1175 +       }
1176 +
1177 +       /* IEEE802.3-2008, 72.6.10.2.3.3
1178 +        * We set coefficient requests to HOLD when we get the information
1179 +        * about any updates. On clearing our prior response, we also update
1180 +        * our internal status.
1181 +        */
1182 +       if (status_cop1 != COE_NOTUPDATED) {
1183 +               if (req_cop1) {
1184 +                       xgkr->ld_update &= ~COP1_MASK;
1185 +#ifdef NEW_ALGORITHM_TRAIN_TX
1186 +                       if (tx_c->post_inc) {
1187 +                               if (req_cop1 == INCREMENT &&
1188 +                                   status_cop1 == COE_MAX) {
1189 +                                       tx_c->post_inc = 0;
1190 +                                       tx_c->bin_long_stop = true;
1191 +                                       tx_c->bin_m1_stop = true;
1192 +                               } else {
1193 +                                       tx_c->post_inc -= 1;
1194 +                               }
1195 +
1196 +                               ld_coe_update(xgkr);
1197 +                               goto recheck;
1198 +                       }
1199 +#endif
1200 +                       if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) ||
1201 +                           (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) {
1202 +                               dev_dbg(&xgkr->phydev->mdio.dev, "COP1 hit limit %s",
1203 +                                       (status_cop1 == COE_MIN) ?
1204 +                                       "DEC MIN" : "INC MAX");
1205 +                               tx_c->long_min_max_cnt++;
1206 +                               if (tx_c->long_min_max_cnt >= TIMEOUT_LONG) {
1207 +                                       tx_c->bin_long_stop = true;
1208 +                                       ld_coe_update(xgkr);
1209 +                                       goto recheck;
1210 +                               }
1211 +                       }
1212 +               }
1213 +       }
1214 +
1215 +       if (status_coz != COE_NOTUPDATED) {
1216 +               if (req_coz)
1217 +                       xgkr->ld_update &= ~COZ_MASK;
1218 +       }
1219 +
1220 +       if (status_com1 != COE_NOTUPDATED) {
1221 +               if (req_com1) {
1222 +                       xgkr->ld_update &= ~COM1_MASK;
1223 +#ifdef NEW_ALGORITHM_TRAIN_TX
1224 +                       if (tx_c->pre_inc) {
1225 +                               if (req_com1 == INCREMENT &&
1226 +                                   status_com1 == COE_MAX)
1227 +                                       tx_c->pre_inc = 0;
1228 +                               else
1229 +                                       tx_c->pre_inc -= 1;
1230 +
1231 +                               ld_coe_update(xgkr);
1232 +                               goto recheck;
1233 +                       }
1234 +#endif
1235 +                       /* Stop if we have reached the limit for a parameter. */
1236 +                       if ((req_com1 == DECREMENT && status_com1 == COE_MIN) ||
1237 +                           (req_com1 == INCREMENT && status_com1 == COE_MAX)) {
1238 +                               dev_dbg(&xgkr->phydev->mdio.dev, "COM1 hit limit %s",
1239 +                                       (status_com1 == COE_MIN) ?
1240 +                                       "DEC MIN" : "INC MAX");
1241 +                               tx_c->m1_min_max_cnt++;
1242 +                               if (tx_c->m1_min_max_cnt >= TIMEOUT_M1) {
1243 +                                       tx_c->bin_m1_stop = true;
1244 +                                       ld_coe_update(xgkr);
1245 +                                       goto recheck;
1246 +                               }
1247 +                       }
1248 +               }
1249 +       }
1250 +
1251 +       if (old_ld_update != xgkr->ld_update) {
1252 +               ld_coe_update(xgkr);
1253 +               /* Redo these status checks and updates until we have no more
1254 +                * changes, to speed up the overall process.
1255 +                */
1256 +               goto recheck;
1257 +       }
1258 +
1259 +       /* Do nothing if we have pending request. */
1260 +       if ((req_coz || req_com1 || req_cop1))
1261 +               return;
1262 +       else if (lp_status)
1263 +               /* No pending request but LP status was not reverted to
1264 +                * not updated.
1265 +                */
1266 +               return;
1267 +
1268 +#ifdef NEW_ALGORITHM_TRAIN_TX
1269 +       if (!(xgkr->ld_update & (PRESET_MASK | INIT_MASK))) {
1270 +               if (tx_c->pre_inc) {
1271 +                       xgkr->ld_update = INCREMENT << COM1_SHIFT;
1272 +                       ld_coe_update(xgkr);
1273 +                       return;
1274 +               }
1275 +
1276 +               if (status_cop1 != COE_MAX) {
1277 +                       median_gaink2 = xgkr->srds->get_median_gaink2(xgkr->reg_base);
1278 +                       if (median_gaink2 == 0xf) {
1279 +                               tx_c->post_inc = 1;
1280 +                       } else {
1281 +                               /* Gaink2 median lower than "F" */
1282 +                               tx_c->bin_m1_stop = true;
1283 +                               tx_c->bin_long_stop = true;
1284 +                               goto recheck;
1285 +                       }
1286 +               } else {
1287 +                       /* C1 MAX */
1288 +                       tx_c->bin_m1_stop = true;
1289 +                       tx_c->bin_long_stop = true;
1290 +                       goto recheck;
1291 +               }
1292 +
1293 +               if (tx_c->post_inc) {
1294 +                       xgkr->ld_update = INCREMENT << COP1_SHIFT;
1295 +                       ld_coe_update(xgkr);
1296 +                       return;
1297 +               }
1298 +       }
1299 +#endif
1300 +
1301 +       /* snapshot and select bin */
1302 +       bin_m1_early = xgkr->srds->is_bin_early(BIN_M1, xgkr->reg_base);
1303 +       bin_long_early = xgkr->srds->is_bin_early(BIN_LONG, xgkr->reg_base);
1304 +
1305 +       if (!tx_c->bin_m1_stop && !tx_c->bin_m1_late_early && bin_m1_early) {
1306 +               tx_c->bin_m1_stop = true;
1307 +               goto recheck;
1308 +       }
1309 +
1310 +       if (!tx_c->bin_long_stop &&
1311 +           tx_c->bin_long_late_early && !bin_long_early) {
1312 +               tx_c->bin_long_stop = true;
1313 +               goto recheck;
1314 +       }
1315 +
1316 +       /* IEEE802.3-2008, 72.6.10.2.3.3
1317 +        * We only request coefficient updates when no PRESET/INITIALIZE is
1318 +        * pending. We also only request coefficient updates when the
1319 +        * corresponding status is NOT UPDATED and nothing is pending.
1320 +        */
1321 +       if (!(xgkr->ld_update & (PRESET_MASK | INIT_MASK))) {
1322 +               if (!tx_c->bin_long_stop) {
1323 +                       /* BinM1 correction means changing COM1 */
1324 +                       if (!status_com1 && !(xgkr->ld_update & COM1_MASK)) {
1325 +                               /* Avoid BinM1Late by requesting an
1326 +                                * immediate decrement.
1327 +                                */
1328 +                               if (!bin_m1_early) {
1329 +                                       /* request decrement c(-1) */
1330 +                                       temp = DECREMENT << COM1_SHIFT;
1331 +                                       xgkr->ld_update = temp;
1332 +                                       ld_coe_update(xgkr);
1333 +                                       tx_c->bin_m1_late_early = bin_m1_early;
1334 +                                       return;
1335 +                               }
1336 +                       }
1337 +
1338 +                       /* BinLong correction means changing COP1 */
1339 +                       if (!status_cop1 && !(xgkr->ld_update & COP1_MASK)) {
1340 +                               /* Locate BinLong transition point (if any)
1341 +                                * while avoiding BinM1Late.
1342 +                                */
1343 +                               if (bin_long_early) {
1344 +                                       /* request increment c(1) */
1345 +                                       temp = INCREMENT << COP1_SHIFT;
1346 +                                       xgkr->ld_update = temp;
1347 +                               } else {
1348 +                                       /* request decrement c(1) */
1349 +                                       temp = DECREMENT << COP1_SHIFT;
1350 +                                       xgkr->ld_update = temp;
1351 +                               }
1352 +
1353 +                               ld_coe_update(xgkr);
1354 +                               tx_c->bin_long_late_early = bin_long_early;
1355 +                       }
1356 +                       /* We try to finish BinLong before we do BinM1 */
1357 +                       return;
1358 +               }
1359 +
1360 +               if (!tx_c->bin_m1_stop) {
1361 +                       /* BinM1 correction means changing COM1 */
1362 +                       if (!status_com1 && !(xgkr->ld_update & COM1_MASK)) {
1363 +                               /* Locate BinM1 transition point (if any) */
1364 +                               if (bin_m1_early) {
1365 +                                       /* request increment c(-1) */
1366 +                                       temp = INCREMENT << COM1_SHIFT;
1367 +                                       xgkr->ld_update = temp;
1368 +                               } else {
1369 +                                       /* request decrement c(-1) */
1370 +                                       temp = DECREMENT << COM1_SHIFT;
1371 +                                       xgkr->ld_update = temp;
1372 +                               }
1373 +
1374 +                               ld_coe_update(xgkr);
1375 +                               tx_c->bin_m1_late_early = bin_m1_early;
1376 +                       }
1377 +               }
1378 +       }
1379 +}
1380 +
1381 +static int is_link_up(struct phy_device *phydev)
1382 +{
1383 +       struct xgkr_phy_data *xgkr_inst = phydev->priv;
1384 +       int val = 0;
1385 +       
1386 +       mutex_lock(&xgkr_inst->phy_lock);
1387 +
1388 +       val = phy_read_mmd(phydev, MDIO_MMD_PCS, XFI_PCS_SR1);
1389 +
1390 +       mutex_unlock(&xgkr_inst->phy_lock);
1391 +
1392 +       return (val & KR_RX_LINK_STAT_MASK) ? 1 : 0;
1393 +}
1394 +
1395 +static int is_link_training_fail(struct xgkr_params *xgkr)
1396 +{
1397 +       struct phy_device *phydev = xgkr->phydev;
1398 +       int val;
1399 +       int timeout = 100;
1400 +
1401 +       val = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_STATUS);
1402 +
1403 +       if (!(val & TRAIN_FAIL) && (val & RX_STAT)) {
1404 +               /* check LNK_STAT for sure */
1405 +               while (timeout--) {
1406 +                       if (is_link_up(phydev))
1407 +                               return 0;
1408 +
1409 +                       usleep_range(100, 500);
1410 +               }
1411 +       }
1412 +
1413 +       return 1;
1414 +}
1415 +
1416 +static int check_rx(struct xgkr_params *xgkr)
1417 +{
1418 +       return xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_STATUS) &
1419 +                           RX_READY_MASK;
1420 +}
1421 +
1422 +/* Coefficient values have hardware restrictions */
1423 +static int is_ld_valid(struct xgkr_params *xgkr)
1424 +{
1425 +       u32 ratio_pst1q = xgkr->ratio_pst1q;
1426 +       u32 adpt_eq = xgkr->adpt_eq;
1427 +       u32 ratio_preq = xgkr->ratio_preq;
1428 +
1429 +       if ((ratio_pst1q + adpt_eq + ratio_preq) > 48)
1430 +               return 0;
1431 +
1432 +       if (((ratio_pst1q + adpt_eq + ratio_preq) * 4) >=
1433 +           ((adpt_eq - ratio_pst1q - ratio_preq) * 17))
1434 +               return 0;
1435 +
1436 +       if (ratio_preq > ratio_pst1q)
1437 +               return 0;
1438 +
1439 +       if (ratio_preq > 8)
1440 +               return 0;
1441 +
1442 +       if (adpt_eq < 26)
1443 +               return 0;
1444 +
1445 +       if (ratio_pst1q > 16)
1446 +               return 0;
1447 +
1448 +       return 1;
1449 +}
1450 +
1451 +static int is_value_allowed(const u32 *val_table, u32 val)
1452 +{
1453 +       int i;
1454 +
1455 +       for (i = 0;; i++) {
1456 +               if (*(val_table + i) == VAL_INVALID)
1457 +                       return 0;
1458 +               if (*(val_table + i) == val)
1459 +                       return 1;
1460 +       }
1461 +}
1462 +
1463 +static enum coe_update inc_dec(struct xgkr_params *xgkr, int field, int request)
1464 +{
1465 +       u32 ld_limit[3], ld_coe[3], step[3];
1466 +
1467 +       ld_coe[0] = xgkr->ratio_pst1q;
1468 +       ld_coe[1] = xgkr->adpt_eq;
1469 +       ld_coe[2] = xgkr->ratio_preq;
1470 +
1471 +       /* Information specific to the SerDes for 10GBase-KR:
1472 +        * Incrementing C(+1) means *decrementing* RATIO_PST1Q
1473 +        * Incrementing C(0) means incrementing ADPT_EQ
1474 +        * Incrementing C(-1) means *decrementing* RATIO_PREQ
1475 +        */
1476 +       step[0] = -1;
1477 +       step[1] = 1;
1478 +       step[2] = -1;
1479 +
1480 +       switch (request) {
1481 +       case INCREMENT:
1482 +               ld_limit[0] = POST_COE_MAX;
1483 +               ld_limit[1] = ZERO_COE_MAX;
1484 +               ld_limit[2] = PRE_COE_MAX;
1485 +               if (ld_coe[field] != ld_limit[field])
1486 +                       ld_coe[field] += step[field];
1487 +               else
1488 +                       /* MAX */
1489 +                       return COE_MAX;
1490 +               break;
1491 +       case DECREMENT:
1492 +               ld_limit[0] = POST_COE_MIN;
1493 +               ld_limit[1] = ZERO_COE_MIN;
1494 +               ld_limit[2] = PRE_COE_MIN;
1495 +               if (ld_coe[field] != ld_limit[field])
1496 +                       ld_coe[field] -= step[field];
1497 +               else
1498 +                       /* MIN */
1499 +                       return COE_MIN;
1500 +               break;
1501 +       default:
1502 +               break;
1503 +       }
1504 +
1505 +       if (is_ld_valid(xgkr)) {
1506 +               /* accept new ld */
1507 +               xgkr->ratio_pst1q = ld_coe[0];
1508 +               xgkr->adpt_eq = ld_coe[1];
1509 +               xgkr->ratio_preq = ld_coe[2];
1510 +               /* only some values for preq and pst1q can be used.
1511 +                * for preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc.
1512 +                * for pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10.
1513 +                */
1514 +               if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) {
1515 +                       dev_dbg(&xgkr->phydev->mdio.dev,
1516 +                               "preq skipped value: %d\n", ld_coe[2]);
1517 +                       /* NOT UPDATED */
1518 +                       return COE_NOTUPDATED;
1519 +               }
1520 +
1521 +               if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) {
1522 +                       dev_dbg(&xgkr->phydev->mdio.dev,
1523 +                               "pst1q skipped value: %d\n", ld_coe[0]);
1524 +                       /* NOT UPDATED */
1525 +                       return COE_NOTUPDATED;
1526 +               }
1527 +
1528 +               tune_tecr(xgkr);
1529 +       } else {
1530 +               if (request == DECREMENT)
1531 +                       /* MIN */
1532 +                       return COE_MIN;
1533 +               if (request == INCREMENT)
1534 +                       /* MAX */
1535 +                       return COE_MAX;
1536 +       }
1537 +
1538 +       /* UPDATED */
1539 +       return COE_UPDATED;
1540 +}
1541 +
1542 +static void min_max_updated(struct xgkr_params *xgkr, int field, enum coe_update cs)
1543 +{
1544 +       u32 mask, val;
1545 +       u32 ld_cs = cs;
1546 +
1547 +       if (cs == COE_INV)
1548 +               return;
1549 +
1550 +       switch (field) {
1551 +       case COE_COP1:
1552 +               mask = COP1_MASK;
1553 +               val = ld_cs << COP1_SHIFT;
1554 +               break;
1555 +       case COE_COZ:
1556 +               mask = COZ_MASK;
1557 +               val = ld_cs << COZ_SHIFT;
1558 +               break;
1559 +       case COE_COM:
1560 +               mask = COM1_MASK;
1561 +               val = ld_cs << COM1_SHIFT;
1562 +               break;
1563 +       default:
1564 +               return;
1565 +       }
1566 +
1567 +       xgkr->ld_status &= ~mask;
1568 +       xgkr->ld_status |= val;
1569 +}
1570 +
1571 +static void check_request(struct xgkr_params *xgkr, int request)
1572 +{
1573 +       int cop1_req, coz_req, com_req;
1574 +       int old_status;
1575 +       enum coe_update cu;
1576 +
1577 +       cop1_req = (request & COP1_MASK) >> COP1_SHIFT;
1578 +       coz_req = (request & COZ_MASK) >> COZ_SHIFT;
1579 +       com_req = (request & COM1_MASK) >> COM1_SHIFT;
1580 +
1581 +       /* IEEE802.3-2008, 72.6.10.2.5
1582 +        * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED
1583 +        */
1584 +       old_status = xgkr->ld_status;
1585 +
1586 +       if (cop1_req && !(xgkr->ld_status & COP1_MASK)) {
1587 +               cu = inc_dec(xgkr, COE_COP1, cop1_req);
1588 +               min_max_updated(xgkr, COE_COP1, cu);
1589 +       }
1590 +
1591 +       if (coz_req && !(xgkr->ld_status & COZ_MASK)) {
1592 +               cu = inc_dec(xgkr, COE_COZ, coz_req);
1593 +               min_max_updated(xgkr, COE_COZ, cu);
1594 +       }
1595 +
1596 +       if (com_req && !(xgkr->ld_status & COM1_MASK)) {
1597 +               cu = inc_dec(xgkr, COE_COM, com_req);
1598 +               min_max_updated(xgkr, COE_COM, cu);
1599 +       }
1600 +
1601 +       if (old_status != xgkr->ld_status)
1602 +               ld_coe_status(xgkr);
1603 +}
1604 +
1605 +static void preset(struct xgkr_params *xgkr)
1606 +{
1607 +       /* These are all MAX values from the IEEE802.3 perspective. */
1608 +       xgkr->ratio_pst1q = POST_COE_MAX;
1609 +       xgkr->adpt_eq = ZERO_COE_MAX;
1610 +       xgkr->ratio_preq = PRE_COE_MAX;
1611 +
1612 +       tune_tecr(xgkr);
1613 +       xgkr->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1614 +       xgkr->ld_status |= COE_MAX << COP1_SHIFT |
1615 +                          COE_MAX << COZ_SHIFT |
1616 +                          COE_MAX << COM1_SHIFT;
1617 +       ld_coe_status(xgkr);
1618 +}
1619 +
1620 +static void train_local_tx(struct xgkr_params *xgkr)
1621 +{
1622 +       int request, old_ld_status;
1623 +
1624 +       /* get request from LP */
1625 +       request = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_CU) &
1626 +                             (LD_ALL_MASK);
1627 +
1628 +       old_ld_status = xgkr->ld_status;
1629 +
1630 +       /* IEEE802.3-2008, 72.6.10.2.5
1631 +        * Ensure we always go to NOT UPDATED for status reporting in
1632 +        * response to HOLD requests.
1633 +        * IEEE802.3-2008, 72.6.10.2.3.1/2
1634 +        * ... but only if PRESET/INITIALIZE are not active to ensure
1635 +        * we keep status until they are released.
1636 +        */
1637 +       if (!(request & (PRESET_MASK | INIT_MASK))) {
1638 +               if (!(request & COP1_MASK))
1639 +                       xgkr->ld_status &= ~COP1_MASK;
1640 +
1641 +               if (!(request & COZ_MASK))
1642 +                       xgkr->ld_status &= ~COZ_MASK;
1643 +
1644 +               if (!(request & COM1_MASK))
1645 +                       xgkr->ld_status &= ~COM1_MASK;
1646 +
1647 +               if (old_ld_status != xgkr->ld_status)
1648 +                       ld_coe_status(xgkr);
1649 +       }
1650 +
1651 +       /* As soon as the LP shows ready, no need to do any more updates. */
1652 +       if (check_rx(xgkr)) {
1653 +               /* LP receiver is ready */
1654 +               if (xgkr->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) {
1655 +                       xgkr->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1656 +                       ld_coe_status(xgkr);
1657 +               }
1658 +       } else {
1659 +               /* IEEE802.3-2008, 72.6.10.2.3.1/2
1660 +                * only act on PRESET/INITIALIZE if all status is NOT UPDATED.
1661 +                */
1662 +               if (request & (PRESET_MASK | INIT_MASK)) {
1663 +                       if (!(xgkr->ld_status &
1664 +                             (COP1_MASK | COZ_MASK | COM1_MASK))) {
1665 +                               if (request & PRESET_MASK)
1666 +                                       preset(xgkr);
1667 +
1668 +                               if (request & INIT_MASK)
1669 +                                       initialize(xgkr);
1670 +                       }
1671 +               }
1672 +
1673 +               /* LP Coefficients are not in HOLD */
1674 +               if (request & REQUEST_MASK)
1675 +                       check_request(xgkr, request & REQUEST_MASK);
1676 +       }
1677 +}
1678 +
1679 +static void xgkr_start_train(struct xgkr_params *xgkr)
1680 +{
1681 +       struct phy_device *phydev = xgkr->phydev;
1682 +       struct xgkr_phy_data *xgkr_inst = phydev->priv;
1683 +       struct tx_condition *tx_c = &xgkr->tx_c;
1684 +       int val = 0, i, j;
1685 +       int lt_state;
1686 +       unsigned long dead_line;
1687 +       int lp_rx_ready, tx_training_complete;
1688 +       u32 lt_timeout = 500;
1689 +
1690 +       init_xgkr(xgkr, 0);
1691 +       
1692 +       start_lt(xgkr);
1693 +       
1694 +       if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
1695 +               lt_timeout = 2000;
1696 +       }
1697 +
1698 +       for (i = 0; i < 2;) {
1699 +               
1700 +               dead_line = jiffies + msecs_to_jiffies(lt_timeout);
1701 +               
1702 +               while (time_before(jiffies, dead_line)) {
1703 +
1704 +                       val = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD,
1705 +                                          lt_KR_PMD_STATUS);
1706 +
1707 +                       if (val & TRAIN_FAIL) {
1708 +                               /* LT failed already, reset lane to avoid
1709 +                                * it runs into a hang, then start LT again.
1710 +                                */
1711 +                               if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
1712 +                                       /* Reset only the Master Lane */
1713 +                                       if (xgkr->idx == MASTER_LANE)
1714 +                                               xgkr->srds->reset_lane(xgkr->reg_base);
1715 +                               } else {
1716 +                                       xgkr->srds->reset_lane(xgkr->reg_base);
1717 +                               }
1718 +                               
1719 +                               start_lt(xgkr);
1720 +                       } else if ((val & PMD_STATUS_SUP_STAT) &&
1721 +                                  (val & PMD_STATUS_FRAME_LOCK))
1722 +                               break;
1723 +                       usleep_range(100, 500);
1724 +               }
1725 +
1726 +               if (!((val & PMD_STATUS_FRAME_LOCK) &&
1727 +                     (val & PMD_STATUS_SUP_STAT))) {
1728 +                       i++;
1729 +                       continue;
1730 +               }
1731 +
1732 +               /* init process */
1733 +               lp_rx_ready = false;
1734 +               tx_training_complete = false;
1735 +               /* the LT should be finished in 500ms, failed or OK. */
1736 +               dead_line = jiffies + msecs_to_jiffies(lt_timeout);
1737 +
1738 +               while (time_before(jiffies, dead_line)) {
1739 +                       /* check if the LT is already failed */
1740 +
1741 +                       lt_state = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD,
1742 +                                               lt_KR_PMD_STATUS);
1743 +
1744 +                       if (lt_state & TRAIN_FAIL) {
1745 +                               
1746 +                               if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
1747 +                                       /* Reset only the Master Lane */
1748 +                                       if (xgkr->idx == MASTER_LANE)
1749 +                                               xgkr->srds->reset_lane(xgkr->reg_base);
1750 +                               } else {
1751 +                                       xgkr->srds->reset_lane(xgkr->reg_base);
1752 +                               }
1753 +                               
1754 +                               break;
1755 +                       }
1756 +
1757 +                       lp_rx_ready = check_rx(xgkr);
1758 +                       tx_training_complete = tx_c->tx_complete;
1759 +
1760 +                       if (lp_rx_ready && tx_training_complete)
1761 +                               break;
1762 +
1763 +                       if (!lp_rx_ready)
1764 +                               train_local_tx(xgkr);
1765 +
1766 +                       if (!tx_training_complete)
1767 +                               train_remote_tx(xgkr);
1768 +
1769 +                       usleep_range(100, 500);
1770 +               }
1771 +
1772 +               i++;
1773 +               /* check LT result */
1774 +               if (is_link_training_fail(xgkr)) {
1775 +                       init_xgkr(xgkr, 0);
1776 +                       continue;
1777 +               } else {
1778 +                       stop_lt(xgkr);
1779 +                       xgkr->state = TRAINED;
1780 +                       
1781 +                       switch (xgkr_inst->bp_mode)
1782 +                       {
1783 +                       case PHY_BACKPLANE_10GBASE_KR:
1784 +                               if (phydev->attached_dev == NULL)
1785 +                                       dev_info(&phydev->mdio.dev, "10GBase-KR link trained (Tx equalization: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x)\n",
1786 +                                                       xgkr->tuned_ratio_preq, xgkr->tuned_ratio_pst1q, xgkr->tuned_adpt_eq);
1787 +                               else
1788 +                                       dev_info(&phydev->mdio.dev, "%s %s: 10GBase-KR link trained (Tx equalization: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x)\n",
1789 +                                                       dev_driver_string(phydev->attached_dev->dev.parent), 
1790 +                                                       dev_name(phydev->attached_dev->dev.parent),
1791 +                                                       xgkr->tuned_ratio_preq, xgkr->tuned_ratio_pst1q, xgkr->tuned_adpt_eq);
1792 +                               break;
1793 +                               
1794 +                       case PHY_BACKPLANE_40GBASE_KR:
1795 +                               if (xgkr->idx == xgkr_inst->phy_lanes - 1) {
1796 +                                       if (phydev->attached_dev == NULL)
1797 +                                               dev_info(&phydev->mdio.dev, "40GBase-KR link trained at lanes Tx equalization:\n");
1798 +                                       else
1799 +                                               dev_info(&phydev->mdio.dev, "%s %s: 40GBase-KR link trained at lanes Tx equalization:\n",
1800 +                                                               dev_driver_string(phydev->attached_dev->dev.parent), 
1801 +                                                               dev_name(phydev->attached_dev->dev.parent));
1802 +
1803 +                                       for (j = 0; j < xgkr_inst->phy_lanes; j++) {
1804 +                                               if (phydev->attached_dev == NULL)
1805 +                                                       dev_info(&phydev->mdio.dev, "40GBase-KR Lane %d: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x\n",
1806 +                                                                       j, xgkr_inst->xgkr[j].tuned_ratio_preq, xgkr_inst->xgkr[j].tuned_ratio_pst1q, xgkr_inst->xgkr[j].tuned_adpt_eq);
1807 +                                               else
1808 +                                                       dev_info(&phydev->mdio.dev, "%s %s: 40GBase-KR Lane %d: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x\n",
1809 +                                                                       dev_driver_string(phydev->attached_dev->dev.parent),
1810 +                                                                       dev_name(phydev->attached_dev->dev.parent),
1811 +                                                                       j, xgkr_inst->xgkr[j].tuned_ratio_preq, xgkr_inst->xgkr[j].tuned_ratio_pst1q, xgkr_inst->xgkr[j].tuned_adpt_eq);
1812 +                                       }
1813 +                               }
1814 +                               break;
1815 +                       }
1816 +
1817 +                       break;
1818 +               }
1819 +       }
1820 +}
1821 +
1822 +static void xgkr_request_restart_an(struct xgkr_params *xgkr)
1823 +{
1824 +       struct phy_device *phydev = xgkr->phydev;
1825 +       struct xgkr_phy_data *xgkr_inst = phydev->priv;
1826 +       int i;
1827 +
1828 +       if (time_before(jiffies, xgkr->rt_time))
1829 +               return;
1830 +       
1831 +       switch (xgkr_inst->bp_mode)
1832 +       {
1833 +       case PHY_BACKPLANE_1000BASE_KX:
1834 +               dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
1835 +               break;
1836 +
1837 +       case PHY_BACKPLANE_10GBASE_KR:
1838 +               init_xgkr(xgkr, 0);  
1839 +               reset_lt(xgkr);
1840 +               xgkr->state = DETECTING_LP;
1841 +               start_xgkr_an(xgkr);
1842 +               start_xgkr_state_machine(&xgkr->xgkr_wk);
1843 +               break;
1844 +
1845 +       case PHY_BACKPLANE_40GBASE_KR:
1846 +               for (i = 0; i < xgkr_inst->phy_lanes; i++) {
1847 +                       init_xgkr(&xgkr_inst->xgkr[i], 0);
1848 +                       reset_lt(&xgkr_inst->xgkr[i]);
1849 +                       xgkr_inst->xgkr[i].state = DETECTING_LP;
1850 +               }
1851 +               //Start AN only for Master Lane
1852 +               start_xgkr_an(&xgkr_inst->xgkr[MASTER_LANE]);
1853 +               //start state machine
1854 +               for (i = 0; i < xgkr_inst->phy_lanes; i++) {
1855 +                       start_xgkr_state_machine(&xgkr_inst->xgkr[i].xgkr_wk);
1856 +               }
1857 +               break;
1858 +       }
1859 +       
1860 +       xgkr->rt_time = jiffies + msecs_to_jiffies(XGKR_DENY_RT_INTERVAL);
1861 +}
1862 +
1863 +static void xgkr_state_machine(struct work_struct *work)
1864 +{
1865 +       struct delayed_work *dwork = to_delayed_work(work);
1866 +       struct xgkr_params *xgkr = container_of(dwork,
1867 +                                                 struct xgkr_params, xgkr_wk);
1868 +       struct phy_device *phydev = xgkr->phydev;
1869 +       struct xgkr_phy_data *xgkr_inst = phydev->priv;
1870 +       int an_state;
1871 +       bool start_train = false;
1872 +       bool all_lanes_trained = false;
1873 +       int i;
1874 +
1875 +       if (!xgkr_inst->aneg_done) {
1876 +               start_xgkr_state_machine(&xgkr->xgkr_wk);
1877 +               return;
1878 +       }
1879 +
1880 +       mutex_lock(&phydev->lock);
1881 +       
1882 +       switch (xgkr->state) {
1883 +       case DETECTING_LP:
1884 +
1885 +               switch (xgkr_inst->bp_mode)
1886 +               {
1887 +               case PHY_BACKPLANE_1000BASE_KX:
1888 +                       dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
1889 +                       break;
1890 +
1891 +               case PHY_BACKPLANE_10GBASE_KR:
1892 +                       an_state = xgkr_phy_read_mmd(xgkr, MDIO_MMD_AN, g_an_BP_STAT);
1893 +                       if (an_state & KR_AN_MASK_10G) {
1894 +                               //AN acquired: Train the lane
1895 +                               xgkr->an_wait_count = 0;
1896 +                               start_train = true;
1897 +                       } else {
1898 +                               //AN lost or not yet acquired
1899 +                               if (!is_link_up(phydev)) {
1900 +                                       //Link is down: restart training
1901 +                                       xgkr->an_wait_count = 0;
1902 +                                       xgkr_request_restart_an(xgkr);
1903 +                               } else {
1904 +                                       //Link is up: wait a few iterations for AN to be acquired
1905 +                                       if (xgkr->an_wait_count >= XGKR_AN_WAIT_ITERATIONS) {
1906 +                                               xgkr->an_wait_count = 0;
1907 +                                               xgkr_request_restart_an(xgkr);
1908 +                                       } else {
1909 +                                               xgkr->an_wait_count++;
1910 +                                       }
1911 +                               }
1912 +                       }
1913 +                       break;
1914 +
1915 +               case PHY_BACKPLANE_40GBASE_KR:
1916 +                       //Check AN state only on Master Lane
1917 +                       an_state = xgkr_phy_read_mmd(&xgkr_inst->xgkr[MASTER_LANE], MDIO_MMD_AN, g_an_BP_STAT);
1918 +                       if (an_state & KR_AN_MASK_40G) {
1919 +                               //AN acquired: Train all lanes in order starting with Master Lane
1920 +                               xgkr->an_wait_count = 0;
1921 +                               if (xgkr->idx == MASTER_LANE) {
1922 +                                       start_train = true;
1923 +                               }
1924 +                               else if (xgkr_inst->xgkr[xgkr->idx - 1].state == TRAINED) {
1925 +                                       start_train = true;
1926 +                               }
1927 +                       } else {
1928 +                               //AN lost or not yet acquired
1929 +                               if (!is_link_up(phydev)) {
1930 +                                       //Link is down: restart training
1931 +                                       xgkr->an_wait_count = 0;
1932 +                                       xgkr_request_restart_an(xgkr);
1933 +                               } else {
1934 +                                       //Link is up: wait a few iterations for AN to be acquired
1935 +                                       if (xgkr->an_wait_count >= XGKR_AN_WAIT_ITERATIONS) {
1936 +                                               xgkr->an_wait_count = 0;
1937 +                                               xgkr_request_restart_an(xgkr);
1938 +                                       } else {
1939 +                                               xgkr->an_wait_count++;
1940 +                                       }
1941 +                               }
1942 +                       }
1943 +                       break;
1944 +               }
1945 +               break;
1946 +
1947 +       case TRAINED:
1948 +               if (!is_link_up(phydev)) {
1949 +                       switch (xgkr_inst->bp_mode)
1950 +                       {
1951 +                       case PHY_BACKPLANE_1000BASE_KX:
1952 +                               dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
1953 +                               break;
1954 +
1955 +                       case PHY_BACKPLANE_10GBASE_KR:
1956 +                               dev_info(&phydev->mdio.dev, "Detect hotplug, restart training\n");
1957 +                               xgkr_request_restart_an(xgkr);
1958 +                               break;
1959 +
1960 +                       case PHY_BACKPLANE_40GBASE_KR:
1961 +                               if (xgkr->idx == MASTER_LANE) {
1962 +                                       //check if all lanes are trained only on Master Lane
1963 +                                       all_lanes_trained = true;
1964 +                                       for (i = 0; i < xgkr_inst->phy_lanes; i++) {
1965 +                                               if (xgkr_inst->xgkr[i].state != TRAINED) {
1966 +                                                       all_lanes_trained = false;
1967 +                                                       break;
1968 +                                               }
1969 +                                       }
1970 +                                       if (all_lanes_trained) {
1971 +                                               dev_info(&phydev->mdio.dev, "Detect hotplug, restart training\n");
1972 +                                               xgkr_request_restart_an(xgkr);
1973 +                                       }
1974 +                               }
1975 +                               break;
1976 +                       }
1977 +               }
1978 +               break;
1979 +       }
1980 +
1981 +       if (start_train) {
1982 +               xgkr_start_train(xgkr);
1983 +       }
1984 +
1985 +       mutex_unlock(&phydev->lock);
1986 +       start_xgkr_state_machine(&xgkr->xgkr_wk);
1987 +}
1988 +
1989 +static int fsl_backplane_probe(struct phy_device *phydev)
1990 +{
1991 +       struct xgkr_phy_data *xgkr_inst;
1992 +       struct device_node *phy_node, *lane_node;
1993 +       struct resource res_lane;
1994 +       struct serdes_access *srds = NULL;
1995 +       int serdes_type;
1996 +       const char *st;
1997 +       const char *bm;
1998 +       int ret, i, phy_lanes;
1999 +       int bp_mode;
2000 +       u32 lane_base_addr[MAX_PHY_LANES_NO], lane_memmap_size;
2001 +
2002 +       phy_node = phydev->mdio.dev.of_node;
2003 +       if (!phy_node) {
2004 +               dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2005 +               return -EINVAL;
2006 +       }
2007 +
2008 +       bp_mode = of_property_read_string(phy_node, "backplane-mode", &bm);
2009 +       if (bp_mode < 0)
2010 +               return -EINVAL;
2011 +
2012 +       phy_lanes = 1;
2013 +       if (!strcasecmp(bm, "1000base-kx")) {
2014 +               bp_mode = PHY_BACKPLANE_1000BASE_KX;
2015 +       } else if (!strcasecmp(bm, "10gbase-kr")) {
2016 +               bp_mode = PHY_BACKPLANE_10GBASE_KR;
2017 +       } else if (!strcasecmp(bm, "40gbase-kr")) {
2018 +               bp_mode = PHY_BACKPLANE_40GBASE_KR;
2019 +               phy_lanes = 4;
2020 +       } else {
2021 +               dev_err(&phydev->mdio.dev, "Unknown backplane-mode\n");
2022 +               return -EINVAL;
2023 +       }
2024 +
2025 +       lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0);
2026 +       if (!lane_node) {
2027 +               dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n");
2028 +               return -EINVAL;
2029 +       }
2030 +
2031 +       ret = of_property_read_string(lane_node, "compatible", &st);
2032 +       if (ret < 0) {
2033 +               //assume SERDES-10G if compatible property is not specified
2034 +               serdes_type = SERDES_10G;
2035 +       }
2036 +       else if (!strcasecmp(st, "fsl,serdes-10g")) {
2037 +               serdes_type = SERDES_10G;
2038 +       } else if (!strcasecmp(st, "fsl,serdes-28g")) {
2039 +               serdes_type = SERDES_28G;
2040 +       } else {
2041 +               dev_err(&phydev->mdio.dev, "Unknown serdes-type\n");
2042 +               return -EINVAL;
2043 +       }
2044 +
2045 +       ret = of_address_to_resource(lane_node, 0, &res_lane);
2046 +       if (ret) {
2047 +               dev_err(&phydev->mdio.dev, "could not obtain memory map\n");
2048 +               return ret;
2049 +       }
2050 +
2051 +       of_node_put(lane_node);
2052 +       ret = of_property_read_u32_array(phy_node, "fsl,lane-reg",
2053 +                                        (u32 *)lane_base_addr, phy_lanes);
2054 +       if (ret) {
2055 +               dev_err(&phydev->mdio.dev, "could not get fsl,lane-reg\n");
2056 +               return -EINVAL;
2057 +       }
2058 +
2059 +       switch (serdes_type)
2060 +       {
2061 +       case SERDES_10G:
2062 +               setup_an_lt_ls();
2063 +               srds = setup_serdes_access_10g();
2064 +               break;
2065 +
2066 +       case SERDES_28G:
2067 +               setup_an_lt_lx();
2068 +               srds = setup_serdes_access_28g();
2069 +               break;
2070 +
2071 +       default:
2072 +               dev_err(&phydev->mdio.dev, "Unsupported serdes-type\n");
2073 +               return -EINVAL;
2074 +       }
2075 +
2076 +       if (!srds) {
2077 +               dev_err(&phydev->mdio.dev, "Unsupported serdes-type\n");
2078 +               return -EINVAL;
2079 +       }
2080 +
2081 +       srds->serdes_type = serdes_type;
2082 +       srds->is_little_endian = of_property_read_bool(lane_node, "little-endian");
2083 +
2084 +       if (srds->is_little_endian) {
2085 +               srds->ioread32 = le_ioread32;
2086 +               srds->iowrite32 = le_iowrite32;
2087 +       } else {
2088 +               srds->ioread32 = be_ioread32;
2089 +               srds->iowrite32 = be_iowrite32;
2090 +       }
2091 +
2092 +       xgkr_inst = devm_kzalloc(&phydev->mdio.dev,
2093 +                                sizeof(*xgkr_inst), GFP_KERNEL);
2094 +       if (!xgkr_inst)
2095 +               return -ENOMEM;
2096 +
2097 +       xgkr_inst->phy_lanes = phy_lanes;
2098 +       xgkr_inst->bp_mode = bp_mode;
2099 +       mutex_init(&xgkr_inst->phy_lock);
2100 +
2101 +       lane_memmap_size = srds->get_lane_memmap_size();
2102 +       
2103 +       for (i = 0; i < phy_lanes; i++) {
2104 +               xgkr_inst->xgkr[i].idx = i;
2105 +               xgkr_inst->xgkr[i].phydev = phydev;
2106 +               xgkr_inst->xgkr[i].srds = srds;
2107 +               xgkr_inst->xgkr[i].reg_base = devm_ioremap_nocache(&phydev->mdio.dev,
2108 +                                                   res_lane.start + lane_base_addr[i],
2109 +                                                   lane_memmap_size);
2110 +               if (!xgkr_inst->xgkr[i].reg_base) {
2111 +                       dev_err(&phydev->mdio.dev, "ioremap_nocache failed\n");
2112 +                       return -ENOMEM;
2113 +               }
2114 +               xgkr_inst->xgkr[i].rt_time = jiffies + msecs_to_jiffies(XGKR_DENY_RT_INTERVAL);
2115 +       }
2116 +
2117 +       phydev->priv = xgkr_inst;
2118 +
2119 +       switch (bp_mode)
2120 +       {
2121 +       case PHY_BACKPLANE_1000BASE_KX:
2122 +               phydev->speed = SPEED_1000;
2123 +               /* configure the lane for 1000BASE-KX */
2124 +               srds->lane_set_1gkx(xgkr_inst->xgkr[SINGLE_LANE].reg_base);
2125 +               break;
2126 +
2127 +       case PHY_BACKPLANE_10GBASE_KR:
2128 +               phydev->speed = SPEED_10000;
2129 +               INIT_DELAYED_WORK(&xgkr_inst->xgkr[SINGLE_LANE].xgkr_wk, xgkr_state_machine);
2130 +               break;
2131 +
2132 +       case PHY_BACKPLANE_40GBASE_KR:
2133 +               phydev->speed = SPEED_40000;
2134 +               for (i = 0; i < phy_lanes; i++)
2135 +                       INIT_DELAYED_WORK(&xgkr_inst->xgkr[i].xgkr_wk, xgkr_state_machine);
2136 +               break;
2137 +       }
2138 +
2139 +       return 0;
2140 +}
2141 +
2142 +static int fsl_backplane_aneg_done(struct phy_device *phydev)
2143 +{
2144 +       struct xgkr_phy_data *xgkr_inst = phydev->priv;
2145 +
2146 +       if (!phydev->mdio.dev.of_node) {
2147 +               dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2148 +               return -EINVAL;
2149 +       }
2150 +       
2151 +       xgkr_inst->aneg_done = true;
2152 +
2153 +       return 1;
2154 +}
2155 +
2156 +static int fsl_backplane_config_aneg(struct phy_device *phydev)
2157 +{
2158 +       struct xgkr_phy_data *xgkr_inst = phydev->priv;
2159 +       int i;
2160 +
2161 +       if (!phydev->mdio.dev.of_node) {
2162 +               dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2163 +               return -EINVAL;
2164 +       }
2165 +
2166 +       switch (phydev->speed)
2167 +       {
2168 +       case SPEED_1000:
2169 +               phydev->supported |= SUPPORTED_1000baseKX_Full;
2170 +               start_1gkx_an(phydev);
2171 +               break;
2172 +
2173 +       case SPEED_10000:
2174 +               phydev->supported |= SUPPORTED_10000baseKR_Full;
2175 +               reset_lt(&xgkr_inst->xgkr[SINGLE_LANE]);
2176 +               start_xgkr_an(&xgkr_inst->xgkr[SINGLE_LANE]);
2177 +               /* start state machine*/
2178 +               start_xgkr_state_machine(&xgkr_inst->xgkr[SINGLE_LANE].xgkr_wk);
2179 +               break;
2180 +
2181 +       case SPEED_40000:
2182 +               phydev->supported |= SUPPORTED_40000baseKR4_Full;
2183 +               for (i = 0; i < xgkr_inst->phy_lanes; i++) {
2184 +                       reset_lt(&xgkr_inst->xgkr[i]);
2185 +               }
2186 +               //Start AN only for Master Lane
2187 +               start_xgkr_an(&xgkr_inst->xgkr[MASTER_LANE]);
2188 +               /* start state machine*/
2189 +               for (i = 0; i < xgkr_inst->phy_lanes; i++) {
2190 +                       start_xgkr_state_machine(&xgkr_inst->xgkr[i].xgkr_wk);
2191 +               }
2192 +               
2193 +               break;
2194 +       }
2195 +
2196 +       phydev->advertising = phydev->supported;
2197 +       phydev->duplex = 1;
2198 +
2199 +       return 0;
2200 +}
2201 +
2202 +static int fsl_backplane_suspend(struct phy_device *phydev)
2203 +{
2204 +       int i;
2205 +
2206 +       if (!phydev->mdio.dev.of_node) {
2207 +               dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2208 +               return -EINVAL;
2209 +       }
2210 +
2211 +       if (phydev->speed == SPEED_10000 || phydev->speed == SPEED_40000) {
2212 +               struct xgkr_phy_data *xgkr_inst = phydev->priv;
2213 +
2214 +               for (i = 0; i < xgkr_inst->phy_lanes; i++)
2215 +                       cancel_delayed_work_sync(&xgkr_inst->xgkr[i].xgkr_wk);
2216 +       }
2217 +       return 0;
2218 +}
2219 +
2220 +static int fsl_backplane_resume(struct phy_device *phydev)
2221 +{
2222 +       struct xgkr_phy_data *xgkr_inst = phydev->priv;
2223 +       int i;
2224 +
2225 +       if (!phydev->mdio.dev.of_node) {
2226 +               dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2227 +               return -EINVAL;
2228 +       }
2229 +
2230 +       if (phydev->speed == SPEED_10000 || phydev->speed == SPEED_40000) {
2231 +               for (i = 0; i < xgkr_inst->phy_lanes; i++) {
2232 +                       init_xgkr(&xgkr_inst->xgkr[i], 1);
2233 +                       start_xgkr_state_machine(&xgkr_inst->xgkr[i].xgkr_wk);
2234 +               }
2235 +       }
2236 +       return 0;
2237 +}
2238 +
2239 +static int fsl_backplane_read_status(struct phy_device *phydev)
2240 +{
2241 +       if (!phydev->mdio.dev.of_node) {
2242 +               dev_err(&phydev->mdio.dev, "No associated device tree node\n");
2243 +               return -EINVAL;
2244 +       }
2245 +
2246 +       if (is_link_up(phydev))
2247 +               phydev->link = 1;
2248 +       else
2249 +               phydev->link = 0;
2250 +
2251 +       return 0;
2252 +}
2253 +
2254 +static int fsl_backplane_match_phy_device(struct phy_device *phydev)
2255 +{
2256 +       struct device_node *phy_node, *lane_node;
2257 +       const char *st;
2258 +       int serdes_type, i, ret;
2259 +       const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
2260 +
2261 +       if (!phydev->mdio.dev.of_node) {
2262 +               return 0;
2263 +       }
2264 +
2265 +       //       WORKAROUND:
2266 +       // Required for LX2 devices
2267 +       // where PHY ID cannot be verified in PCS
2268 +       // because PCS Device Identifier Upper and Lower registers are hidden
2269 +       // and always return 0 when they are read:
2270 +       // 2  02        Device_ID0  RO          Bits 15:0       0
2271 +       // val = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x2);
2272 +       // 3  03        Device_ID1  RO          Bits 31:16      0
2273 +       // val = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x3);
2274 +       //
2275 +       // To be removed: After the issue will be fixed on LX2 devices
2276 +
2277 +       if (!phydev->is_c45)
2278 +               return 0;
2279 +
2280 +       phy_node = phydev->mdio.dev.of_node;
2281 +
2282 +       lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0);
2283 +       if (!lane_node) {
2284 +               dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n");
2285 +               return 0;
2286 +       }
2287 +
2288 +       ret = of_property_read_string(lane_node, "compatible", &st);
2289 +       if (ret < 0) {
2290 +               //assume SERDES-10G if compatible property is not specified
2291 +               serdes_type = SERDES_10G;
2292 +       }
2293 +       else if (!strcasecmp(st, "fsl,serdes-10g")) {
2294 +               serdes_type = SERDES_10G;
2295 +       } else if (!strcasecmp(st, "fsl,serdes-28g")) {
2296 +               serdes_type = SERDES_28G;
2297 +       } else {
2298 +               dev_err(&phydev->mdio.dev, "Unknown serdes-type\n");
2299 +               return 0;
2300 +       }
2301 +
2302 +       if (serdes_type == SERDES_10G) {
2303 +               //On LS devices we must find the c45 device with correct PHY ID
2304 +               //Implementation similar to the one existing in phy_device: @function: phy_bus_match
2305 +               for (i = 1; i < num_ids; i++) {
2306 +                       if (!(phydev->c45_ids.devices_in_package & (1 << i)))
2307 +                               continue;
2308 +
2309 +                       if ((PCS_PHY_DEVICE_ID & PCS_PHY_DEVICE_ID_MASK) ==
2310 +                               (phydev->c45_ids.device_ids[i] & PCS_PHY_DEVICE_ID_MASK))
2311 +                       {
2312 +                               return 1;
2313 +                       }
2314 +               }
2315 +               return 0;
2316 +       }
2317 +
2318 +       //On LX devices we cannot verify PHY ID
2319 +       //so we are happy only with preliminary verifications already made: mdio.dev.of_node and is_c45
2320 +       //because we already filtered other undesired devices: non clause 45
2321 +
2322 +       return 1;
2323 +}
2324 +
2325 +static struct phy_driver fsl_backplane_driver[] = {
2326 +       {
2327 +       .phy_id         = PCS_PHY_DEVICE_ID,
2328 +       .name           = "Freescale Backplane",
2329 +       .phy_id_mask    = PCS_PHY_DEVICE_ID_MASK,
2330 +       .features       = SUPPORTED_Backplane | SUPPORTED_Autoneg |
2331 +                         SUPPORTED_MII,
2332 +       .probe          = fsl_backplane_probe,
2333 +       .aneg_done      = fsl_backplane_aneg_done,
2334 +       .config_aneg    = fsl_backplane_config_aneg,
2335 +       .read_status    = fsl_backplane_read_status,
2336 +       .suspend        = fsl_backplane_suspend,
2337 +       .resume         = fsl_backplane_resume,
2338 +       .match_phy_device = fsl_backplane_match_phy_device,
2339 +       },
2340 +};
2341 +
2342 +module_phy_driver(fsl_backplane_driver);
2343 +
2344 +static struct mdio_device_id __maybe_unused freescale_tbl[] = {
2345 +       { PCS_PHY_DEVICE_ID, PCS_PHY_DEVICE_ID_MASK },
2346 +       { }
2347 +};
2348 +
2349 +MODULE_DEVICE_TABLE(mdio, freescale_tbl);
2350 +
2351 +MODULE_DESCRIPTION("Freescale Backplane driver");
2352 +MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
2353 +MODULE_LICENSE("GPL v2");
2354 --- /dev/null
2355 +++ b/drivers/net/phy/fsl_backplane.h
2356 @@ -0,0 +1,41 @@
2357 +/* SPDX-License-Identifier: GPL-2.0+ */
2358 +/*
2359 + *  DPAA backplane driver.
2360 + *   Author: Florinel Iordache <florinel.iordache@nxp.com>
2361 + *
2362 + * Copyright 2018 NXP
2363 + *
2364 + * Licensed under the GPL-2 or later.
2365 + */
2366 +
2367 +#ifndef FSL_BACKPLANE_H
2368 +#define FSL_BACKPLANE_H
2369 +
2370 +/* C(-1) */
2371 +#define BIN_M1                                         0
2372 +/* C(1) */
2373 +#define BIN_LONG                                       1
2374 +
2375 +#define BIN_SNAPSHOT_NUM                       5
2376 +#define BIN_M1_THRESHOLD                       3
2377 +#define BIN_LONG_THRESHOLD                     2
2378 +
2379 +struct serdes_access {
2380 +
2381 +       int serdes_type;
2382 +       bool is_little_endian;
2383 +       u32 (*ioread32)(u32 *reg);
2384 +       void (*iowrite32)(u32 value, u32 *reg);
2385 +       u32 (*get_lane_memmap_size)(void);
2386 +       void (*tune_tecr)(void *reg, u32 ratio_preq, u32 ratio_pst1q, u32 adpt_eq, bool reset);
2387 +       void (*reset_lane)(void *reg);
2388 +       void (*lane_set_1gkx)(void *reg);
2389 +       int (*get_median_gaink2)(u32 *reg);
2390 +       bool (*is_bin_early)(int bin_sel, void *reg);
2391 +};
2392 +
2393 +struct serdes_access* setup_serdes_access_10g(void);
2394 +struct serdes_access* setup_serdes_access_28g(void);
2395 +
2396 +
2397 +#endif //FSL_BACKPLANE_H
2398 --- /dev/null
2399 +++ b/drivers/net/phy/fsl_backplane_serdes_10g.c
2400 @@ -0,0 +1,281 @@
2401 +// SPDX-License-Identifier: GPL-2.0+
2402 +/*
2403 + *  DPAA backplane driver for SerDes 10G.
2404 + *   Author: Florinel Iordache <florinel.iordache@nxp.com>
2405 + *
2406 + * Copyright 2018 NXP
2407 + *
2408 + * Licensed under the GPL-2 or later.
2409 + */
2410 +
2411 +#include <linux/io.h>
2412 +#include <linux/delay.h>
2413 +
2414 +#include "fsl_backplane.h"
2415 +
2416 +#define BIN_M1_SEL                                     6
2417 +#define BIN_Long_SEL                           7
2418 +#define CDR_SEL_MASK                           0x00070000
2419 +
2420 +#define PRE_COE_SHIFT                          22
2421 +#define POST_COE_SHIFT                         16
2422 +#define ZERO_COE_SHIFT                         8
2423 +
2424 +#define TECR0_INIT                                     0x24200000
2425 +
2426 +#define GCR0_RESET_MASK                                0x00600000
2427 +
2428 +#define GCR1_SNP_START_MASK                    0x00000040
2429 +#define GCR1_CTL_SNP_START_MASK                0x00002000
2430 +
2431 +#define RECR1_CTL_SNP_DONE_MASK                0x00000002
2432 +#define RECR1_SNP_DONE_MASK                    0x00000004
2433 +#define TCSR1_SNP_DATA_MASK                    0x0000ffc0
2434 +#define TCSR1_SNP_DATA_SHIFT           6
2435 +#define TCSR1_EQ_SNPBIN_SIGN_MASK      0x100
2436 +
2437 +#define RECR1_GAINK2_MASK                      0x0f000000
2438 +#define RECR1_GAINK2_SHIFT                     24
2439 +
2440 +/* Required only for 1000BASE KX */
2441 +#define GCR1_REIDL_TH_MASK                     0x00700000
2442 +#define GCR1_REIDL_EX_SEL_MASK         0x000c0000
2443 +#define GCR1_REIDL_ET_MAS_MASK         0x00004000
2444 +#define TECR0_AMP_RED_MASK                     0x0000003f
2445 +
2446 +struct per_lane_ctrl_status {
2447 +       u32 gcr0;       /* 0x.000 - General Control Register 0 */
2448 +       u32 gcr1;       /* 0x.004 - General Control Register 1 */
2449 +       u32 gcr2;       /* 0x.008 - General Control Register 2 */
2450 +       u32 resv1;      /* 0x.00C - Reserved */
2451 +       u32 recr0;      /* 0x.010 - Receive Equalization Control Register 0 */
2452 +       u32 recr1;      /* 0x.014 - Receive Equalization Control Register 1 */
2453 +       u32 tecr0;      /* 0x.018 - Transmit Equalization Control Register 0 */
2454 +       u32 resv2;      /* 0x.01C - Reserved */
2455 +       u32 tlcr0;      /* 0x.020 - TTL Control Register 0 */
2456 +       u32 tlcr1;      /* 0x.024 - TTL Control Register 1 */
2457 +       u32 tlcr2;      /* 0x.028 - TTL Control Register 2 */
2458 +       u32 tlcr3;      /* 0x.02C - TTL Control Register 3 */
2459 +       u32 tcsr0;      /* 0x.030 - Test Control/Status Register 0 */
2460 +       u32 tcsr1;      /* 0x.034 - Test Control/Status Register 1 */
2461 +       u32 tcsr2;      /* 0x.038 - Test Control/Status Register 2 */
2462 +       u32 tcsr3;      /* 0x.03C - Test Control/Status Register 3 */
2463 +};
2464 +
2465 +static struct serdes_access srds;
2466 +
2467 +static u32 get_lane_memmap_size(void)
2468 +{
2469 +       return 0x40;
2470 +}
2471 +
2472 +static void reset_lane(void *reg)
2473 +{
2474 +       struct per_lane_ctrl_status *reg_base = reg;
2475 +
2476 +       /* reset the lane */
2477 +       srds.iowrite32(srds.ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
2478 +                   &reg_base->gcr0);
2479 +       udelay(1);
2480 +       
2481 +       /* unreset the lane */
2482 +       srds.iowrite32(srds.ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
2483 +                   &reg_base->gcr0);
2484 +       udelay(1);
2485 +}
2486 +
2487 +static void tune_tecr(void *reg, u32 ratio_preq, u32 ratio_pst1q, u32 adpt_eq, bool reset)
2488 +{
2489 +       struct per_lane_ctrl_status *reg_base = reg;
2490 +       u32 val;
2491 +
2492 +       val = TECR0_INIT |
2493 +               adpt_eq << ZERO_COE_SHIFT |
2494 +               ratio_preq << PRE_COE_SHIFT |
2495 +               ratio_pst1q << POST_COE_SHIFT;
2496 +
2497 +       if (reset) {
2498 +               /* reset the lane */
2499 +               srds.iowrite32(srds.ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
2500 +                               &reg_base->gcr0);
2501 +               udelay(1);
2502 +       }
2503 +       
2504 +       srds.iowrite32(val, &reg_base->tecr0);
2505 +       udelay(1);
2506 +       
2507 +       if (reset) {
2508 +               /* unreset the lane */
2509 +               srds.iowrite32(srds.ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
2510 +                               &reg_base->gcr0);
2511 +               udelay(1);
2512 +       }
2513 +}
2514 +
2515 +static void lane_set_1gkx(void *reg)
2516 +{
2517 +       struct per_lane_ctrl_status *reg_base = reg;
2518 +       u32 val;
2519 +
2520 +       /* reset the lane */
2521 +       srds.iowrite32(srds.ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
2522 +                   &reg_base->gcr0);
2523 +       udelay(1);
2524 +
2525 +       /* set gcr1 for 1GKX */
2526 +       val = srds.ioread32(&reg_base->gcr1);
2527 +       val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
2528 +                GCR1_REIDL_ET_MAS_MASK);
2529 +       srds.iowrite32(val, &reg_base->gcr1);
2530 +       udelay(1);
2531 +
2532 +       /* set tecr0 for 1GKX */
2533 +       val = srds.ioread32(&reg_base->tecr0);
2534 +       val &= ~TECR0_AMP_RED_MASK;
2535 +       srds.iowrite32(val, &reg_base->tecr0);
2536 +       udelay(1);
2537 +
2538 +       /* unreset the lane */
2539 +       srds.iowrite32(srds.ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
2540 +                   &reg_base->gcr0);
2541 +       udelay(1);
2542 +}
2543 +
2544 +static int get_median_gaink2(u32 *reg)
2545 +{
2546 +       int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
2547 +       u32 rx_eq_snp;
2548 +       struct per_lane_ctrl_status *reg_base;
2549 +       int timeout;
2550 +       int i, j, tmp, pos;
2551 +
2552 +       reg_base = (struct per_lane_ctrl_status *)reg;
2553 +
2554 +       for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
2555 +               /* wait RECR1_CTL_SNP_DONE_MASK has cleared */
2556 +               timeout = 100;
2557 +               while (srds.ioread32(&reg_base->recr1) &
2558 +                      RECR1_CTL_SNP_DONE_MASK) {
2559 +                       udelay(1);
2560 +                       timeout--;
2561 +                       if (timeout == 0)
2562 +                               break;
2563 +               }
2564 +
2565 +               /* start snap shot */
2566 +               srds.iowrite32((srds.ioread32(&reg_base->gcr1) |
2567 +                           GCR1_CTL_SNP_START_MASK),
2568 +                           &reg_base->gcr1);
2569 +
2570 +               /* wait for SNP done */
2571 +               timeout = 100;
2572 +               while (!(srds.ioread32(&reg_base->recr1) &
2573 +                      RECR1_CTL_SNP_DONE_MASK)) {
2574 +                       udelay(1);
2575 +                       timeout--;
2576 +                       if (timeout == 0)
2577 +                               break;
2578 +               }
2579 +
2580 +               /* read and save the snap shot */
2581 +               rx_eq_snp = srds.ioread32(&reg_base->recr1);
2582 +               gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >>
2583 +                                       RECR1_GAINK2_SHIFT;
2584 +
2585 +               /* terminate the snap shot by clearing GCR1[CTL_SNP_START] */
2586 +               srds.iowrite32((srds.ioread32(&reg_base->gcr1) &
2587 +                           ~GCR1_CTL_SNP_START_MASK),
2588 +                           &reg_base->gcr1);
2589 +       }
2590 +
2591 +       /* get median of the 5 snap shot */
2592 +       for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
2593 +               tmp = gaink2_snap_shot[i];
2594 +               pos = i;
2595 +               for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
2596 +                       if (gaink2_snap_shot[j] < tmp) {
2597 +                               tmp = gaink2_snap_shot[j];
2598 +                               pos = j;
2599 +                       }
2600 +               }
2601 +
2602 +               gaink2_snap_shot[pos] = gaink2_snap_shot[i];
2603 +               gaink2_snap_shot[i] = tmp;
2604 +       }
2605 +
2606 +       return gaink2_snap_shot[2];
2607 +}
2608 +
2609 +static bool is_bin_early(int bin_sel, void *reg)
2610 +{
2611 +       bool early = false;
2612 +       int bin_snap_shot[BIN_SNAPSHOT_NUM];
2613 +       int i, negative_count = 0;
2614 +       struct per_lane_ctrl_status *reg_base = reg;
2615 +       int timeout;
2616 +
2617 +       for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
2618 +               /* wait RECR1_SNP_DONE_MASK has cleared */
2619 +               timeout = 100;
2620 +               while ((srds.ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
2621 +                       udelay(1);
2622 +                       timeout--;
2623 +                       if (timeout == 0)
2624 +                               break;
2625 +               }
2626 +
2627 +               /* set TCSR1[CDR_SEL] to BinM1/BinLong */
2628 +               if (bin_sel == BIN_M1) {
2629 +                       srds.iowrite32((srds.ioread32(&reg_base->tcsr1) &
2630 +                                   ~CDR_SEL_MASK) | BIN_M1_SEL,
2631 +                                   &reg_base->tcsr1);
2632 +               } else {
2633 +                       srds.iowrite32((srds.ioread32(&reg_base->tcsr1) &
2634 +                                   ~CDR_SEL_MASK) | BIN_Long_SEL,
2635 +                                   &reg_base->tcsr1);
2636 +               }
2637 +
2638 +               /* start snap shot */
2639 +               srds.iowrite32(srds.ioread32(&reg_base->gcr1) | GCR1_SNP_START_MASK,
2640 +                           &reg_base->gcr1);
2641 +
2642 +               /* wait for SNP done */
2643 +               timeout = 100;
2644 +               while (!(srds.ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
2645 +                       udelay(1);
2646 +                       timeout--;
2647 +                       if (timeout == 0)
2648 +                               break;
2649 +               }
2650 +
2651 +               /* read and save the snap shot */
2652 +               bin_snap_shot[i] = (srds.ioread32(&reg_base->tcsr1) &
2653 +                               TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT;
2654 +               if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK)
2655 +                       negative_count++;
2656 +
2657 +               /* terminate the snap shot by clearing GCR1[SNP_START] */
2658 +               srds.iowrite32(srds.ioread32(&reg_base->gcr1) & ~GCR1_SNP_START_MASK,
2659 +                           &reg_base->gcr1);
2660 +       }
2661 +
2662 +       if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
2663 +           ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) {
2664 +               early = true;
2665 +       }
2666 +
2667 +       return early;
2668 +}
2669 +
2670 +struct serdes_access* setup_serdes_access_10g(void)
2671 +{
2672 +       srds.get_lane_memmap_size = get_lane_memmap_size;
2673 +       srds.tune_tecr = tune_tecr;
2674 +       srds.reset_lane = reset_lane;
2675 +       srds.lane_set_1gkx = lane_set_1gkx;
2676 +       srds.get_median_gaink2 = get_median_gaink2;
2677 +       srds.is_bin_early = is_bin_early;
2678 +
2679 +       return &srds;
2680 +}
2681 +
2682 --- /dev/null
2683 +++ b/drivers/net/phy/fsl_backplane_serdes_28g.c
2684 @@ -0,0 +1,336 @@
2685 +// SPDX-License-Identifier: GPL-2.0+
2686 +/*
2687 + *  DPAA backplane driver for SerDes 28G.
2688 + *   Author: Florinel Iordache <florinel.iordache@nxp.com>
2689 + *
2690 + * Copyright 2018 NXP
2691 + *
2692 + * Licensed under the GPL-2 or later.
2693 + */
2694 +
2695 +#include <linux/io.h>
2696 +#include <linux/delay.h>
2697 +#include <linux/sched.h>
2698 +
2699 +#include "fsl_backplane.h"
2700 +
2701 +#define BIN_M1_SEL                                     0x0000c000
2702 +#define BIN_Long_SEL                           0x0000d000
2703 +#define CDR_SEL_MASK                           0x0000f000
2704 +
2705 +#define PRE_COE_SHIFT                          16
2706 +#define POST_COE_SHIFT                         8
2707 +#define ZERO_COE_SHIFT                         24
2708 +
2709 +#define TECR0_INIT                                     0x20808000
2710 +
2711 +#define RESET_REQ_MASK                         0x80000000
2712 +
2713 +#define RECR3_SNP_START_MASK           0x80000000
2714 +#define RECR3_SNP_DONE_MASK                    0x40000000
2715 +
2716 +#define RECR4_SNP_DATA_MASK                    0x000003ff
2717 +#define RECR4_SNP_DATA_SHIFT           0
2718 +#define RECR4_EQ_SNPBIN_SIGN_MASK      0x200
2719 +
2720 +#define RECR3_GAINK2_MASK                      0x1f000000
2721 +#define RECR3_GAINK2_SHIFT                     24
2722 +
2723 +/* Required only for 1000BASE KX */
2724 +#define GCR1_REIDL_TH_MASK                     0x00700000
2725 +#define GCR1_REIDL_EX_SEL_MASK         0x000c0000
2726 +#define GCR1_REIDL_ET_MAS_MASK         0x04000000
2727 +#define TECR0_AMP_RED_MASK                     0x0000003f
2728 +
2729 +struct per_lane_ctrl_status {
2730 +       u32 gcr0;       /* 0x.000 - General Control Register 0 */
2731 +       u32 resv1;      /* 0x.004 - Reserved */
2732 +       u32 resv2;      /* 0x.008 - Reserved */
2733 +       u32 resv3;      /* 0x.00C - Reserved */
2734 +       u32 resv4;      /* 0x.010 - Reserved */
2735 +       u32 resv5;      /* 0x.014 - Reserved */
2736 +       u32 resv6;      /* 0x.018 - Reserved */
2737 +       u32 resv7;      /* 0x.01C - Reserved */
2738 +       u32 trstctl;    /* 0x.020 - TX Reset Control Register */
2739 +       u32 tgcr0;      /* 0x.024 - TX General Control Register 0 */
2740 +       u32 tgcr1;      /* 0x.028 - TX General Control Register 1 */
2741 +       u32 tgcr2;      /* 0x.02C - TX General Control Register 2 */
2742 +       u32 tecr0;      /* 0x.030 - Transmit Equalization Control Register 0 */
2743 +       u32 tecr1;      /* 0x.034 - Transmit Equalization Control Register 1 */
2744 +       u32 resv8;      /* 0x.038 - Reserved */
2745 +       u32 resv9;      /* 0x.03C - Reserved */
2746 +       u32 rrstctl;    /* 0x.040 - RX Reset Control Register */
2747 +       u32 rgcr0;      /* 0x.044 - RX General Control Register 0 */
2748 +       u32 rxgcr1;     /* 0x.048 - RX General Control Register 1 */
2749 +       u32 resv10;     /* 0x.04C - Reserved */
2750 +       u32 recr0;      /* 0x.050 - RX Equalization Register 0 */
2751 +       u32 recr1;      /* 0x.054 - RX Equalization Register 1 */
2752 +       u32 recr2;      /* 0x.058 - RX Equalization Register 2 */
2753 +       u32 recr3;      /* 0x.05C - RX Equalization Register 3 */
2754 +       u32 recr4;      /* 0x.060 - RX Equalization Register 4 */
2755 +       u32 resv11;     /* 0x.064 - Reserved */
2756 +       u32 rccr0;      /* 0x.068 - RX Calibration Register 0 */
2757 +       u32 rccr1;      /* 0x.06C - RX Calibration Register 1 */
2758 +       u32 rcpcr0;     /* 0x.070 - RX Clock Path Register 0 */
2759 +       u32 rsccr0;     /* 0x.074 - RX Sampler Calibration Control Register 0 */
2760 +       u32 rsccr1;     /* 0x.078 - RX Sampler Calibration Control Register 1 */
2761 +       u32 resv12;     /* 0x.07C - Reserved */
2762 +       u32 ttlcr0;     /* 0x.080 - Transition Tracking Loop Register 0 */
2763 +       u32 ttlcr1;     /* 0x.084 - Transition Tracking Loop Register 1 */
2764 +       u32 ttlcr2;     /* 0x.088 - Transition Tracking Loop Register 2 */
2765 +       u32 ttlcr3;     /* 0x.08C - Transition Tracking Loop Register 3 */
2766 +       u32 resv13;     /* 0x.090 - Reserved */
2767 +       u32 resv14;     /* 0x.094 - Reserved */
2768 +       u32 resv15;     /* 0x.098 - Reserved */
2769 +       u32 resv16;     /* 0x.09C - Reserved */
2770 +       u32 tcsr0;      /* 0x.0A0 - Test Control/Status Register 0 */
2771 +       u32 tcsr1;      /* 0x.0A4 - Test Control/Status Register 1 */
2772 +       u32 tcsr2;      /* 0x.0A8 - Test Control/Status Register 2 */
2773 +       u32 tcsr3;      /* 0x.0AC - Test Control/Status Register 3 */
2774 +       u32 tcsr4;      /* 0x.0B0 - Test Control/Status Register 4 */
2775 +       u32 resv17;     /* 0x.0B4 - Reserved */
2776 +       u32 resv18;     /* 0x.0B8 - Reserved */
2777 +       u32 resv19;     /* 0x.0BC - Reserved */
2778 +       u32 rxcb0;      /* 0x.0C0 - RX Control Block Register 0 */
2779 +       u32 rxcb1;      /* 0x.0C4 - RX Control Block Register 1 */
2780 +       u32 resv20;     /* 0x.0C8 - Reserved */
2781 +       u32 resv21;     /* 0x.0CC - Reserved */
2782 +       u32 rxss0;      /* 0x.0D0 - RX Speed Switch Register 0 */
2783 +       u32 rxss1;      /* 0x.0D4 - RX Speed Switch Register 1 */
2784 +       u32 rxss2;      /* 0x.0D8 - RX Speed Switch Register 2 */
2785 +       u32 resv22;     /* 0x.0DC - Reserved */
2786 +       u32 txcb0;      /* 0x.0E0 - TX Control Block Register 0 */
2787 +       u32 txcb1;      /* 0x.0E4 - TX Control Block Register 1 */
2788 +       u32 resv23;     /* 0x.0E8 - Reserved */
2789 +       u32 resv24;     /* 0x.0EC - Reserved */
2790 +       u32 txss0;      /* 0x.0F0 - TX Speed Switch Register 0 */
2791 +       u32 txss1;      /* 0x.0F4 - TX Speed Switch Register 1 */
2792 +       u32 txss2;      /* 0x.0F8 - TX Speed Switch Register 2 */
2793 +       u32 resv25;     /* 0x.0FC - Reserved */
2794 +};
2795 +
2796 +static struct serdes_access srds;
2797 +
2798 +static u32 get_lane_memmap_size(void)
2799 +{
2800 +       return 0x100;
2801 +}
2802 +
2803 +static void reset_lane(void *reg)
2804 +{
2805 +       struct per_lane_ctrl_status *reg_base = reg;
2806 +       u32 val;
2807 +       unsigned long timeout;
2808 +
2809 +       /* reset Tx lane: send reset request */
2810 +       srds.iowrite32(srds.ioread32(&reg_base->trstctl) | RESET_REQ_MASK,
2811 +                   &reg_base->trstctl);
2812 +       udelay(1);
2813 +       timeout = 10;
2814 +       while (timeout--) {
2815 +               val = srds.ioread32(&reg_base->trstctl);
2816 +               if (!(val & RESET_REQ_MASK))
2817 +                       break;
2818 +               usleep_range(5, 20);
2819 +       }
2820 +       
2821 +       /* reset Rx lane: send reset request */
2822 +       srds.iowrite32(srds.ioread32(&reg_base->rrstctl) | RESET_REQ_MASK,
2823 +                   &reg_base->rrstctl);
2824 +       udelay(1);
2825 +       timeout = 10;
2826 +       while (timeout--) {
2827 +               val = srds.ioread32(&reg_base->rrstctl);
2828 +               if (!(val & RESET_REQ_MASK))
2829 +                       break;
2830 +               usleep_range(5, 20);
2831 +       }
2832 +       
2833 +       /* wait for a while after reset */
2834 +       timeout = jiffies + 10;
2835 +       while (time_before(jiffies, timeout)) {
2836 +               schedule();
2837 +               usleep_range(5, 20);
2838 +       }
2839 +}
2840 +
2841 +static void tune_tecr(void *reg, u32 ratio_preq, u32 ratio_pst1q, u32 adpt_eq, bool reset)
2842 +{
2843 +       struct per_lane_ctrl_status *reg_base = reg;
2844 +       u32 val;
2845 +
2846 +       if (reset) {
2847 +               /* reset lanes */
2848 +               reset_lane(reg);
2849 +       }
2850 +       
2851 +       val = TECR0_INIT |
2852 +               ratio_preq << PRE_COE_SHIFT |
2853 +               ratio_pst1q << POST_COE_SHIFT;
2854 +       srds.iowrite32(val, &reg_base->tecr0);
2855 +
2856 +       val = adpt_eq << ZERO_COE_SHIFT;
2857 +       srds.iowrite32(val, &reg_base->tecr1);
2858 +       
2859 +       udelay(1);
2860 +}
2861 +
2862 +static void lane_set_1gkx(void *reg)
2863 +{
2864 +       struct per_lane_ctrl_status *reg_base = reg;
2865 +       u32 val;
2866 +
2867 +       /* reset lanes */
2868 +       reset_lane(reg);
2869 +
2870 +       /* set gcr1 for 1GKX */
2871 +       val = srds.ioread32(&reg_base->rxgcr1);
2872 +       val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
2873 +                GCR1_REIDL_ET_MAS_MASK);
2874 +       srds.iowrite32(val, &reg_base->rxgcr1);
2875 +       udelay(1);
2876 +
2877 +       /* set tecr0 for 1GKX */
2878 +       val = srds.ioread32(&reg_base->tecr0);
2879 +       val &= ~TECR0_AMP_RED_MASK;
2880 +       srds.iowrite32(val, &reg_base->tecr0);
2881 +       udelay(1);
2882 +}
2883 +
2884 +static int get_median_gaink2(u32 *reg)
2885 +{
2886 +       int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
2887 +       u32 rx_eq_snp;
2888 +       struct per_lane_ctrl_status *reg_base;
2889 +       int timeout;
2890 +       int i, j, tmp, pos;
2891 +
2892 +       reg_base = (struct per_lane_ctrl_status *)reg;
2893 +
2894 +       for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
2895 +               /* wait RECR3_SNP_DONE_MASK has cleared */
2896 +               timeout = 100;
2897 +               while (srds.ioread32(&reg_base->recr3) &
2898 +                               RECR3_SNP_DONE_MASK) {
2899 +                       udelay(1);
2900 +                       timeout--;
2901 +                       if (timeout == 0)
2902 +                               break;
2903 +               }
2904 +
2905 +               /* start snap shot */
2906 +               srds.iowrite32((srds.ioread32(&reg_base->recr3) |
2907 +                           RECR3_SNP_START_MASK),
2908 +                           &reg_base->recr3);
2909 +
2910 +               /* wait for SNP done */
2911 +               timeout = 100;
2912 +               while (!(srds.ioread32(&reg_base->recr3) &
2913 +                               RECR3_SNP_DONE_MASK)) {
2914 +                       udelay(1);
2915 +                       timeout--;
2916 +                       if (timeout == 0)
2917 +                               break;
2918 +               }
2919 +
2920 +               /* read and save the snap shot */
2921 +               rx_eq_snp = srds.ioread32(&reg_base->recr3);
2922 +               gaink2_snap_shot[i] = (rx_eq_snp & RECR3_GAINK2_MASK) >>
2923 +                                       RECR3_GAINK2_SHIFT;
2924 +
2925 +               /* terminate the snap shot by clearing RECR3[SNP_START] */
2926 +               srds.iowrite32((srds.ioread32(&reg_base->recr3) &
2927 +                           ~RECR3_SNP_START_MASK),
2928 +                           &reg_base->recr3);
2929 +       }
2930 +
2931 +       /* get median of the 5 snap shot */
2932 +       for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
2933 +               tmp = gaink2_snap_shot[i];
2934 +               pos = i;
2935 +               for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
2936 +                       if (gaink2_snap_shot[j] < tmp) {
2937 +                               tmp = gaink2_snap_shot[j];
2938 +                               pos = j;
2939 +                       }
2940 +               }
2941 +
2942 +               gaink2_snap_shot[pos] = gaink2_snap_shot[i];
2943 +               gaink2_snap_shot[i] = tmp;
2944 +       }
2945 +
2946 +       return gaink2_snap_shot[2];
2947 +}
2948 +
2949 +static bool is_bin_early(int bin_sel, void *reg)
2950 +{
2951 +       bool early = false;
2952 +       int bin_snap_shot[BIN_SNAPSHOT_NUM];
2953 +       int i, negative_count = 0;
2954 +       struct per_lane_ctrl_status *reg_base = reg;
2955 +       int timeout;
2956 +
2957 +       for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
2958 +               /* wait RECR3_SNP_DONE_MASK has cleared */
2959 +               timeout = 100;
2960 +               while ((srds.ioread32(&reg_base->recr3) & RECR3_SNP_DONE_MASK)) {
2961 +                       udelay(1);
2962 +                       timeout--;
2963 +                       if (timeout == 0)
2964 +                               break;
2965 +               }
2966 +
2967 +               /* set RECR4[CDR_SEL] to BinM1/BinLong */
2968 +               if (bin_sel == BIN_M1) {
2969 +                       srds.iowrite32((srds.ioread32(&reg_base->recr4) &
2970 +                                   ~CDR_SEL_MASK) | BIN_M1_SEL,
2971 +                                   &reg_base->recr4);
2972 +               } else {
2973 +                       srds.iowrite32((srds.ioread32(&reg_base->recr4) &
2974 +                                   ~CDR_SEL_MASK) | BIN_Long_SEL,
2975 +                                   &reg_base->recr4);
2976 +               }
2977 +
2978 +               /* start snap shot */
2979 +               srds.iowrite32(srds.ioread32(&reg_base->recr3) | RECR3_SNP_START_MASK,
2980 +                           &reg_base->recr3);
2981 +
2982 +               /* wait for SNP done */
2983 +               timeout = 100;
2984 +               while (!(srds.ioread32(&reg_base->recr3) & RECR3_SNP_DONE_MASK)) {
2985 +                       udelay(1);
2986 +                       timeout--;
2987 +                       if (timeout == 0)
2988 +                               break;
2989 +               }
2990 +
2991 +               /* read and save the snap shot */
2992 +               bin_snap_shot[i] = (srds.ioread32(&reg_base->recr4) &
2993 +                               RECR4_SNP_DATA_MASK) >> RECR4_SNP_DATA_SHIFT;
2994 +               if (bin_snap_shot[i] & RECR4_EQ_SNPBIN_SIGN_MASK)
2995 +                       negative_count++;
2996 +
2997 +               /* terminate the snap shot by clearing RECR3[SNP_START] */
2998 +               srds.iowrite32(srds.ioread32(&reg_base->recr3) & ~RECR3_SNP_START_MASK,
2999 +                           &reg_base->recr3);
3000 +       }
3001 +
3002 +       if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
3003 +           ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) {
3004 +               early = true;
3005 +       }
3006 +
3007 +       return early;
3008 +}
3009 +
3010 +struct serdes_access* setup_serdes_access_28g(void)
3011 +{
3012 +       srds.get_lane_memmap_size = get_lane_memmap_size;
3013 +       srds.tune_tecr = tune_tecr;
3014 +       srds.reset_lane = reset_lane;
3015 +       srds.lane_set_1gkx = lane_set_1gkx;
3016 +       srds.get_median_gaink2 = get_median_gaink2;
3017 +       srds.is_bin_early = is_bin_early;
3018 +
3019 +       return &srds;
3020 +}
3021 --- /dev/null
3022 +++ b/drivers/net/phy/inphi.c
3023 @@ -0,0 +1,594 @@
3024 +/*
3025 + * Copyright 2018 NXP
3026 + * Copyright 2018 INPHI
3027 + *
3028 + * Redistribution and use in source and binary forms, with or without
3029 + * modification, are permitted provided that the following conditions are met:
3030 + *
3031 + * 1. Redistributions of source code must retain the above copyright notice,
3032 + * this list of conditions and the following disclaimer.
3033 + * 2. Redistributions in binary form must reproduce the above copyright notice,
3034 + * this list of conditions and the following disclaimer in the documentation
3035 + * and/or other materials provided with the distribution.
3036 + *
3037 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
3038 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
3039 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
3040 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
3041 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
3042 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
3043 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
3044 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
3045 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
3046 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
3047 + * POSSIBILITY OF SUCH DAMAGE.
3048 + *
3049 + * Inphi is a registered trademark of Inphi Corporation
3050 + *
3051 + */
3052 +
3053 +#include <linux/module.h>
3054 +#include <linux/phy.h>
3055 +#include <linux/mdio.h>
3056 +#include <linux/interrupt.h>
3057 +#include <linux/platform_device.h>
3058 +#include <linux/of_irq.h>
3059 +#include <linux/workqueue.h>
3060 +#include <linux/i2c.h>
3061 +#include <linux/timer.h>
3062 +#include <linux/delay.h>
3063 +#include <linux/kernel.h>
3064 +#include <linux/init.h>
3065 +#include <linux/fs.h>
3066 +#include <linux/cdev.h>
3067 +#include <linux/device.h>
3068 +#include <linux/slab.h>
3069 +#include <linux/uaccess.h>
3070 +
3071 +#define PHY_ID_IN112525  0x02107440
3072 +
3073 +#define INPHI_S03_DEVICE_ID_MSB 0x2
3074 +#define INPHI_S03_DEVICE_ID_LSB 0x3
3075 +
3076 +#define ALL_LANES              4
3077 +#define INPHI_POLL_DELAY       2500
3078 +
3079 +#define PHYCTRL_REG1   0x0012
3080 +#define PHYCTRL_REG2   0x0014
3081 +#define PHYCTRL_REG3   0x0120
3082 +#define PHYCTRL_REG4   0x0121
3083 +#define PHYCTRL_REG5   0x0180
3084 +#define PHYCTRL_REG6   0x0580
3085 +#define PHYCTRL_REG7   0x05C4
3086 +#define PHYCTRL_REG8   0x01C8
3087 +#define PHYCTRL_REG9   0x0521
3088 +
3089 +#define PHYSTAT_REG1   0x0021
3090 +#define PHYSTAT_REG2   0x0022
3091 +#define PHYSTAT_REG3   0x0123
3092 +
3093 +#define PHYMISC_REG1   0x0025
3094 +#define PHYMISC_REG2   0x002c
3095 +#define PHYMISC_REG3   0x00b3
3096 +#define PHYMISC_REG4   0x0181
3097 +#define PHYMISC_REG5   0x019D
3098 +#define PHYMISC_REG6   0x0198
3099 +#define PHYMISC_REG7   0x0199
3100 +#define PHYMISC_REG8   0x0581
3101 +#define PHYMISC_REG9   0x0598
3102 +#define PHYMISC_REG10  0x059c
3103 +#define PHYMISC_REG20  0x01B0
3104 +#define PHYMISC_REG21  0x01BC
3105 +#define PHYMISC_REG22  0x01C0
3106 +
3107 +#define RX_VCO_CODE_OFFSET     5
3108 +#define VCO_CODE               390
3109 +
3110 +int vco_codes[ALL_LANES] = {
3111 +       VCO_CODE,
3112 +       VCO_CODE,
3113 +       VCO_CODE,
3114 +       VCO_CODE
3115 +};
3116 +
3117 +static void mykmod_work_handler(struct work_struct *w);
3118 +
3119 +static struct workqueue_struct *wq;
3120 +static DECLARE_DELAYED_WORK(mykmod_work, mykmod_work_handler);
3121 +static unsigned long onesec;
3122 +struct phy_device *inphi_phydev;
3123 +
3124 +static int mdio_wr(u32 regnum, u16 val)
3125 +{
3126 +       regnum = MII_ADDR_C45 | (MDIO_MMD_VEND1 << 16) | (regnum & 0xffff);
3127 +
3128 +       return mdiobus_write(inphi_phydev->mdio.bus, inphi_phydev->mdio.addr,
3129 +                               regnum, val);
3130 +}
3131 +
3132 +static int mdio_rd(u32 regnum)
3133 +{
3134 +       regnum = MII_ADDR_C45 | (MDIO_MMD_VEND1 << 16) | (regnum & 0xffff);
3135 +
3136 +       return mdiobus_read(inphi_phydev->mdio.bus, inphi_phydev->mdio.addr,
3137 +                               regnum);
3138 +}
3139 +
3140 +
3141 +int bit_test(int value, int bit_field)
3142 +{
3143 +       int result;
3144 +       int bit_mask = (1 << bit_field);
3145 +
3146 +       result = ((value & bit_mask) == bit_mask);
3147 +       return result;
3148 +}
3149 +
3150 +int tx_pll_lock_test(int lane)
3151 +{
3152 +       int i, val, locked = 1;
3153 +
3154 +       if (lane == ALL_LANES) {
3155 +               for (i = 0; i < ALL_LANES; i++) {
3156 +                       val = mdio_rd(i * 0x100 + PHYSTAT_REG3);
3157 +                       locked = locked & bit_test(val, 15);
3158 +               }
3159 +       } else {
3160 +               val = mdio_rd(lane * 0x100 + PHYSTAT_REG3);
3161 +               locked = locked & bit_test(val, 15);
3162 +       }
3163 +
3164 +       return locked;
3165 +}
3166 +
3167 +void rx_reset_assert(int lane)
3168 +{
3169 +       int mask, val;
3170 +
3171 +       if (lane == ALL_LANES) {
3172 +               val = mdio_rd(PHYMISC_REG2);
3173 +               mask = (1 << 15);
3174 +               mdio_wr(PHYMISC_REG2, val + mask);
3175 +       } else {
3176 +               val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
3177 +               mask = (1 << 6);
3178 +               mdio_wr(lane * 0x100 + PHYCTRL_REG8, val + mask);
3179 +       }
3180 +}
3181 +
3182 +void rx_reset_de_assert(int lane)
3183 +{
3184 +       int mask, val;
3185 +
3186 +       if (lane == ALL_LANES) {
3187 +               val = mdio_rd(PHYMISC_REG2);
3188 +               mask = 0xffff - (1 << 15);
3189 +               mdio_wr(PHYMISC_REG2, val & mask);
3190 +       } else {
3191 +               val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
3192 +               mask = 0xffff - (1 << 6);
3193 +               mdio_wr(lane * 0x100 + PHYCTRL_REG8, val & mask);
3194 +       }
3195 +}
3196 +
3197 +void rx_powerdown_assert(int lane)
3198 +{
3199 +       int mask, val;
3200 +
3201 +       val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
3202 +       mask = (1 << 5);
3203 +       mdio_wr(lane * 0x100 + PHYCTRL_REG8, val + mask);
3204 +}
3205 +
3206 +void rx_powerdown_de_assert(int lane)
3207 +{
3208 +       int mask, val;
3209 +
3210 +       val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
3211 +       mask = 0xffff - (1 << 5);
3212 +       mdio_wr(lane * 0x100 + PHYCTRL_REG8, val & mask);
3213 +}
3214 +
3215 +void tx_pll_assert(int lane)
3216 +{
3217 +       int val, recal;
3218 +
3219 +       if (lane == ALL_LANES) {
3220 +               val = mdio_rd(PHYMISC_REG2);
3221 +               recal = (1 << 12);
3222 +               mdio_wr(PHYMISC_REG2, val | recal);
3223 +       } else {
3224 +               val = mdio_rd(lane * 0x100 + PHYCTRL_REG4);
3225 +               recal = (1 << 15);
3226 +               mdio_wr(lane * 0x100 + PHYCTRL_REG4, val | recal);
3227 +       }
3228 +}
3229 +
3230 +void tx_pll_de_assert(int lane)
3231 +{
3232 +       int recal, val;
3233 +
3234 +       if (lane == ALL_LANES) {
3235 +               val = mdio_rd(PHYMISC_REG2);
3236 +               recal = 0xefff;
3237 +               mdio_wr(PHYMISC_REG2, val & recal);
3238 +       } else {
3239 +               val = mdio_rd(lane * 0x100 + PHYCTRL_REG4);
3240 +               recal = 0x7fff;
3241 +               mdio_wr(lane * 0x100 + PHYCTRL_REG4, val & recal);
3242 +       }
3243 +}
3244 +
3245 +void tx_core_assert(int lane)
3246 +{
3247 +       int recal, val, val2, core_reset;
3248 +
3249 +       if (lane == 4) {
3250 +               val = mdio_rd(PHYMISC_REG2);
3251 +               recal = 1 << 10;
3252 +               mdio_wr(PHYMISC_REG2, val | recal);
3253 +       } else {
3254 +               val2 = mdio_rd(PHYMISC_REG3);
3255 +               core_reset = (1 << (lane + 8));
3256 +               mdio_wr(PHYMISC_REG3, val2 | core_reset);
3257 +       }
3258 +}
3259 +
3260 +void lol_disable(int lane)
3261 +{
3262 +       int val, mask;
3263 +
3264 +       val = mdio_rd(PHYMISC_REG3);
3265 +       mask = 1 << (lane + 4);
3266 +       mdio_wr(PHYMISC_REG3, val | mask);
3267 +}
3268 +
3269 +void tx_core_de_assert(int lane)
3270 +{
3271 +       int val, recal, val2, core_reset;
3272 +
3273 +       if (lane == ALL_LANES) {
3274 +               val = mdio_rd(PHYMISC_REG2);
3275 +               recal = 0xffff - (1 << 10);
3276 +               mdio_wr(PHYMISC_REG2, val & recal);
3277 +       } else {
3278 +               val2 = mdio_rd(PHYMISC_REG3);
3279 +               core_reset = 0xffff - (1 << (lane + 8));
3280 +               mdio_wr(PHYMISC_REG3, val2 & core_reset);
3281 +       }
3282 +}
3283 +
3284 +void tx_restart(int lane)
3285 +{
3286 +       tx_core_assert(lane);
3287 +       tx_pll_assert(lane);
3288 +       tx_pll_de_assert(lane);
3289 +       usleep_range(1500, 1600);
3290 +       tx_core_de_assert(lane);
3291 +}
3292 +
3293 +void disable_lane(int lane)
3294 +{
3295 +       rx_reset_assert(lane);
3296 +       rx_powerdown_assert(lane);
3297 +       tx_core_assert(lane);
3298 +       lol_disable(lane);
3299 +}
3300 +
3301 +void toggle_reset(int lane)
3302 +{
3303 +       int reg, val, orig;
3304 +
3305 +       if (lane == ALL_LANES) {
3306 +               mdio_wr(PHYMISC_REG2, 0x8000);
3307 +               udelay(100);
3308 +               mdio_wr(PHYMISC_REG2, 0x0000);
3309 +       } else {
3310 +               reg = lane * 0x100 + PHYCTRL_REG8;
3311 +               val = (1 << 6);
3312 +               orig = mdio_rd(reg);
3313 +               mdio_wr(reg, orig + val);
3314 +               udelay(100);
3315 +               mdio_wr(reg, orig);
3316 +       }
3317 +}
3318 +
3319 +int az_complete_test(int lane)
3320 +{
3321 +       int success = 1, value;
3322 +
3323 +       if (lane == 0 || lane == ALL_LANES) {
3324 +               value = mdio_rd(PHYCTRL_REG5);
3325 +               success = success & bit_test(value, 2);
3326 +       }
3327 +       if (lane == 1 || lane == ALL_LANES) {
3328 +               value = mdio_rd(PHYCTRL_REG5 + 0x100);
3329 +               success = success & bit_test(value, 2);
3330 +       }
3331 +       if (lane == 2 || lane == ALL_LANES) {
3332 +               value = mdio_rd(PHYCTRL_REG5 + 0x200);
3333 +               success = success & bit_test(value, 2);
3334 +       }
3335 +       if (lane == 3 || lane == ALL_LANES) {
3336 +               value = mdio_rd(PHYCTRL_REG5 + 0x300);
3337 +               success = success & bit_test(value, 2);
3338 +       }
3339 +
3340 +       return success;
3341 +}
3342 +
3343 +void save_az_offsets(int lane)
3344 +{
3345 +       int i;
3346 +
3347 +#define AZ_OFFSET_LANE_UPDATE(reg, lane) \
3348 +       mdio_wr((reg) + (lane) * 0x100,  \
3349 +               (mdio_rd((reg) + (lane) * 0x100) >> 8))
3350 +
3351 +       if (lane == ALL_LANES) {
3352 +               for (i = 0; i < ALL_LANES; i++) {
3353 +                       AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20, i);
3354 +                       AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 1, i);
3355 +                       AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 2, i);
3356 +                       AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 3, i);
3357 +                       AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21, i);
3358 +                       AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 1, i);
3359 +                       AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 2, i);
3360 +                       AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 3, i);
3361 +                       AZ_OFFSET_LANE_UPDATE(PHYMISC_REG22, i);
3362 +               }
3363 +       } else {
3364 +               AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20, lane);
3365 +               AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 1, lane);
3366 +               AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 2, lane);
3367 +               AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 3, lane);
3368 +               AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21, lane);
3369 +               AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 1, lane);
3370 +               AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 2, lane);
3371 +               AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 3, lane);
3372 +               AZ_OFFSET_LANE_UPDATE(PHYMISC_REG22, lane);
3373 +       }
3374 +
3375 +       mdio_wr(PHYCTRL_REG7, 0x0001);
3376 +}
3377 +
3378 +void save_vco_codes(int lane)
3379 +{
3380 +       int i;
3381 +
3382 +       if (lane == ALL_LANES) {
3383 +               for (i = 0; i < ALL_LANES; i++) {
3384 +                       vco_codes[i] = mdio_rd(PHYMISC_REG5 + i * 0x100);
3385 +                       mdio_wr(PHYMISC_REG5 + i * 0x100,
3386 +                               vco_codes[i] + RX_VCO_CODE_OFFSET);
3387 +               }
3388 +       } else {
3389 +               vco_codes[lane] = mdio_rd(PHYMISC_REG5 + lane * 0x100);
3390 +               mdio_wr(PHYMISC_REG5 + lane * 0x100,
3391 +                       vco_codes[lane] + RX_VCO_CODE_OFFSET);
3392 +       }
3393 +}
3394 +
3395 +int inphi_lane_recovery(int lane)
3396 +{
3397 +       int i, value, az_pass;
3398 +
3399 +       switch (lane) {
3400 +       case 0:
3401 +       case 1:
3402 +       case 2:
3403 +       case 3:
3404 +               rx_reset_assert(lane);
3405 +               mdelay(20);
3406 +               break;
3407 +       case ALL_LANES:
3408 +               mdio_wr(PHYMISC_REG2, 0x9C00);
3409 +               mdelay(20);
3410 +               do {
3411 +                       value = mdio_rd(PHYMISC_REG2);
3412 +                       udelay(10);
3413 +               } while (!bit_test(value, 4));
3414 +               break;
3415 +       default:
3416 +               dev_err(&inphi_phydev->mdio.dev,
3417 +                       "Incorrect usage of APIs in %s driver\n",
3418 +                       inphi_phydev->drv->name);
3419 +               break;
3420 +       }
3421 +
3422 +       if (lane == ALL_LANES) {
3423 +               for (i = 0; i < ALL_LANES; i++)
3424 +                       mdio_wr(PHYMISC_REG7 + i * 0x100, VCO_CODE);
3425 +       } else {
3426 +               mdio_wr(PHYMISC_REG7 + lane * 0x100, VCO_CODE);
3427 +       }
3428 +
3429 +       if (lane == ALL_LANES)
3430 +               for (i = 0; i < ALL_LANES; i++)
3431 +                       mdio_wr(PHYCTRL_REG5 + i * 0x100, 0x0418);
3432 +       else
3433 +               mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0418);
3434 +
3435 +       mdio_wr(PHYCTRL_REG7,   0x0000);
3436 +
3437 +       rx_reset_de_assert(lane);
3438 +
3439 +       if (lane == ALL_LANES) {
3440 +               for (i = 0; i < ALL_LANES; i++) {
3441 +                       mdio_wr(PHYCTRL_REG5 + i * 0x100, 0x0410);
3442 +                       mdio_wr(PHYCTRL_REG5 + i * 0x100, 0x0412);
3443 +               }
3444 +       } else {
3445 +               mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0410);
3446 +               mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0412);
3447 +       }
3448 +
3449 +       for (i = 0; i < 64; i++) {
3450 +               mdelay(100);
3451 +               az_pass = az_complete_test(lane);
3452 +               if (az_pass) {
3453 +                       save_az_offsets(lane);
3454 +                       break;
3455 +               }
3456 +       }
3457 +
3458 +       if (!az_pass) {
3459 +               pr_info("in112525: AZ calibration fail @ lane=%d\n", lane);
3460 +               return -1;
3461 +       }
3462 +
3463 +       if (lane == ALL_LANES) {
3464 +               mdio_wr(PHYMISC_REG8, 0x0002);
3465 +               mdio_wr(PHYMISC_REG9, 0x2028);
3466 +               mdio_wr(PHYCTRL_REG6, 0x0010);
3467 +               usleep_range(1000, 1200);
3468 +               mdio_wr(PHYCTRL_REG6, 0x0110);
3469 +               mdelay(30);
3470 +               mdio_wr(PHYMISC_REG9, 0x3020);
3471 +       } else {
3472 +               mdio_wr(PHYMISC_REG4 + lane * 0x100, 0x0002);
3473 +               mdio_wr(PHYMISC_REG6 + lane * 0x100, 0x2028);
3474 +               mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0010);
3475 +               usleep_range(1000, 1200);
3476 +               mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0110);
3477 +               mdelay(30);
3478 +               mdio_wr(PHYMISC_REG6 + lane * 0x100, 0x3020);
3479 +       }
3480 +
3481 +       if (lane == ALL_LANES) {
3482 +               mdio_wr(PHYMISC_REG2, 0x1C00);
3483 +               mdio_wr(PHYMISC_REG2, 0x0C00);
3484 +       } else {
3485 +               tx_restart(lane);
3486 +               mdelay(11);
3487 +       }
3488 +
3489 +       if (lane == ALL_LANES) {
3490 +               if (bit_test(mdio_rd(PHYMISC_REG2), 6) == 0)
3491 +                       return -1;
3492 +       } else {
3493 +               if (tx_pll_lock_test(lane) == 0)
3494 +                       return -1;
3495 +       }
3496 +
3497 +       save_vco_codes(lane);
3498 +
3499 +       if (lane == ALL_LANES) {
3500 +               mdio_wr(PHYMISC_REG2, 0x0400);
3501 +               mdio_wr(PHYMISC_REG2, 0x0000);
3502 +               value = mdio_rd(PHYCTRL_REG1);
3503 +               value = value & 0xffbf;
3504 +               mdio_wr(PHYCTRL_REG2, value);
3505 +       } else {
3506 +               tx_core_de_assert(lane);
3507 +       }
3508 +
3509 +       if (lane == ALL_LANES) {
3510 +               mdio_wr(PHYMISC_REG1, 0x8000);
3511 +               mdio_wr(PHYMISC_REG1, 0x0000);
3512 +       }
3513 +       mdio_rd(PHYMISC_REG1);
3514 +       mdio_rd(PHYMISC_REG1);
3515 +       usleep_range(1000, 1200);
3516 +       mdio_rd(PHYSTAT_REG1);
3517 +       mdio_rd(PHYSTAT_REG2);
3518 +
3519 +       return 0;
3520 +}
3521 +
3522 +static void mykmod_work_handler(struct work_struct *w)
3523 +{
3524 +       int all_lanes_lock, lane0_lock, lane1_lock, lane2_lock, lane3_lock;
3525 +
3526 +       lane0_lock = bit_test(mdio_rd(0x123), 15);
3527 +       lane1_lock = bit_test(mdio_rd(0x223), 15);
3528 +       lane2_lock = bit_test(mdio_rd(0x323), 15);
3529 +       lane3_lock = bit_test(mdio_rd(0x423), 15);
3530 +
3531 +       /* check if the chip had any successful lane lock from the previous
3532 +        * stage (e.g. u-boot)
3533 +        */
3534 +       all_lanes_lock = lane0_lock | lane1_lock | lane2_lock | lane3_lock;
3535 +
3536 +       if (!all_lanes_lock) {
3537 +               /* start fresh */
3538 +               inphi_lane_recovery(ALL_LANES);
3539 +       } else {
3540 +               if (!lane0_lock)
3541 +                       inphi_lane_recovery(0);
3542 +               if (!lane1_lock)
3543 +                       inphi_lane_recovery(1);
3544 +               if (!lane2_lock)
3545 +                       inphi_lane_recovery(2);
3546 +               if (!lane3_lock)
3547 +                       inphi_lane_recovery(3);
3548 +       }
3549 +
3550 +       queue_delayed_work(wq, &mykmod_work, onesec);
3551 +}
3552 +
3553 +int inphi_probe(struct phy_device *phydev)
3554 +{
3555 +       int phy_id = 0, id_lsb = 0, id_msb = 0;
3556 +
3557 +       /* setup the inphi_phydev ptr for mdio_rd/mdio_wr APIs */
3558 +       inphi_phydev = phydev;
3559 +
3560 +       /* Read device id from phy registers */
3561 +       id_lsb = mdio_rd(INPHI_S03_DEVICE_ID_MSB);
3562 +       if (id_lsb < 0)
3563 +               return -ENXIO;
3564 +
3565 +       phy_id = id_lsb << 16;
3566 +
3567 +       id_msb = mdio_rd(INPHI_S03_DEVICE_ID_LSB);
3568 +       if (id_msb < 0)
3569 +               return -ENXIO;
3570 +
3571 +       phy_id |= id_msb;
3572 +
3573 +       /* Make sure the device tree binding matched the driver with the
3574 +        * right device.
3575 +        */
3576 +       if (phy_id != phydev->drv->phy_id) {
3577 +               dev_err(&phydev->mdio.dev,
3578 +                       "Error matching phy with %s driver\n",
3579 +                       phydev->drv->name);
3580 +               return -ENODEV;
3581 +       }
3582 +
3583 +       /* update the local phydev pointer, used inside all APIs */
3584 +       inphi_phydev = phydev;
3585 +       onesec = msecs_to_jiffies(INPHI_POLL_DELAY);
3586 +
3587 +       wq = create_singlethread_workqueue("inphi_kmod");
3588 +       if (wq) {
3589 +               queue_delayed_work(wq, &mykmod_work, onesec);
3590 +       } else {
3591 +               dev_err(&phydev->mdio.dev,
3592 +                       "Error creating kernel workqueue for %s driver\n",
3593 +                       phydev->drv->name);
3594 +               return -ENOMEM;
3595 +       }
3596 +
3597 +       return 0;
3598 +}
3599 +
3600 +static struct phy_driver inphi_driver[] = {
3601 +{
3602 +       .phy_id         = PHY_ID_IN112525,
3603 +       .phy_id_mask    = 0x0ff0fff0,
3604 +       .name           = "Inphi 112525_S03",
3605 +       .features       = PHY_GBIT_FEATURES,
3606 +       .probe          = &inphi_probe,
3607 +},
3608 +};
3609 +
3610 +module_phy_driver(inphi_driver);
3611 +
3612 +static struct mdio_device_id __maybe_unused inphi_tbl[] = {
3613 +       { PHY_ID_IN112525, 0x0ff0fff0},
3614 +       {},
3615 +};
3616 +
3617 +MODULE_DEVICE_TABLE(mdio, inphi_tbl);
3618 --- /dev/null
3619 +++ b/drivers/net/phy/mdio-mux-multiplexer.c
3620 @@ -0,0 +1,122 @@
3621 +// SPDX-License-Identifier: GPL-2.0+
3622 +/* MDIO bus multiplexer using kernel multiplexer subsystem
3623 + *
3624 + * Copyright 2019 NXP
3625 + */
3626 +
3627 +#include <linux/platform_device.h>
3628 +#include <linux/mdio-mux.h>
3629 +#include <linux/module.h>
3630 +#include <linux/mux/consumer.h>
3631 +
3632 +struct mdio_mux_multiplexer_state {
3633 +       struct mux_control *muxc;
3634 +       bool do_deselect;
3635 +       void *mux_handle;
3636 +};
3637 +
3638 +/**
3639 + * mdio_mux_multiplexer_switch_fn - This function is called by the mdio-mux
3640 + *                                  layer when it thinks the mdio bus
3641 + *                                  multiplexer needs to switch.
3642 + * @current_child:  current value of the mux register.
3643 + * @desired_child: value of the 'reg' property of the target child MDIO node.
3644 + * @data: Private data used by this switch_fn passed to mdio_mux_init function
3645 + *        via mdio_mux_init(.., .., .., .., data, ..).
3646 + *
3647 + * The first time this function is called, current_child == -1.
3648 + * If current_child == desired_child, then the mux is already set to the
3649 + * correct bus.
3650 + */
3651 +static int mdio_mux_multiplexer_switch_fn(int current_child, int desired_child,
3652 +                                         void *data)
3653 +{
3654 +       struct platform_device *pdev;
3655 +       struct mdio_mux_multiplexer_state *s;
3656 +       int ret = 0;
3657 +
3658 +       pdev = (struct platform_device *)data;
3659 +       s = platform_get_drvdata(pdev);
3660 +
3661 +       if (!(current_child ^ desired_child))
3662 +               return 0;
3663 +
3664 +       if (s->do_deselect)
3665 +               ret = mux_control_deselect(s->muxc);
3666 +       if (ret) {
3667 +               dev_err(&pdev->dev, "mux_control_deselect failed in %s: %d\n",
3668 +                       __func__, ret);
3669 +               return ret;
3670 +       }
3671 +
3672 +       ret =  mux_control_select(s->muxc, desired_child);
3673 +       if (!ret) {
3674 +               dev_dbg(&pdev->dev, "%s %d -> %d\n", __func__, current_child,
3675 +                       desired_child);
3676 +               s->do_deselect = true;
3677 +       } else {
3678 +               s->do_deselect = false;
3679 +       }
3680 +
3681 +       return ret;
3682 +}
3683 +
3684 +static int mdio_mux_multiplexer_probe(struct platform_device *pdev)
3685 +{
3686 +       struct device *dev = &pdev->dev;
3687 +       struct mdio_mux_multiplexer_state *s;
3688 +       int ret = 0;
3689 +
3690 +       s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
3691 +       if (!s)
3692 +               return -ENOMEM;
3693 +
3694 +       s->muxc = devm_mux_control_get(dev, NULL);
3695 +       if (IS_ERR(s->muxc)) {
3696 +               ret = PTR_ERR(s->muxc);
3697 +               if (ret != -EPROBE_DEFER)
3698 +                       dev_err(&pdev->dev, "Failed to get mux: %d\n", ret);
3699 +               return ret;
3700 +       }
3701 +
3702 +       platform_set_drvdata(pdev, s);
3703 +
3704 +       ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
3705 +                           mdio_mux_multiplexer_switch_fn, &s->mux_handle,
3706 +                           pdev, NULL);
3707 +
3708 +       return ret;
3709 +}
3710 +
3711 +static int mdio_mux_multiplexer_remove(struct platform_device *pdev)
3712 +{
3713 +       struct mdio_mux_multiplexer_state *s = platform_get_drvdata(pdev);
3714 +
3715 +       mdio_mux_uninit(s->mux_handle);
3716 +
3717 +       if (s->do_deselect)
3718 +               mux_control_deselect(s->muxc);
3719 +
3720 +       return 0;
3721 +}
3722 +
3723 +static const struct of_device_id mdio_mux_multiplexer_match[] = {
3724 +       { .compatible = "mdio-mux-multiplexer", },
3725 +       {},
3726 +};
3727 +MODULE_DEVICE_TABLE(of, mdio_mux_multiplexer_match);
3728 +
3729 +static struct platform_driver mdio_mux_multiplexer_driver = {
3730 +       .driver = {
3731 +               .name           = "mdio-mux-multiplexer",
3732 +               .of_match_table = mdio_mux_multiplexer_match,
3733 +       },
3734 +       .probe          = mdio_mux_multiplexer_probe,
3735 +       .remove         = mdio_mux_multiplexer_remove,
3736 +};
3737 +
3738 +module_platform_driver(mdio_mux_multiplexer_driver);
3739 +
3740 +MODULE_DESCRIPTION("MDIO bus multiplexer using kernel multiplexer subsystem");
3741 +MODULE_AUTHOR("Pankaj Bansal <pankaj.bansal@nxp.com>");
3742 +MODULE_LICENSE("GPL");
3743 --- a/drivers/net/phy/swphy.c
3744 +++ b/drivers/net/phy/swphy.c
3745 @@ -77,6 +77,7 @@ static const struct swmii_regs duplex[]
3746  static int swphy_decode_speed(int speed)
3747  {
3748         switch (speed) {
3749 +       case 10000:
3750         case 1000:
3751                 return SWMII_SPEED_1000;
3752         case 100:
3753 --- a/include/linux/phy.h
3754 +++ b/include/linux/phy.h
3755 @@ -87,6 +87,7 @@ typedef enum {
3756         PHY_INTERFACE_MODE_XAUI,
3757         /* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */
3758         PHY_INTERFACE_MODE_10GKR,
3759 +       PHY_INTERFACE_MODE_2500SGMII,
3760         PHY_INTERFACE_MODE_MAX,
3761  } phy_interface_t;
3762  
3763 @@ -159,6 +160,8 @@ static inline const char *phy_modes(phy_
3764                 return "xaui";
3765         case PHY_INTERFACE_MODE_10GKR:
3766                 return "10gbase-kr";
3767 +       case PHY_INTERFACE_MODE_2500SGMII:
3768 +               return "sgmii-2500";
3769         default:
3770                 return "unknown";
3771         }