1 // SPDX-License-Identifier: GPL-2.0+
3 * Atheros AR71xx / AR9xxx GMAC driver
5 * Copyright (C) 2016 Marek Vasut <marex@denx.de>
6 * Copyright (C) 2019 Rosy Song <rosysong@rosinson.com>
10 #include <clock_legacy.h>
18 #include <asm/cache.h>
19 #include <linux/compiler.h>
20 #include <linux/err.h>
21 #include <linux/mii.h>
25 #include <mach/ath79.h>
27 DECLARE_GLOBAL_DATA_PTR;
36 /* MAC Configuration 1 */
37 #define AG7XXX_ETH_CFG1 0x00
38 #define AG7XXX_ETH_CFG1_SOFT_RST BIT(31)
39 #define AG7XXX_ETH_CFG1_RX_RST BIT(19)
40 #define AG7XXX_ETH_CFG1_TX_RST BIT(18)
41 #define AG7XXX_ETH_CFG1_LOOPBACK BIT(8)
42 #define AG7XXX_ETH_CFG1_RX_EN BIT(2)
43 #define AG7XXX_ETH_CFG1_TX_EN BIT(0)
45 /* MAC Configuration 2 */
46 #define AG7XXX_ETH_CFG2 0x04
47 #define AG7XXX_ETH_CFG2_IF_1000 BIT(9)
48 #define AG7XXX_ETH_CFG2_IF_10_100 BIT(8)
49 #define AG7XXX_ETH_CFG2_IF_SPEED_MASK (3 << 8)
50 #define AG7XXX_ETH_CFG2_HUGE_FRAME_EN BIT(5)
51 #define AG7XXX_ETH_CFG2_LEN_CHECK BIT(4)
52 #define AG7XXX_ETH_CFG2_PAD_CRC_EN BIT(2)
53 #define AG7XXX_ETH_CFG2_FDX BIT(0)
55 /* MII Configuration */
56 #define AG7XXX_ETH_MII_MGMT_CFG 0x20
57 #define AG7XXX_ETH_MII_MGMT_CFG_RESET BIT(31)
60 #define AG7XXX_ETH_MII_MGMT_CMD 0x24
61 #define AG7XXX_ETH_MII_MGMT_CMD_READ 0x1
64 #define AG7XXX_ETH_MII_MGMT_ADDRESS 0x28
65 #define AG7XXX_ETH_MII_MGMT_ADDRESS_SHIFT 8
68 #define AG7XXX_ETH_MII_MGMT_CTRL 0x2c
71 #define AG7XXX_ETH_MII_MGMT_STATUS 0x30
74 #define AG7XXX_ETH_MII_MGMT_IND 0x34
75 #define AG7XXX_ETH_MII_MGMT_IND_INVALID BIT(2)
76 #define AG7XXX_ETH_MII_MGMT_IND_BUSY BIT(0)
78 /* STA Address 1 & 2 */
79 #define AG7XXX_ETH_ADDR1 0x40
80 #define AG7XXX_ETH_ADDR2 0x44
82 /* ETH Configuration 0 - 5 */
83 #define AG7XXX_ETH_FIFO_CFG_0 0x48
84 #define AG7XXX_ETH_FIFO_CFG_1 0x4c
85 #define AG7XXX_ETH_FIFO_CFG_2 0x50
86 #define AG7XXX_ETH_FIFO_CFG_3 0x54
87 #define AG7XXX_ETH_FIFO_CFG_4 0x58
88 #define AG7XXX_ETH_FIFO_CFG_5 0x5c
90 /* DMA Transfer Control for Queue 0 */
91 #define AG7XXX_ETH_DMA_TX_CTRL 0x180
92 #define AG7XXX_ETH_DMA_TX_CTRL_TXE BIT(0)
94 /* Descriptor Address for Queue 0 Tx */
95 #define AG7XXX_ETH_DMA_TX_DESC 0x184
98 #define AG7XXX_ETH_DMA_TX_STATUS 0x188
101 #define AG7XXX_ETH_DMA_RX_CTRL 0x18c
102 #define AG7XXX_ETH_DMA_RX_CTRL_RXE BIT(0)
104 /* Pointer to Rx Descriptor */
105 #define AG7XXX_ETH_DMA_RX_DESC 0x190
108 #define AG7XXX_ETH_DMA_RX_STATUS 0x194
110 /* Custom register at 0x1805002C */
111 #define AG7XXX_ETH_XMII 0x2C
112 #define AG7XXX_ETH_XMII_TX_INVERT BIT(31)
113 #define AG7XXX_ETH_XMII_RX_DELAY_LSB 28
114 #define AG7XXX_ETH_XMII_RX_DELAY_MASK 0x30000000
115 #define AG7XXX_ETH_XMII_RX_DELAY_SET(x) \
116 (((x) << AG7XXX_ETH_XMII_RX_DELAY_LSB) & AG7XXX_ETH_XMII_RX_DELAY_MASK)
117 #define AG7XXX_ETH_XMII_TX_DELAY_LSB 26
118 #define AG7XXX_ETH_XMII_TX_DELAY_MASK 0x0c000000
119 #define AG7XXX_ETH_XMII_TX_DELAY_SET(x) \
120 (((x) << AG7XXX_ETH_XMII_TX_DELAY_LSB) & AG7XXX_ETH_XMII_TX_DELAY_MASK)
121 #define AG7XXX_ETH_XMII_GIGE BIT(25)
123 /* Custom register at 0x18070000 */
124 #define AG7XXX_GMAC_ETH_CFG 0x00
125 #define AG7XXX_ETH_CFG_RXDV_DELAY_LSB 16
126 #define AG7XXX_ETH_CFG_RXDV_DELAY_MASK 0x00030000
127 #define AG7XXX_ETH_CFG_RXDV_DELAY_SET(x) \
128 (((x) << AG7XXX_ETH_CFG_RXDV_DELAY_LSB) & AG7XXX_ETH_CFG_RXDV_DELAY_MASK)
129 #define AG7XXX_ETH_CFG_RXD_DELAY_LSB 14
130 #define AG7XXX_ETH_CFG_RXD_DELAY_MASK 0x0000c000
131 #define AG7XXX_ETH_CFG_RXD_DELAY_SET(x) \
132 (((x) << AG7XXX_ETH_CFG_RXD_DELAY_LSB) & AG7XXX_ETH_CFG_RXD_DELAY_MASK)
133 #define AG7XXX_ETH_CFG_SW_PHY_ADDR_SWAP BIT(8)
134 #define AG7XXX_ETH_CFG_SW_PHY_SWAP BIT(7)
135 #define AG7XXX_ETH_CFG_SW_ONLY_MODE BIT(6)
136 #define AG7XXX_ETH_CFG_GE0_ERR_EN BIT(5)
137 #define AG7XXX_ETH_CFG_MII_GE0_SLAVE BIT(4)
138 #define AG7XXX_ETH_CFG_MII_GE0_MASTER BIT(3)
139 #define AG7XXX_ETH_CFG_GMII_GE0 BIT(2)
140 #define AG7XXX_ETH_CFG_MII_GE0 BIT(1)
141 #define AG7XXX_ETH_CFG_RGMII_GE0 BIT(0)
143 #define CONFIG_TX_DESCR_NUM 8
144 #define CONFIG_RX_DESCR_NUM 8
145 #define CONFIG_ETH_BUFSIZE 2048
146 #define TX_TOTAL_BUFSIZE (CONFIG_ETH_BUFSIZE * CONFIG_TX_DESCR_NUM)
147 #define RX_TOTAL_BUFSIZE (CONFIG_ETH_BUFSIZE * CONFIG_RX_DESCR_NUM)
149 /* DMA descriptor. */
150 struct ag7xxx_dma_desc {
152 #define AG7XXX_DMADESC_IS_EMPTY BIT(31)
153 #define AG7XXX_DMADESC_FTPP_OVERRIDE_OFFSET 16
154 #define AG7XXX_DMADESC_PKT_SIZE_OFFSET 0
155 #define AG7XXX_DMADESC_PKT_SIZE_MASK 0xfff
161 struct ar7xxx_eth_priv {
162 struct ag7xxx_dma_desc tx_mac_descrtable[CONFIG_TX_DESCR_NUM];
163 struct ag7xxx_dma_desc rx_mac_descrtable[CONFIG_RX_DESCR_NUM];
164 char txbuffs[TX_TOTAL_BUFSIZE] __aligned(ARCH_DMA_MINALIGN);
165 char rxbuffs[RX_TOTAL_BUFSIZE] __aligned(ARCH_DMA_MINALIGN);
168 void __iomem *phyregs;
170 struct eth_device *dev;
171 struct phy_device *phydev;
177 enum ag7xxx_model model;
181 * Switch and MDIO access
183 static int ag7xxx_switch_read(struct mii_dev *bus, int addr, int reg, u16 *val)
185 struct ar7xxx_eth_priv *priv = bus->priv;
186 void __iomem *regs = priv->phyregs;
189 writel(0x0, regs + AG7XXX_ETH_MII_MGMT_CMD);
190 writel((addr << AG7XXX_ETH_MII_MGMT_ADDRESS_SHIFT) | reg,
191 regs + AG7XXX_ETH_MII_MGMT_ADDRESS);
192 writel(AG7XXX_ETH_MII_MGMT_CMD_READ,
193 regs + AG7XXX_ETH_MII_MGMT_CMD);
195 ret = wait_for_bit_le32(regs + AG7XXX_ETH_MII_MGMT_IND,
196 AG7XXX_ETH_MII_MGMT_IND_BUSY, 0, 1000, 0);
200 *val = readl(regs + AG7XXX_ETH_MII_MGMT_STATUS) & 0xffff;
201 writel(0x0, regs + AG7XXX_ETH_MII_MGMT_CMD);
206 static int ag7xxx_switch_write(struct mii_dev *bus, int addr, int reg, u16 val)
208 struct ar7xxx_eth_priv *priv = bus->priv;
209 void __iomem *regs = priv->phyregs;
212 writel((addr << AG7XXX_ETH_MII_MGMT_ADDRESS_SHIFT) | reg,
213 regs + AG7XXX_ETH_MII_MGMT_ADDRESS);
214 writel(val, regs + AG7XXX_ETH_MII_MGMT_CTRL);
216 ret = wait_for_bit_le32(regs + AG7XXX_ETH_MII_MGMT_IND,
217 AG7XXX_ETH_MII_MGMT_IND_BUSY, 0, 1000, 0);
222 static int ag7xxx_switch_reg_read(struct mii_dev *bus, int reg, u32 *val)
224 struct ar7xxx_eth_priv *priv = bus->priv;
229 u32 reg_temp_w = (reg & 0xfffffffc) >> 1;
233 if (priv->model == AG7XXX_MODEL_AG933X ||
234 priv->model == AG7XXX_MODEL_AG953X) {
237 } else if (priv->model == AG7XXX_MODEL_AG934X ||
238 priv->model == AG7XXX_MODEL_AG956X) {
244 if (priv->model == AG7XXX_MODEL_AG956X)
245 ret = ag7xxx_switch_write(bus, phy_addr, reg_addr, (reg >> 9) & 0x1ff);
247 ret = ag7xxx_switch_write(bus, phy_addr, reg_addr, reg >> 9);
251 phy_temp = ((reg >> 6) & 0x7) | 0x10;
252 if (priv->model == AG7XXX_MODEL_AG956X)
253 reg_temp = reg_temp_w & 0x1f;
255 reg_temp = (reg >> 1) & 0x1e;
258 ret = ag7xxx_switch_read(bus, phy_temp, reg_temp | 0, &rv);
263 if (priv->model == AG7XXX_MODEL_AG956X) {
264 phy_temp = (((reg_temp_w + 1) >> 5) & 0x7) | 0x10;
265 reg_temp = (reg_temp_w + 1) & 0x1f;
266 ret = ag7xxx_switch_read(bus, phy_temp, reg_temp, &rv);
268 ret = ag7xxx_switch_read(bus, phy_temp, reg_temp | 1, &rv);
277 static int ag7xxx_switch_reg_write(struct mii_dev *bus, int reg, u32 val)
279 struct ar7xxx_eth_priv *priv = bus->priv;
284 u32 reg_temp_w = (reg & 0xfffffffc) >> 1;
287 if (priv->model == AG7XXX_MODEL_AG933X ||
288 priv->model == AG7XXX_MODEL_AG953X) {
291 } else if (priv->model == AG7XXX_MODEL_AG934X ||
292 priv->model == AG7XXX_MODEL_AG956X) {
298 if (priv->model == AG7XXX_MODEL_AG956X)
299 ret = ag7xxx_switch_write(bus, phy_addr, reg_addr, (reg >> 9) & 0x1ff);
301 ret = ag7xxx_switch_write(bus, phy_addr, reg_addr, reg >> 9);
305 if (priv->model == AG7XXX_MODEL_AG956X) {
306 reg_temp = (reg_temp_w + 1) & 0x1f;
307 phy_temp = (((reg_temp_w + 1) >> 5) & 0x7) | 0x10;
309 phy_temp = ((reg >> 6) & 0x7) | 0x10;
310 reg_temp = (reg >> 1) & 0x1e;
314 * The switch on AR933x has some special register behavior, which
315 * expects particular write order of their nibbles:
316 * 0x40 ..... MSB first, LSB second
317 * 0x50 ..... MSB first, LSB second
318 * 0x98 ..... LSB first, MSB second
319 * others ... don't care
321 if ((priv->model == AG7XXX_MODEL_AG933X) && (reg == 0x98)) {
322 ret = ag7xxx_switch_write(bus, phy_temp, reg_temp | 0, val & 0xffff);
326 ret = ag7xxx_switch_write(bus, phy_temp, reg_temp | 1, val >> 16);
330 if (priv->model == AG7XXX_MODEL_AG956X)
331 ret = ag7xxx_switch_write(bus, phy_temp, reg_temp, val >> 16);
333 ret = ag7xxx_switch_write(bus, phy_temp, reg_temp | 1, val >> 16);
337 if (priv->model == AG7XXX_MODEL_AG956X) {
338 phy_temp = ((reg_temp_w >> 5) & 0x7) | 0x10;
339 reg_temp = reg_temp_w & 0x1f;
342 ret = ag7xxx_switch_write(bus, phy_temp, reg_temp | 0, val & 0xffff);
350 static int ag7xxx_mdio_rw(struct mii_dev *bus, int addr, int reg, u32 val)
355 /* No idea if this is long enough or too long */
356 int timeout_ms = 1000;
358 /* Dummy read followed by PHY read/write command. */
359 ret = ag7xxx_switch_reg_read(bus, 0x98, &data);
362 data = val | (reg << 16) | (addr << 21) | BIT(30) | BIT(31);
363 ret = ag7xxx_switch_reg_write(bus, 0x98, data);
367 start = get_timer(0);
369 /* Wait for operation to finish */
371 ret = ag7xxx_switch_reg_read(bus, 0x98, &data);
375 if (get_timer(start) > timeout_ms)
377 } while (data & BIT(31));
379 return data & 0xffff;
/* MDIO bus read op: BIT(27) selects the "read" command in reg 0x98. */
static int ag7xxx_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	return ag7xxx_mdio_rw(bus, addr, reg, BIT(27));
}
387 static int ag7xxx_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
392 ret = ag7xxx_mdio_rw(bus, addr, reg, val);
401 static void ag7xxx_dma_clean_tx(struct udevice *dev)
403 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
404 struct ag7xxx_dma_desc *curr, *next;
408 for (i = 0; i < CONFIG_TX_DESCR_NUM; i++) {
409 curr = &priv->tx_mac_descrtable[i];
410 next = &priv->tx_mac_descrtable[(i + 1) % CONFIG_TX_DESCR_NUM];
412 curr->data_addr = virt_to_phys(&priv->txbuffs[i * CONFIG_ETH_BUFSIZE]);
413 curr->config = AG7XXX_DMADESC_IS_EMPTY;
414 curr->next_desc = virt_to_phys(next);
417 priv->tx_currdescnum = 0;
419 /* Cache: Flush descriptors, don't care about buffers. */
420 start = (u32)(&priv->tx_mac_descrtable[0]);
421 end = start + sizeof(priv->tx_mac_descrtable);
422 flush_dcache_range(start, end);
425 static void ag7xxx_dma_clean_rx(struct udevice *dev)
427 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
428 struct ag7xxx_dma_desc *curr, *next;
432 for (i = 0; i < CONFIG_RX_DESCR_NUM; i++) {
433 curr = &priv->rx_mac_descrtable[i];
434 next = &priv->rx_mac_descrtable[(i + 1) % CONFIG_RX_DESCR_NUM];
436 curr->data_addr = virt_to_phys(&priv->rxbuffs[i * CONFIG_ETH_BUFSIZE]);
437 curr->config = AG7XXX_DMADESC_IS_EMPTY;
438 curr->next_desc = virt_to_phys(next);
441 priv->rx_currdescnum = 0;
443 /* Cache: Flush+Invalidate descriptors, Invalidate buffers. */
444 start = (u32)(&priv->rx_mac_descrtable[0]);
445 end = start + sizeof(priv->rx_mac_descrtable);
446 flush_dcache_range(start, end);
447 invalidate_dcache_range(start, end);
449 start = (u32)&priv->rxbuffs;
450 end = start + sizeof(priv->rxbuffs);
451 invalidate_dcache_range(start, end);
457 static int ag7xxx_eth_send(struct udevice *dev, void *packet, int length)
459 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
460 struct ag7xxx_dma_desc *curr;
463 curr = &priv->tx_mac_descrtable[priv->tx_currdescnum];
465 /* Cache: Invalidate descriptor. */
467 end = start + sizeof(*curr);
468 invalidate_dcache_range(start, end);
470 if (!(curr->config & AG7XXX_DMADESC_IS_EMPTY)) {
471 printf("ag7xxx: Out of TX DMA descriptors!\n");
475 /* Copy the packet into the data buffer. */
476 memcpy(phys_to_virt(curr->data_addr), packet, length);
477 curr->config = length & AG7XXX_DMADESC_PKT_SIZE_MASK;
479 /* Cache: Flush descriptor, Flush buffer. */
481 end = start + sizeof(*curr);
482 flush_dcache_range(start, end);
483 start = (u32)phys_to_virt(curr->data_addr);
484 end = start + length;
485 flush_dcache_range(start, end);
487 /* Load the DMA descriptor and start TX DMA. */
488 writel(AG7XXX_ETH_DMA_TX_CTRL_TXE,
489 priv->regs + AG7XXX_ETH_DMA_TX_CTRL);
491 /* Switch to next TX descriptor. */
492 priv->tx_currdescnum = (priv->tx_currdescnum + 1) % CONFIG_TX_DESCR_NUM;
497 static int ag7xxx_eth_recv(struct udevice *dev, int flags, uchar **packetp)
499 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
500 struct ag7xxx_dma_desc *curr;
501 u32 start, end, length;
503 curr = &priv->rx_mac_descrtable[priv->rx_currdescnum];
505 /* Cache: Invalidate descriptor. */
507 end = start + sizeof(*curr);
508 invalidate_dcache_range(start, end);
510 /* No packets received. */
511 if (curr->config & AG7XXX_DMADESC_IS_EMPTY)
514 length = curr->config & AG7XXX_DMADESC_PKT_SIZE_MASK;
516 /* Cache: Invalidate buffer. */
517 start = (u32)phys_to_virt(curr->data_addr);
518 end = start + length;
519 invalidate_dcache_range(start, end);
521 /* Receive one packet and return length. */
522 *packetp = phys_to_virt(curr->data_addr);
526 static int ag7xxx_eth_free_pkt(struct udevice *dev, uchar *packet,
529 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
530 struct ag7xxx_dma_desc *curr;
533 curr = &priv->rx_mac_descrtable[priv->rx_currdescnum];
535 curr->config = AG7XXX_DMADESC_IS_EMPTY;
537 /* Cache: Flush descriptor. */
539 end = start + sizeof(*curr);
540 flush_dcache_range(start, end);
542 /* Switch to next RX descriptor. */
543 priv->rx_currdescnum = (priv->rx_currdescnum + 1) % CONFIG_RX_DESCR_NUM;
548 static int ag7xxx_eth_start(struct udevice *dev)
550 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
552 /* FIXME: Check if link up */
554 /* Clear the DMA rings. */
555 ag7xxx_dma_clean_tx(dev);
556 ag7xxx_dma_clean_rx(dev);
558 /* Load DMA descriptors and start the RX DMA. */
559 writel(virt_to_phys(&priv->tx_mac_descrtable[priv->tx_currdescnum]),
560 priv->regs + AG7XXX_ETH_DMA_TX_DESC);
561 writel(virt_to_phys(&priv->rx_mac_descrtable[priv->rx_currdescnum]),
562 priv->regs + AG7XXX_ETH_DMA_RX_DESC);
563 writel(AG7XXX_ETH_DMA_RX_CTRL_RXE,
564 priv->regs + AG7XXX_ETH_DMA_RX_CTRL);
569 static void ag7xxx_eth_stop(struct udevice *dev)
571 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
573 /* Stop the TX DMA. */
574 writel(0, priv->regs + AG7XXX_ETH_DMA_TX_CTRL);
575 wait_for_bit_le32(priv->regs + AG7XXX_ETH_DMA_TX_CTRL, ~0, 0,
578 /* Stop the RX DMA. */
579 writel(0, priv->regs + AG7XXX_ETH_DMA_RX_CTRL);
580 wait_for_bit_le32(priv->regs + AG7XXX_ETH_DMA_RX_CTRL, ~0, 0,
587 static int ag7xxx_eth_write_hwaddr(struct udevice *dev)
589 struct eth_pdata *pdata = dev_get_platdata(dev);
590 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
591 unsigned char *mac = pdata->enetaddr;
592 u32 macid_lo, macid_hi;
594 macid_hi = mac[3] | (mac[2] << 8) | (mac[1] << 16) | (mac[0] << 24);
595 macid_lo = (mac[5] << 16) | (mac[4] << 24);
597 writel(macid_lo, priv->regs + AG7XXX_ETH_ADDR1);
598 writel(macid_hi, priv->regs + AG7XXX_ETH_ADDR2);
603 static void ag7xxx_hw_setup(struct udevice *dev)
605 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
608 setbits_be32(priv->regs + AG7XXX_ETH_CFG1,
609 AG7XXX_ETH_CFG1_RX_RST | AG7XXX_ETH_CFG1_TX_RST |
610 AG7XXX_ETH_CFG1_SOFT_RST);
614 writel(AG7XXX_ETH_CFG1_RX_EN | AG7XXX_ETH_CFG1_TX_EN,
615 priv->regs + AG7XXX_ETH_CFG1);
617 if (priv->interface == PHY_INTERFACE_MODE_RMII)
618 speed = AG7XXX_ETH_CFG2_IF_10_100;
620 speed = AG7XXX_ETH_CFG2_IF_1000;
622 clrsetbits_be32(priv->regs + AG7XXX_ETH_CFG2,
623 AG7XXX_ETH_CFG2_IF_SPEED_MASK,
624 speed | AG7XXX_ETH_CFG2_PAD_CRC_EN |
625 AG7XXX_ETH_CFG2_LEN_CHECK);
627 writel(0xfff0000, priv->regs + AG7XXX_ETH_FIFO_CFG_1);
628 writel(0x1fff, priv->regs + AG7XXX_ETH_FIFO_CFG_2);
630 writel(0x1f00, priv->regs + AG7XXX_ETH_FIFO_CFG_0);
631 setbits_be32(priv->regs + AG7XXX_ETH_FIFO_CFG_4, 0x3ffff);
632 writel(0x10ffff, priv->regs + AG7XXX_ETH_FIFO_CFG_1);
633 writel(0xaaa0555, priv->regs + AG7XXX_ETH_FIFO_CFG_2);
634 writel(0x7eccf, priv->regs + AG7XXX_ETH_FIFO_CFG_5);
635 writel(0x1f00140, priv->regs + AG7XXX_ETH_FIFO_CFG_3);
638 static int ag7xxx_mii_get_div(void)
640 ulong freq = get_bus_freq(0);
642 switch (freq / 1000000) {
643 case 150: return 0x7;
644 case 175: return 0x5;
645 case 200: return 0x4;
646 case 210: return 0x9;
647 case 220: return 0x9;
652 static int ag7xxx_mii_setup(struct udevice *dev)
654 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
655 int i, ret, div = ag7xxx_mii_get_div();
658 if (priv->model == AG7XXX_MODEL_AG933X) {
659 /* Unit 0 is PHY-less on AR9331, see datasheet Figure 2-3 */
660 if (priv->interface == PHY_INTERFACE_MODE_RMII)
664 if (priv->model == AG7XXX_MODEL_AG934X)
666 else if (priv->model == AG7XXX_MODEL_AG953X)
668 else if (priv->model == AG7XXX_MODEL_AG956X)
671 if (priv->model == AG7XXX_MODEL_AG934X ||
672 priv->model == AG7XXX_MODEL_AG953X ||
673 priv->model == AG7XXX_MODEL_AG956X) {
674 writel(AG7XXX_ETH_MII_MGMT_CFG_RESET | reg,
675 priv->regs + AG7XXX_ETH_MII_MGMT_CFG);
676 writel(reg, priv->regs + AG7XXX_ETH_MII_MGMT_CFG);
680 for (i = 0; i < 10; i++) {
681 writel(AG7XXX_ETH_MII_MGMT_CFG_RESET | div,
682 priv->regs + AG7XXX_ETH_MII_MGMT_CFG);
683 writel(div, priv->regs + AG7XXX_ETH_MII_MGMT_CFG);
685 /* Check the switch */
686 ret = ag7xxx_switch_reg_read(priv->bus, 0x10c, ®);
690 if (reg != 0x18007fff)
699 static int ag933x_phy_setup_wan(struct udevice *dev)
701 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
703 /* Configure switch port 4 (GMAC0) */
704 return ag7xxx_mdio_write(priv->bus, 4, 0, MII_BMCR, 0x9000);
707 static int ag933x_phy_setup_lan(struct udevice *dev)
709 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
713 /* Reset the switch */
714 ret = ag7xxx_switch_reg_read(priv->bus, 0, ®);
718 ret = ag7xxx_switch_reg_write(priv->bus, 0, reg);
723 ret = ag7xxx_switch_reg_read(priv->bus, 0, ®);
726 } while (reg & BIT(31));
728 /* Configure switch ports 0...3 (GMAC1) */
729 for (i = 0; i < 4; i++) {
730 ret = ag7xxx_mdio_write(priv->bus, 0x4, 0, MII_BMCR, 0x9000);
735 /* Enable CPU port */
736 ret = ag7xxx_switch_reg_write(priv->bus, 0x78, BIT(8));
740 for (i = 0; i < 4; i++) {
741 ret = ag7xxx_switch_reg_write(priv->bus, i * 0x100, BIT(9));
747 ret = ag7xxx_switch_reg_write(priv->bus, 0x38, 0xc000050e);
751 /* Disable Atheros header */
752 ret = ag7xxx_switch_reg_write(priv->bus, 0x104, 0x4004);
756 /* Tag priority mapping */
757 ret = ag7xxx_switch_reg_write(priv->bus, 0x70, 0xfa50);
761 /* Enable ARP packets to the CPU */
762 ret = ag7xxx_switch_reg_read(priv->bus, 0x5c, ®);
766 ret = ag7xxx_switch_reg_write(priv->bus, 0x5c, reg);
773 static int ag953x_phy_setup_wan(struct udevice *dev)
777 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
779 /* Set wan port connect to GE0 */
780 ret = ag7xxx_switch_reg_read(priv->bus, 0x8, ®);
784 ret = ag7xxx_switch_reg_write(priv->bus, 0x8, reg | BIT(28));
788 /* Configure switch port 4 (GMAC0) */
789 ret = ag7xxx_switch_write(priv->bus, 4, MII_BMCR, 0x9000);
796 static int ag953x_phy_setup_lan(struct udevice *dev)
798 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
802 /* Reset the switch */
803 ret = ag7xxx_switch_reg_read(priv->bus, 0, ®);
807 ret = ag7xxx_switch_reg_write(priv->bus, 0, reg | BIT(31));
812 ret = ag7xxx_switch_reg_read(priv->bus, 0, ®);
815 } while (reg & BIT(31));
817 ret = ag7xxx_switch_reg_write(priv->bus, 0x100, 0x4e);
822 ret = ag7xxx_switch_reg_read(priv->bus, 0x4, ®);
826 ret = ag7xxx_switch_reg_write(priv->bus, 0x4, reg | BIT(6));
830 /* Configure switch ports 0...4 (GMAC1) */
831 for (i = 0; i < 5; i++) {
832 ret = ag7xxx_switch_write(priv->bus, i, MII_BMCR, 0x9000);
837 for (i = 0; i < 5; i++) {
838 ret = ag7xxx_switch_reg_write(priv->bus, (i + 2) * 0x100, BIT(9));
844 ret = ag7xxx_switch_reg_write(priv->bus, 0x38, 0xc000050e);
848 /* Disable Atheros header */
849 ret = ag7xxx_switch_reg_write(priv->bus, 0x104, 0x4004);
853 /* Tag priority mapping */
854 ret = ag7xxx_switch_reg_write(priv->bus, 0x70, 0xfa50);
858 /* Enable ARP packets to the CPU */
859 ret = ag7xxx_switch_reg_read(priv->bus, 0x5c, ®);
863 ret = ag7xxx_switch_reg_write(priv->bus, 0x5c, reg | 0x100000);
867 /* Enable broadcast packets to the CPU */
868 ret = ag7xxx_switch_reg_read(priv->bus, 0x2c, ®);
872 ret = ag7xxx_switch_reg_write(priv->bus, 0x2c, reg | BIT(25) | BIT(26));
879 static int ag933x_phy_setup_reset_set(struct udevice *dev, int port)
881 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
884 if (priv->model == AG7XXX_MODEL_AG953X ||
885 priv->model == AG7XXX_MODEL_AG956X) {
886 ret = ag7xxx_switch_write(priv->bus, port, MII_ADVERTISE,
889 ret = ag7xxx_mdio_write(priv->bus, port, 0, MII_ADVERTISE,
890 ADVERTISE_ALL | ADVERTISE_PAUSE_CAP |
891 ADVERTISE_PAUSE_ASYM);
896 if (priv->model == AG7XXX_MODEL_AG934X) {
897 ret = ag7xxx_mdio_write(priv->bus, port, 0, MII_CTRL1000,
901 } else if (priv->model == AG7XXX_MODEL_AG956X) {
902 ret = ag7xxx_switch_write(priv->bus, port, MII_CTRL1000,
908 if (priv->model == AG7XXX_MODEL_AG953X ||
909 priv->model == AG7XXX_MODEL_AG956X)
910 return ag7xxx_switch_write(priv->bus, port, MII_BMCR,
911 BMCR_ANENABLE | BMCR_RESET);
913 return ag7xxx_mdio_write(priv->bus, port, 0, MII_BMCR,
914 BMCR_ANENABLE | BMCR_RESET);
917 static int ag933x_phy_setup_reset_fin(struct udevice *dev, int port)
919 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
923 if (priv->model == AG7XXX_MODEL_AG953X ||
924 priv->model == AG7XXX_MODEL_AG956X) {
926 ret = ag7xxx_switch_read(priv->bus, port, MII_BMCR, ®);
930 } while (reg & BMCR_RESET);
933 ret = ag7xxx_mdio_read(priv->bus, port, 0, MII_BMCR);
937 } while (ret & BMCR_RESET);
943 static int ag933x_phy_setup_common(struct udevice *dev)
945 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
949 if (priv->model == AG7XXX_MODEL_AG933X)
951 else if (priv->model == AG7XXX_MODEL_AG934X ||
952 priv->model == AG7XXX_MODEL_AG953X ||
953 priv->model == AG7XXX_MODEL_AG956X)
958 if (priv->interface == PHY_INTERFACE_MODE_RMII) {
959 ret = ag933x_phy_setup_reset_set(dev, phymax);
963 ret = ag933x_phy_setup_reset_fin(dev, phymax);
967 /* Read out link status */
968 if (priv->model == AG7XXX_MODEL_AG953X)
969 ret = ag7xxx_switch_read(priv->bus, phymax, MII_MIPSCR, ®);
971 ret = ag7xxx_mdio_read(priv->bus, phymax, 0, MII_MIPSCR);
979 for (i = 0; i < phymax; i++) {
980 ret = ag933x_phy_setup_reset_set(dev, i);
985 for (i = 0; i < phymax; i++) {
986 ret = ag933x_phy_setup_reset_fin(dev, i);
991 for (i = 0; i < phymax; i++) {
992 /* Read out link status */
993 if (priv->model == AG7XXX_MODEL_AG953X ||
994 priv->model == AG7XXX_MODEL_AG956X)
995 ret = ag7xxx_switch_read(priv->bus, i, MII_MIPSCR, ®);
997 ret = ag7xxx_mdio_read(priv->bus, i, 0, MII_MIPSCR);
/*
 * AR934x (AR8327/AR8328 switch) PHY/switch bring-up: programs flood
 * masks, power-on strap, port-6 status and QM control registers, applies
 * the v1.0 silicon fixup, then adjusts register 0x66c.
 * NOTE(review): this block is damaged in this copy — declarations, error
 * checks, closing braces and the 0x66c register mutation are missing,
 * and several "&reg" arguments were mangled into "®" by an encoding
 * error. Restore from upstream drivers/net/ag7xxx.c before building.
 */
static int ag934x_phy_setup(struct udevice *dev)
	struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
	/* Flood mask / power strap / port status / QM control setup. */
	ret = ag7xxx_switch_reg_write(priv->bus, 0x624, 0x7f7f7f7f);
	ret = ag7xxx_switch_reg_write(priv->bus, 0x10, 0x40000000);
	ret = ag7xxx_switch_reg_write(priv->bus, 0x4, 0x07600000);
	ret = ag7xxx_switch_reg_write(priv->bus, 0xc, 0x01000000);
	ret = ag7xxx_switch_reg_write(priv->bus, 0x7c, 0x0000007e);
	/* AR8327/AR8328 v1.0 fixup */
	ret = ag7xxx_switch_reg_read(priv->bus, 0, ®);
	if ((reg & 0xffff) == 0x1201) {
		for (i = 0; i < 5; i++) {
			/* Debug-register writes: select 0x1d, data 0x1e. */
			ret = ag7xxx_mdio_write(priv->bus, i, 0, 0x1d, 0x0);
			ret = ag7xxx_mdio_write(priv->bus, i, 0, 0x1e, 0x02ea);
			ret = ag7xxx_mdio_write(priv->bus, i, 0, 0x1d, 0x3d);
			ret = ag7xxx_mdio_write(priv->bus, i, 0, 0x1e, 0x68a0);
	/* Read-modify-write of 0x66c; the modify line is lost here. */
	ret = ag7xxx_switch_reg_read(priv->bus, 0x66c, ®);
	ret = ag7xxx_switch_reg_write(priv->bus, 0x66c, reg);
/*
 * AR956x (AR8337/AR8334 switch) bring-up: picks a revision-dependent
 * value for register 0xe0, programs power strap/flood/QM registers and
 * applies the v1.0 gigabit-clock fixup on all five PHYs.
 * NOTE(review): this block is damaged in this copy — declarations, the
 * two revision-dependent `ctrl` assignments, error checks and closing
 * braces are missing, and "&reg" was mangled into "®". Restore from
 * upstream drivers/net/ag7xxx.c before building.
 */
static int ag956x_phy_setup(struct udevice *dev)
	struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
	/* Silicon revision decides the 0xe0 (ctrl) programming below. */
	ret = ag7xxx_switch_reg_read(priv->bus, 0x0, ®);
	if ((reg & 0xffff) >= 0x1301)
	ret = ag7xxx_switch_reg_write(priv->bus, 0x4, BIT(7));
	ret = ag7xxx_switch_reg_write(priv->bus, 0xe0, ctrl);
	ret = ag7xxx_switch_reg_write(priv->bus, 0x624, 0x7f7f7f7f);
	 * Values suggested by the switch team when s17 in sgmii
	 * configuration. 0x10(S17_PWS_REG) = 0x602613a0
	ret = ag7xxx_switch_reg_write(priv->bus, 0x10, 0x602613a0);
	ret = ag7xxx_switch_reg_write(priv->bus, 0x7c, 0x0000007e);
	/* AR8337/AR8334 v1.0 fixup */
	ret = ag7xxx_switch_reg_read(priv->bus, 0, ®);
	if ((reg & 0xffff) == 0x1301) {
		for (i = 0; i < 5; i++) {
			/* Turn on Gigabit clock */
			ret = ag7xxx_switch_write(priv->bus, i, 0x1d, 0x3d);
			ret = ag7xxx_switch_write(priv->bus, i, 0x1e, 0x6820);
1116 static int ag7xxx_mac_probe(struct udevice *dev)
1118 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
1121 ag7xxx_hw_setup(dev);
1122 ret = ag7xxx_mii_setup(dev);
1126 ag7xxx_eth_write_hwaddr(dev);
1128 if (priv->model == AG7XXX_MODEL_AG933X) {
1129 if (priv->interface == PHY_INTERFACE_MODE_RMII)
1130 ret = ag933x_phy_setup_wan(dev);
1132 ret = ag933x_phy_setup_lan(dev);
1133 } else if (priv->model == AG7XXX_MODEL_AG953X) {
1134 if (priv->interface == PHY_INTERFACE_MODE_RMII)
1135 ret = ag953x_phy_setup_wan(dev);
1137 ret = ag953x_phy_setup_lan(dev);
1138 } else if (priv->model == AG7XXX_MODEL_AG934X) {
1139 ret = ag934x_phy_setup(dev);
1140 } else if (priv->model == AG7XXX_MODEL_AG956X) {
1141 ret = ag956x_phy_setup(dev);
1149 return ag933x_phy_setup_common(dev);
1152 static int ag7xxx_mdio_probe(struct udevice *dev)
1154 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
1155 struct mii_dev *bus = mdio_alloc();
1160 bus->read = ag7xxx_mdio_read;
1161 bus->write = ag7xxx_mdio_write;
1162 snprintf(bus->name, sizeof(bus->name), dev->name);
1164 bus->priv = (void *)priv;
1166 return mdio_register(bus);
1169 static int ag7xxx_get_phy_iface_offset(struct udevice *dev)
1173 offset = fdtdec_lookup_phandle(gd->fdt_blob, dev_of_offset(dev), "phy");
1175 debug("%s: PHY OF node not found (ret=%i)\n", __func__, offset);
1179 offset = fdt_parent_offset(gd->fdt_blob, offset);
1181 debug("%s: PHY OF node parent MDIO bus not found (ret=%i)\n",
1186 offset = fdt_parent_offset(gd->fdt_blob, offset);
1188 debug("%s: PHY MDIO OF node parent MAC not found (ret=%i)\n",
1196 static int ag7xxx_eth_probe(struct udevice *dev)
1198 struct eth_pdata *pdata = dev_get_platdata(dev);
1199 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
1200 void __iomem *iobase, *phyiobase;
1203 /* Decoding of convoluted PHY wiring on Atheros MIPS. */
1204 ret = ag7xxx_get_phy_iface_offset(dev);
1207 phyreg = fdtdec_get_int(gd->fdt_blob, ret, "reg", -1);
1209 iobase = map_physmem(pdata->iobase, 0x200, MAP_NOCACHE);
1210 phyiobase = map_physmem(phyreg, 0x200, MAP_NOCACHE);
1212 debug("%s, iobase=%p, phyiobase=%p, priv=%p\n",
1213 __func__, iobase, phyiobase, priv);
1214 priv->regs = iobase;
1215 priv->phyregs = phyiobase;
1216 priv->interface = pdata->phy_interface;
1217 priv->model = dev_get_driver_data(dev);
1219 ret = ag7xxx_mdio_probe(dev);
1223 priv->bus = miiphy_get_dev_by_name(dev->name);
1225 ret = ag7xxx_mac_probe(dev);
1226 debug("%s, ret=%d\n", __func__, ret);
1231 static int ag7xxx_eth_remove(struct udevice *dev)
1233 struct ar7xxx_eth_priv *priv = dev_get_priv(dev);
1236 mdio_unregister(priv->bus);
1237 mdio_free(priv->bus);
1242 static const struct eth_ops ag7xxx_eth_ops = {
1243 .start = ag7xxx_eth_start,
1244 .send = ag7xxx_eth_send,
1245 .recv = ag7xxx_eth_recv,
1246 .free_pkt = ag7xxx_eth_free_pkt,
1247 .stop = ag7xxx_eth_stop,
1248 .write_hwaddr = ag7xxx_eth_write_hwaddr,
1251 static int ag7xxx_eth_ofdata_to_platdata(struct udevice *dev)
1253 struct eth_pdata *pdata = dev_get_platdata(dev);
1254 const char *phy_mode;
1257 pdata->iobase = devfdt_get_addr(dev);
1258 pdata->phy_interface = -1;
1260 /* Decoding of convoluted PHY wiring on Atheros MIPS. */
1261 ret = ag7xxx_get_phy_iface_offset(dev);
1265 phy_mode = fdt_getprop(gd->fdt_blob, ret, "phy-mode", NULL);
1267 pdata->phy_interface = phy_get_interface_by_name(phy_mode);
1268 if (pdata->phy_interface == -1) {
1269 debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
1276 static const struct udevice_id ag7xxx_eth_ids[] = {
1277 { .compatible = "qca,ag933x-mac", .data = AG7XXX_MODEL_AG933X },
1278 { .compatible = "qca,ag934x-mac", .data = AG7XXX_MODEL_AG934X },
1279 { .compatible = "qca,ag953x-mac", .data = AG7XXX_MODEL_AG953X },
1280 { .compatible = "qca,ag956x-mac", .data = AG7XXX_MODEL_AG956X },
1284 U_BOOT_DRIVER(eth_ag7xxx) = {
1285 .name = "eth_ag7xxx",
1287 .of_match = ag7xxx_eth_ids,
1288 .ofdata_to_platdata = ag7xxx_eth_ofdata_to_platdata,
1289 .probe = ag7xxx_eth_probe,
1290 .remove = ag7xxx_eth_remove,
1291 .ops = &ag7xxx_eth_ops,
1292 .priv_auto_alloc_size = sizeof(struct ar7xxx_eth_priv),
1293 .platdata_auto_alloc_size = sizeof(struct eth_pdata),
1294 .flags = DM_FLAG_ALLOC_PRIV_DMA,