/*
 * Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 * Based on Atheros' AG7100 driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
#include <linux/sizes.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>

#include "ag71xx.h"
#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)
static int ag71xx_msg_level = -1;

module_param_named(msg_level, ag71xx_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define ETH_SWITCH_HEADER_LEN	2

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush);
static inline unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
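/*
 * For example, with the default 1500-byte MTU this works out to
 * 2 (switch header) + 14 (Ethernet header) + 4 (VLAN tag) + 1500 + 4 (FCS)
 * = 1524 bytes.
 */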
static void ag71xx_dump_dma_regs(struct ag71xx *ag)
{
	DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_TX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_TX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_TX_STATUS));

	DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_RX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_RX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_RX_STATUS));
}
static void ag71xx_dump_regs(struct ag71xx *ag)
{
	DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
	    ag71xx_rr(ag, AG71XX_REG_MAC_IPG),
	    ag71xx_rr(ag, AG71XX_REG_MAC_HDX),
	    ag71xx_rr(ag, AG71XX_REG_MAC_MFL));
	DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2));
	DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
	DBG("%s: fifo_cfg3=%08x, fifo_cfg4=%08x, fifo_cfg5=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
}
static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr)
{
	DBG("%s: %s intr=%08x %s%s%s%s%s%s\n",
	    ag->dev->name, label, intr,
	    (intr & AG71XX_INT_TX_PS) ? "TXPS " : "",
	    (intr & AG71XX_INT_TX_UR) ? "TXUR " : "",
	    (intr & AG71XX_INT_TX_BE) ? "TXBE " : "",
	    (intr & AG71XX_INT_RX_PR) ? "RXPR " : "",
	    (intr & AG71XX_INT_RX_OF) ? "RXOF " : "",
	    (intr & AG71XX_INT_RX_BE) ? "RXBE " : "");
}
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb) {
			bytes_compl += ring->buf[i].len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].skb);
		}
		ring->buf[i].skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}
static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = BIT(ring->order) - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->dev);
}
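/*
 * Ring bookkeeping: curr and dirty are free-running counters and are only
 * reduced to a slot index by masking with BIT(order) - 1. With order 7,
 * for instance, the ring holds 128 descriptors, the mask is 0x7f, and
 * curr - dirty is the number of descriptors currently in flight.
 */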
static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx_buf) {
			dma_unmap_single(&ag->pdev->dev, ring->buf[i].dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx_buf);
		}
}
static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
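/*
 * The extra SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) tail room is
 * required because the RX path hands these fragments to build_skb(), which
 * expects the shared info to live at the end of the buffer.
 */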
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
	void *data;

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx_buf = data;
	buf->dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
				       DMA_FROM_DEVICE);
	desc->data = (u32) buf->dma_addr + offset;
	return true;
}
static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = BIT(ring->order) - 1;
	unsigned int i;
	int ret;

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		DBG("ag71xx: RX desc at %p, next is %08x\n",
		    desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}
static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	unsigned int count;
	int offset = ag->rx_buf_offset;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count);

	return count;
}
static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);
	int tx_size = BIT(tx->order);

	tx->buf = kzalloc(ring_size * sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_KERNEL);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[tx_size];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}
static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}
static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->dev);
}
static unsigned char *ag71xx_speed_str(struct ag71xx *ag)
{
	switch (ag->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
	  | (((u32) mac[3]) << 8) | ((u32) mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}
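/*
 * Example: for the address 00:11:22:33:44:55 the two writes above are
 * MAC_ADDR1 = 0x55443322 (mac[5]..mac[2]) and MAC_ADDR2 = 0x11000000
 * (mac[1], mac[0]), i.e. the address is stored byte-reversed across the
 * two registers with the low half-word of MAC_ADDR2 unused.
 */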
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		pr_alert("%s: unable to clear DMA Rx status: %08x\n",
			 ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		pr_alert("%s: unable to clear DMA Tx status: %08x\n",
			 ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}
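/*
 * The "stop" descriptor written to TX_DESC/RX_DESC above is a single
 * descriptor whose next pointer refers back to itself (it is set up in the
 * probe function); parking both DMA engines on it keeps them from walking
 * into ring memory that is about to be reinitialized.
 */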
#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)
static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}
static void ag71xx_hw_setup(struct ag71xx *ag)
{
	struct device_node *np = ag->pdev->dev.of_node;
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	if (of_property_read_bool(np, "flow-control"))
		init |= MAC_CFG1_TFC | MAC_CFG1_RFC;
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}
static void ag71xx_hw_init(struct ag71xx *ag)
{
	ag71xx_hw_stop(ag);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	reset_control_assert(ag->mac_reset);
	if (ag->mdio_reset)
		reset_control_assert(ag->mdio_reset);
	msleep(100);
	reset_control_deassert(ag->mac_reset);
	if (ag->mdio_reset)
		reset_control_deassert(ag->mdio_reset);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct net_device *dev = ag->dev;
	u32 rx_ds;
	u32 mii_reg;

	ag71xx_hw_stop(ag);
	wmb();

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true);

	reset_control_assert(ag->mac_reset);
	udelay(10);
	reset_control_deassert(ag->mac_reset);
	udelay(10);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->dev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->dev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}
static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->dev);
}
static void ath79_set_pllval(struct ag71xx *ag)
{
	u32 pll_reg = ag->pllreg[1];
	u32 pll_val;

	if (!ag->pllregmap)
		return;

	switch (ag->speed) {
	case SPEED_10:
		pll_val = ag->plldata[2];
		break;
	case SPEED_100:
		pll_val = ag->plldata[1];
		break;
	case SPEED_1000:
		pll_val = ag->plldata[0];
		break;
	default:
		BUG();
	}

	if (pll_val)
		regmap_write(ag->pllregmap, pll_reg, pll_val);
}
static void ath79_set_pll(struct ag71xx *ag)
{
	u32 pll_cfg = ag->pllreg[0];
	u32 pll_shift = ag->pllreg[2];

	if (!ag->pllregmap)
		return;

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 2 << pll_shift);
	udelay(100);

	ath79_set_pllval(ag);

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 3 << pll_shift);
	udelay(100);

	regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 0);
	udelay(100);
}
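/*
 * The 2/3/0 values written to the two-bit field at pll_shift are
 * hardware-specific; the sequence reads as: switch the MAC clock to an
 * intermediate state, program the PLL value for the current link speed,
 * latch it, then return the field to normal operation, with a settle
 * delay after each step.
 */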
static void ag71xx_bit_set(void __iomem *reg, u32 bit)
{
	u32 val;

	val = __raw_readl(reg) | bit;
	__raw_writel(val, reg);
}

static void ag71xx_bit_clear(void __iomem *reg, u32 bit)
{
	u32 val;

	val = __raw_readl(reg) & ~bit;
	__raw_writel(val, reg);
}
static void ag71xx_sgmii_init_qca955x(struct device_node *np)
{
	struct device_node *np_dev;
	void __iomem *gmac_base;
	u32 mr_an_status;
	u32 sgmii_status;
	u8 tries = 0;

	np = of_get_child_by_name(np, "gmac-config");
	if (!np)
		return;

	np_dev = of_parse_phandle(np, "device", 0);
	if (!np_dev)
		goto out;

	gmac_base = of_iomap(np_dev, 0);
	if (!gmac_base) {
		pr_err("%pOF: can't map GMAC registers\n", np_dev);
		goto err_iomap;
	}

	mr_an_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_MR_AN_STATUS);
	if (!(mr_an_status & QCA955X_MR_AN_STATUS_AN_ABILITY))
		goto sgmii_out;

	/* SGMII reset sequence */
	__raw_writel(QCA955X_SGMII_RESET_RX_CLK_N_RESET,
		     gmac_base + QCA955X_GMAC_REG_SGMII_RESET);
	__raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_RESET);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_HW_RX_125M_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_RX_125M_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_TX_125M_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_RX_CLK_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_TX_CLK_N);
	udelay(10);

	/*
	 * The following is what QCA has to say about what happens here:
	 *
	 * Across resets SGMII link status goes to weird state.
	 * If SGMII_DEBUG register reads other than 0x1f or 0x10,
	 * we are for sure in a bad state.
	 *
	 * Issue a PHY reset in MR_AN_CONTROL to keep going.
	 */
	do {
		ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL,
			       QCA955X_MR_AN_CONTROL_PHY_RESET |
			       QCA955X_MR_AN_CONTROL_AN_ENABLE);
		udelay(200);
		ag71xx_bit_clear(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL,
				 QCA955X_MR_AN_CONTROL_PHY_RESET);
		mdelay(300);
		sgmii_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_DEBUG) &
			       QCA955X_SGMII_DEBUG_TX_STATE_MASK;

		if (tries++ >= QCA955X_SGMII_LINK_WAR_MAX_TRY) {
			pr_err("ag71xx: max retries for SGMII fixup exceeded\n");
			break;
		}
	} while (!(sgmii_status == 0xf || sgmii_status == 0x10));

sgmii_out:
	iounmap(gmac_base);
err_iomap:
	of_node_put(np_dev);
out:
	of_node_put(np);
}
static void ath79_mii_ctrl_set_if(struct ag71xx *ag, unsigned int mii_if)
{
	u32 t;

	t = __raw_readl(ag->mii_base);
	t &= ~(AR71XX_MII_CTRL_IF_MASK);
	t |= (mii_if & AR71XX_MII_CTRL_IF_MASK);
	__raw_writel(t, ag->mii_base);
}
static void ath79_mii0_ctrl_set_if(struct ag71xx *ag)
{
	unsigned int mii_if;

	switch (ag->phy_if_mode) {
	case PHY_INTERFACE_MODE_MII:
		mii_if = AR71XX_MII0_CTRL_IF_MII;
		break;
	case PHY_INTERFACE_MODE_GMII:
		mii_if = AR71XX_MII0_CTRL_IF_GMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		mii_if = AR71XX_MII0_CTRL_IF_RGMII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		mii_if = AR71XX_MII0_CTRL_IF_RMII;
		break;
	default:
		WARN(1, "Impossible PHY mode defined.\n");
		return;
	}

	ath79_mii_ctrl_set_if(ag, mii_if);
}
static void ath79_mii1_ctrl_set_if(struct ag71xx *ag)
{
	unsigned int mii_if;

	switch (ag->phy_if_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mii_if = AR71XX_MII1_CTRL_IF_RMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		mii_if = AR71XX_MII1_CTRL_IF_RGMII;
		break;
	default:
		WARN(1, "Impossible PHY mode defined.\n");
		return;
	}

	ath79_mii_ctrl_set_if(ag, mii_if);
}
static void ath79_mii_ctrl_set_speed(struct ag71xx *ag)
{
	unsigned int mii_speed;
	u32 t;

	if (!ag->mii_base)
		return;

	switch (ag->speed) {
	case SPEED_10:
		mii_speed = AR71XX_MII_CTRL_SPEED_10;
		break;
	case SPEED_100:
		mii_speed = AR71XX_MII_CTRL_SPEED_100;
		break;
	case SPEED_1000:
		mii_speed = AR71XX_MII_CTRL_SPEED_1000;
		break;
	default:
		BUG();
	}

	t = __raw_readl(ag->mii_base);
	t &= ~(AR71XX_MII_CTRL_SPEED_MASK << AR71XX_MII_CTRL_SPEED_SHIFT);
	t |= mii_speed << AR71XX_MII_CTRL_SPEED_SHIFT;
	__raw_writel(t, ag->mii_base);
}
static void
__ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct device_node *np = ag->pdev->dev.of_node;
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;

	if (!ag->link && update) {
		ag71xx_hw_stop(ag);
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			pr_info("%s: link down\n", ag->dev->name);
		return;
	}

	if (!of_device_is_compatible(np, "qca,ar9130-eth") &&
	    !of_device_is_compatible(np, "qca,ar7100-eth"))
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		BUG();
		return;
	}

	if (ag->tx_ring.desc_split) {
		ag->fifodata[2] &= 0xffff;
		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);

	if (of_device_is_compatible(np, "qca,ar7100-eth") ||
	    of_device_is_compatible(np, "qca,ar9130-eth")) {
		ath79_set_pll(ag);
		ath79_mii_ctrl_set_speed(ag);
	} else if (of_device_is_compatible(np, "qca,ar7242-eth") ||
		   of_device_is_compatible(np, "qca,ar9340-eth") ||
		   of_device_is_compatible(np, "qca,qca9550-eth") ||
		   of_device_is_compatible(np, "qca,qca9560-eth")) {
		ath79_set_pllval(ag);
		if (of_property_read_bool(np, "qca955x-sgmii-fixup"))
			ag71xx_sgmii_init_qca955x(np);
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	if (of_device_is_compatible(np, "qca,qca9530-eth") ||
	    of_device_is_compatible(np, "qca,qca9560-eth")) {
		/*
		 * The rx ring buffer can stall on small packets on QCA953x and
		 * QCA956x. Disabling the inline checksum engine fixes the stall.
		 * The wr, rr functions cannot be used since this hidden register
		 * is outside of the normal ag71xx register block.
		 */
		void __iomem *dam = ioremap_nocache(0xb90001bc, 0x4);
		if (dam) {
			__raw_writel(__raw_readl(dam) & ~BIT(27), dam);
			(void)__raw_readl(dam);
			iounmap(dam);
		}
	}

	ag71xx_hw_start(ag);

	netif_carrier_on(ag->dev);
	if (update && netif_msg_link(ag))
		pr_info("%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	ag71xx_dump_regs(ag);
}
void ag71xx_link_adjust(struct ag71xx *ag)
{
	__ag71xx_link_adjust(ag, true);
}
static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->dev);

	return 0;
}
static void ag71xx_hw_disable(struct ag71xx *ag)
{
	netif_stop_queue(ag->dev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	ag71xx_rings_cleanup(ag);
}
static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;
	int ret;

	netif_carrier_off(dev);
	max_frame_len = ag71xx_max_frame_len(dev->mtu);
	ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	phy_start(ag->phy_dev);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}
static int ag71xx_stop(struct net_device *dev)
{
	unsigned long flags;
	struct ag71xx *ag = netdev_priv(dev);

	netif_carrier_off(dev);
	phy_stop(ag->phy_dev);

	spin_lock_irqsave(&ag->lock, flags);
	if (ag->link) {
		ag->link = 0;
		ag71xx_link_adjust(ag);
	}
	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_hw_disable(ag);

	return 0;
}
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	struct ag71xx_desc *desc;
	int ring_mask = BIT(ring->order) - 1;
	int ndesc = 0;
	int split = ring->desc_split;
	int i;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/*
			 * TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}
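/*
 * Worked example of the split logic: with desc_split = 512 a 1540-byte
 * frame becomes four segments of 512, 512, 508 and 8 bytes. The third
 * segment is shortened by 4 bytes so that the final DMA transfer stays
 * above the 4-byte minimum noted above.
 */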
static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	int i, n, ring_min;

	if (skb->len <= 4) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32) dma_addr, skb->len & ag->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].len = skb->len;
	ring->buf[i].skb = skb;

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		DBG("%s: tx queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	dev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
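/*
 * Ordering note: ag71xx_fill_dma_desc() marks the first descriptor of the
 * frame DESC_EMPTY so the hardware cannot start on a half-built chain;
 * only after all fields are filled does the xmit path clear DESC_EMPTY,
 * followed by a write barrier before the TX engine is kicked.
 */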
static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(dev);

	switch (cmd) {
	case SIOCSIFHWADDR:
		if (copy_from_user
			(dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user
			(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, ifr, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static void ag71xx_oom_timer_handler(struct timer_list *t)
{
	struct ag71xx *ag = from_timer(ag, t, oom_timer);

	napi_schedule(&ag->napi);
}
static void ag71xx_tx_timeout(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);

	schedule_delayed_work(&ag->restart_work, 1);
}
static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work.work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	if (ag->link)
		__ag71xx_link_adjust(ag, false);
	rtnl_unlock();
}
static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = netdev_get_tx_queue(ag->dev, 0)->trans_start;
	if (likely(time_before(jiffies, timestamp + HZ/10)))
		return false;

	if (!netif_carrier_ok(ag->dev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}
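/*
 * The state-machine encodings tested above are not publicly documented;
 * they match the RX/TX DMA states observed when the engine wedges, and
 * are only consulted once a transmit has been pending for more than
 * ~100ms (HZ/10) with the carrier up.
 */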
static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	bool dma_stuck = false;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	int sent = 0;
	int bytes_compl = 0;
	int n = 0;

	DBG("%s: processing TX ring\n", ag->dev->name);

	while (ring->dirty + n != ring->curr) {
		unsigned int i = (ring->dirty + n) & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb = ring->buf[i].skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (ag->tx_hang_workaround &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work, HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		bytes_compl += ring->buf[i].len;
		sent++;

		ring->dirty += n;
		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	if (!sent)
		return 0;

	ag->dev->stats.tx_bytes += bytes_compl;
	ag->dev->stats.tx_packets += sent;

	netdev_completed_queue(ag->dev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->dev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}
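/*
 * Completed frames are reported to byte queue limits via
 * netdev_completed_queue(), and the queue is only woken once the ring has
 * drained below three quarters of its capacity, which avoids bouncing the
 * queue state on every completion.
 */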
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	unsigned int pktlen_mask = ag->desc_pktlen_mask;
	unsigned int offset = ag->rx_buf_offset;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct list_head rx_list;
	struct sk_buff *next;
	struct sk_buff *skb;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
	    dev->name, limit, ring->curr, ring->dirty);
	INIT_LIST_HEAD(&rx_list);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			/* ring is full while the descriptor is not empty;
			 * this should not happen, bail out */
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&ag->pdev->dev, ring->buf[i].dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (err) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_NONE;
			list_add_tail(&skb->list, &rx_list);
		}

next:
		ring->buf[i].rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	list_for_each_entry_safe(skb, next, &rx_list, list)
		skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb_list(&rx_list);

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
	    dev->name, ring->curr, ring->dirty, done);

	return done;
}
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	tx_done = ag71xx_tx_packets(ag, false);

	DBG("%s: processing RX ring\n", dev->name);
	rx_done = ag71xx_rx_packets(ag, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, rx=%d, tx=%d,limit=%d\n",
		    dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

more:
	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
	    dev->name, rx_done, tx_done, limit);
	return limit;

oom:
	if (netif_msg_rx_err(ag))
		pr_info("%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}
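/*
 * Interrupt/poll handshake: ag71xx_interrupt() masks AG71XX_INT_POLL and
 * schedules NAPI; the poll loop above keeps running while full budgets are
 * consumed or RX/TX status bits stay pending, and only re-enables the
 * interrupt (under ag->lock) after an idle round completes.
 */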
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}
static int ag71xx_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(dev);

	dev->mtu = new_mtu;
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(dev->mtu));

	return 0;
}
static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
static int ag71xx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	const void *mac_addr;
	u32 max_frame_len;
	int tx_size;
	int err;

	dev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
	if (!dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	err = ag71xx_setup_gmac(np);
	if (err)
		return err;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->msg_enable = netif_msg_init(ag71xx_msg_level,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	ag->mac_reset = devm_reset_control_get_exclusive(&pdev->dev, "mac");
	if (IS_ERR(ag->mac_reset)) {
		dev_err(&pdev->dev, "missing mac reset\n");
		return PTR_ERR(ag->mac_reset);
	}

	ag->mdio_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "mdio");

	if (of_property_read_u32_array(np, "fifo-data", ag->fifodata, 3)) {
		if (of_device_is_compatible(np, "qca,ar9130-eth") ||
		    of_device_is_compatible(np, "qca,ar7100-eth")) {
			ag->fifodata[0] = 0x0fff0000;
			ag->fifodata[1] = 0x00001fff;
		} else {
			ag->fifodata[0] = 0x0010ffff;
			ag->fifodata[1] = 0x015500aa;
			ag->fifodata[2] = 0x01f00140;
		}
		if (of_device_is_compatible(np, "qca,ar9130-eth"))
			ag->fifodata[2] = 0x00780fff;
		else if (of_device_is_compatible(np, "qca,ar7100-eth"))
			ag->fifodata[2] = 0x008001ff;
	}

	if (of_property_read_u32_array(np, "pll-data", ag->plldata, 3))
		dev_dbg(&pdev->dev, "failed to read pll-data property\n");

	if (of_property_read_u32_array(np, "pll-reg", ag->pllreg, 3))
		dev_dbg(&pdev->dev, "failed to read pll-reg property\n");

	ag->pllregmap = syscon_regmap_lookup_by_phandle(np, "pll-handle");
	if (IS_ERR(ag->pllregmap)) {
		dev_dbg(&pdev->dev, "failed to read pll-handle property\n");
		ag->pllregmap = NULL;
	}

	ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
					    res->end - res->start + 1);
	if (!ag->mac_base)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		ag->mii_base = devm_ioremap_nocache(&pdev->dev, res->start,
						    res->end - res->start + 1);
		if (!ag->mii_base)
			return -ENOMEM;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, dev->irq, ag71xx_interrupt,
			       0x0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		return err;
	}

	dev->netdev_ops = &ag71xx_netdev_ops;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);

	timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	if (of_device_is_compatible(np, "qca,ar9340-eth") ||
	    of_device_is_compatible(np, "qca,qca9530-eth") ||
	    of_device_is_compatible(np, "qca,qca9550-eth") ||
	    of_device_is_compatible(np, "qca,qca9560-eth"))
		ag->desc_pktlen_mask = SZ_16K - 1;
	else
		ag->desc_pktlen_mask = SZ_4K - 1;

	if (ag->desc_pktlen_mask == SZ_16K - 1 &&
	    !of_device_is_compatible(np, "qca,qca9550-eth") &&
	    !of_device_is_compatible(np, "qca,qca9560-eth"))
		max_frame_len = ag->desc_pktlen_mask;
	else
		max_frame_len = 1540;

	dev->max_mtu = max_frame_len - ag71xx_max_frame_len(0);

	if (of_device_is_compatible(np, "qca,ar7240-eth") ||
	    of_device_is_compatible(np, "qca,ar7241-eth") ||
	    of_device_is_compatible(np, "qca,ar7242-eth") ||
	    of_device_is_compatible(np, "qca,ar9330-eth") ||
	    of_device_is_compatible(np, "qca,ar9340-eth") ||
	    of_device_is_compatible(np, "qca,qca9530-eth") ||
	    of_device_is_compatible(np, "qca,qca9550-eth") ||
	    of_device_is_compatible(np, "qca,qca9560-eth"))
		ag->tx_hang_workaround = 1;

	ag->rx_buf_offset = NET_SKB_PAD;
	if (!of_device_is_compatible(np, "qca,ar7100-eth") &&
	    !of_device_is_compatible(np, "qca,ar9130-eth"))
		ag->rx_buf_offset += NET_IP_ALIGN;

	if (of_device_is_compatible(np, "qca,ar7100-eth")) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct ag71xx_desc),
					    &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc)
		return -ENOMEM;

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32) ag->stop_desc_dma;

	mac_addr = of_get_mac_address(np);
	if (IS_ERR_OR_NULL(mac_addr) || !is_valid_ether_addr(mac_addr)) {
		dev_err(&pdev->dev, "invalid MAC address, using random address\n");
		eth_random_addr(dev->dev_addr);
	} else {
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
	}

	ag->phy_if_mode = of_get_phy_mode(np);
	if (ag->phy_if_mode < 0) {
		dev_err(&pdev->dev, "missing phy-mode property in DT\n");
		return ag->phy_if_mode;
	}

	if (of_property_read_u32(np, "qca,mac-idx", &ag->mac_idx))
		ag->mac_idx = -1;
	if (ag->mii_base)
		switch (ag->mac_idx) {
		case 0:
			ath79_mii0_ctrl_set_if(ag);
			break;
		case 1:
			ath79_mii1_ctrl_set_if(ag);
			break;
		default:
			break;
		}

	netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	ag71xx_dump_regs(ag);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);

	ag71xx_hw_init(ag);

	ag71xx_dump_regs(ag);

	/*
	 * populate current node to register mdio-bus as a subdevice.
	 * the mdio bus works independently on ar7241 and later chips
	 * and we need to load mdio1 before gmac0, which can be done
	 * by adding a "simple-mfd" compatible to gmac node. The
	 * following code checks OF_POPULATED_BUS flag before populating
	 * to avoid duplicated population.
	 */
	if (!of_node_check_flag(np, OF_POPULATED_BUS)) {
		err = of_platform_populate(np, NULL, NULL, &pdev->dev);
		if (err)
			return err;
	}

	err = ag71xx_phy_connect(ag);
	if (err)
		return err;

	err = ag71xx_debugfs_init(ag);
	if (err)
		goto err_phy_disconnect;

	platform_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		platform_set_drvdata(pdev, NULL);
		ag71xx_debugfs_exit(ag);
		goto err_phy_disconnect;
	}

	pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode: %s\n",
		dev->name, (unsigned long) ag->mac_base, dev->irq,
		phy_modes(ag->phy_if_mode));

	return 0;

err_phy_disconnect:
	ag71xx_phy_disconnect(ag);
	return err;
}
static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct ag71xx *ag;

	if (!dev)
		return 0;

	ag = netdev_priv(dev);
	ag71xx_debugfs_exit(ag);
	ag71xx_phy_disconnect(ag);
	unregister_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
static const struct of_device_id ag71xx_match[] = {
	{ .compatible = "qca,ar7100-eth" },
	{ .compatible = "qca,ar7240-eth" },
	{ .compatible = "qca,ar7241-eth" },
	{ .compatible = "qca,ar7242-eth" },
	{ .compatible = "qca,ar9130-eth" },
	{ .compatible = "qca,ar9330-eth" },
	{ .compatible = "qca,ar9340-eth" },
	{ .compatible = "qca,qca9530-eth" },
	{ .compatible = "qca,qca9550-eth" },
	{ .compatible = "qca,qca9560-eth" },
	{}
};
static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name		= AG71XX_DRV_NAME,
		.of_match_table	= ag71xx_match,
	}
};
static int __init ag71xx_module_init(void)
{
	int ret;

	ret = ag71xx_debugfs_root_init();
	if (ret)
		goto err_out;

	ret = platform_driver_register(&ag71xx_driver);
	if (ret)
		goto err_debugfs_exit;

	return 0;

err_debugfs_exit:
	ag71xx_debugfs_root_exit();
err_out:
	return ret;
}
static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
	ag71xx_debugfs_root_exit();
}
module_init(ag71xx_module_init);
module_exit(ag71xx_module_exit);

MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
MODULE_AUTHOR("Felix Fietkau <nbd@nbd.name>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" AG71XX_DRV_NAME);