/*
 *  Atheros AR71xx built-in ethernet mac driver
 *
 *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 *  Based on Atheros' AG7100 driver
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as published
 *  by the Free Software Foundation.
 */

#include "ag71xx.h"

#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)

static int ag71xx_msg_level = -1;

module_param_named(msg_level, ag71xx_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define ETH_SWITCH_HEADER_LEN	2

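/*
 * ETH_SWITCH_HEADER_LEN reserves room for the 2-byte tag inserted by the
 * built-in AR8216-style switch; see the ag71xx_has_ar8216() paths below.
 */
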
static int ag71xx_tx_packets(struct ag71xx *ag, bool flush);
static void ag71xx_qca955x_sgmii_init(void);

static inline unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}

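/*
 * Worked example: with the default 1500-byte MTU the MAC has to accept
 * 2 (switch header) + 14 (ethernet header) + 4 (VLAN tag) + 1500 (payload)
 * + 4 (FCS) = 1524 bytes per frame.
 */
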
static void ag71xx_dump_dma_regs(struct ag71xx *ag)
{
	DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_TX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_TX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_TX_STATUS));

	DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_RX_CTRL),
	    ag71xx_rr(ag, AG71XX_REG_RX_DESC),
	    ag71xx_rr(ag, AG71XX_REG_RX_STATUS));
}

static void ag71xx_dump_regs(struct ag71xx *ag)
{
	DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
	    ag71xx_rr(ag, AG71XX_REG_MAC_IPG),
	    ag71xx_rr(ag, AG71XX_REG_MAC_HDX),
	    ag71xx_rr(ag, AG71XX_REG_MAC_MFL));
	DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1),
	    ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2));
	DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
	DBG("%s: fifo_cfg3=%08x, fifo_cfg4=%08x, fifo_cfg5=%08x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
}

static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr)
{
	DBG("%s: %s intr=%08x %s%s%s%s%s%s\n",
	    ag->dev->name, label, intr,
	    (intr & AG71XX_INT_TX_PS) ? "TXPS " : "",
	    (intr & AG71XX_INT_TX_UR) ? "TXUR " : "",
	    (intr & AG71XX_INT_TX_BE) ? "TXBE " : "",
	    (intr & AG71XX_INT_RX_PR) ? "RXPR " : "",
	    (intr & AG71XX_INT_RX_OF) ? "RXOF " : "",
	    (intr & AG71XX_INT_RX_BE) ? "RXBE " : "");
}

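/*
 * Ring bookkeeping: both rings are power-of-two sized, ring->curr and
 * ring->dirty are free-running counters, and an entry index is obtained
 * by masking with BIT(ring->order) - 1. The counters therefore wrap
 * naturally and (curr - dirty) is always the number of in-flight entries.
 */
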
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb) {
			bytes_compl += ring->buf[i].len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].skb);
		}
		ring->buf[i].skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->dev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx_buf) {
			dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx_buf);
		}
}

static int ag71xx_buffer_offset(struct ag71xx *ag)
{
	int offset = NET_SKB_PAD;

	/*
	 * On AR71xx/AR91xx packets must be 4-byte aligned.
	 *
	 * When using builtin AR8216 support, hardware adds a 2-byte header,
	 * so we don't need any extra alignment in that case.
	 */
	if (!ag71xx_get_pdata(ag)->is_ar724x || ag71xx_has_ar8216(ag))
		return offset;

	return offset + NET_IP_ALIGN;
}

static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

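/*
 * RX buffers are sized so that build_skb() can later be used on them
 * directly: rx_buf_size bytes of packet data followed by the aligned
 * struct skb_shared_info that build_skb() expects at the end.
 */
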
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
	void *data;

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx_buf = data;
	buf->dma_addr = dma_map_single(&ag->dev->dev, data, ag->rx_buf_size,
				       DMA_FROM_DEVICE);
	desc->data = (u32) buf->dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = BIT(ring->order) - 1;
	unsigned int i;
	int ret;
	int offset = ag71xx_buffer_offset(ag);

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		DBG("ag71xx: RX desc at %p, next is %08x\n",
		    desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}

static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	unsigned int count;
	int offset = ag71xx_buffer_offset(ag);

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count);

	return count;
}

static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);
	int tx_size = BIT(tx->order);

	tx->buf = kzalloc(ring_size * sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_ATOMIC);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[BIT(tx->order)];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}

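/*
 * Both rings live in a single coherent allocation: TX descriptors first,
 * then RX, with the buf[] metadata array split the same way. This keeps
 * setup and teardown to one allocation and one free.
 */
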
static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->dev);
}

static unsigned char *ag71xx_speed_str(struct ag71xx *ag)
{
	switch (ag->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}

static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
	  | (((u32) mac[3]) << 8) | ((u32) mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}

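/*
 * Example: for the address 00:11:22:33:44:55 the writes above set
 * MAC_ADDR1 = 0x55443322 and MAC_ADDR2 = 0x11000000, i.e. the six octets
 * are packed into the upper bytes of the two registers.
 */
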
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		pr_alert("%s: unable to clear DMA Rx status: %08x\n",
			 ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		pr_alert("%s: unable to clear DMA Tx status: %08x\n",
			 ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}

#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)

static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	if (pdata->use_flow_control)
		init |= MAC_CFG1_TFC | MAC_CFG1_RFC;
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	if (pdata->is_ar724x) {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0010ffff);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x015500aa);
	} else {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
	}
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

static void ag71xx_hw_init(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 reset_mask = pdata->reset_bit;

	ag71xx_hw_stop(ag);

	if (pdata->is_ar724x) {
		u32 reset_phy = reset_mask;

		reset_phy &= AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY;
		reset_mask &= ~(AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY);

		ath79_device_reset_set(reset_phy);
		msleep(50);
		ath79_device_reset_clear(reset_phy);
		msleep(200);
	}

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	ath79_device_reset_set(reset_mask);
	msleep(100);
	ath79_device_reset_clear(reset_mask);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}

static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	u32 reset_mask = pdata->reset_bit;
	u32 rx_ds;
	u32 mii_reg;

	reset_mask &= AR71XX_RESET_GE0_MAC | AR71XX_RESET_GE1_MAC;

	ag71xx_hw_stop(ag);
	wmb();

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true);

	ath79_device_reset_set(reset_mask);
	udelay(10);
	ath79_device_reset_clear(reset_mask);
	udelay(10);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->dev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->dev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

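/*
 * Note: the fast reset clobbers MII_CFG and the RX descriptor pointer,
 * which is why both are snapshotted before the reset and written back
 * afterwards, while the TX ring is simply restarted from its first
 * descriptor.
 */
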
static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->dev);
}

static void
__ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;
	u32 fifo3;

	if (!ag->link && update) {
		ag71xx_hw_stop(ag);
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			pr_info("%s: link down\n", ag->dev->name);
		return;
	}

	if (pdata->is_ar724x)
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		BUG();
		return;
	}

	if (pdata->is_ar91xx)
		fifo3 = 0x00780fff;
	else if (pdata->is_ar724x)
		fifo3 = 0x01f00140;
	else
		fifo3 = 0x008001ff;

	if (ag->tx_ring.desc_split) {
		fifo3 &= 0xffff;
		fifo3 |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, fifo3);

	if (update && pdata->set_speed)
		pdata->set_speed(ag->speed);

	if (update && pdata->enable_sgmii_fixup)
		ag71xx_qca955x_sgmii_init();

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	if (pdata->disable_inline_checksum_engine) {
		/*
		 * The rx ring buffer can stall on small packets on QCA953x and
		 * QCA956x. Disabling the inline checksum engine fixes the stall.
		 * The wr, rr functions cannot be used since this hidden register
		 * is outside of the normal ag71xx register block.
		 */
		void __iomem *dam = ioremap_nocache(0xb90001bc, 0x4);
		if (dam) {
			__raw_writel(__raw_readl(dam) & ~BIT(27), dam);
			(void)__raw_readl(dam);
			iounmap(dam);
		}
	}

	ag71xx_hw_start(ag);

	netif_carrier_on(ag->dev);
	if (update && netif_msg_link(ag))
		pr_info("%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	DBG("%s: fifo_cfg0=%#x, fifo_cfg1=%#x, fifo_cfg2=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));

	DBG("%s: fifo_cfg3=%#x, fifo_cfg4=%#x, fifo_cfg5=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));

	DBG("%s: mac_cfg2=%#x, mac_ifctl=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
	    ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL));
}

void ag71xx_link_adjust(struct ag71xx *ag)
{
	__ag71xx_link_adjust(ag, true);
}

static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->dev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	unsigned long flags;

	spin_lock_irqsave(&ag->lock, flags);

	netif_stop_queue(ag->dev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;
	int ret;

	netif_carrier_off(dev);
	max_frame_len = ag71xx_max_frame_len(dev->mtu);
	ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	ag71xx_phy_start(ag);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}

static int ag71xx_stop(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	netif_carrier_off(dev);
	ag71xx_phy_stop(ag);
	ag71xx_hw_disable(ag);

	return 0;
}

static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i;
	struct ag71xx_desc *desc;
	int ring_mask = BIT(ring->order) - 1;
	int ndesc = 0;
	int split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/*
			 * TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}

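/*
 * Split example: with desc_split = 512, a 514-byte frame would first be
 * capped to a 512-byte segment, leaving a 2-byte tail. Since the DMA
 * engine hangs on transfers of 4 bytes or less, the first segment is
 * trimmed to 508 bytes instead, so the tail grows to a safe 6 bytes.
 */
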
static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	int i, n, ring_min;

	if (ag71xx_has_ar8216(ag))
		ag71xx_add_ar8216_header(ag, skb);

	if (skb->len <= 4) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&dev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32) dma_addr,
				 skb->len & ag->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].len = skb->len;
	ring->buf[i].skb = skb;

	netdev_sent_queue(dev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		DBG("%s: tx queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&dev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	dev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCETHTOOL:
		if (ag->phy_dev == NULL)
			break;

		spin_lock_irq(&ag->lock);
		ret = phy_ethtool_ioctl(ag->phy_dev, (void *) ifr->ifr_data);
		spin_unlock_irq(&ag->lock);
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user
			(dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user
			(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, ifr, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void ag71xx_oom_timer_handler(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct ag71xx *ag = netdev_priv(dev);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);

	schedule_delayed_work(&ag->restart_work, 1);
}

static void ag71xx_bit_set(void __iomem *reg, u32 bit)
{
	u32 val = __raw_readl(reg) | bit;
	__raw_writel(val, reg);
	__raw_readl(reg);	/* flush the write */
}

static void ag71xx_bit_clear(void __iomem *reg, u32 bit)
{
	u32 val = __raw_readl(reg) & ~bit;
	__raw_writel(val, reg);
	__raw_readl(reg);	/* flush the write */
}

static void ag71xx_qca955x_sgmii_init(void)
{
	void __iomem *gmac_base;
	u32 mr_an_status, sgmii_status;
	unsigned int tries = 0;

	gmac_base = ioremap_nocache(QCA955X_GMAC_BASE, QCA955X_GMAC_SIZE);
	if (!gmac_base) {
		pr_err("ag71xx: unable to ioremap the QCA955x GMAC registers\n");
		return;
	}

	mr_an_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_MR_AN_STATUS);
	if (!(mr_an_status & QCA955X_MR_AN_STATUS_AN_ABILITY))
		goto sgmii_out;

	__raw_writel(QCA955X_SGMII_RESET_RX_CLK_N_RESET,
		     gmac_base + QCA955X_GMAC_REG_SGMII_RESET);
	__raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_RESET);
	udelay(10);

	/* bring the SGMII block out of reset one clock domain at a time */
	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_HW_RX_125M_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_RX_125M_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_TX_125M_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_RX_CLK_N);
	udelay(10);

	ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
		       QCA955X_SGMII_RESET_TX_CLK_N);
	udelay(10);

	do {
		ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL,
			       QCA955X_MR_AN_CONTROL_PHY_RESET |
			       QCA955X_MR_AN_CONTROL_AN_ENABLE);
		udelay(200);
		ag71xx_bit_clear(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL,
				 QCA955X_MR_AN_CONTROL_PHY_RESET);
		mdelay(300);
		sgmii_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_DEBUG) & 0xF;

		if (tries++ >= QCA955X_SGMII_LINK_WAR_MAX_TRY) {
			pr_warn("ag71xx: max retries for SGMII fixup exceeded!\n");
			break;
		}
	} while (!(sgmii_status == 0xf || sgmii_status == 0x10));

sgmii_out:
	iounmap(gmac_base);
}

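/*
 * The loop above is the QCA955x SGMII link workaround: the SGMII
 * autonegotiation block is reset repeatedly until the debug register
 * reports one of the two states (0xf or 0x10) that appear to correspond
 * to a usable link, or until the retry budget is exhausted.
 */
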
static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work.work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	if (ag->link)
		__ag71xx_link_adjust(ag, false);
	rtnl_unlock();
}

static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = netdev_get_tx_queue(ag->dev, 0)->trans_start;
	if (likely(time_before(jiffies, timestamp + HZ/10)))
		return false;

	if (!netif_carrier_ok(ag->dev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}

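/*
 * The magic values tested above are empirical: they are the RX/TX state
 * machine and FIFO depth snapshots observed when the DMA engine has
 * wedged. The caller only consults this check on AR724x, where the hang
 * is known to occur.
 */
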
static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	bool dma_stuck = false;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	int sent = 0;
	int bytes_compl = 0;
	int n = 0;

	DBG("%s: processing TX ring\n", ag->dev->name);

	while (ring->dirty + n != ring->curr) {
		unsigned int i = (ring->dirty + n) & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb = ring->buf[i].skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (pdata->is_ar724x &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work, HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		bytes_compl += ring->buf[i].len;

		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	if (!sent)
		return 0;

	ag->dev->stats.tx_bytes += bytes_compl;
	ag->dev->stats.tx_packets += sent;

	netdev_completed_queue(ag->dev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->dev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}

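/*
 * Completion accounting: ring->dirty only advances once a whole packet
 * (possibly spanning several split descriptors) has been reclaimed, and
 * one TX_STATUS_PS ack is written per reclaimed descriptor so the
 * hardware's packet-sent status stays in step with the ring.
 */
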
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	int offset = ag71xx_buffer_offset(ag);
	unsigned int pktlen_mask = ag->desc_pktlen_mask;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct sk_buff_head queue;
	struct sk_buff *skb;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
	    dev->name, limit, ring->curr, ring->dirty);

	skb_queue_head_init(&queue);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&dev->dev, ring->buf[i].dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (ag71xx_has_ar8216(ag))
			err = ag71xx_remove_ar8216_header(ag, skb, pktlen);

		if (err) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_NONE;
			__skb_queue_tail(&queue, skb);
		}

next:
		ring->buf[i].rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	while ((skb = __skb_dequeue(&queue)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
	}

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
	    dev->name, ring->curr, ring->dirty, done);

	return done;
}

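/*
 * Received skbs are parked on a local queue and handed to the stack only
 * after ag71xx_ring_rx_refill() has run, so the ring is replenished
 * before the potentially slow pass up the network stack.
 */
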
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	pdata->ddr_flush();
	tx_done = ag71xx_tx_packets(ag, false);

	DBG("%s: processing RX ring\n", dev->name);
	rx_done = ag71xx_rx_packets(ag, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, rx=%d, tx=%d, limit=%d\n",
		    dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

more:
	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
	    dev->name, rx_done, tx_done, limit);
	return limit;

oom:
	if (netif_msg_rx_err(ag))
		pr_info("%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}

static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ag71xx_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	ag71xx_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static int ag71xx_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;

	max_frame_len = ag71xx_max_frame_len(new_mtu);
	/* 68 is the minimum MTU an IPv4 host must support (RFC 791) */
	if (new_mtu < 68 || max_frame_len > ag->max_frame_len)
		return -EINVAL;

	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ag71xx_netpoll,
#endif
};

static const char *ag71xx_get_phy_if_mode_name(phy_interface_t mode)
{
	switch (mode) {
	case PHY_INTERFACE_MODE_MII:
		return "MII";
	case PHY_INTERFACE_MODE_GMII:
		return "GMII";
	case PHY_INTERFACE_MODE_RMII:
		return "RMII";
	case PHY_INTERFACE_MODE_RGMII:
		return "RGMII";
	case PHY_INTERFACE_MODE_SGMII:
		return "SGMII";
	default:
		break;
	}

	return "unknown";
}

static int ag71xx_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	struct ag71xx_platform_data *pdata;
	int tx_size;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data specified\n");
		err = -ENXIO;
		goto err_out;
	}

	if (pdata->mii_bus_dev == NULL && pdata->phy_mask) {
		dev_err(&pdev->dev, "no MII bus device specified\n");
		err = -EINVAL;
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(*ag));
	if (!dev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	if (!pdata->max_frame_len || !pdata->desc_pktlen_mask) {
		err = -EINVAL;
		goto err_free_dev;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->msg_enable = netif_msg_init(ag71xx_msg_level,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac_base");
	if (!res) {
		dev_err(&pdev->dev, "no mac_base resource found\n");
		err = -ENXIO;
		goto err_free_dev;
	}

	ag->mac_base = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!ag->mac_base) {
		dev_err(&pdev->dev, "unable to ioremap mac_base\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = request_irq(dev->irq, ag71xx_interrupt,
			  0x0,
			  dev->name, dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		goto err_unmap_base;
	}

	dev->base_addr = (unsigned long)ag->mac_base;
	dev->netdev_ops = &ag71xx_netdev_ops;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);

	init_timer(&ag->oom_timer);
	ag->oom_timer.data = (unsigned long) dev;
	ag->oom_timer.function = ag71xx_oom_timer_handler;

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	ag->max_frame_len = pdata->max_frame_len;
	ag->desc_pktlen_mask = pdata->desc_pktlen_mask;

	if (!pdata->is_ar724x && !pdata->is_ar91xx) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dma_alloc_coherent(NULL,
					   sizeof(struct ag71xx_desc),
					   &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32) ag->stop_desc_dma;

	memcpy(dev->dev_addr, pdata->mac_addr, ETH_ALEN);

	netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	ag71xx_dump_regs(ag);

	ag71xx_hw_init(ag);

	ag71xx_dump_regs(ag);

	err = ag71xx_phy_connect(ag);
	if (err)
		goto err_free_desc;

	err = ag71xx_debugfs_init(ag);
	if (err)
		goto err_phy_disconnect;

	platform_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		goto err_debugfs_exit;
	}

	pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
		dev->name, dev->base_addr, dev->irq,
		ag71xx_get_phy_if_mode_name(pdata->phy_if_mode));

	return 0;

err_debugfs_exit:
	ag71xx_debugfs_exit(ag);
err_phy_disconnect:
	ag71xx_phy_disconnect(ag);
err_free_desc:
	dma_free_coherent(NULL, sizeof(struct ag71xx_desc), ag->stop_desc,
			  ag->stop_desc_dma);
err_free_irq:
	free_irq(dev->irq, dev);
err_unmap_base:
	iounmap(ag->mac_base);
err_free_dev:
	free_netdev(dev);
err_out:
	platform_set_drvdata(pdev, NULL);
	return err;
}

static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	if (dev) {
		struct ag71xx *ag = netdev_priv(dev);

		ag71xx_debugfs_exit(ag);
		ag71xx_phy_disconnect(ag);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(ag->mac_base);
		free_netdev(dev);
		platform_set_drvdata(pdev, NULL);
	}

	return 0;
}

static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name	= AG71XX_DRV_NAME,
	},
};

static int __init ag71xx_module_init(void)
{
	int ret;

	ret = ag71xx_debugfs_root_init();
	if (ret)
		goto err_out;

	ret = ag71xx_mdio_driver_init();
	if (ret)
		goto err_debugfs_exit;

	ret = platform_driver_register(&ag71xx_driver);
	if (ret)
		goto err_mdio_exit;

	return 0;

err_mdio_exit:
	ag71xx_mdio_driver_exit();
err_debugfs_exit:
	ag71xx_debugfs_root_exit();
err_out:
	return ret;
}

static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
	ag71xx_mdio_driver_exit();
	ag71xx_debugfs_root_exit();
}

module_init(ag71xx_module_init);
module_exit(ag71xx_module_exit);

MODULE_VERSION(AG71XX_DRV_VERSION);
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" AG71XX_DRV_NAME);