2 * Ethernet device driver for Gemini SoC (SL351x GMAC).
4 * Copyright (C) 2011, Tobias Waldvogel <tobias.waldvogel@gmail.com>
6 * Based on work by Michał Mirosław <mirq-linux@rere.qmqm.pl> and
7 * Paulius Zaleckas <paulius.zaleckas@gmail.com> and
8 * Giuseppe De Robertis <Giuseppe.DeRobertis@ba.infn.it> and
9 * GPLd spaghetti code from Raidsonic and other Gemini-based NAS vendors.
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
17 #include <linux/module.h>
18 #include <linux/kernel.h>
19 #include <linux/init.h>
21 #include <linux/spinlock.h>
22 #include <linux/slab.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/cache.h>
25 #include <linux/interrupt.h>
27 #include <linux/platform_device.h>
28 #include <linux/etherdevice.h>
29 #include <linux/if_vlan.h>
30 #include <linux/skbuff.h>
31 #include <linux/phy.h>
32 #include <linux/crc32.h>
33 #include <linux/ethtool.h>
34 #include <linux/tcp.h>
35 #include <linux/u64_stats_sync.h>
39 #include <linux/ipv6.h>
41 #include <mach/hardware.h>
42 #include <mach/global_reg.h>
44 #include <mach/gmac.h>
45 #include "sl351x_hw.h"
/* Driver identity strings. */
#define DRV_NAME "gmac-gemini"
#define DRV_VERSION "1.0"

/* AHB bus burst-length encodings (binary literals are a GCC extension). */
#define HBURST_SINGLE 0b00
#define HBURST_INCR 0b01
#define HBURST_INCR4 0b10
#define HBURST_INCR8 0b11

/* AHB HPROT protection-control bits. */
#define HPROT_DATA_CACHE BIT(0)
/* NOTE(review): "PRIVILIGED" is a typo for "PRIVILEGED"; left as-is because
 * users of this macro may exist outside this chunk. */
#define HPROT_PRIVILIGED BIT(1)
#define HPROT_BUFFERABLE BIT(2)
#define HPROT_CACHABLE BIT(3)

/* Default ring/buffer sizing; *_ORDER values are log2 of entry counts. */
#define DEFAULT_RX_COALESCE_NSECS 0
#define DEFAULT_GMAC_RXQ_ORDER 9
#define DEFAULT_GMAC_TXQ_ORDER 8
#define DEFAULT_RX_BUF_ORDER 11
#define DEFAULT_NAPI_WEIGHT 64
#define TX_MAX_FRAGS 16
#define TX_QUEUE_NUM 1 /* max: 6 */
#define RX_MAX_ALLOC_ORDER 2

/* Grouped interrupt masks for GMAC port 0; shifted per-port where used. */
#define GMAC0_IRQ0_2 (GMAC0_TXDERR_INT_BIT|GMAC0_TXPERR_INT_BIT| \
        GMAC0_RXDERR_INT_BIT|GMAC0_RXPERR_INT_BIT)
#define GMAC0_IRQ0_TXQ0_INTS (GMAC0_SWTQ00_EOF_INT_BIT| \
        GMAC0_SWTQ00_FIN_INT_BIT)
#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT|GMAC0_RX_OVERRUN_INT_BIT)

/* Checksum/segmentation offload features advertised to the network stack. */
#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
        NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
        NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

MODULE_AUTHOR("Tobias Waldvogel");
MODULE_DESCRIPTION("StorLink SL351x (Gemini) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
/* NOTE(review): the opening of the enclosing structure (struct toe_private)
 * is not visible in this chunk; field purposes below are inferred from their
 * use elsewhere in this file — confirm against the full source. */
struct net_device *netdev[2];   /* the two GMAC ports sharing this TOE */
__le32 mac_addr[2][3];          /* presumably per-port MAC address words */
unsigned int freeq_order;       /* log2(number of free-queue descriptors) */
unsigned int freeq_frag_order;  /* log2(RX fragment size in bytes) */
GMAC_RXDESC_T *freeq_ring;      /* free-queue descriptor ring (DMA-coherent) */
dma_addr_t freeq_dma_base;      /* DMA base address of freeq_ring */
struct page **freeq_page_tab;   /* pages backing the free-queue fragments */
spinlock_t freeq_lock;          /* serializes free-queue refill (toe_fill_freeq) */
/* NOTE(review): the fields below appear to belong to struct gmac_txq,
 * whose opener is elided in this chunk. */
struct sk_buff **skb;           /* per-descriptor skb back-pointers for TX reclaim */
unsigned int noirq_packets;     /* TX packets queued since the last IRQ-marked one */
/* Per-port (per-netdev) driver state. NOTE(review): the closing brace of
 * this struct is elided in this chunk. */
struct gmac_private {
    struct toe_private *toe;        /* shared TOE/DMA engine state */
    void __iomem *ctl_iomem;        /* GMAC control register window */
    void __iomem *dma_iomem;        /* GMAC DMA register window */
    void __iomem *rxq_rwptr;        /* RX queue read/write pointer register */
    GMAC_RXDESC_T *rxq_ring;        /* RX descriptor ring (DMA-coherent) */
    unsigned int rxq_order;         /* log2(RX ring entry count) */
    struct napi_struct napi;
    struct hrtimer rx_coalesce_timer;   /* delays NAPI reschedule for coalescing */
    unsigned int rx_coalesce_nsecs;     /* 0 = no coalescing */
    unsigned int freeq_refill;      /* RX frags consumed since last freeq refill */
    struct gmac_txq txq[TX_QUEUE_NUM];
    unsigned int txq_order;         /* log2(TX ring entry count) */
    unsigned int irq_every_tx_packets;  /* EOF-IRQ marking interval (see gmac_init) */
    dma_addr_t rxq_dma_base;        /* DMA base of rxq_ring */
    dma_addr_t txq_dma_base;        /* DMA base of the TX rings */
    unsigned int msg_enable;        /* netif message-level bitmask */
    spinlock_t config_lock;         /* protects GMAC_CONFIG0 read-modify-write */
    /* Writer-side seqcounts for 64-bit stats on 32-bit SMP. */
    struct u64_stats_sync tx_stats_syncp;
    struct u64_stats_sync rx_stats_syncp;
    struct u64_stats_sync ir_stats_syncp;   /* used from IRQ/reclaim paths */
    struct rtnl_link_stats64 stats;         /* generic counters, under syncps above */
    u64 hw_stats[RX_STATS_NUM];             /* accumulated hardware MIB counters */
    u64 rx_stats[RX_STATUS_NUM];            /* per-RX-status-code counters */
    u64 rx_csum_stats[RX_CHKSUM_NUM];       /* per-RX-checksum-status counters */
    u64 tx_frag_stats[TX_MAX_FRAGS];        /* TX packets by fragment count */
    u64 tx_frags_linearized;                /* skbs linearized after map failure */
/* Total ethtool statistics count; NOTE(review): the tail of this expression
 * is elided in this chunk. */
#define GMAC_STATS_NUM ( \
    RX_STATS_NUM + RX_STATUS_NUM + RX_CHKSUM_NUM + 1 + \

/* ethtool -S stat names; order must match gmac_private counter layout.
 * NOTE(review): several entries are elided in this chunk. */
static const char gmac_stats_strings[GMAC_STATS_NUM][ETH_GSTRING_LEN] = {
    "RX_STATUS_GOOD_FRAME",
    "RX_STATUS_TOO_LONG_GOOD_CRC",
    "RX_STATUS_RUNT_FRAME",
    "RX_STATUS_SFD_NOT_FOUND",
    "RX_STATUS_CRC_ERROR",
    "RX_STATUS_TOO_LONG_BAD_CRC",
    "RX_STATUS_ALIGNMENT_ERROR",
    "RX_STATUS_TOO_LONG_BAD_ALIGN",
    "RX_STATUS_DA_FILTERED",
    "RX_STATUS_BUFFER_FULL",
    "RX_CHKSUM_IP_UDP_TCP_OK",
    "RX_CHKSUM_IP_OK_ONLY",
    "RX_CHKSUM_IP_ERR_UNKNOWN",
    "RX_CHKSUM_TCP_UDP_ERR",
    "TX_FRAGS_LINEARIZED",

/* Forward declaration: used from error paths before its definition. */
static void gmac_dump_dma_state(struct net_device *dev);
/* Read-modify-write GMAC_CONFIG0: clear the bits in @vmask, then set @val.
 * Serialized against other CONFIG0 writers by gmac->config_lock. */
static void gmac_update_config0_reg(struct net_device *dev, u32 val, u32 vmask)
struct gmac_private *gmac = netdev_priv(dev);
spin_lock_irqsave(&gmac->config_lock, flags);
reg = readl(gmac->ctl_iomem + GMAC_CONFIG0);
reg = (reg & ~vmask) | val;
writel(reg, gmac->ctl_iomem + GMAC_CONFIG0);
spin_unlock_irqrestore(&gmac->config_lock, flags);
/* Clear the TX/RX disable bits in CONFIG0 so the MAC starts moving frames. */
static void gmac_enable_tx_rx(struct net_device *dev)
struct gmac_private *gmac = netdev_priv(dev);
void __iomem *config0 = gmac->ctl_iomem + GMAC_CONFIG0;
spin_lock_irqsave(&gmac->config_lock, flags);
reg = readl(config0);
reg &= ~CONFIG0_TX_RX_DISABLE;
writel(reg, config0);
spin_unlock_irqrestore(&gmac->config_lock, flags);
/* Set the TX/RX disable bits in CONFIG0, then wait for in-flight frames to
 * drain before the caller touches the rings or STATUS register. */
static void gmac_disable_tx_rx(struct net_device *dev)
struct gmac_private *gmac = netdev_priv(dev);
void __iomem *config0 = gmac->ctl_iomem + GMAC_CONFIG0;
spin_lock_irqsave(&gmac->config_lock, flags);
reg = readl(config0);
reg |= CONFIG0_TX_RX_DISABLE;
writel(reg, config0);
spin_unlock_irqrestore(&gmac->config_lock, flags);
mdelay(10); /* let GMAC consume packet */
/* Program 802.3x pause-frame enables in CONFIG0 according to @tx/@rx.
 * NOTE(review): the `if (tx)` / `if (rx)` guards appear to be elided in this
 * chunk; as shown, both flow-control bits are set unconditionally. */
static void gmac_set_flow_control(struct net_device *dev, bool tx, bool rx)
struct gmac_private *gmac = netdev_priv(dev);
void __iomem *config0 = gmac->ctl_iomem + GMAC_CONFIG0;
spin_lock_irqsave(&gmac->config_lock, flags);
reg = readl(config0);
reg &= ~CONFIG0_FLOW_CTL;
reg |= CONFIG0_FLOW_TX;
reg |= CONFIG0_FLOW_RX;
writel(reg, config0);
spin_unlock_irqrestore(&gmac->config_lock, flags);
/* phylib adjust_link callback: mirror PHY link/speed/duplex into the GMAC
 * STATUS register and resolve pause-frame settings. Only rewrites STATUS
 * (with TX/RX briefly disabled) when something actually changed.
 * NOTE(review): switch case labels and several guard lines are elided in
 * this chunk. */
static void gmac_update_link_state(struct net_device *dev)
struct gmac_private *gmac = netdev_priv(dev);
void __iomem *status_reg = gmac->ctl_iomem + GMAC_STATUS;
struct phy_device *phydev = dev->phydev;
GMAC_STATUS_T status, old_status;
int pause_tx=0, pause_rx=0;
old_status.bits32 = status.bits32 = readl(status_reg);
status.bits.link = phydev->link;
status.bits.duplex = phydev->duplex;
switch (phydev->speed) {
/* 1000 Mbit: RGMII needs the dedicated gigabit mux setting */
status.bits.speed = GMAC_SPEED_1000;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
/* 100 Mbit */
status.bits.speed = GMAC_SPEED_100;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
/* 10 Mbit */
status.bits.speed = GMAC_SPEED_10;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
netdev_warn(dev, "Not supported PHY speed (%d)\n",
/* Pause frames are only meaningful in full duplex; resolve from the
 * local and link-partner advertisements. */
if (phydev->duplex == DUPLEX_FULL) {
u16 lcladv = phy_read(phydev, MII_ADVERTISE);
u16 rmtadv = phy_read(phydev, MII_LPA);
u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
if (cap & FLOW_CTRL_RX)
if (cap & FLOW_CTRL_TX)
gmac_set_flow_control(dev, pause_tx, pause_rx);
if (old_status.bits32 == status.bits32)
if (netif_msg_link(gmac)) {
phy_print_status(phydev);
netdev_info(dev, "link flow control: %s\n",
? (phydev->asym_pause ? "tx" : "both")
: (phydev->asym_pause ? "rx" : "none")
/* STATUS must not be rewritten while the MAC is active. */
gmac_disable_tx_rx(dev);
writel(status.bits32, status_reg);
gmac_enable_tx_rx(dev);
/* Attach the PHY named in platform data for this port, restrict it to
 * gigabit + pause features, and program the MII interface mux into the
 * GMAC STATUS register. Returns 0 on success, negative errno on failure
 * (return statements are elided in this chunk). */
static int gmac_setup_phy(struct net_device *dev)
struct gmac_private *gmac = netdev_priv(dev);
struct toe_private *toe = gmac->toe;
struct gemini_gmac_platform_data *pdata = toe->dev->platform_data;
GMAC_STATUS_T status = { .bits32 = 0 };
int num = dev->dev_id;
dev->phydev = phy_connect(dev, pdata->bus_id[num],
&gmac_update_link_state, pdata->interface[num]);
if (IS_ERR(dev->phydev)) {
int err = PTR_ERR(dev->phydev);
/* Mask out unsupported modes, then add pause capabilities. */
dev->phydev->supported &= PHY_GBIT_FEATURES;
dev->phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
dev->phydev->advertising = dev->phydev->supported;
/* set PHY interface type */
switch (dev->phydev->interface) {
case PHY_INTERFACE_MODE_MII:
status.bits.mii_rmii = GMAC_PHY_MII;
case PHY_INTERFACE_MODE_GMII:
status.bits.mii_rmii = GMAC_PHY_GMII;
case PHY_INTERFACE_MODE_RGMII:
/* RGMII starts in 10/100 mode; gmac_update_link_state switches
 * to GMAC_PHY_RGMII_1000 when gigabit link comes up. */
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
netdev_err(dev, "Unsupported MII interface\n");
phy_disconnect(dev->phydev);
writel(status.bits32, gmac->ctl_iomem + GMAC_STATUS);
/* Choose the CONFIG_MAXLEN register encoding for a given maximum L3 payload:
 * returns the index of the smallest hardware frame-length option that still
 * fits @max_l3_len plus Ethernet and VLAN headers. Only the first 5 table
 * entries are real candidates; indices 6-7 repeat the 1518 default.
 * NOTE(review): declarations of i/n and the return are elided in this chunk. */
static int gmac_pick_rx_max_len(int max_l3_len)
/* index = CONFIG_MAXLEN_XXX values */
static const int max_len[8] = {
1536, 1518, 1522, 1542,
9212, 10236, 1518, 1518
max_l3_len += ETH_HLEN + VLAN_HLEN;
if (max_l3_len > max_len[n])
for (i = 0; i < 5; ++i) {
if (max_len[i] >= max_l3_len && max_len[i] < max_len[n])
/* One-time port initialization: program CONFIG0-3, AHB arbitration weights
 * and TX queue weighting registers, then set the default ring orders and
 * the TX interrupt-marking interval.
 * NOTE(review): most initializer fields of the register structs are elided
 * in this chunk. */
static int gmac_init(struct net_device *dev)
struct gmac_private *gmac = netdev_priv(dev);
GMAC_CONFIG0_T config0 = { .bits = {
.port0_chk_classq = 1,
.port1_chk_classq = 1,
GMAC_AHB_WEIGHT_T ahb_weight = { .bits = {
GMAC_TX_WCR0_T hw_weigh = { .bits = {
GMAC_TX_WCR1_T sw_weigh = { .bits = {
GMAC_CONFIG1_T config1 = { .bits = {
GMAC_CONFIG2_T config2 = { .bits = {
GMAC_CONFIG3_T config3 = { .bits = {
/* Pick the RX frame-length encoding that fits the current MTU. */
config0.bits.max_len = gmac_pick_rx_max_len(dev->mtu);
/* Preserve whatever the hardware/bootloader left in the reserved bits. */
val = readl(gmac->ctl_iomem + GMAC_CONFIG0);
config0.bits.reserved = ((GMAC_CONFIG0_T)val).bits.reserved;
writel(config0.bits32, gmac->ctl_iomem + GMAC_CONFIG0);
writel(config1.bits32, gmac->ctl_iomem + GMAC_CONFIG1);
writel(config2.bits32, gmac->ctl_iomem + GMAC_CONFIG2);
writel(config3.bits32, gmac->ctl_iomem + GMAC_CONFIG3);
val = readl(gmac->dma_iomem + GMAC_AHB_WEIGHT_REG);
writel(ahb_weight.bits32, gmac->dma_iomem + GMAC_AHB_WEIGHT_REG);
writel(hw_weigh.bits32,
gmac->dma_iomem + GMAC_TX_WEIGHTING_CTRL_0_REG);
writel(sw_weigh.bits32,
gmac->dma_iomem + GMAC_TX_WEIGHTING_CTRL_1_REG);
gmac->rxq_order = DEFAULT_GMAC_RXQ_ORDER;
gmac->txq_order = DEFAULT_GMAC_TXQ_ORDER;
gmac->rx_coalesce_nsecs = DEFAULT_RX_COALESCE_NSECS;
/* Mark every quarter of the queue a packet for interrupt
 * in order to be able to wake up the queue if it was stopped */
gmac->irq_every_tx_packets = 1 << (gmac->txq_order - 2);
/* Teardown counterpart of gmac_init/gmac_setup_phy: detach the PHY. */
static void gmac_uninit(struct net_device *dev)
phy_disconnect(dev->phydev);
/* Allocate the TX descriptor rings (one coherent block shared by all TX
 * queues) and the skb back-pointer table, program the ring base register,
 * and initialize each queue's software state from the hardware pointers.
 * NOTE(review): error-path returns and parts of the per-queue loop are
 * elided in this chunk. */
static int gmac_setup_txqs(struct net_device *dev)
struct gmac_private *gmac = netdev_priv(dev);
struct toe_private *toe = gmac->toe;
void __iomem *rwptr_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE0_PTR_REG;
void __iomem *base_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE_BASE_REG;
unsigned int n_txq = dev->num_tx_queues;
size_t entries = 1 << gmac->txq_order;
size_t len = n_txq * entries;
struct gmac_txq *txq = gmac->txq;
GMAC_TXDESC_T *desc_ring;
struct sk_buff **skb_tab;
skb_tab = kzalloc(len * sizeof(*skb_tab), GFP_KERNEL);
desc_ring = dma_alloc_coherent(toe->dev, len * sizeof(*desc_ring),
&gmac->txq_dma_base, GFP_KERNEL);
/* Hardware packs base address and order into one register; the base
 * must be aligned to the register's address mask. */
BUG_ON(gmac->txq_dma_base & ~DMA_Q_BASE_MASK);
writel(gmac->txq_dma_base | gmac->txq_order, base_reg);
for (i = 0; i < n_txq; i++) {
txq->ring = desc_ring;
txq->noirq_packets = 0;
/* Sync software write pointer to the hardware read pointer. */
r = readw(rwptr_reg);
writew(r, rwptr_reg);
desc_ring += entries;
545 static void gmac_clean_txq(struct net_device *dev, struct gmac_txq *txq,
548 struct gmac_private *gmac = netdev_priv(dev);
549 struct toe_private *toe = gmac->toe;
550 unsigned int errs = 0;
551 unsigned int pkts = 0;
552 unsigned int hwchksum = 0;
553 unsigned long bytes = 0;
554 unsigned int m = (1 << gmac->txq_order) - 1;
555 unsigned int c = txq->cptr;
556 GMAC_TXDESC_0_T word0;
557 GMAC_TXDESC_1_T word1;
561 unsigned short nfrags;
563 if (unlikely(c == r))
571 mapping = txd->word2.buf_adr;
572 word3 = txd->word3.bits32;
574 dma_unmap_single(toe->dev, mapping, word0.bits.buffer_size, DMA_TO_DEVICE);
577 dev_kfree_skb(txq->skb[c]);
582 if (!(word3 & SOF_BIT))
585 if (!word0.bits.status_tx_ok) {
591 bytes += txd->word1.bits.byte_count;
593 if (word1.bits32 & TSS_CHECKUM_ENABLE)
596 nfrags = word0.bits.desc_count - 1;
598 if (nfrags >= TX_MAX_FRAGS)
599 nfrags = TX_MAX_FRAGS - 1;
601 u64_stats_update_begin(&gmac->tx_stats_syncp);
602 gmac->tx_frag_stats[nfrags]++;
603 u64_stats_update_end(&gmac->ir_stats_syncp);
607 u64_stats_update_begin(&gmac->ir_stats_syncp);
608 gmac->stats.tx_errors += errs;
609 gmac->stats.tx_packets += pkts;
610 gmac->stats.tx_bytes += bytes;
611 gmac->tx_hw_csummed += hwchksum;
612 u64_stats_update_end(&gmac->ir_stats_syncp);
/* Free everything gmac_setup_txqs allocated: drain each queue through
 * gmac_clean_txq (discarding remaining packets), then release the shared
 * skb table and the single coherent descriptor allocation. */
static void gmac_cleanup_txqs(struct net_device *dev)
struct gmac_private *gmac = netdev_priv(dev);
struct toe_private *toe = gmac->toe;
void __iomem *rwptr_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE0_PTR_REG;
void __iomem *base_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE_BASE_REG;
unsigned n_txq = dev->num_tx_queues;
for (i = 0; i < n_txq; i++) {
/* Reset write pointer to read pointer so hardware sees an empty
 * queue, then reclaim all outstanding descriptors. */
r = readw(rwptr_reg);
writew(r, rwptr_reg);
gmac_clean_txq(dev, gmac->txq + i, r);
/* skb table and descriptor memory were allocated as single blocks
 * for all queues (see gmac_setup_txqs). */
kfree(gmac->txq->skb);
dma_free_coherent(toe->dev,
n_txq * sizeof(*gmac->txq->ring) << gmac->txq_order,
gmac->txq->ring, gmac->txq_dma_base);
/* Allocate this port's RX descriptor ring and point the TOE's per-port
 * non-TOE default queue header at it, with both pointers reset to zero. */
static int gmac_setup_rxq(struct net_device *dev)
struct gmac_private *gmac = netdev_priv(dev);
struct toe_private *toe = gmac->toe;
NONTOE_QHDR_T __iomem *qhdr = toe->iomem + TOE_DEFAULT_Q_HDR_BASE(dev->dev_id);
gmac->rxq_rwptr = &qhdr->word1;
gmac->rxq_ring = dma_alloc_coherent(toe->dev,
sizeof(*gmac->rxq_ring) << gmac->rxq_order,
&gmac->rxq_dma_base, GFP_KERNEL);
/* Base and order share one register word; base must be aligned. */
BUG_ON(gmac->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK);
writel(gmac->rxq_dma_base | gmac->rxq_order, &qhdr->word0);
writel(0, gmac->rxq_rwptr);
/* Teardown counterpart of gmac_setup_rxq: walk the ring returning page
 * references for any buffers still owned by hardware, then free the
 * descriptor memory. NOTE(review): the walk loop structure and the
 * put_page call are elided in this chunk. */
static void gmac_cleanup_rxq(struct net_device *dev)
struct gmac_private *gmac = netdev_priv(dev);
struct toe_private *toe = gmac->toe;
NONTOE_QHDR_T __iomem *qhdr = toe->iomem + TOE_DEFAULT_Q_HDR_BASE(dev->dev_id);
void __iomem *dma_reg = &qhdr->word0;
void __iomem *ptr_reg = &qhdr->word1;
GMAC_RXDESC_T *rxd = gmac->rxq_ring;
unsigned int m = (1 << gmac->rxq_order) - 1;   /* ring index mask */
rw.bits32 = readl(ptr_reg);
/* Write pointer is the upper halfword of the rwptr register. */
writew(r, ptr_reg + 2);
mapping = rxd[r].word2.buf_adr;
/* Recover the backing page from the DMA address. */
page = pfn_to_page(dma_to_pfn(toe->dev, mapping));
dma_free_coherent(toe->dev, sizeof(*gmac->rxq_ring) << gmac->rxq_order,
gmac->rxq_ring, gmac->rxq_dma_base);
/* Allocate and DMA-map one page for free-queue slot @pn, splitting it into
 * (PAGE_SIZE >> freeq_frag_order) fragment descriptors. Replaces (unmaps
 * and releases) any page previously installed in that slot. Returns the
 * new page, or (per the elided error path) NULL on failure. */
static struct page *toe_freeq_alloc_map_page(struct toe_private *toe, int pn)
unsigned int fpp_order = PAGE_SHIFT - toe->freeq_frag_order;  /* frags per page */
unsigned int frag_len = 1 << toe->freeq_frag_order;
GMAC_RXDESC_T *freeq_entry;
page = alloc_page(__GFP_COLD | GFP_ATOMIC);
mapping = dma_map_single(toe->dev, page_address(page),
PAGE_SIZE, DMA_FROM_DEVICE);
/* A zero mapping is also rejected: the RX path uses 0 as "empty". */
if (unlikely(dma_mapping_error(toe->dev, mapping) || !mapping)) {
/* Fill all fragment descriptors of this page slot. */
freeq_entry = toe->freeq_ring + (pn << fpp_order);
for (i = 1 << fpp_order; i > 0; --i) {
freeq_entry->word2.buf_adr = mapping;
/* Retire the page previously occupying this slot, if any. */
if (toe->freeq_page_tab[pn]) {
mapping = toe->freeq_ring[pn << fpp_order].word2.buf_adr;
dma_unmap_single(toe->dev, mapping, frag_len, DMA_FROM_DEVICE);
put_page(toe->freeq_page_tab[pn]);
toe->freeq_page_tab[pn] = page;
/* Refill the hardware free queue: starting from the write pointer (or the
 * read pointer on @reset), re-arm page slots whose pages are still
 * referenced elsewhere by allocating fresh ones, and advance the hardware
 * write pointer. Returns the number of fragments made available.
 * Serialized by toe->freeq_lock. */
static unsigned int toe_fill_freeq(struct toe_private *toe, int reset)
void __iomem *rwptr_reg = toe->iomem + GLOBAL_SWFQ_RWPTR_REG;
unsigned int pn, epn;
unsigned int fpp_order = PAGE_SHIFT - toe->freeq_frag_order;
unsigned int m_pn = (1 << (toe->freeq_order - fpp_order)) - 1;  /* page-slot mask */
unsigned int count = 0;
spin_lock_irqsave(&toe->freeq_lock, flags);
rw.bits32 = readl(rwptr_reg);
pn = (reset ? rw.bits.rptr : rw.bits.wptr) >> fpp_order;
epn = (rw.bits.rptr >> fpp_order) - 1;   /* stop before the read pointer */
page = toe->freeq_page_tab[pn];
/* Page still has outstanding users (RX frags in flight): leave it
 * unless we are far enough ahead, otherwise allocate a replacement. */
if (atomic_read(&page->_count) > 1) {
unsigned int fl = (pn -epn) & m_pn;
if (fl > 64 >> fpp_order)
page = toe_freeq_alloc_map_page(toe, pn);
/* Take one reference per fragment handed to hardware. */
atomic_add(1 << fpp_order, &page->_count);
count += 1 << fpp_order;
writew(pn << fpp_order, rwptr_reg+2);
spin_unlock_irqrestore(&toe->freeq_lock, flags);
/* Build the shared software free queue: allocate the descriptor ring and
 * page table, populate every page slot, arm the queue-empty threshold and
 * fragment size, and finally publish the ring to hardware. On failure the
 * error path unwinds pages already mapped. */
static int toe_setup_freeq(struct toe_private *toe)
void __iomem *dma_reg = toe->iomem + GLOBAL_SW_FREEQ_BASE_SIZE_REG;
QUEUE_THRESHOLD_T qt;
DMA_SKB_SIZE_T skbsz;
unsigned int frag_len = 1 << toe->freeq_frag_order;
unsigned int len = 1 << toe->freeq_order;
unsigned int fpp_order = PAGE_SHIFT - toe->freeq_frag_order;
unsigned int pages = len >> fpp_order;
toe->freeq_ring = dma_alloc_coherent(toe->dev,
sizeof(*toe->freeq_ring) << toe->freeq_order,
&toe->freeq_dma_base, GFP_KERNEL);
if (!toe->freeq_ring)
/* Base and order are packed into one register; base must be aligned. */
BUG_ON(toe->freeq_dma_base & ~DMA_Q_BASE_MASK);
toe->freeq_page_tab = kzalloc(pages * sizeof(*toe->freeq_page_tab),
if (!toe->freeq_page_tab)
for (pn = 0; pn < pages; pn++)
if (!toe_freeq_alloc_map_page(toe, pn))
goto err_freeq_alloc;
filled = toe_fill_freeq(toe, 1);
goto err_freeq_alloc;
/* Raise the queue-empty interrupt threshold and tell the DMA engine
 * the fragment size before enabling the queue. */
qt.bits32 = readl(toe->iomem + GLOBAL_QUEUE_THRESHOLD_REG);
qt.bits.swfq_empty = 32;
writel(qt.bits32, toe->iomem + GLOBAL_QUEUE_THRESHOLD_REG);
skbsz.bits.sw_skb_size = 1 << toe->freeq_frag_order;
writel(skbsz.bits32, toe->iomem + GLOBAL_DMA_SKB_SIZE_REG);
writel(toe->freeq_dma_base | toe->freeq_order, dma_reg);
/* Error unwind: unmap and release every page installed so far. */
mapping = toe->freeq_ring[pn << fpp_order].word2.buf_adr;
dma_unmap_single(toe->dev, mapping, frag_len, DMA_FROM_DEVICE);
put_page(toe->freeq_page_tab[pn]);
dma_free_coherent(toe->dev,
sizeof(*toe->freeq_ring) << toe->freeq_order,
toe->freeq_ring, toe->freeq_dma_base);
toe->freeq_ring = NULL;
/* Tear the free queue down: stop hardware consumption by equalizing the
 * pointers, unmap every page, drop all remaining page references, and free
 * the page table and descriptor ring. */
static void toe_cleanup_freeq(struct toe_private *toe)
void __iomem *dma_reg = toe->iomem + GLOBAL_SW_FREEQ_BASE_SIZE_REG;
void __iomem *ptr_reg = toe->iomem + GLOBAL_SWFQ_RWPTR_REG;
unsigned int frag_len = 1 << toe->freeq_frag_order;
unsigned int len = 1 << toe->freeq_order;
unsigned int fpp_order = PAGE_SHIFT - toe->freeq_frag_order;
unsigned int pages = len >> fpp_order;
/* wptr := rptr -> hardware sees an empty queue. */
writew(readw(ptr_reg), ptr_reg + 2);
for (pn = 0; pn < pages; pn++) {
mapping = toe->freeq_ring[pn << fpp_order].word2.buf_adr;
dma_unmap_single(toe->dev, mapping, frag_len, DMA_FROM_DEVICE);
page = toe->freeq_page_tab[pn];
/* Drop all references hardware still held on this page
 * (loop body elided in this chunk, presumably put_page). */
while (atomic_read(&page->_count) > 0)
kfree(toe->freeq_page_tab);
dma_free_coherent(toe->dev,
sizeof(*toe->freeq_ring) << toe->freeq_order,
toe->freeq_ring, toe->freeq_dma_base);
/* Resize the shared free queue when a port's RX ring size changes. Only
 * possible while the *other* port is down (the queue is shared); the new
 * order is sized to cover both ports' RX rings, capped at 15. The
 * queue-empty interrupt is masked for the duration of the rebuild. */
static int toe_resize_freeq(struct toe_private *toe, int changing_dev_id)
void __iomem *irqen_reg = toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG;
struct gmac_private *gmac;
struct net_device *other = toe->netdev[1 - changing_dev_id];
unsigned new_size = 0;
/* Cannot rebuild while the other port is actively using the queue. */
if (other && netif_running(other))
if (toe->netdev[0]) {
gmac = netdev_priv(toe->netdev[0]);
new_size = 1 << (gmac->rxq_order + 1);
if (toe->netdev[1]) {
gmac = netdev_priv(toe->netdev[1]);
new_size += 1 << (gmac->rxq_order + 1);
new_order = min(15, ilog2(new_size - 1) + 1);
if (toe->freeq_order == new_order)
/* Mask the free-queue-empty interrupt while the ring is torn down. */
spin_lock_irqsave(&toe->irq_lock, flags);
en = readl(irqen_reg);
en &= ~SWFQ_EMPTY_INT_BIT;
writel(en, irqen_reg);
toe_cleanup_freeq(toe);
toe->freeq_order = new_order;
err = toe_setup_freeq(toe);
en |= SWFQ_EMPTY_INT_BIT;
writel(en, irqen_reg);
spin_unlock_irqrestore(&toe->irq_lock, flags);
/* Enable (@en != 0) or disable the completion interrupt for TX queue @txq
 * of this port; the pending status bit is acked first so a stale event
 * does not fire immediately. Each port owns 6 queue-interrupt bits. */
static void gmac_tx_irq_enable(struct net_device *dev, unsigned txq, int en)
struct gmac_private *gmac = netdev_priv(dev);
struct toe_private *toe = gmac->toe;
mask = GMAC0_IRQ0_TXQ0_INTS << (6 * dev->dev_id + txq);
/* Ack any pending instance of this interrupt (write-1-to-clear). */
writel(mask, toe->iomem + GLOBAL_INTERRUPT_STATUS_0_REG);
val = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG);
val = en ? val | mask : val & ~mask;
writel(val, toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG);
/* TX completion interrupt: the interrupt existed only to wake a stopped
 * queue, so mask it again and wake the corresponding netdev TX queue. */
static void gmac_tx_irq(struct net_device *dev, unsigned txq_num)
struct netdev_queue *ntxq = netdev_get_tx_queue(dev, txq_num);
gmac_tx_irq_enable(dev, txq_num, 0);
netif_tx_wake_queue(ntxq);
/* Map an skb (head + page fragments) into TX descriptors starting at *desc,
 * building word1 (checksum/TSO flags) and word3 (SOF/EOF/MTU) for each.
 * On success *desc is advanced past the frame; on mapping failure already
 * mapped descriptors are unwound and (per the elided return) an error is
 * returned so the caller can linearize and retry.
 * NOTE(review): word1/word3 initialization lines are elided in this chunk. */
static int gmac_map_tx_bufs(struct net_device *dev, struct sk_buff *skb,
struct gmac_txq *txq, unsigned short *desc)
struct gmac_private *gmac = netdev_priv(dev);
struct toe_private *toe = gmac->toe;
struct skb_shared_info *skb_si = skb_shinfo(skb);
skb_frag_t *skb_frag;
short frag, last_frag = skb_si->nr_frags - 1;
unsigned short m = (1 << gmac->txq_order) -1;   /* ring index mask */
unsigned short w = *desc;
unsigned word1, word3, buflen;
if (skb->protocol == htons(ETH_P_8021Q))
word1 |= TSS_MTU_ENABLE_BIT;
/* Hardware checksum/TSO flags: distinguish IPv4/IPv6 and TCP/UDP. */
if (skb->ip_summed != CHECKSUM_NONE) {
if (skb->protocol == htons(ETH_P_IP)) {
word1 |= TSS_IP_CHKSUM_BIT;
tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
word1 |= TSS_IPV6_ENABLE_BIT;
tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
/* One descriptor for the linear head (frag == -1 presumably), then
 * one per page fragment. */
while (frag <= last_frag) {
buflen = skb_headlen(skb);
skb_frag = skb_si->frags + frag;
buffer = page_address(skb_frag_page(skb_frag)) +
skb_frag->page_offset;
buflen = skb_frag->size;
if (frag == last_frag) {
mapping = dma_map_single(toe->dev, buffer, buflen,
/* Mappings in page 0 are treated as errors: address 0 is the
 * RX path's "empty" sentinel. */
if (dma_mapping_error(toe->dev, mapping) ||
!(mapping & PAGE_MASK))
txd = txq->ring + w;
txd->word0.bits32 = buflen;
txd->word1.bits32 = word1;
txd->word2.buf_adr = mapping;
txd->word3.bits32 = word3;
word3 &= MTU_SIZE_BIT_MASK;
/* Unwind: unmap everything mapped since entry.
 * NOTE(review): buffers were mapped with dma_map_single() above but
 * are unmapped here with dma_unmap_page() — confirm equivalence on
 * this platform. */
while (w != *desc) {
dma_unmap_page(toe->dev, txq->ring[w].word2.buf_adr,
txq->ring[w].word0.bits.buffer_size, DMA_TO_DEVICE);
/* ndo_start_xmit: reserve ring space (reclaiming completed descriptors if
 * short), map the skb (linearizing once on mapping failure), publish the
 * new write pointer, and opportunistically reclaim. When the ring is full,
 * an EOF-interrupt is armed on a descriptor ~16 entries ahead so the queue
 * is woken once hardware makes progress, and NETDEV_TX_BUSY is returned. */
static int gmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct gmac_private *gmac = netdev_priv(dev);
void __iomem *ptr_reg;
struct gmac_txq *txq;
struct netdev_queue *ntxq;
int txq_num, nfrags;
unsigned short r, w, d;
unsigned short m = (1 << gmac->txq_order) - 1;   /* ring index mask */
SKB_FRAG_ASSERT(skb);
/* Hardware length fields are 16 bit; drop oversized frames. */
if (unlikely(skb->len >= 0x10000))
txq_num = skb_get_queue_mapping(skb);
ptr_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE_PTR_REG(txq_num);
txq = &gmac->txq[txq_num];
ntxq = netdev_get_tx_queue(dev, txq_num);
nfrags = skb_shinfo(skb)->nr_frags;
rw.bits32 = readl(ptr_reg);
d = txq->cptr - w - 1;   /* free descriptors (masked on elided line) */
/* Not enough room: reclaim completed descriptors and re-check. */
if (unlikely(d < nfrags+2))
gmac_clean_txq(dev, txq, r);
d = txq->cptr - w - 1;
if (unlikely(d < nfrags+2)) {
netif_tx_stop_queue(ntxq);
/* Arm an EOF interrupt a little ahead of the clean pointer so
 * the stopped queue gets woken as hardware drains. */
d = txq->cptr + nfrags + 16;
txq->ring[d].word3.bits.eofie = 1;
gmac_tx_irq_enable(dev, txq_num, 1);
u64_stats_update_begin(&gmac->tx_stats_syncp);
dev->stats.tx_fifo_errors++;
u64_stats_update_end(&gmac->tx_stats_syncp);
return NETDEV_TX_BUSY;
/* Map; on failure linearize once and retry before dropping. */
if (unlikely(gmac_map_tx_bufs(dev, skb, txq, &w))) {
if (skb_linearize(skb))
if (unlikely(gmac_map_tx_bufs(dev, skb, txq, &w)))
u64_stats_update_begin(&gmac->tx_stats_syncp);
gmac->tx_frags_linearized++;
u64_stats_update_end(&gmac->tx_stats_syncp);
/* Publish the new write pointer (upper halfword of rwptr reg). */
writew(w, ptr_reg+2);
gmac_clean_txq(dev, txq, r);
return NETDEV_TX_OK;
/* Drop path (label elided): count and consume the skb. */
u64_stats_update_begin(&gmac->tx_stats_syncp);
gmac->stats.tx_dropped++;
u64_stats_update_end(&gmac->tx_stats_syncp);
return NETDEV_TX_OK;
/* ndo_tx_timeout: no recovery is attempted, just log and dump DMA state
 * for diagnosis. */
static void gmac_tx_timeout(struct net_device *dev)
netdev_err(dev, "Tx timeout\n");
gmac_dump_dma_state(dev);
/* Enable or disable this port's full interrupt set: error interrupts
 * (enable reg 0), default RX queue (enable reg 1) and MIB/overrun
 * (enable reg 4). Register updates are serialized by toe->irq_lock. */
static void gmac_enable_irq(struct net_device *dev, int enable)
struct gmac_private *gmac = netdev_priv(dev);
struct toe_private *toe = gmac->toe;
unsigned long flags;
spin_lock_irqsave(&toe->irq_lock, flags);
/* Error interrupts: 2 bits per port. */
mask = GMAC0_IRQ0_2 << (dev->dev_id * 2);
val = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG);
val = enable ? (val | mask) : (val & ~mask);
writel(val, toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG);
/* Default RX queue interrupt: 1 bit per port. */
mask = DEFAULT_Q0_INT_BIT << dev->dev_id;
val = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_1_REG);
val = enable ? (val | mask) : (val & ~mask);
writel(val, toe->iomem + GLOBAL_INTERRUPT_ENABLE_1_REG);
/* MIB overflow / RX overrun: 8 bits per port. */
mask = GMAC0_IRQ4_8 << (dev->dev_id * 8);
val = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG);
val = enable ? (val | mask) : (val & ~mask);
writel(val, toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG);
spin_unlock_irqrestore(&toe->irq_lock, flags);
/* Enable/disable only the default RX queue interrupt for this port; used
 * by the NAPI poll loop to switch between IRQ and polling mode. */
static void gmac_enable_rx_irq(struct net_device *dev, int enable)
struct gmac_private *gmac = netdev_priv(dev);
struct toe_private *toe = gmac->toe;
unsigned long flags;
spin_lock_irqsave(&toe->irq_lock, flags);
mask = DEFAULT_Q0_INT_BIT << dev->dev_id;
val = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_1_REG);
val = enable ? (val | mask) : (val & ~mask);
writel(val, toe->iomem + GLOBAL_INTERRUPT_ENABLE_1_REG);
spin_unlock_irqrestore(&toe->irq_lock, flags);
/* Validate an RX start-of-frame descriptor and, if the frame is good,
 * obtain an skb from napi_get_frags() and account the packet. Returns NULL
 * (and bumps the relevant error counters) for bad frames or allocation
 * failure; sets CHECKSUM_UNNECESSARY when hardware verified IP+L4 csums. */
static struct sk_buff *gmac_skb_if_good_frame(struct gmac_private *gmac,
GMAC_RXDESC_0_T word0, unsigned frame_len)
struct sk_buff *skb = NULL;
unsigned rx_status = word0.bits.status;
unsigned rx_csum = word0.bits.chksum_status;
gmac->rx_stats[rx_status]++;
gmac->rx_csum_stats[rx_csum]++;
/* Reject on DMA/protocol error, any non-zero status, runts, or
 * checksum statuses at/above the "error" range. */
if (word0.bits.derr || word0.bits.perr ||
rx_status || frame_len < ETH_ZLEN ||
rx_csum >= RX_CHKSUM_IP_ERR_UNKNOWN) {
gmac->stats.rx_errors++;
if (frame_len < ETH_ZLEN || RX_ERROR_LENGTH(rx_status))
gmac->stats.rx_length_errors++;
if (RX_ERROR_OVER(rx_status))
gmac->stats.rx_over_errors++;
if (RX_ERROR_CRC(rx_status))
gmac->stats.rx_crc_errors++;
if (RX_ERROR_FRAME(rx_status))
gmac->stats.rx_frame_errors++;
skb = napi_get_frags(&gmac->napi);
if (rx_csum == RX_CHKSUM_IP_UDP_TCP_OK)
skb->ip_summed = CHECKSUM_UNNECESSARY;
gmac->stats.rx_bytes += frame_len;
gmac->stats.rx_packets++;
/* NAPI RX work loop: walk the RX descriptor ring from read to write
 * pointer, assembling possibly multi-descriptor frames into page-fragment
 * skbs and handing completed frames to napi_gro_frags(). Returns the
 * remaining budget (loop tail/return elided in this chunk).
 * NOTE(review): `skb` below is function-static — partial-frame state will
 * be shared between both ports and is not reentrant; it should arguably
 * live in struct gmac_private. Confirm against the full source. */
static unsigned gmac_rx(struct net_device *dev, unsigned budget)
struct gmac_private *gmac = netdev_priv(dev);
struct toe_private *toe = gmac->toe;
void __iomem *ptr_reg = gmac->rxq_rwptr;
static struct sk_buff *skb;
unsigned short r, w;
unsigned short m = (1 << gmac->rxq_order) -1;   /* ring index mask */
GMAC_RXDESC_T *rx = NULL;
struct page* page = NULL;
unsigned int frame_len, frag_len;
GMAC_RXDESC_0_T word0;
GMAC_RXDESC_1_T word1;
GMAC_RXDESC_3_T word3;
rw.bits32 = readl(ptr_reg);
/* Reset interrupt as all packages until here are taken into account */
writel(DEFAULT_Q0_INT_BIT << dev->dev_id,
toe->iomem + GLOBAL_INTERRUPT_STATUS_1_REG);
while (budget && w != r) {
rx = gmac->rxq_ring + r;
mapping = rx->word2.buf_adr;
frag_len = word0.bits.buffer_size;
frame_len =word1.bits.byte_count;
page_offs = mapping & ~PAGE_MASK;
/* Address 0 is the "empty descriptor" sentinel; seeing it here
 * means hardware handed us an unfilled slot. */
if (unlikely(!mapping)) {
netdev_err(dev, "rxq[%u]: HW BUG: zero DMA desc\n", r);
page = pfn_to_page(dma_to_pfn(toe->dev, mapping));
if (word3.bits32 & SOF_BIT) {
/* New frame while one was in flight: drop the old one. */
if (unlikely(skb)) {
napi_free_frags(&gmac->napi);
gmac->stats.rx_dropped++;
skb = gmac_skb_if_good_frame(gmac, word0, frame_len);
/* Skip the alignment padding on the first fragment. */
page_offs += NET_IP_ALIGN;
frag_len -= NET_IP_ALIGN;
/* Last fragment: remaining bytes of the frame only. */
if (word3.bits32 & EOF_BIT)
frag_len = frame_len - skb->len;
/* append page frag to skb */
if (unlikely(frag_nr == MAX_SKB_FRAGS))
netdev_err(dev, "Received fragment with len = 0\n");
skb_fill_page_desc(skb, frag_nr, page, page_offs, frag_len);
skb->len += frag_len;
skb->data_len += frag_len;
skb->truesize += frag_len;
if (word3.bits32 & EOF_BIT) {
napi_gro_frags(&gmac->napi);
napi_free_frags(&gmac->napi);
gmac->stats.rx_dropped++;
/* NAPI poll: run gmac_rx(), complete NAPI and re-enable the RX interrupt
 * when under budget, and top up the shared free queue once enough
 * fragments have been consumed (half the queue). The whole poll runs
 * inside the rx_stats_syncp write section. */
static int gmac_napi_poll(struct napi_struct *napi, int budget)
struct gmac_private *gmac = netdev_priv(napi->dev);
struct toe_private *toe = gmac->toe;
unsigned freeq_threshold = 1 << (toe->freeq_order - 1);
u64_stats_update_begin(&gmac->rx_stats_syncp);
rx = budget - gmac_rx(napi->dev, budget);   /* packets processed */
napi_gro_flush(napi, false);
__napi_complete(napi);
gmac_enable_rx_irq(napi->dev, 1);
++gmac->rx_napi_exits;
gmac->freeq_refill += rx;
if (gmac->freeq_refill > freeq_threshold) {
gmac->freeq_refill -= freeq_threshold;
toe_fill_freeq(toe, 0);
u64_stats_update_end(&gmac->rx_stats_syncp);
/* Diagnostic dump used on TX timeout and fatal DMA errors: print interrupt
 * status/enable registers, the RX and TX DMA state and current descriptors,
 * and the software/hardware free-queue pointers. */
static void gmac_dump_dma_state(struct net_device *dev)
struct gmac_private *gmac = netdev_priv(dev);
struct toe_private *toe = gmac->toe;
void __iomem *ptr_reg;
/* Interrupt status */
reg[0] = readl(toe->iomem + GLOBAL_INTERRUPT_STATUS_0_REG);
reg[1] = readl(toe->iomem + GLOBAL_INTERRUPT_STATUS_1_REG);
reg[2] = readl(toe->iomem + GLOBAL_INTERRUPT_STATUS_2_REG);
reg[3] = readl(toe->iomem + GLOBAL_INTERRUPT_STATUS_3_REG);
reg[4] = readl(toe->iomem + GLOBAL_INTERRUPT_STATUS_4_REG);
netdev_err(dev, "IRQ status: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
reg[0], reg[1], reg[2], reg[3], reg[4]);
/* Interrupt enable */
reg[0] = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG);
reg[1] = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_1_REG);
reg[2] = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_2_REG);
reg[3] = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_3_REG);
reg[4] = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG);
netdev_err(dev, "IRQ enable: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
reg[0], reg[1], reg[2], reg[3], reg[4]);
/* RX DMA engine state and current descriptor words. */
reg[0] = readl(gmac->dma_iomem + GMAC_DMA_RX_FIRST_DESC_REG);
reg[1] = readl(gmac->dma_iomem + GMAC_DMA_RX_CURR_DESC_REG);
reg[2] = GET_RPTR(gmac->rxq_rwptr);
reg[3] = GET_WPTR(gmac->rxq_rwptr);
netdev_err(dev, "RX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
reg[0], reg[1], reg[2], reg[3]);
reg[0] = readl(gmac->dma_iomem + GMAC_DMA_RX_DESC_WORD0_REG);
reg[1] = readl(gmac->dma_iomem + GMAC_DMA_RX_DESC_WORD1_REG);
reg[2] = readl(gmac->dma_iomem + GMAC_DMA_RX_DESC_WORD2_REG);
reg[3] = readl(gmac->dma_iomem + GMAC_DMA_RX_DESC_WORD3_REG);
netdev_err(dev, "RX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
reg[0], reg[1], reg[2], reg[3]);
/* TX DMA engine state and current descriptor words. */
ptr_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE0_PTR_REG;
reg[0] = readl(gmac->dma_iomem + GMAC_DMA_TX_FIRST_DESC_REG);
reg[1] = readl(gmac->dma_iomem + GMAC_DMA_TX_CURR_DESC_REG);
reg[2] = GET_RPTR(ptr_reg);
reg[3] = GET_WPTR(ptr_reg);
netdev_err(dev, "TX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
reg[0], reg[1], reg[2], reg[3]);
reg[0] = readl(gmac->dma_iomem + GMAC_DMA_TX_DESC_WORD0_REG);
reg[1] = readl(gmac->dma_iomem + GMAC_DMA_TX_DESC_WORD1_REG);
reg[2] = readl(gmac->dma_iomem + GMAC_DMA_TX_DESC_WORD2_REG);
reg[3] = readl(gmac->dma_iomem + GMAC_DMA_TX_DESC_WORD3_REG);
netdev_err(dev, "TX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
reg[0], reg[1], reg[2], reg[3]);
/* FREE queues status */
ptr_reg = toe->iomem + GLOBAL_SWFQ_RWPTR_REG;
reg[0] = GET_RPTR(ptr_reg);
reg[1] = GET_WPTR(ptr_reg);
ptr_reg = toe->iomem + GLOBAL_HWFQ_RWPTR_REG;
reg[2] = GET_RPTR(ptr_reg);
reg[3] = GET_WPTR(ptr_reg);
netdev_err(dev, "FQ SW ptr: %u %u, HW ptr: %u %u\n",
reg[0], reg[1], reg[2], reg[3]);
/* Fold the hardware MIB counters (which are clear-on-read) into the 64-bit
 * software accumulators and derive the generic netdev counters, then ack
 * the per-port MIB interrupt. Runs under toe->irq_lock and inside the
 * ir_stats_syncp write section. */
static void gmac_update_hw_stats(struct net_device *dev)
struct gmac_private *gmac = netdev_priv(dev);
struct toe_private *toe = gmac->toe;
unsigned long flags;
unsigned int rx_discards, rx_mcast, rx_bcast;
spin_lock_irqsave(&toe->irq_lock, flags);
u64_stats_update_begin(&gmac->ir_stats_syncp);
gmac->hw_stats[0] += rx_discards = readl(gmac->ctl_iomem + GMAC_IN_DISCARDS);
gmac->hw_stats[1] += readl(gmac->ctl_iomem + GMAC_IN_ERRORS);
gmac->hw_stats[2] += rx_mcast = readl(gmac->ctl_iomem + GMAC_IN_MCAST);
gmac->hw_stats[3] += rx_bcast = readl(gmac->ctl_iomem + GMAC_IN_BCAST);
gmac->hw_stats[4] += readl(gmac->ctl_iomem + GMAC_IN_MAC1);
gmac->hw_stats[5] += readl(gmac->ctl_iomem + GMAC_IN_MAC2);
gmac->stats.rx_missed_errors += rx_discards;
/* Broadcasts are counted into ->multicast as well, matching the
 * rtnl_link_stats64 convention of lumping them together here. */
gmac->stats.multicast += rx_mcast;
gmac->stats.multicast += rx_bcast;
/* Ack the MIB-overflow interrupt for this port (write-1-to-clear). */
writel(GMAC0_MIB_INT_BIT << (dev->dev_id * 8),
toe->iomem + GLOBAL_INTERRUPT_STATUS_4_REG);
u64_stats_update_end(&gmac->ir_stats_syncp);
spin_unlock_irqrestore(&toe->irq_lock, flags);
1441 static inline unsigned gmac_get_intr_flags(struct net_device *dev, int i)
1443 struct gmac_private *gmac = netdev_priv(dev);
1444 struct toe_private *toe = gmac->toe;
1445 void __iomem *irqif_reg, *irqen_reg;
1448 offs = i * (GLOBAL_INTERRUPT_STATUS_1_REG - GLOBAL_INTERRUPT_STATUS_0_REG);
1450 irqif_reg = toe->iomem + GLOBAL_INTERRUPT_STATUS_0_REG + offs;
1451 irqen_reg = toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG + offs;
1453 val = readl(irqif_reg) & readl(irqen_reg);
1457 enum hrtimer_restart gmac_coalesce_delay_expired( struct hrtimer *timer )
1459 struct gmac_private *gmac = container_of(timer, struct gmac_private, rx_coalesce_timer);
1461 napi_schedule(&gmac->napi);
1462 return HRTIMER_NORESTART;
1465 static irqreturn_t gmac_irq(int irq, void *data)
1467 struct net_device *dev = data;
1468 struct gmac_private *gmac = netdev_priv(dev);
1469 struct toe_private *toe = gmac->toe;
1470 unsigned val, orr = 0;
1472 orr |= val = gmac_get_intr_flags(dev, 0);
1474 if (unlikely(val & (GMAC0_IRQ0_2 << (dev->dev_id * 2)))) {
1476 netdev_err(dev, "hw failure/sw bug\n");
1477 gmac_dump_dma_state(dev);
1479 /* don't know how to recover, just reduce losses */
1480 gmac_enable_irq(dev, 0);
1484 if (val & (GMAC0_IRQ0_TXQ0_INTS << (dev->dev_id * 6)))
1485 gmac_tx_irq(dev, 0);
1487 orr |= val = gmac_get_intr_flags(dev, 1);
1489 if (val & (DEFAULT_Q0_INT_BIT << dev->dev_id)) {
1491 gmac_enable_rx_irq(dev, 0);
1493 if (!gmac->rx_coalesce_nsecs)
1494 napi_schedule(&gmac->napi);
1497 ktime = ktime_set(0, gmac->rx_coalesce_nsecs);
1498 hrtimer_start(&gmac->rx_coalesce_timer, ktime, HRTIMER_MODE_REL);
1502 orr |= val = gmac_get_intr_flags(dev, 4);
1504 if (unlikely(val & (GMAC0_MIB_INT_BIT << (dev->dev_id * 8))))
1505 gmac_update_hw_stats(dev);
1507 if (unlikely(val & (GMAC0_RX_OVERRUN_INT_BIT << (dev->dev_id * 8)))) {
1508 writel(GMAC0_RXDERR_INT_BIT << (dev->dev_id * 8),
1509 toe->iomem + GLOBAL_INTERRUPT_STATUS_4_REG);
1511 spin_lock(&toe->irq_lock);
1512 u64_stats_update_begin(&gmac->ir_stats_syncp);
1513 ++gmac->stats.rx_fifo_errors;
1514 u64_stats_update_end(&gmac->ir_stats_syncp);
1515 spin_unlock(&toe->irq_lock);
1518 return orr ? IRQ_HANDLED : IRQ_NONE;
1521 static void gmac_start_dma(struct gmac_private *gmac)
1523 void __iomem *dma_ctrl_reg = gmac->dma_iomem + GMAC_DMA_CTRL_REG;
1524 GMAC_DMA_CTRL_T dma_ctrl;
1526 dma_ctrl.bits32 = readl(dma_ctrl_reg);
1527 dma_ctrl.bits.rd_enable = 1;
1528 dma_ctrl.bits.td_enable = 1;
1529 dma_ctrl.bits.loopback = 0;
1530 dma_ctrl.bits.drop_small_ack = 0;
1531 dma_ctrl.bits.rd_insert_bytes = NET_IP_ALIGN;
1532 dma_ctrl.bits.rd_prot = HPROT_DATA_CACHE | HPROT_PRIVILIGED;
1533 dma_ctrl.bits.rd_burst_size = HBURST_INCR8;
1534 dma_ctrl.bits.rd_bus = HSIZE_8;
1535 dma_ctrl.bits.td_prot = HPROT_DATA_CACHE;
1536 dma_ctrl.bits.td_burst_size = HBURST_INCR8;
1537 dma_ctrl.bits.td_bus = HSIZE_8;
1539 writel(dma_ctrl.bits32, dma_ctrl_reg);
1542 static void gmac_stop_dma(struct gmac_private *gmac)
1544 void __iomem *dma_ctrl_reg = gmac->dma_iomem + GMAC_DMA_CTRL_REG;
1545 GMAC_DMA_CTRL_T dma_ctrl;
1547 dma_ctrl.bits32 = readl(dma_ctrl_reg);
1548 dma_ctrl.bits.rd_enable = 0;
1549 dma_ctrl.bits.td_enable = 0;
1550 writel(dma_ctrl.bits32, dma_ctrl_reg);
1553 static int gmac_open(struct net_device *dev)
1555 struct gmac_private *gmac = netdev_priv(dev);
1559 err = gmac_setup_phy(dev);
1561 netif_err(gmac, ifup, dev,
1562 "PHY init failed: %d\n", err);
1567 err = request_irq(dev->irq, gmac_irq,
1568 IRQF_SHARED, dev->name, dev);
1572 netif_carrier_off(dev);
1573 phy_start(dev->phydev);
1575 err = toe_resize_freeq(gmac->toe, dev->dev_id);
1579 err = gmac_setup_rxq(dev);
1583 err = gmac_setup_txqs(dev);
1584 if (unlikely(err)) {
1585 gmac_cleanup_rxq(dev);
1589 napi_enable(&gmac->napi);
1591 gmac_start_dma(gmac);
1592 gmac_enable_irq(dev, 1);
1593 gmac_enable_tx_rx(dev);
1594 netif_tx_start_all_queues(dev);
1596 hrtimer_init(&gmac->rx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1597 gmac->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
1601 phy_stop(dev->phydev);
1602 free_irq(dev->irq, dev);
1606 static int gmac_stop(struct net_device *dev)
1608 struct gmac_private *gmac = netdev_priv(dev);
1610 hrtimer_cancel(&gmac->rx_coalesce_timer);
1611 netif_tx_stop_all_queues(dev);
1612 gmac_disable_tx_rx(dev);
1613 gmac_stop_dma(gmac);
1614 napi_disable(&gmac->napi);
1616 gmac_enable_irq(dev, 0);
1617 gmac_cleanup_rxq(dev);
1618 gmac_cleanup_txqs(dev);
1620 phy_stop(dev->phydev);
1621 free_irq(dev->irq, dev);
1623 gmac_update_hw_stats(dev);
1627 static void gmac_set_rx_mode(struct net_device *dev)
1629 struct gmac_private *gmac = netdev_priv(dev);
1630 struct netdev_hw_addr *ha;
1633 GMAC_RX_FLTR_T filter = { .bits = {
1639 mc_filter[1] = mc_filter[0] = 0;
1641 if (dev->flags & IFF_PROMISC) {
1642 filter.bits.error = 1;
1643 filter.bits.promiscuous = 1;
1644 } else if (!(dev->flags & IFF_ALLMULTI)) {
1645 mc_filter[1] = mc_filter[0] = 0;
1646 netdev_for_each_mc_addr(ha, dev) {
1647 bit_nr = ~crc32_le(~0, ha->addr, ETH_ALEN) & 0x3f;
1648 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 0x1f);
1652 writel(mc_filter[0], gmac->ctl_iomem + GMAC_MCAST_FIL0);
1653 writel(mc_filter[1], gmac->ctl_iomem + GMAC_MCAST_FIL1);
1654 writel(filter.bits32, gmac->ctl_iomem + GMAC_RX_FLTR);
1657 static void __gmac_set_mac_address(struct net_device *dev)
1659 struct gmac_private *gmac = netdev_priv(dev);
1662 memset(addr, 0, sizeof(addr));
1663 memcpy(addr, dev->dev_addr, ETH_ALEN);
1665 writel(le32_to_cpu(addr[0]), gmac->ctl_iomem + GMAC_STA_ADD0);
1666 writel(le32_to_cpu(addr[1]), gmac->ctl_iomem + GMAC_STA_ADD1);
1667 writel(le32_to_cpu(addr[2]), gmac->ctl_iomem + GMAC_STA_ADD2);
1670 static int gmac_set_mac_address(struct net_device *dev, void *addr)
1672 struct sockaddr *sa = addr;
1674 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
1675 __gmac_set_mac_address(dev);
1680 static void gmac_clear_hw_stats(struct net_device *dev)
1682 struct gmac_private *gmac = netdev_priv(dev);
1684 readl(gmac->ctl_iomem + GMAC_IN_DISCARDS);
1685 readl(gmac->ctl_iomem + GMAC_IN_ERRORS);
1686 readl(gmac->ctl_iomem + GMAC_IN_MCAST);
1687 readl(gmac->ctl_iomem + GMAC_IN_BCAST);
1688 readl(gmac->ctl_iomem + GMAC_IN_MAC1);
1689 readl(gmac->ctl_iomem + GMAC_IN_MAC2);
1692 static struct rtnl_link_stats64 *gmac_get_stats64(struct net_device *dev,
1693 struct rtnl_link_stats64 *storage)
1695 struct gmac_private *gmac = netdev_priv(dev);
1698 gmac_update_hw_stats(dev);
1700 /* racing with RX NAPI */
1702 start = u64_stats_fetch_begin(&gmac->rx_stats_syncp);
1704 storage->rx_packets = gmac->stats.rx_packets;
1705 storage->rx_bytes = gmac->stats.rx_bytes;
1706 storage->rx_errors = gmac->stats.rx_errors;
1707 storage->rx_dropped = gmac->stats.rx_dropped;
1709 storage->rx_length_errors = gmac->stats.rx_length_errors;
1710 storage->rx_over_errors = gmac->stats.rx_over_errors;
1711 storage->rx_crc_errors = gmac->stats.rx_crc_errors;
1712 storage->rx_frame_errors = gmac->stats.rx_frame_errors;
1714 } while (u64_stats_fetch_retry(&gmac->rx_stats_syncp, start));
1716 /* racing with MIB and TX completion interrupts */
1718 start = u64_stats_fetch_begin(&gmac->ir_stats_syncp);
1720 storage->tx_errors = gmac->stats.tx_errors;
1721 storage->tx_packets = gmac->stats.tx_packets;
1722 storage->tx_bytes = gmac->stats.tx_bytes;
1724 storage->multicast = gmac->stats.multicast;
1725 storage->rx_missed_errors = gmac->stats.rx_missed_errors;
1726 storage->rx_fifo_errors = gmac->stats.rx_fifo_errors;
1728 } while (u64_stats_fetch_retry(&gmac->ir_stats_syncp, start));
1730 /* racing with hard_start_xmit */
1732 start = u64_stats_fetch_begin(&gmac->tx_stats_syncp);
1734 storage->tx_dropped = gmac->stats.tx_dropped;
1736 } while (u64_stats_fetch_retry(&gmac->tx_stats_syncp, start));
1738 storage->rx_dropped += storage->rx_missed_errors;
1743 static int gmac_change_mtu(struct net_device *dev, int new_mtu)
1745 int max_len = gmac_pick_rx_max_len(new_mtu);
1750 gmac_disable_tx_rx(dev);
1753 gmac_update_config0_reg(dev,
1754 max_len << CONFIG0_MAXLEN_SHIFT,
1755 CONFIG0_MAXLEN_MASK);
1757 netdev_update_features(dev);
1759 gmac_enable_tx_rx(dev);
1764 static netdev_features_t gmac_fix_features(struct net_device *dev, netdev_features_t features)
1766 if (dev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
1767 features &= ~GMAC_OFFLOAD_FEATURES;
1772 static int gmac_set_features(struct net_device *dev, netdev_features_t features)
1774 struct gmac_private *gmac = netdev_priv(dev);
1775 int enable = features & NETIF_F_RXCSUM;
1776 unsigned long flags;
1779 spin_lock_irqsave(&gmac->config_lock, flags);
1781 reg = readl(gmac->ctl_iomem + GMAC_CONFIG0);
1782 reg = enable ? reg | CONFIG0_RX_CHKSUM : reg & ~CONFIG0_RX_CHKSUM;
1783 writel(reg, gmac->ctl_iomem + GMAC_CONFIG0);
1785 spin_unlock_irqrestore(&gmac->config_lock, flags);
1789 static int gmac_get_sset_count(struct net_device *dev, int sset)
1791 return sset == ETH_SS_STATS ? GMAC_STATS_NUM : 0;
1794 static void gmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1796 if (stringset != ETH_SS_STATS)
1799 memcpy(data, gmac_stats_strings, sizeof(gmac_stats_strings));
1802 static void gmac_get_ethtool_stats(struct net_device *dev,
1803 struct ethtool_stats *estats, u64 *values)
1805 struct gmac_private *gmac = netdev_priv(dev);
1810 gmac_update_hw_stats(dev);
1812 /* racing with MIB interrupt */
1815 start = u64_stats_fetch_begin(&gmac->ir_stats_syncp);
1817 for (i = 0; i < RX_STATS_NUM; ++i)
1818 *p++ = gmac->hw_stats[i];
1820 } while (u64_stats_fetch_retry(&gmac->ir_stats_syncp, start));
1823 /* racing with RX NAPI */
1826 start = u64_stats_fetch_begin(&gmac->rx_stats_syncp);
1828 for (i = 0; i < RX_STATUS_NUM; ++i)
1829 *p++ = gmac->rx_stats[i];
1830 for (i = 0; i < RX_CHKSUM_NUM; ++i)
1831 *p++ = gmac->rx_csum_stats[i];
1832 *p++ = gmac->rx_napi_exits;
1834 } while (u64_stats_fetch_retry(&gmac->rx_stats_syncp, start));
1837 /* racing with TX start_xmit */
1840 start = u64_stats_fetch_begin(&gmac->tx_stats_syncp);
1842 for (i = 0; i < TX_MAX_FRAGS; ++i) {
1843 *values++ = gmac->tx_frag_stats[i];
1844 gmac->tx_frag_stats[i] = 0;
1846 *values++ = gmac->tx_frags_linearized;
1847 *values++ = gmac->tx_hw_csummed;
1849 } while (u64_stats_fetch_retry(&gmac->tx_stats_syncp, start));
1852 static int gmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1856 return phy_ethtool_gset(dev->phydev, cmd);
1859 static int gmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1863 return phy_ethtool_sset(dev->phydev, cmd);
1866 static int gmac_nway_reset(struct net_device *dev)
1870 return phy_start_aneg(dev->phydev);
1873 static void gmac_get_pauseparam(struct net_device *dev,
1874 struct ethtool_pauseparam *pparam)
1876 struct gmac_private *gmac = netdev_priv(dev);
1877 GMAC_CONFIG0_T config0;
1879 config0.bits32 = readl(gmac->ctl_iomem + GMAC_CONFIG0);
1881 pparam->rx_pause = config0.bits.rx_fc_en;
1882 pparam->tx_pause = config0.bits.tx_fc_en;
1883 pparam->autoneg = true;
1886 static void gmac_get_ringparam(struct net_device *dev,
1887 struct ethtool_ringparam *rp)
1889 struct gmac_private *gmac = netdev_priv(dev);
1890 GMAC_CONFIG0_T config0;
1892 config0.bits32 = readl(gmac->ctl_iomem + GMAC_CONFIG0);
1894 rp->rx_max_pending = 1 << 15;
1895 rp->rx_mini_max_pending = 0;
1896 rp->rx_jumbo_max_pending = 0;
1897 rp->tx_max_pending = 1 << 15;
1899 rp->rx_pending = 1 << gmac->rxq_order;
1900 rp->rx_mini_pending = 0;
1901 rp->rx_jumbo_pending = 0;
1902 rp->tx_pending = 1 << gmac->txq_order;
1905 static int toe_resize_freeq(struct toe_private *toe, int changing_dev_id);
1907 static int gmac_set_ringparam(struct net_device *dev,
1908 struct ethtool_ringparam *rp)
1910 struct gmac_private *gmac = netdev_priv(dev);
1911 struct toe_private *toe = gmac->toe;
1914 if (netif_running(dev))
1917 if (rp->rx_pending) {
1918 gmac->rxq_order = min(15, ilog2(rp->rx_pending - 1) + 1);
1919 err = toe_resize_freeq(toe, dev->dev_id);
1924 gmac->txq_order = min(15, ilog2(rp->tx_pending - 1) + 1);
1925 gmac->irq_every_tx_packets = 1 << (gmac->txq_order - 2);
1931 static int gmac_get_coalesce(struct net_device *dev,
1932 struct ethtool_coalesce *ecmd)
1934 struct gmac_private *gmac = netdev_priv(dev);
1936 ecmd->rx_max_coalesced_frames = 1;
1937 ecmd->tx_max_coalesced_frames = gmac->irq_every_tx_packets;
1938 ecmd->rx_coalesce_usecs = gmac->rx_coalesce_nsecs/1000;
1943 static int gmac_set_coalesce(struct net_device *dev,
1944 struct ethtool_coalesce *ecmd)
1946 struct gmac_private *gmac = netdev_priv(dev);
1948 if (ecmd->tx_max_coalesced_frames < 1)
1950 if (ecmd->tx_max_coalesced_frames >= 1 << gmac->txq_order)
1953 gmac->irq_every_tx_packets = ecmd->tx_max_coalesced_frames;
1954 gmac->rx_coalesce_nsecs = ecmd->rx_coalesce_usecs * 1000;
1959 static u32 gmac_get_msglevel(struct net_device *dev)
1961 struct gmac_private *gmac = netdev_priv(dev);
1962 return gmac->msg_enable;
1965 static void gmac_set_msglevel(struct net_device *dev, u32 level)
1967 struct gmac_private *gmac = netdev_priv(dev);
1968 gmac->msg_enable = level;
1971 static void gmac_get_drvinfo(struct net_device *dev,
1972 struct ethtool_drvinfo *info)
1974 strcpy(info->driver, DRV_NAME);
1975 strcpy(info->version, DRV_VERSION);
1976 strcpy(info->bus_info, dev->dev_id ? "1" : "0");
1979 static const struct net_device_ops gmac_351x_ops = {
1980 .ndo_init = gmac_init,
1981 .ndo_uninit = gmac_uninit,
1982 .ndo_open = gmac_open,
1983 .ndo_stop = gmac_stop,
1984 .ndo_start_xmit = gmac_start_xmit,
1985 .ndo_tx_timeout = gmac_tx_timeout,
1986 .ndo_set_rx_mode = gmac_set_rx_mode,
1987 .ndo_set_mac_address = gmac_set_mac_address,
1988 .ndo_get_stats64 = gmac_get_stats64,
1989 .ndo_change_mtu = gmac_change_mtu,
1990 .ndo_fix_features = gmac_fix_features,
1991 .ndo_set_features = gmac_set_features,
1994 static const struct ethtool_ops gmac_351x_ethtool_ops = {
1995 .get_sset_count = gmac_get_sset_count,
1996 .get_strings = gmac_get_strings,
1997 .get_ethtool_stats = gmac_get_ethtool_stats,
1998 .get_settings = gmac_get_settings,
1999 .set_settings = gmac_set_settings,
2000 .get_link = ethtool_op_get_link,
2001 .nway_reset = gmac_nway_reset,
2002 .get_pauseparam = gmac_get_pauseparam,
2003 .get_ringparam = gmac_get_ringparam,
2004 .set_ringparam = gmac_set_ringparam,
2005 .get_coalesce = gmac_get_coalesce,
2006 .set_coalesce = gmac_set_coalesce,
2007 .get_msglevel = gmac_get_msglevel,
2008 .set_msglevel = gmac_set_msglevel,
2009 .get_drvinfo = gmac_get_drvinfo,
2012 static int gmac_init_netdev(struct toe_private *toe, int num,
2013 struct platform_device *pdev)
2015 struct gemini_gmac_platform_data *pdata = pdev->dev.platform_data;
2016 struct gmac_private *gmac;
2017 struct net_device *dev;
2020 if (!pdata->bus_id[num])
2023 irq = platform_get_irq(pdev, num);
2025 dev_err(toe->dev, "No IRQ for ethernet device #%d\n", num);
2029 dev = alloc_etherdev_mq(sizeof(*gmac), TX_QUEUE_NUM);
2031 dev_err(toe->dev, "Can't allocate ethernet device #%d\n", num);
2035 gmac = netdev_priv(dev);
2038 SET_NETDEV_DEV(dev, toe->dev);
2040 toe->netdev[num] = dev;
2043 gmac->ctl_iomem = toe->iomem + TOE_GMAC_BASE(num);
2044 gmac->dma_iomem = toe->iomem + TOE_GMAC_DMA_BASE(num);
2047 dev->netdev_ops = &gmac_351x_ops;
2048 dev->ethtool_ops = &gmac_351x_ethtool_ops;
2050 spin_lock_init(&gmac->config_lock);
2051 gmac_clear_hw_stats(dev);
2053 dev->hw_features = GMAC_OFFLOAD_FEATURES;
2054 dev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
2056 gmac->freeq_refill = 0;
2057 netif_napi_add(dev, &gmac->napi, gmac_napi_poll, DEFAULT_NAPI_WEIGHT);
2059 if (is_valid_ether_addr((void *)toe->mac_addr[num]))
2060 memcpy(dev->dev_addr, toe->mac_addr[num], ETH_ALEN);
2062 random_ether_addr(dev->dev_addr);
2063 __gmac_set_mac_address(dev);
2065 err = gmac_setup_phy(dev);
2067 netif_warn(gmac, probe, dev,
2068 "PHY init failed: %d, deferring to ifup time\n", err);
2070 err = register_netdev(dev);
2073 pr_info(DRV_NAME " %s: irq %d, dma base 0x%p, io base 0x%p\n",
2074 dev->name, irq, gmac->dma_iomem, gmac->ctl_iomem);
2078 toe->netdev[num] = NULL;
2083 static irqreturn_t toe_irq_thread(int irq, void *data)
2085 struct toe_private *toe = data;
2086 void __iomem *irqen_reg = toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG;
2087 void __iomem *irqif_reg = toe->iomem + GLOBAL_INTERRUPT_STATUS_4_REG;
2088 unsigned long irqmask = SWFQ_EMPTY_INT_BIT;
2089 unsigned long flags;
2091 toe_fill_freeq(toe, 0);
2093 /* Ack and enable interrupt */
2094 spin_lock_irqsave(&toe->irq_lock, flags);
2095 writel(irqmask, irqif_reg);
2096 irqmask |= readl(irqen_reg);
2097 writel(irqmask, irqen_reg);
2098 spin_unlock_irqrestore(&toe->irq_lock, flags);
2103 static irqreturn_t toe_irq(int irq, void *data)
2105 struct toe_private *toe = data;
2106 void __iomem *irqif_reg = toe->iomem + GLOBAL_INTERRUPT_STATUS_4_REG;
2107 void __iomem *irqen_reg = toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG;
2108 unsigned long val, en;
2109 irqreturn_t ret = IRQ_NONE;
2111 spin_lock(&toe->irq_lock);
2113 val = readl(irqif_reg);
2114 en = readl(irqen_reg);
2116 if (val & en & SWFQ_EMPTY_INT_BIT) {
2117 en &= ~(SWFQ_EMPTY_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT
2118 | GMAC1_RX_OVERRUN_INT_BIT);
2119 writel(en, irqen_reg);
2120 ret = IRQ_WAKE_THREAD;
2123 spin_unlock(&toe->irq_lock);
2127 static int toe_init(struct toe_private *toe,
2128 struct platform_device *pdev)
2132 writel(0, toe->iomem + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
2133 writel(0, toe->iomem + GLOBAL_HW_FREEQ_BASE_SIZE_REG);
2134 writel(0, toe->iomem + GLOBAL_SWFQ_RWPTR_REG);
2135 writel(0, toe->iomem + GLOBAL_HWFQ_RWPTR_REG);
2137 toe->freeq_frag_order = DEFAULT_RX_BUF_ORDER;
2138 toe->freeq_order = ~0;
2140 err = request_threaded_irq(toe->irq, toe_irq,
2141 toe_irq_thread, IRQF_SHARED, DRV_NAME " toe", toe);
2148 toe_cleanup_freeq(toe);
2152 static void toe_deinit(struct toe_private *toe)
2154 free_irq(toe->irq, toe);
2155 toe_cleanup_freeq(toe);
2158 static int toe_reset(struct toe_private *toe)
2160 unsigned int reg = 0, retry = 5;
2162 reg = readl((void __iomem*)(IO_ADDRESS(GEMINI_GLOBAL_BASE) +
2164 reg |= RESET_GMAC1 | RESET_GMAC0;
2165 writel(reg, (void __iomem*)(IO_ADDRESS(GEMINI_GLOBAL_BASE) +
2170 reg = readl((void __iomem*)(toe->iomem +
2171 GLOBAL_TOE_VERSION_REG));
2173 } while (!reg && --retry);
2175 return reg ? 0 : -EIO;
/*
 * Interrupt routing between the two interrupt outputs:
 * GMAC0 intr bits ------> int0 ----> eth0
 * GMAC1 intr bits ------> int1 ----> eth1
 * TOE intr -------------> int1 ----> eth1
 * Classification Intr --> int0 ----> eth0
 * Default Q0 -----------> int0 ----> eth0
 * Default Q1 -----------> int1 ----> eth1
 * FreeQ intr -----------> int1 ----> eth1
 */
2189 static void toe_init_irq(struct toe_private *toe)
2191 writel(0, toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG);
2192 writel(0, toe->iomem + GLOBAL_INTERRUPT_ENABLE_1_REG);
2193 writel(0, toe->iomem + GLOBAL_INTERRUPT_ENABLE_2_REG);
2194 writel(0, toe->iomem + GLOBAL_INTERRUPT_ENABLE_3_REG);
2195 writel(0, toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG);
2197 writel(0xCCFC0FC0, toe->iomem + GLOBAL_INTERRUPT_SELECT_0_REG);
2198 writel(0x00F00002, toe->iomem + GLOBAL_INTERRUPT_SELECT_1_REG);
2199 writel(0xFFFFFFFF, toe->iomem + GLOBAL_INTERRUPT_SELECT_2_REG);
2200 writel(0xFFFFFFFF, toe->iomem + GLOBAL_INTERRUPT_SELECT_3_REG);
2201 writel(0xFF000003, toe->iomem + GLOBAL_INTERRUPT_SELECT_4_REG);
2203 /* edge-triggered interrupts packed to level-triggered one... */
2204 writel(~0, toe->iomem + GLOBAL_INTERRUPT_STATUS_0_REG);
2205 writel(~0, toe->iomem + GLOBAL_INTERRUPT_STATUS_1_REG);
2206 writel(~0, toe->iomem + GLOBAL_INTERRUPT_STATUS_2_REG);
2207 writel(~0, toe->iomem + GLOBAL_INTERRUPT_STATUS_3_REG);
2208 writel(~0, toe->iomem + GLOBAL_INTERRUPT_STATUS_4_REG);
2211 static void toe_save_mac_addr(struct toe_private *toe,
2212 struct platform_device *pdev)
2214 struct gemini_gmac_platform_data *pdata = pdev->dev.platform_data;
2218 for (i = 0; i < 2; i++) {
2219 if (pdata->bus_id[i]) {
2220 ctl = toe->iomem + TOE_GMAC_BASE(i);
2221 toe->mac_addr[i][0] = cpu_to_le32(readl(ctl + GMAC_STA_ADD0));
2222 toe->mac_addr[i][1] = cpu_to_le32(readl(ctl + GMAC_STA_ADD1));
2223 toe->mac_addr[i][2] = cpu_to_le32(readl(ctl + GMAC_STA_ADD2));
2228 static int gemini_gmac_probe(struct platform_device *pdev)
2230 struct resource *res;
2231 struct toe_private *toe;
2234 if (!pdev->dev.platform_data)
2237 irq = platform_get_irq(pdev, 1);
2241 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2243 dev_err(&pdev->dev, "can't get device resources\n");
2247 toe = kzalloc(sizeof(*toe), GFP_KERNEL);
2251 platform_set_drvdata(pdev, toe);
2252 toe->dev = &pdev->dev;
2255 toe->iomem = ioremap(res->start, resource_size(res));
2257 dev_err(toe->dev, "ioremap failed\n");
2262 toe_save_mac_addr(toe, pdev);
2264 retval = toe_reset(toe);
2268 pr_info(DRV_NAME " toe: irq %d, io base 0x%08x, version %d\n",
2269 irq, res->start, retval);
2271 spin_lock_init(&toe->irq_lock);
2272 spin_lock_init(&toe->freeq_lock);
2276 retval = toe_init(toe, pdev);
2280 retval = gmac_init_netdev(toe, 0, pdev);
2284 retval = gmac_init_netdev(toe, 1, pdev);
2292 unregister_netdev(toe->netdev[0]);
2295 iounmap(toe->iomem);
2301 static int gemini_gmac_remove(struct platform_device *pdev)
2303 struct toe_private *toe = platform_get_drvdata(pdev);
2306 for (i = 0; i < 2; i++)
2308 unregister_netdev(toe->netdev[i]);
2313 iounmap(toe->iomem);
2319 static struct platform_driver gemini_gmac_driver = {
2320 .probe = gemini_gmac_probe,
2321 .remove = gemini_gmac_remove,
2322 .driver.name = DRV_NAME,
2323 .driver.owner = THIS_MODULE,
2326 static int __init gemini_gmac_init(void)
2328 #ifdef CONFIG_MDIO_GPIO_MODULE
2329 request_module("mdio-gpio");
2331 return platform_driver_register(&gemini_gmac_driver);
2334 static void __exit gemini_gmac_exit(void)
2336 platform_driver_unregister(&gemini_gmac_driver);
2339 module_init(gemini_gmac_init);
2340 module_exit(gemini_gmac_exit);