gemini: add Linux 4.4 support
target/linux/gemini/files-4.4/drivers/net/ethernet/gemini/sl351x.c
/*
 *  Ethernet device driver for Gemini SoC (SL351x GMAC).
 *
 *  Copyright (C) 2011, Tobias Waldvogel <tobias.waldvogel@gmail.com>
 *
 *  Based on work by Michał Mirosław <mirq-linux@rere.qmqm.pl> and
 *  Paulius Zaleckas <paulius.zaleckas@gmail.com> and
 *  Giuseppe De Robertis <Giuseppe.DeRobertis@ba.infn.it> and
 *  GPLd spaghetti code from Raidsonic and other Gemini-based NAS vendors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/tcp.h>
#include <linux/u64_stats_sync.h>

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

#include <mach/hardware.h>
#include <mach/global_reg.h>

#include <mach/gmac.h>
#include "sl351x_hw.h"

#define DRV_NAME                "gmac-gemini"
#define DRV_VERSION             "1.0"

#define HSIZE_8                 0b00
#define HSIZE_16                0b01
#define HSIZE_32                0b10

#define HBURST_SINGLE           0b00
#define HBURST_INCR             0b01
#define HBURST_INCR4            0b10
#define HBURST_INCR8            0b11

#define HPROT_DATA_CACHE        BIT(0)
#define HPROT_PRIVILIGED        BIT(1)
#define HPROT_BUFFERABLE        BIT(2)
#define HPROT_CACHABLE          BIT(3)

#define DEFAULT_RX_COALESCE_NSECS       0
#define DEFAULT_GMAC_RXQ_ORDER          9
#define DEFAULT_GMAC_TXQ_ORDER          8
#define DEFAULT_RX_BUF_ORDER            11
#define DEFAULT_NAPI_WEIGHT             64
#define TX_MAX_FRAGS                    16
#define TX_QUEUE_NUM                    1       /* max: 6 */
#define RX_MAX_ALLOC_ORDER              2

#define GMAC0_IRQ0_2 (GMAC0_TXDERR_INT_BIT|GMAC0_TXPERR_INT_BIT| \
        GMAC0_RXDERR_INT_BIT|GMAC0_RXPERR_INT_BIT)
#define GMAC0_IRQ0_TXQ0_INTS (GMAC0_SWTQ00_EOF_INT_BIT| \
                              GMAC0_SWTQ00_FIN_INT_BIT)
#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT|GMAC0_RX_OVERRUN_INT_BIT)

#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
                NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

MODULE_AUTHOR("Tobias Waldvogel");
MODULE_DESCRIPTION("StorLink SL351x (Gemini) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);

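/* Global state for the TOE (traffic offload engine) block, shared by
 * both GMAC ports: interrupt bookkeeping and the software free queue
 * of DMA-mapped page fragments that the hardware fills with received
 * data.
 */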
struct toe_private {
        void __iomem    *iomem;
        spinlock_t      irq_lock;

        struct net_device *netdev[2];
        __le32          mac_addr[2][3];

        struct device   *dev;
        int             irq;

        unsigned int    freeq_order;
        unsigned int    freeq_frag_order;
        GMAC_RXDESC_T   *freeq_ring;
        dma_addr_t      freeq_dma_base;
        struct page     **freeq_page_tab;
        spinlock_t      freeq_lock;
};

struct gmac_txq {
        GMAC_TXDESC_T   *ring;
        struct sk_buff  **skb;
        unsigned int    cptr;
        unsigned int    noirq_packets;
};

struct gmac_private {
        unsigned int            num;
        struct toe_private      *toe;
        void __iomem            *ctl_iomem;
        void __iomem            *dma_iomem;

        void __iomem            *rxq_rwptr;
        GMAC_RXDESC_T           *rxq_ring;
        unsigned int            rxq_order;

        struct napi_struct      napi;
        struct hrtimer          rx_coalesce_timer;
        unsigned int            rx_coalesce_nsecs;
        unsigned int            freeq_refill;
        struct gmac_txq         txq[TX_QUEUE_NUM];
        unsigned int            txq_order;
        unsigned int            irq_every_tx_packets;

        dma_addr_t              rxq_dma_base;
        dma_addr_t              txq_dma_base;

        unsigned int            msg_enable;
        spinlock_t              config_lock;

        struct u64_stats_sync   tx_stats_syncp;
        struct u64_stats_sync   rx_stats_syncp;
        struct u64_stats_sync   ir_stats_syncp;

        struct rtnl_link_stats64 stats;
        u64                     hw_stats[RX_STATS_NUM];
        u64                     rx_stats[RX_STATUS_NUM];
        u64                     rx_csum_stats[RX_CHKSUM_NUM];
        u64                     rx_napi_exits;
        u64                     tx_frag_stats[TX_MAX_FRAGS];
        u64                     tx_frags_linearized;
        u64                     tx_hw_csummed;
};

#define GMAC_STATS_NUM  ( \
        RX_STATS_NUM + RX_STATUS_NUM + RX_CHKSUM_NUM + 1 + \
        TX_MAX_FRAGS + 2)

static const char gmac_stats_strings[GMAC_STATS_NUM][ETH_GSTRING_LEN] = {
        "GMAC_IN_DISCARDS",
        "GMAC_IN_ERRORS",
        "GMAC_IN_MCAST",
        "GMAC_IN_BCAST",
        "GMAC_IN_MAC1",
        "GMAC_IN_MAC2",
        "RX_STATUS_GOOD_FRAME",
        "RX_STATUS_TOO_LONG_GOOD_CRC",
        "RX_STATUS_RUNT_FRAME",
        "RX_STATUS_SFD_NOT_FOUND",
        "RX_STATUS_CRC_ERROR",
        "RX_STATUS_TOO_LONG_BAD_CRC",
        "RX_STATUS_ALIGNMENT_ERROR",
        "RX_STATUS_TOO_LONG_BAD_ALIGN",
        "RX_STATUS_RX_ERR",
        "RX_STATUS_DA_FILTERED",
        "RX_STATUS_BUFFER_FULL",
        "RX_STATUS_11",
        "RX_STATUS_12",
        "RX_STATUS_13",
        "RX_STATUS_14",
        "RX_STATUS_15",
        "RX_CHKSUM_IP_UDP_TCP_OK",
        "RX_CHKSUM_IP_OK_ONLY",
        "RX_CHKSUM_NONE",
        "RX_CHKSUM_3",
        "RX_CHKSUM_IP_ERR_UNKNOWN",
        "RX_CHKSUM_IP_ERR",
        "RX_CHKSUM_TCP_UDP_ERR",
        "RX_CHKSUM_7",
        "RX_NAPI_EXITS",
        "TX_FRAGS[1]",
        "TX_FRAGS[2]",
        "TX_FRAGS[3]",
        "TX_FRAGS[4]",
        "TX_FRAGS[5]",
        "TX_FRAGS[6]",
        "TX_FRAGS[7]",
        "TX_FRAGS[8]",
        "TX_FRAGS[9]",
        "TX_FRAGS[10]",
        "TX_FRAGS[11]",
        "TX_FRAGS[12]",
        "TX_FRAGS[13]",
        "TX_FRAGS[14]",
        "TX_FRAGS[15]",
        "TX_FRAGS[16+]",
        "TX_FRAGS_LINEARIZED",
        "TX_HW_CSUMMED",
};

static void gmac_dump_dma_state(struct net_device *dev);

static void gmac_update_config0_reg(struct net_device *dev, u32 val, u32 vmask)
{
        struct gmac_private *gmac = netdev_priv(dev);
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&gmac->config_lock, flags);

        reg = readl(gmac->ctl_iomem + GMAC_CONFIG0);
        reg = (reg & ~vmask) | val;
        writel(reg, gmac->ctl_iomem + GMAC_CONFIG0);

        spin_unlock_irqrestore(&gmac->config_lock, flags);
}

static void gmac_enable_tx_rx(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);
        void __iomem *config0 = gmac->ctl_iomem + GMAC_CONFIG0;
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&gmac->config_lock, flags);

        reg = readl(config0);
        reg &= ~CONFIG0_TX_RX_DISABLE;
        writel(reg, config0);

        spin_unlock_irqrestore(&gmac->config_lock, flags);
}

static void gmac_disable_tx_rx(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);
        void __iomem *config0 = gmac->ctl_iomem + GMAC_CONFIG0;
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&gmac->config_lock, flags);

        reg = readl(config0);
        reg |= CONFIG0_TX_RX_DISABLE;
        writel(reg, config0);

        spin_unlock_irqrestore(&gmac->config_lock, flags);

        mdelay(10);     /* let GMAC consume packet */
}

static void gmac_set_flow_control(struct net_device *dev, bool tx, bool rx)
{
        struct gmac_private *gmac = netdev_priv(dev);
        void __iomem *config0 = gmac->ctl_iomem + GMAC_CONFIG0;
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&gmac->config_lock, flags);

        reg = readl(config0);
        reg &= ~CONFIG0_FLOW_CTL;
        if (tx)
                reg |= CONFIG0_FLOW_TX;
        if (rx)
                reg |= CONFIG0_FLOW_RX;
        writel(reg, config0);

        spin_unlock_irqrestore(&gmac->config_lock, flags);
}

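/* PHY state change callback: mirror link, speed and duplex into the
 * GMAC status register and resolve pause frame settings from the
 * autonegotiation results. TX/RX are briefly disabled while the new
 * status word is written.
 */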
static void gmac_update_link_state(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);
        void __iomem *status_reg = gmac->ctl_iomem + GMAC_STATUS;
        struct phy_device *phydev = dev->phydev;
        GMAC_STATUS_T status, old_status;
        int pause_tx = 0, pause_rx = 0;

        old_status.bits32 = status.bits32 = readl(status_reg);

        status.bits.link = phydev->link;
        status.bits.duplex = phydev->duplex;

        switch (phydev->speed) {
        case 1000:
                status.bits.speed = GMAC_SPEED_1000;
                if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
                        status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
                break;
        case 100:
                status.bits.speed = GMAC_SPEED_100;
                if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
                        status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
                break;
        case 10:
                status.bits.speed = GMAC_SPEED_10;
                if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
                        status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
                break;
        default:
                netdev_warn(dev, "Unsupported PHY speed (%d)\n",
                        phydev->speed);
        }

        if (phydev->duplex == DUPLEX_FULL) {
                u16 lcladv = phy_read(phydev, MII_ADVERTISE);
                u16 rmtadv = phy_read(phydev, MII_LPA);
                u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

                if (cap & FLOW_CTRL_RX)
                        pause_rx = 1;
                if (cap & FLOW_CTRL_TX)
                        pause_tx = 1;
        }

        gmac_set_flow_control(dev, pause_tx, pause_rx);

        if (old_status.bits32 == status.bits32)
                return;

        if (netif_msg_link(gmac)) {
                phy_print_status(phydev);
                netdev_info(dev, "link flow control: %s\n",
                        phydev->pause
                                ? (phydev->asym_pause ? "tx" : "both")
                                : (phydev->asym_pause ? "rx" : "none")
                );
        }

        gmac_disable_tx_rx(dev);
        writel(status.bits32, status_reg);
        gmac_enable_tx_rx(dev);
}

static int gmac_setup_phy(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;
        struct gemini_gmac_platform_data *pdata = toe->dev->platform_data;
        GMAC_STATUS_T status = { .bits32 = 0 };
        int num = dev->dev_id;

        dev->phydev = phy_connect(dev, pdata->bus_id[num],
                &gmac_update_link_state, pdata->interface[num]);

        if (IS_ERR(dev->phydev)) {
                int err = PTR_ERR(dev->phydev);
                dev->phydev = NULL;
                return err;
        }

        dev->phydev->supported &= PHY_GBIT_FEATURES;
        dev->phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
        dev->phydev->advertising = dev->phydev->supported;

        /* set PHY interface type */
        switch (dev->phydev->interface) {
        case PHY_INTERFACE_MODE_MII:
                status.bits.mii_rmii = GMAC_PHY_MII;
                break;
        case PHY_INTERFACE_MODE_GMII:
                status.bits.mii_rmii = GMAC_PHY_GMII;
                break;
        case PHY_INTERFACE_MODE_RGMII:
                status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
                break;
        default:
                netdev_err(dev, "Unsupported MII interface\n");
                phy_disconnect(dev->phydev);
                dev->phydev = NULL;
                return -EINVAL;
        }
        writel(status.bits32, gmac->ctl_iomem + GMAC_STATUS);

        return 0;
}

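/* Map the required layer-3 payload size onto the smallest of the
 * fixed CONFIG0 max-frame-length encodings that can hold it, or
 * return -1 if even the jumbo settings are too small.
 */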
static int gmac_pick_rx_max_len(int max_l3_len)
{
        /* index = CONFIG_MAXLEN_XXX values */
        static const int max_len[8] = {
                1536, 1518, 1522, 1542,
                9212, 10236, 1518, 1518
        };
        int i, n = 5;

        max_l3_len += ETH_HLEN + VLAN_HLEN;

        if (max_l3_len > max_len[n])
                return -1;

        for (i = 0; i < 5; ++i) {
                if (max_len[i] >= max_l3_len && max_len[i] < max_len[n])
                        n = i;
        }

        return n;
}

static int gmac_init(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);
        u32 val;

        GMAC_CONFIG0_T config0 = { .bits = {
                .dis_tx = 1,
                .dis_rx = 1,
                .ipv4_rx_chksum = 1,
                .ipv6_rx_chksum = 1,
                .rx_err_detect = 1,
                .rgmm_edge = 1,
                .port0_chk_hwq = 1,
                .port1_chk_hwq = 1,
                .port0_chk_toeq = 1,
                .port1_chk_toeq = 1,
                .port0_chk_classq = 1,
                .port1_chk_classq = 1,
        } };
        GMAC_AHB_WEIGHT_T ahb_weight = { .bits = {
                .rx_weight = 1,
                .tx_weight = 1,
                .hash_weight = 1,
                .pre_req = 0x1f,
                .tqDV_threshold = 0,
        } };
        GMAC_TX_WCR0_T hw_weigh = { .bits = {
                .hw_tq3 = 1,
                .hw_tq2 = 1,
                .hw_tq1 = 1,
                .hw_tq0 = 1,
        } };
        GMAC_TX_WCR1_T sw_weigh = { .bits = {
                .sw_tq5 = 1,
                .sw_tq4 = 1,
                .sw_tq3 = 1,
                .sw_tq2 = 1,
                .sw_tq1 = 1,
                .sw_tq0 = 1,
        } };
        GMAC_CONFIG1_T config1 = { .bits = {
                .set_threshold = 16,
                .rel_threshold = 24,
        } };
        GMAC_CONFIG2_T config2 = { .bits = {
                .set_threshold = 16,
                .rel_threshold = 32,
        } };
        GMAC_CONFIG3_T config3 = { .bits = {
                .set_threshold = 0,
                .rel_threshold = 0,
        } };

        config0.bits.max_len = gmac_pick_rx_max_len(dev->mtu);

        val = readl(gmac->ctl_iomem + GMAC_CONFIG0);
        config0.bits.reserved = ((GMAC_CONFIG0_T)val).bits.reserved;
        writel(config0.bits32, gmac->ctl_iomem + GMAC_CONFIG0);
        writel(config1.bits32, gmac->ctl_iomem + GMAC_CONFIG1);
        writel(config2.bits32, gmac->ctl_iomem + GMAC_CONFIG2);
        writel(config3.bits32, gmac->ctl_iomem + GMAC_CONFIG3);

        val = readl(gmac->dma_iomem + GMAC_AHB_WEIGHT_REG);
        writel(ahb_weight.bits32, gmac->dma_iomem + GMAC_AHB_WEIGHT_REG);

        writel(hw_weigh.bits32,
                gmac->dma_iomem + GMAC_TX_WEIGHTING_CTRL_0_REG);
        writel(sw_weigh.bits32,
                gmac->dma_iomem + GMAC_TX_WEIGHTING_CTRL_1_REG);

        gmac->rxq_order = DEFAULT_GMAC_RXQ_ORDER;
        gmac->txq_order = DEFAULT_GMAC_TXQ_ORDER;
        gmac->rx_coalesce_nsecs = DEFAULT_RX_COALESCE_NSECS;

        /* Mark every quarter of the queue a packet for interrupt
         * in order to be able to wake up the queue if it was stopped
         */
        gmac->irq_every_tx_packets = 1 << (gmac->txq_order - 2);

        return 0;
}

static void gmac_uninit(struct net_device *dev)
{
        if (dev->phydev)
                phy_disconnect(dev->phydev);
}

static int gmac_setup_txqs(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;
        void __iomem *rwptr_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE0_PTR_REG;
        void __iomem *base_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE_BASE_REG;

        unsigned int n_txq = dev->num_tx_queues;
        size_t entries = 1 << gmac->txq_order;
        size_t len = n_txq * entries;
        struct gmac_txq *txq = gmac->txq;
        GMAC_TXDESC_T *desc_ring;
        struct sk_buff **skb_tab;
        unsigned int r;
        int i;

        skb_tab = kzalloc(len * sizeof(*skb_tab), GFP_KERNEL);
        if (!skb_tab)
                return -ENOMEM;

        desc_ring = dma_alloc_coherent(toe->dev, len * sizeof(*desc_ring),
                &gmac->txq_dma_base, GFP_KERNEL);

        if (!desc_ring) {
                kfree(skb_tab);
                return -ENOMEM;
        }

        BUG_ON(gmac->txq_dma_base & ~DMA_Q_BASE_MASK);

        writel(gmac->txq_dma_base | gmac->txq_order, base_reg);

        for (i = 0; i < n_txq; i++) {
                txq->ring = desc_ring;
                txq->skb = skb_tab;
                txq->noirq_packets = 0;

                r = readw(rwptr_reg);
                rwptr_reg += 2;
                writew(r, rwptr_reg);
                rwptr_reg += 2;
                txq->cptr = r;

                txq++;
                desc_ring += entries;
                skb_tab += entries;
        }

        return 0;
}

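/* Reclaim TX descriptors from cptr up to the hardware read pointer r:
 * unmap buffers, free completed skbs on EOF and fold the per-queue
 * error/packet/byte counts into the device statistics.
 */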
static void gmac_clean_txq(struct net_device *dev, struct gmac_txq *txq,
                           unsigned int r)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;
        unsigned int errs = 0;
        unsigned int pkts = 0;
        unsigned int hwchksum = 0;
        unsigned long bytes = 0;
        unsigned int m = (1 << gmac->txq_order) - 1;
        unsigned int c = txq->cptr;
        GMAC_TXDESC_0_T word0;
        GMAC_TXDESC_1_T word1;
        unsigned int word3;
        dma_addr_t mapping;
        GMAC_TXDESC_T *txd;
        unsigned short nfrags;

        if (unlikely(c == r))
                return;

        rmb();
        while (c != r) {
                txd = txq->ring + c;
                word0 = txd->word0;
                word1 = txd->word1;
                mapping = txd->word2.buf_adr;
                word3 = txd->word3.bits32;

                dma_unmap_single(toe->dev, mapping, word0.bits.buffer_size,
                                 DMA_TO_DEVICE);

                if (word3 & EOF_BIT)
                        dev_kfree_skb(txq->skb[c]);

                c++;
                c &= m;

                if (!(word3 & SOF_BIT))
                        continue;

                if (!word0.bits.status_tx_ok) {
                        errs++;
                        continue;
                }

                pkts++;
                bytes += txd->word1.bits.byte_count;

                if (word1.bits32 & TSS_CHECKUM_ENABLE)
                        hwchksum++;

                nfrags = word0.bits.desc_count - 1;
                if (nfrags) {
                        if (nfrags >= TX_MAX_FRAGS)
                                nfrags = TX_MAX_FRAGS - 1;

                        u64_stats_update_begin(&gmac->tx_stats_syncp);
                        gmac->tx_frag_stats[nfrags]++;
                        u64_stats_update_end(&gmac->tx_stats_syncp);
                }
        }

        u64_stats_update_begin(&gmac->ir_stats_syncp);
        gmac->stats.tx_errors += errs;
        gmac->stats.tx_packets += pkts;
        gmac->stats.tx_bytes += bytes;
        gmac->tx_hw_csummed += hwchksum;
        u64_stats_update_end(&gmac->ir_stats_syncp);

        txq->cptr = c;
}

static void gmac_cleanup_txqs(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;
        void __iomem *rwptr_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE0_PTR_REG;
        void __iomem *base_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE_BASE_REG;

        unsigned int n_txq = dev->num_tx_queues;
        unsigned int r, i;

        for (i = 0; i < n_txq; i++) {
                r = readw(rwptr_reg);
                rwptr_reg += 2;
                writew(r, rwptr_reg);
                rwptr_reg += 2;

                gmac_clean_txq(dev, gmac->txq + i, r);
        }
        writel(0, base_reg);

        kfree(gmac->txq->skb);
        dma_free_coherent(toe->dev,
                n_txq * sizeof(*gmac->txq->ring) << gmac->txq_order,
                gmac->txq->ring, gmac->txq_dma_base);
}

static int gmac_setup_rxq(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;
        NONTOE_QHDR_T __iomem *qhdr = toe->iomem +
                                      TOE_DEFAULT_Q_HDR_BASE(dev->dev_id);

        gmac->rxq_rwptr = &qhdr->word1;
        gmac->rxq_ring = dma_alloc_coherent(toe->dev,
                sizeof(*gmac->rxq_ring) << gmac->rxq_order,
                &gmac->rxq_dma_base, GFP_KERNEL);
        if (!gmac->rxq_ring)
                return -ENOMEM;

        BUG_ON(gmac->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK);

        writel(gmac->rxq_dma_base | gmac->rxq_order, &qhdr->word0);
        writel(0, gmac->rxq_rwptr);
        return 0;
}

static void gmac_cleanup_rxq(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;

        NONTOE_QHDR_T __iomem *qhdr = toe->iomem +
                                      TOE_DEFAULT_Q_HDR_BASE(dev->dev_id);
        void __iomem *dma_reg = &qhdr->word0;
        void __iomem *ptr_reg = &qhdr->word1;
        GMAC_RXDESC_T *rxd = gmac->rxq_ring;
        DMA_RWPTR_T rw;
        unsigned int r, w;
        unsigned int m = (1 << gmac->rxq_order) - 1;
        struct page *page;
        dma_addr_t mapping;

        rw.bits32 = readl(ptr_reg);
        r = rw.bits.rptr;
        w = rw.bits.wptr;
        writew(r, ptr_reg + 2);

        writel(0, dma_reg);

        rmb();
        while (r != w) {
                mapping = rxd[r].word2.buf_adr;
                r++;
                r &= m;

                if (!mapping)
                        continue;

                page = pfn_to_page(dma_to_pfn(toe->dev, mapping));
                put_page(page);
        }

        dma_free_coherent(toe->dev, sizeof(*gmac->rxq_ring) << gmac->rxq_order,
                gmac->rxq_ring, gmac->rxq_dma_base);
}

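/* Allocate and DMA-map one page for free queue slot pn and point all
 * of the page's fragment-sized descriptors at it, replacing (and
 * unmapping) any page previously installed in that slot.
 */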
static struct page *toe_freeq_alloc_map_page(struct toe_private *toe, int pn)
{
        unsigned int fpp_order = PAGE_SHIFT - toe->freeq_frag_order;
        unsigned int frag_len = 1 << toe->freeq_frag_order;
        GMAC_RXDESC_T *freeq_entry;
        dma_addr_t mapping;
        struct page *page;
        int i;

        page = alloc_page(__GFP_COLD | GFP_ATOMIC);
        if (!page)
                return NULL;

        mapping = dma_map_single(toe->dev, page_address(page),
                                PAGE_SIZE, DMA_FROM_DEVICE);

        if (unlikely(dma_mapping_error(toe->dev, mapping) || !mapping)) {
                put_page(page);
                return NULL;
        }

        freeq_entry = toe->freeq_ring + (pn << fpp_order);
        for (i = 1 << fpp_order; i > 0; --i) {
                freeq_entry->word2.buf_adr = mapping;
                freeq_entry++;
                mapping += frag_len;
        }

        if (toe->freeq_page_tab[pn]) {
                mapping = toe->freeq_ring[pn << fpp_order].word2.buf_adr;
                dma_unmap_single(toe->dev, mapping, frag_len, DMA_FROM_DEVICE);
                put_page(toe->freeq_page_tab[pn]);
        }

        toe->freeq_page_tab[pn] = page;
        return page;
}

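/* Advance the free queue write pointer, re-arming page fragments for
 * the hardware. Pages still held by the network stack (refcount > 1)
 * are swapped for freshly allocated ones, but only within a small
 * window ahead of the read pointer; beyond that the refill stops
 * early.
 */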
static unsigned int toe_fill_freeq(struct toe_private *toe, int reset)
{
        void __iomem *rwptr_reg = toe->iomem + GLOBAL_SWFQ_RWPTR_REG;

        DMA_RWPTR_T rw;
        unsigned int pn, epn;
        unsigned int fpp_order = PAGE_SHIFT - toe->freeq_frag_order;
        unsigned int m_pn = (1 << (toe->freeq_order - fpp_order)) - 1;
        struct page *page;
        unsigned int count = 0;
        unsigned long flags;

        spin_lock_irqsave(&toe->freeq_lock, flags);

        rw.bits32 = readl(rwptr_reg);
        pn = (reset ? rw.bits.rptr : rw.bits.wptr) >> fpp_order;
        epn = (rw.bits.rptr >> fpp_order) - 1;
        epn &= m_pn;

        while (pn != epn) {
                page = toe->freeq_page_tab[pn];

                if (atomic_read(&page->_count) > 1) {
                        unsigned int fl = (pn - epn) & m_pn;

                        if (fl > 64 >> fpp_order)
                                break;

                        page = toe_freeq_alloc_map_page(toe, pn);
                        if (!page)
                                break;
                }

                atomic_add(1 << fpp_order, &page->_count);
                count += 1 << fpp_order;
                pn++;
                pn &= m_pn;
        }

        wmb();
        writew(pn << fpp_order, rwptr_reg + 2);

        spin_unlock_irqrestore(&toe->freeq_lock, flags);
        return count;
}

static int toe_setup_freeq(struct toe_private *toe)
{
        void __iomem *dma_reg = toe->iomem + GLOBAL_SW_FREEQ_BASE_SIZE_REG;
        QUEUE_THRESHOLD_T qt;
        DMA_SKB_SIZE_T skbsz;
        unsigned int filled;
        unsigned int frag_len = 1 << toe->freeq_frag_order;
        unsigned int len = 1 << toe->freeq_order;
        unsigned int fpp_order = PAGE_SHIFT - toe->freeq_frag_order;
        unsigned int pages = len >> fpp_order;
        dma_addr_t mapping;
        unsigned int pn;

        toe->freeq_ring = dma_alloc_coherent(toe->dev,
                sizeof(*toe->freeq_ring) << toe->freeq_order,
                &toe->freeq_dma_base, GFP_KERNEL);
        if (!toe->freeq_ring)
                return -ENOMEM;

        BUG_ON(toe->freeq_dma_base & ~DMA_Q_BASE_MASK);

        toe->freeq_page_tab = kzalloc(pages * sizeof(*toe->freeq_page_tab),
                                                        GFP_KERNEL);
        if (!toe->freeq_page_tab)
                goto err_freeq;

        for (pn = 0; pn < pages; pn++)
                if (!toe_freeq_alloc_map_page(toe, pn))
                        goto err_freeq_alloc;

        filled = toe_fill_freeq(toe, 1);
        if (!filled)
                goto err_freeq_alloc;

        qt.bits32 = readl(toe->iomem + GLOBAL_QUEUE_THRESHOLD_REG);
        qt.bits.swfq_empty = 32;
        writel(qt.bits32, toe->iomem + GLOBAL_QUEUE_THRESHOLD_REG);

        skbsz.bits.sw_skb_size = 1 << toe->freeq_frag_order;
        writel(skbsz.bits32, toe->iomem + GLOBAL_DMA_SKB_SIZE_REG);
        writel(toe->freeq_dma_base | toe->freeq_order, dma_reg);

        return 0;

err_freeq_alloc:
        while (pn > 0) {
                --pn;
                mapping = toe->freeq_ring[pn << fpp_order].word2.buf_adr;
                dma_unmap_single(toe->dev, mapping, frag_len, DMA_FROM_DEVICE);
                put_page(toe->freeq_page_tab[pn]);
        }

err_freeq:
        dma_free_coherent(toe->dev,
                sizeof(*toe->freeq_ring) << toe->freeq_order,
                toe->freeq_ring, toe->freeq_dma_base);
        toe->freeq_ring = NULL;
        return -ENOMEM;
}

static void toe_cleanup_freeq(struct toe_private *toe)
{
        void __iomem *dma_reg = toe->iomem + GLOBAL_SW_FREEQ_BASE_SIZE_REG;
        void __iomem *ptr_reg = toe->iomem + GLOBAL_SWFQ_RWPTR_REG;

        unsigned int frag_len = 1 << toe->freeq_frag_order;
        unsigned int len = 1 << toe->freeq_order;
        unsigned int fpp_order = PAGE_SHIFT - toe->freeq_frag_order;
        unsigned int pages = len >> fpp_order;
        struct page *page;
        dma_addr_t mapping;
        unsigned int pn;

        writew(readw(ptr_reg), ptr_reg + 2);
        writel(0, dma_reg);

        for (pn = 0; pn < pages; pn++) {
                mapping = toe->freeq_ring[pn << fpp_order].word2.buf_adr;
                dma_unmap_single(toe->dev, mapping, frag_len, DMA_FROM_DEVICE);

                page = toe->freeq_page_tab[pn];
                while (atomic_read(&page->_count) > 0)
                        put_page(page);
        }

        kfree(toe->freeq_page_tab);

        dma_free_coherent(toe->dev,
                sizeof(*toe->freeq_ring) << toe->freeq_order,
                toe->freeq_ring, toe->freeq_dma_base);
}

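/* The free queue is shared by both ports, so it is sized to cover the
 * RX queues of every registered netdev and may only be resized while
 * the other port is down. The empty-queue interrupt is masked while
 * the queue is rebuilt.
 */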
static int toe_resize_freeq(struct toe_private *toe, int changing_dev_id)
{
        void __iomem *irqen_reg = toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG;
        struct gmac_private *gmac;
        struct net_device *other = toe->netdev[1 - changing_dev_id];
        unsigned int new_size = 0;
        unsigned int new_order;
        int err;
        unsigned long flags;
        unsigned int en;

        if (other && netif_running(other))
                return -EBUSY;

        if (toe->netdev[0]) {
                gmac = netdev_priv(toe->netdev[0]);
                new_size = 1 << (gmac->rxq_order + 1);
        }

        if (toe->netdev[1]) {
                gmac = netdev_priv(toe->netdev[1]);
                new_size += 1 << (gmac->rxq_order + 1);
        }

        new_order = min(15, ilog2(new_size - 1) + 1);
        if (toe->freeq_order == new_order)
                return 0;

        spin_lock_irqsave(&toe->irq_lock, flags);
        en = readl(irqen_reg);
        en &= ~SWFQ_EMPTY_INT_BIT;
        writel(en, irqen_reg);

        if (toe->freeq_ring)
                toe_cleanup_freeq(toe);

        toe->freeq_order = new_order;
        err = toe_setup_freeq(toe);

        en |= SWFQ_EMPTY_INT_BIT;
        writel(en, irqen_reg);
        spin_unlock_irqrestore(&toe->irq_lock, flags);

        return err;
}

static void gmac_tx_irq_enable(struct net_device *dev, unsigned int txq, int en)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;
        unsigned int val, mask;

        mask = GMAC0_IRQ0_TXQ0_INTS << (6 * dev->dev_id + txq);

        if (en)
                writel(mask, toe->iomem + GLOBAL_INTERRUPT_STATUS_0_REG);

        val = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG);
        val = en ? val | mask : val & ~mask;
        writel(val, toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG);
}

static void gmac_tx_irq(struct net_device *dev, unsigned int txq_num)
{
        struct netdev_queue *ntxq = netdev_get_tx_queue(dev, txq_num);

        gmac_tx_irq_enable(dev, txq_num, 0);
        netif_tx_wake_queue(ntxq);
}

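/* Build TX descriptors for the skb head and all page fragments at
 * *desc: word1 carries length and checksum/TSO flags, SOF/EOF in
 * word3 delimit the frame. On a DMA mapping failure every descriptor
 * written so far is unwound and -ENOMEM is returned.
 */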
static int gmac_map_tx_bufs(struct net_device *dev, struct sk_buff *skb,
                            struct gmac_txq *txq, unsigned short *desc)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;
        struct skb_shared_info *skb_si = skb_shinfo(skb);
        skb_frag_t *skb_frag;
        short frag, last_frag = skb_si->nr_frags - 1;
        unsigned short m = (1 << gmac->txq_order) - 1;
        unsigned short w = *desc;
        unsigned int word1, word3, buflen;
        dma_addr_t mapping;
        void *buffer;
        unsigned short mtu;
        GMAC_TXDESC_T *txd;

        mtu = ETH_HLEN;
        mtu += dev->mtu;
        if (skb->protocol == htons(ETH_P_8021Q))
                mtu += VLAN_HLEN;

        word1 = skb->len;
        word3 = SOF_BIT;

        if (word1 > mtu) {
                word1 |= TSS_MTU_ENABLE_BIT;
                word3 += mtu;
        }

        if (skb->ip_summed != CHECKSUM_NONE) {
                int tcp = 0;

                if (skb->protocol == htons(ETH_P_IP)) {
                        word1 |= TSS_IP_CHKSUM_BIT;
                        tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
                } else { /* IPv6 */
                        word1 |= TSS_IPV6_ENABLE_BIT;
                        tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
                }

                word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
        }

        frag = -1;
        while (frag <= last_frag) {
                if (frag == -1) {
                        buffer = skb->data;
                        buflen = skb_headlen(skb);
                } else {
                        skb_frag = skb_si->frags + frag;
                        buffer = page_address(skb_frag_page(skb_frag)) +
                                 skb_frag->page_offset;
                        buflen = skb_frag->size;
                }

                if (frag == last_frag) {
                        word3 |= EOF_BIT;
                        txq->skb[w] = skb;
                }

                mapping = dma_map_single(toe->dev, buffer, buflen,
                                        DMA_TO_DEVICE);
                if (dma_mapping_error(toe->dev, mapping) ||
                        !(mapping & PAGE_MASK))
                        goto map_error;

                txd = txq->ring + w;
                txd->word0.bits32 = buflen;
                txd->word1.bits32 = word1;
                txd->word2.buf_adr = mapping;
                txd->word3.bits32 = word3;

                word3 &= MTU_SIZE_BIT_MASK;
                w++;
                w &= m;
                frag++;
        }

        *desc = w;
        return 0;

map_error:
        while (w != *desc) {
                w--;
                w &= m;

                dma_unmap_single(toe->dev, txq->ring[w].word2.buf_adr,
                        txq->ring[w].word0.bits.buffer_size, DMA_TO_DEVICE);
        }
        return -ENOMEM;
}

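/* Hard transmit entry point. If the ring lacks room for the frame's
 * fragments, completed descriptors are reclaimed first; if it is
 * still full, the queue is stopped and a completion interrupt is
 * requested a few descriptors ahead so the queue can be woken again.
 */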
static int gmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);

        void __iomem *ptr_reg;
        struct gmac_txq *txq;
        struct netdev_queue *ntxq;
        int txq_num, nfrags;
        DMA_RWPTR_T rw;
        unsigned short r, w, d;
        unsigned short m = (1 << gmac->txq_order) - 1;

        SKB_FRAG_ASSERT(skb);

        if (unlikely(skb->len >= 0x10000))
                goto out_drop_free;

        txq_num = skb_get_queue_mapping(skb);
        ptr_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE_PTR_REG(txq_num);
        txq = &gmac->txq[txq_num];
        ntxq = netdev_get_tx_queue(dev, txq_num);
        nfrags = skb_shinfo(skb)->nr_frags;

        rw.bits32 = readl(ptr_reg);
        r = rw.bits.rptr;
        w = rw.bits.wptr;

        d = txq->cptr - w - 1;
        d &= m;

        if (unlikely(d < nfrags + 2)) {
                gmac_clean_txq(dev, txq, r);
                d = txq->cptr - w - 1;
                d &= m;

                if (unlikely(d < nfrags + 2)) {
                        netif_tx_stop_queue(ntxq);

                        d = txq->cptr + nfrags + 16;
                        d &= m;
                        txq->ring[d].word3.bits.eofie = 1;
                        gmac_tx_irq_enable(dev, txq_num, 1);

                        u64_stats_update_begin(&gmac->tx_stats_syncp);
                        dev->stats.tx_fifo_errors++;
                        u64_stats_update_end(&gmac->tx_stats_syncp);
                        return NETDEV_TX_BUSY;
                }
        }

        if (unlikely(gmac_map_tx_bufs(dev, skb, txq, &w))) {
                if (skb_linearize(skb))
                        goto out_drop;

                if (unlikely(gmac_map_tx_bufs(dev, skb, txq, &w)))
                        goto out_drop_free;

                u64_stats_update_begin(&gmac->tx_stats_syncp);
                gmac->tx_frags_linearized++;
                u64_stats_update_end(&gmac->tx_stats_syncp);
        }

        writew(w, ptr_reg + 2);

        gmac_clean_txq(dev, txq, r);
        return NETDEV_TX_OK;

out_drop_free:
        dev_kfree_skb(skb);
out_drop:
        u64_stats_update_begin(&gmac->tx_stats_syncp);
        gmac->stats.tx_dropped++;
        u64_stats_update_end(&gmac->tx_stats_syncp);
        return NETDEV_TX_OK;
}

static void gmac_tx_timeout(struct net_device *dev)
{
        netdev_err(dev, "Tx timeout\n");
        gmac_dump_dma_state(dev);
}

static void gmac_enable_irq(struct net_device *dev, int enable)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;
        unsigned long flags;
        unsigned int val, mask;

        spin_lock_irqsave(&toe->irq_lock, flags);

        mask = GMAC0_IRQ0_2 << (dev->dev_id * 2);
        val = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG);
        val = enable ? (val | mask) : (val & ~mask);
        writel(val, toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG);

        mask = DEFAULT_Q0_INT_BIT << dev->dev_id;
        val = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_1_REG);
        val = enable ? (val | mask) : (val & ~mask);
        writel(val, toe->iomem + GLOBAL_INTERRUPT_ENABLE_1_REG);

        mask = GMAC0_IRQ4_8 << (dev->dev_id * 8);
        val = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG);
        val = enable ? (val | mask) : (val & ~mask);
        writel(val, toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG);

        spin_unlock_irqrestore(&toe->irq_lock, flags);
}

static void gmac_enable_rx_irq(struct net_device *dev, int enable)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;
        unsigned long flags;
        unsigned int val, mask;

        spin_lock_irqsave(&toe->irq_lock, flags);
        mask = DEFAULT_Q0_INT_BIT << dev->dev_id;

        val = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_1_REG);
        val = enable ? (val | mask) : (val & ~mask);
        writel(val, toe->iomem + GLOBAL_INTERRUPT_ENABLE_1_REG);

        spin_unlock_irqrestore(&toe->irq_lock, flags);
}

static struct sk_buff *gmac_skb_if_good_frame(struct gmac_private *gmac,
        GMAC_RXDESC_0_T word0, unsigned int frame_len)
{
        struct sk_buff *skb = NULL;
        unsigned int rx_status = word0.bits.status;
        unsigned int rx_csum = word0.bits.chksum_status;

        gmac->rx_stats[rx_status]++;
        gmac->rx_csum_stats[rx_csum]++;

        if (word0.bits.derr || word0.bits.perr ||
            rx_status || frame_len < ETH_ZLEN ||
            rx_csum >= RX_CHKSUM_IP_ERR_UNKNOWN) {
                gmac->stats.rx_errors++;

                if (frame_len < ETH_ZLEN || RX_ERROR_LENGTH(rx_status))
                        gmac->stats.rx_length_errors++;
                if (RX_ERROR_OVER(rx_status))
                        gmac->stats.rx_over_errors++;
                if (RX_ERROR_CRC(rx_status))
                        gmac->stats.rx_crc_errors++;
                if (RX_ERROR_FRAME(rx_status))
                        gmac->stats.rx_frame_errors++;

                return NULL;
        }

        skb = napi_get_frags(&gmac->napi);
        if (!skb)
                return NULL;

        if (rx_csum == RX_CHKSUM_IP_UDP_TCP_OK)
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        gmac->stats.rx_bytes += frame_len;
        gmac->stats.rx_packets++;
        return skb;
}

static unsigned int gmac_rx(struct net_device *dev, unsigned int budget)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;
        void __iomem *ptr_reg = gmac->rxq_rwptr;

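        /* Partially assembled frame, if any, carried over from the
         * previous poll. Note this is function-static, which assumes
         * RX for all ports is processed by one NAPI context at a time.
         */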
        static struct sk_buff *skb;

        DMA_RWPTR_T rw;
        unsigned short r, w;
        unsigned short m = (1 << gmac->rxq_order) - 1;
        GMAC_RXDESC_T *rx = NULL;
        struct page *page = NULL;
        unsigned int page_offs;
        unsigned int frame_len, frag_len;
        int frag_nr = 0;

        GMAC_RXDESC_0_T word0;
        GMAC_RXDESC_1_T word1;
        dma_addr_t mapping;
        GMAC_RXDESC_3_T word3;

        rw.bits32 = readl(ptr_reg);
        /* Ack the interrupt: every packet up to this point is accounted for */
        writel(DEFAULT_Q0_INT_BIT << dev->dev_id,
                toe->iomem + GLOBAL_INTERRUPT_STATUS_1_REG);
        r = rw.bits.rptr;
        w = rw.bits.wptr;

        while (budget && w != r) {
                rx = gmac->rxq_ring + r;
                word0 = rx->word0;
                word1 = rx->word1;
                mapping = rx->word2.buf_adr;
                word3 = rx->word3;

                r++;
                r &= m;

                frag_len = word0.bits.buffer_size;
                frame_len = word1.bits.byte_count;
                page_offs = mapping & ~PAGE_MASK;

                if (unlikely(!mapping)) {
                        netdev_err(dev, "rxq[%u]: HW BUG: zero DMA desc\n", r);
                        goto err_drop;
                }

                page = pfn_to_page(dma_to_pfn(toe->dev, mapping));

                if (word3.bits32 & SOF_BIT) {
                        if (unlikely(skb)) {
                                napi_free_frags(&gmac->napi);
                                gmac->stats.rx_dropped++;
                        }

                        skb = gmac_skb_if_good_frame(gmac, word0, frame_len);
                        if (unlikely(!skb))
                                goto err_drop;

                        page_offs += NET_IP_ALIGN;
                        frag_len -= NET_IP_ALIGN;
                        frag_nr = 0;

                } else if (!skb) {
                        put_page(page);
                        continue;
                }

                if (word3.bits32 & EOF_BIT)
                        frag_len = frame_len - skb->len;

                /* append page frag to skb */
                if (unlikely(frag_nr == MAX_SKB_FRAGS))
                        goto err_drop;

                if (frag_len == 0)
                        netdev_err(dev, "Received fragment with len = 0\n");

                skb_fill_page_desc(skb, frag_nr, page, page_offs, frag_len);
                skb->len += frag_len;
                skb->data_len += frag_len;
                skb->truesize += frag_len;
                frag_nr++;

                if (word3.bits32 & EOF_BIT) {
                        napi_gro_frags(&gmac->napi);
                        skb = NULL;
                        --budget;
                }
                continue;

err_drop:
                if (skb) {
                        napi_free_frags(&gmac->napi);
                        skb = NULL;
                }

                if (mapping)
                        put_page(page);

                gmac->stats.rx_dropped++;
        }

        writew(r, ptr_reg);
        return budget;
}

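/* NAPI poll: receive up to the budget, completing and re-enabling the
 * RX interrupt once less than the full budget is consumed. Consumed
 * fragments are returned to the shared free queue in batches of half
 * its size.
 */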
static int gmac_napi_poll(struct napi_struct *napi, int budget)
{
        struct gmac_private *gmac = netdev_priv(napi->dev);
        struct toe_private *toe = gmac->toe;
        unsigned int rx;
        unsigned int freeq_threshold = 1 << (toe->freeq_order - 1);

        u64_stats_update_begin(&gmac->rx_stats_syncp);

        rx = budget - gmac_rx(napi->dev, budget);

        if (rx < budget) {
                napi_gro_flush(napi, false);
                __napi_complete(napi);
                gmac_enable_rx_irq(napi->dev, 1);
                ++gmac->rx_napi_exits;
        }

        gmac->freeq_refill += rx;
        if (gmac->freeq_refill > freeq_threshold) {
                gmac->freeq_refill -= freeq_threshold;
                toe_fill_freeq(toe, 0);
        }

        u64_stats_update_end(&gmac->rx_stats_syncp);
        return rx;
}

static void gmac_dump_dma_state(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;
        void __iomem *ptr_reg;
        unsigned int reg[5];

        /* Interrupt status */
        reg[0] = readl(toe->iomem + GLOBAL_INTERRUPT_STATUS_0_REG);
        reg[1] = readl(toe->iomem + GLOBAL_INTERRUPT_STATUS_1_REG);
        reg[2] = readl(toe->iomem + GLOBAL_INTERRUPT_STATUS_2_REG);
        reg[3] = readl(toe->iomem + GLOBAL_INTERRUPT_STATUS_3_REG);
        reg[4] = readl(toe->iomem + GLOBAL_INTERRUPT_STATUS_4_REG);
        netdev_err(dev, "IRQ status: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
                reg[0], reg[1], reg[2], reg[3], reg[4]);

        /* Interrupt enable */
        reg[0] = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG);
        reg[1] = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_1_REG);
        reg[2] = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_2_REG);
        reg[3] = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_3_REG);
        reg[4] = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG);
        netdev_err(dev, "IRQ enable: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
                reg[0], reg[1], reg[2], reg[3], reg[4]);

        /* RX DMA status */
        reg[0] = readl(gmac->dma_iomem + GMAC_DMA_RX_FIRST_DESC_REG);
        reg[1] = readl(gmac->dma_iomem + GMAC_DMA_RX_CURR_DESC_REG);
        reg[2] = GET_RPTR(gmac->rxq_rwptr);
        reg[3] = GET_WPTR(gmac->rxq_rwptr);
        netdev_err(dev, "RX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
                reg[0], reg[1], reg[2], reg[3]);

        reg[0] = readl(gmac->dma_iomem + GMAC_DMA_RX_DESC_WORD0_REG);
        reg[1] = readl(gmac->dma_iomem + GMAC_DMA_RX_DESC_WORD1_REG);
        reg[2] = readl(gmac->dma_iomem + GMAC_DMA_RX_DESC_WORD2_REG);
        reg[3] = readl(gmac->dma_iomem + GMAC_DMA_RX_DESC_WORD3_REG);
        netdev_err(dev, "RX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                reg[0], reg[1], reg[2], reg[3]);

        /* TX DMA status */
        ptr_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE0_PTR_REG;

        reg[0] = readl(gmac->dma_iomem + GMAC_DMA_TX_FIRST_DESC_REG);
        reg[1] = readl(gmac->dma_iomem + GMAC_DMA_TX_CURR_DESC_REG);
        reg[2] = GET_RPTR(ptr_reg);
        reg[3] = GET_WPTR(ptr_reg);
        netdev_err(dev, "TX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
                reg[0], reg[1], reg[2], reg[3]);

        reg[0] = readl(gmac->dma_iomem + GMAC_DMA_TX_DESC_WORD0_REG);
        reg[1] = readl(gmac->dma_iomem + GMAC_DMA_TX_DESC_WORD1_REG);
        reg[2] = readl(gmac->dma_iomem + GMAC_DMA_TX_DESC_WORD2_REG);
        reg[3] = readl(gmac->dma_iomem + GMAC_DMA_TX_DESC_WORD3_REG);
        netdev_err(dev, "TX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                reg[0], reg[1], reg[2], reg[3]);

        /* FREE queues status */
        ptr_reg = toe->iomem + GLOBAL_SWFQ_RWPTR_REG;

        reg[0] = GET_RPTR(ptr_reg);
        reg[1] = GET_WPTR(ptr_reg);

        ptr_reg = toe->iomem + GLOBAL_HWFQ_RWPTR_REG;

        reg[2] = GET_RPTR(ptr_reg);
        reg[3] = GET_WPTR(ptr_reg);
        netdev_err(dev, "FQ SW ptr: %u %u, HW ptr: %u %u\n",
                reg[0], reg[1], reg[2], reg[3]);
}

static void gmac_update_hw_stats(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;
        unsigned long flags;
        unsigned int rx_discards, rx_mcast, rx_bcast;

        spin_lock_irqsave(&toe->irq_lock, flags);
        u64_stats_update_begin(&gmac->ir_stats_syncp);

        gmac->hw_stats[0] += rx_discards = readl(gmac->ctl_iomem + GMAC_IN_DISCARDS);
        gmac->hw_stats[1] += readl(gmac->ctl_iomem + GMAC_IN_ERRORS);
        gmac->hw_stats[2] += rx_mcast = readl(gmac->ctl_iomem + GMAC_IN_MCAST);
        gmac->hw_stats[3] += rx_bcast = readl(gmac->ctl_iomem + GMAC_IN_BCAST);
        gmac->hw_stats[4] += readl(gmac->ctl_iomem + GMAC_IN_MAC1);
        gmac->hw_stats[5] += readl(gmac->ctl_iomem + GMAC_IN_MAC2);

        gmac->stats.rx_missed_errors += rx_discards;
        gmac->stats.multicast += rx_mcast;
        gmac->stats.multicast += rx_bcast;

        writel(GMAC0_MIB_INT_BIT << (dev->dev_id * 8),
                toe->iomem + GLOBAL_INTERRUPT_STATUS_4_REG);

        u64_stats_update_end(&gmac->ir_stats_syncp);
        spin_unlock_irqrestore(&toe->irq_lock, flags);
}

static inline unsigned int gmac_get_intr_flags(struct net_device *dev, int i)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;
        void __iomem *irqif_reg, *irqen_reg;
        unsigned int offs, val;

        offs = i * (GLOBAL_INTERRUPT_STATUS_1_REG - GLOBAL_INTERRUPT_STATUS_0_REG);

        irqif_reg = toe->iomem + GLOBAL_INTERRUPT_STATUS_0_REG + offs;
        irqen_reg = toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG + offs;

        val = readl(irqif_reg) & readl(irqen_reg);
        return val;
}

static enum hrtimer_restart gmac_coalesce_delay_expired(struct hrtimer *timer)
{
        struct gmac_private *gmac = container_of(timer, struct gmac_private,
                                                 rx_coalesce_timer);

        napi_schedule(&gmac->napi);
        return HRTIMER_NORESTART;
}

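/* Shared top-level interrupt handler for both ports: fatal DMA
 * errors, TX completions, RX (direct NAPI schedule or deferred via
 * the coalescing hrtimer), MIB counter rollover and RX overruns.
 */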
static irqreturn_t gmac_irq(int irq, void *data)
{
        struct net_device *dev = data;
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;
        unsigned int val, orr = 0;

        orr |= val = gmac_get_intr_flags(dev, 0);

        if (unlikely(val & (GMAC0_IRQ0_2 << (dev->dev_id * 2)))) {
                /* oh, crap. */
                netdev_err(dev, "hw failure/sw bug\n");
                gmac_dump_dma_state(dev);

                /* don't know how to recover, just reduce losses */
                gmac_enable_irq(dev, 0);
                return IRQ_HANDLED;
        }

        if (val & (GMAC0_IRQ0_TXQ0_INTS << (dev->dev_id * 6)))
                gmac_tx_irq(dev, 0);

        orr |= val = gmac_get_intr_flags(dev, 1);

        if (val & (DEFAULT_Q0_INT_BIT << dev->dev_id)) {
                gmac_enable_rx_irq(dev, 0);

                if (!gmac->rx_coalesce_nsecs) {
                        napi_schedule(&gmac->napi);
                } else {
                        ktime_t ktime;

                        ktime = ktime_set(0, gmac->rx_coalesce_nsecs);
                        hrtimer_start(&gmac->rx_coalesce_timer, ktime,
                                      HRTIMER_MODE_REL);
                }
        }

        orr |= val = gmac_get_intr_flags(dev, 4);

        if (unlikely(val & (GMAC0_MIB_INT_BIT << (dev->dev_id * 8))))
                gmac_update_hw_stats(dev);

        if (unlikely(val & (GMAC0_RX_OVERRUN_INT_BIT << (dev->dev_id * 8)))) {
                writel(GMAC0_RXDERR_INT_BIT << (dev->dev_id * 8),
                        toe->iomem + GLOBAL_INTERRUPT_STATUS_4_REG);

                spin_lock(&toe->irq_lock);
                u64_stats_update_begin(&gmac->ir_stats_syncp);
                ++gmac->stats.rx_fifo_errors;
                u64_stats_update_end(&gmac->ir_stats_syncp);
                spin_unlock(&toe->irq_lock);
        }

        return orr ? IRQ_HANDLED : IRQ_NONE;
}

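/* Enable both DMA engines. NET_IP_ALIGN bytes are inserted ahead of
 * each received frame so the IP header lands on a 32-bit boundary;
 * bursts are capped at INCR8 on the AHB bus.
 */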
static void gmac_start_dma(struct gmac_private *gmac)
{
        void __iomem *dma_ctrl_reg = gmac->dma_iomem + GMAC_DMA_CTRL_REG;
        GMAC_DMA_CTRL_T dma_ctrl;

        dma_ctrl.bits32 = readl(dma_ctrl_reg);
        dma_ctrl.bits.rd_enable = 1;
        dma_ctrl.bits.td_enable = 1;
        dma_ctrl.bits.loopback = 0;
        dma_ctrl.bits.drop_small_ack = 0;
        dma_ctrl.bits.rd_insert_bytes = NET_IP_ALIGN;
        dma_ctrl.bits.rd_prot = HPROT_DATA_CACHE | HPROT_PRIVILIGED;
        dma_ctrl.bits.rd_burst_size = HBURST_INCR8;
        dma_ctrl.bits.rd_bus = HSIZE_8;
        dma_ctrl.bits.td_prot = HPROT_DATA_CACHE;
        dma_ctrl.bits.td_burst_size = HBURST_INCR8;
        dma_ctrl.bits.td_bus = HSIZE_8;

        writel(dma_ctrl.bits32, dma_ctrl_reg);
}

static void gmac_stop_dma(struct gmac_private *gmac)
{
        void __iomem *dma_ctrl_reg = gmac->dma_iomem + GMAC_DMA_CTRL_REG;
        GMAC_DMA_CTRL_T dma_ctrl;

        dma_ctrl.bits32 = readl(dma_ctrl_reg);
        dma_ctrl.bits.rd_enable = 0;
        dma_ctrl.bits.td_enable = 0;
        writel(dma_ctrl.bits32, dma_ctrl_reg);
}

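/*
 * Bring-up order in gmac_open() matters: the free queue is resized and
 * the RX/TX queues are allocated before DMA is started, and interrupts
 * and the MAC itself are only enabled once NAPI is ready to run.
 */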
static int gmac_open(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);
        int err;

        if (!dev->phydev) {
                err = gmac_setup_phy(dev);
                if (err) {
                        netif_err(gmac, ifup, dev,
                                "PHY init failed: %d\n", err);
                        return err;
                }
        }

        err = request_irq(dev->irq, gmac_irq,
                IRQF_SHARED, dev->name, dev);
        if (unlikely(err))
                return err;

        netif_carrier_off(dev);
        phy_start(dev->phydev);

        err = toe_resize_freeq(gmac->toe, dev->dev_id);
        if (unlikely(err))
                goto err_stop_phy;

        err = gmac_setup_rxq(dev);
        if (unlikely(err))
                goto err_stop_phy;

        err = gmac_setup_txqs(dev);
        if (unlikely(err)) {
                gmac_cleanup_rxq(dev);
                goto err_stop_phy;
        }

        napi_enable(&gmac->napi);

        gmac_start_dma(gmac);
        gmac_enable_irq(dev, 1);
        gmac_enable_tx_rx(dev);
        netif_tx_start_all_queues(dev);

        hrtimer_init(&gmac->rx_coalesce_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);
        gmac->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
        return 0;

err_stop_phy:
        phy_stop(dev->phydev);
        free_irq(dev->irq, dev);
        return err;
}

static int gmac_stop(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);

        hrtimer_cancel(&gmac->rx_coalesce_timer);
        netif_tx_stop_all_queues(dev);
        gmac_disable_tx_rx(dev);
        gmac_stop_dma(gmac);
        napi_disable(&gmac->napi);

        gmac_enable_irq(dev, 0);
        gmac_cleanup_rxq(dev);
        gmac_cleanup_txqs(dev);

        phy_stop(dev->phydev);
        free_irq(dev->irq, dev);

        gmac_update_hw_stats(dev);
        return 0;
}

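/*
 * Multicast filtering uses a 64-bit hash: the low six bits of the
 * bit-inverted CRC-32 of the station address select one bit in the
 * MCAST_FIL0/FIL1 register pair.  Promiscuous mode bypasses the filter
 * entirely (and also accepts error frames).
 */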
static void gmac_set_rx_mode(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct netdev_hw_addr *ha;
        __u32 mc_filter[2];
        unsigned int bit_nr;
        GMAC_RX_FLTR_T filter = { .bits = {
                .broadcast = 1,
                .multicast = 1,
                .unicast = 1,
        } };

        mc_filter[1] = mc_filter[0] = 0;

        if (dev->flags & IFF_PROMISC) {
                filter.bits.error = 1;
                filter.bits.promiscuous = 1;
        } else if (!(dev->flags & IFF_ALLMULTI)) {
                netdev_for_each_mc_addr(ha, dev) {
                        bit_nr = ~crc32_le(~0, ha->addr, ETH_ALEN) & 0x3f;
                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 0x1f);
                }
        }

        writel(mc_filter[0], gmac->ctl_iomem + GMAC_MCAST_FIL0);
        writel(mc_filter[1], gmac->ctl_iomem + GMAC_MCAST_FIL1);
        writel(filter.bits32, gmac->ctl_iomem + GMAC_RX_FLTR);
}

static void __gmac_set_mac_address(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);
        __le32 addr[3];

        memset(addr, 0, sizeof(addr));
        memcpy(addr, dev->dev_addr, ETH_ALEN);

        writel(le32_to_cpu(addr[0]), gmac->ctl_iomem + GMAC_STA_ADD0);
        writel(le32_to_cpu(addr[1]), gmac->ctl_iomem + GMAC_STA_ADD1);
        writel(le32_to_cpu(addr[2]), gmac->ctl_iomem + GMAC_STA_ADD2);
}

static int gmac_set_mac_address(struct net_device *dev, void *addr)
{
        struct sockaddr *sa = addr;

        memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
        __gmac_set_mac_address(dev);

        return 0;
}

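/*
 * The hardware MIB counters appear to be clear-on-read, so simply
 * reading them (and discarding the values) resets them to zero.
 */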
static void gmac_clear_hw_stats(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);

        readl(gmac->ctl_iomem + GMAC_IN_DISCARDS);
        readl(gmac->ctl_iomem + GMAC_IN_ERRORS);
        readl(gmac->ctl_iomem + GMAC_IN_MCAST);
        readl(gmac->ctl_iomem + GMAC_IN_BCAST);
        readl(gmac->ctl_iomem + GMAC_IN_MAC1);
        readl(gmac->ctl_iomem + GMAC_IN_MAC2);
}

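/*
 * Software stats are kept in three u64_stats_sync domains (RX NAPI,
 * interrupt, and xmit contexts).  Each fetch loop below retries until
 * it observes a consistent snapshot against the matching writer.
 */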
static struct rtnl_link_stats64 *gmac_get_stats64(struct net_device *dev,
        struct rtnl_link_stats64 *storage)
{
        struct gmac_private *gmac = netdev_priv(dev);
        unsigned int start;

        gmac_update_hw_stats(dev);

        /* racing with RX NAPI */
        do {
                start = u64_stats_fetch_begin(&gmac->rx_stats_syncp);

                storage->rx_packets = gmac->stats.rx_packets;
                storage->rx_bytes = gmac->stats.rx_bytes;
                storage->rx_errors = gmac->stats.rx_errors;
                storage->rx_dropped = gmac->stats.rx_dropped;

                storage->rx_length_errors = gmac->stats.rx_length_errors;
                storage->rx_over_errors = gmac->stats.rx_over_errors;
                storage->rx_crc_errors = gmac->stats.rx_crc_errors;
                storage->rx_frame_errors = gmac->stats.rx_frame_errors;

        } while (u64_stats_fetch_retry(&gmac->rx_stats_syncp, start));

        /* racing with MIB and TX completion interrupts */
        do {
                start = u64_stats_fetch_begin(&gmac->ir_stats_syncp);

                storage->tx_errors = gmac->stats.tx_errors;
                storage->tx_packets = gmac->stats.tx_packets;
                storage->tx_bytes = gmac->stats.tx_bytes;

                storage->multicast = gmac->stats.multicast;
                storage->rx_missed_errors = gmac->stats.rx_missed_errors;
                storage->rx_fifo_errors = gmac->stats.rx_fifo_errors;

        } while (u64_stats_fetch_retry(&gmac->ir_stats_syncp, start));

        /* racing with hard_start_xmit */
        do {
                start = u64_stats_fetch_begin(&gmac->tx_stats_syncp);

                storage->tx_dropped = gmac->stats.tx_dropped;

        } while (u64_stats_fetch_retry(&gmac->tx_stats_syncp, start));

        storage->rx_dropped += storage->rx_missed_errors;

        return storage;
}

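/*
 * Changing the MTU reprograms the MAXLEN field of CONFIG0 and lets the
 * core re-evaluate offloads through gmac_fix_features(), which drops
 * checksum/TSO features for frame sizes the hardware cannot handle.
 * E.g. (illustration only) "ip link set dev eth0 mtu 1492" lands here.
 */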
static int gmac_change_mtu(struct net_device *dev, int new_mtu)
{
        int max_len = gmac_pick_rx_max_len(new_mtu);

        if (max_len < 0)
                return -EINVAL;

        gmac_disable_tx_rx(dev);

        dev->mtu = new_mtu;
        gmac_update_config0_reg(dev,
                max_len << CONFIG0_MAXLEN_SHIFT,
                CONFIG0_MAXLEN_MASK);

        netdev_update_features(dev);

        gmac_enable_tx_rx(dev);

        return 0;
}

static netdev_features_t gmac_fix_features(struct net_device *dev,
        netdev_features_t features)
{
        if (dev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
                features &= ~GMAC_OFFLOAD_FEATURES;

        return features;
}

static int gmac_set_features(struct net_device *dev, netdev_features_t features)
{
        struct gmac_private *gmac = netdev_priv(dev);
        int enable = features & NETIF_F_RXCSUM;
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&gmac->config_lock, flags);

        reg = readl(gmac->ctl_iomem + GMAC_CONFIG0);
        reg = enable ? reg | CONFIG0_RX_CHKSUM : reg & ~CONFIG0_RX_CHKSUM;
        writel(reg, gmac->ctl_iomem + GMAC_CONFIG0);

        spin_unlock_irqrestore(&gmac->config_lock, flags);
        return 0;
}

static int gmac_get_sset_count(struct net_device *dev, int sset)
{
        return sset == ETH_SS_STATS ? GMAC_STATS_NUM : 0;
}

static void gmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset != ETH_SS_STATS)
                return;

        memcpy(data, gmac_stats_strings, sizeof(gmac_stats_strings));
}

static void gmac_get_ethtool_stats(struct net_device *dev,
        struct ethtool_stats *estats, u64 *values)
{
        struct gmac_private *gmac = netdev_priv(dev);
        unsigned int start;
        u64 *p;
        int i;

        gmac_update_hw_stats(dev);

        /* racing with MIB interrupt */
        do {
                p = values;
                start = u64_stats_fetch_begin(&gmac->ir_stats_syncp);

                for (i = 0; i < RX_STATS_NUM; ++i)
                        *p++ = gmac->hw_stats[i];

        } while (u64_stats_fetch_retry(&gmac->ir_stats_syncp, start));
        values = p;

        /* racing with RX NAPI */
        do {
                p = values;
                start = u64_stats_fetch_begin(&gmac->rx_stats_syncp);

                for (i = 0; i < RX_STATUS_NUM; ++i)
                        *p++ = gmac->rx_stats[i];
                for (i = 0; i < RX_CHKSUM_NUM; ++i)
                        *p++ = gmac->rx_csum_stats[i];
                *p++ = gmac->rx_napi_exits;

        } while (u64_stats_fetch_retry(&gmac->rx_stats_syncp, start));
        values = p;

        /* racing with TX start_xmit */
        do {
                /* write through the scratch cursor so that a retry
                 * rewinds to the start of this block instead of
                 * advancing "values" past the caller's buffer
                 */
                p = values;
                start = u64_stats_fetch_begin(&gmac->tx_stats_syncp);

                for (i = 0; i < TX_MAX_FRAGS; ++i) {
                        *p++ = gmac->tx_frag_stats[i];
                        gmac->tx_frag_stats[i] = 0;
                }
                *p++ = gmac->tx_frags_linearized;
                *p++ = gmac->tx_hw_csummed;

        } while (u64_stats_fetch_retry(&gmac->tx_stats_syncp, start));
}

static int gmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        if (!dev->phydev)
                return -ENXIO;
        return phy_ethtool_gset(dev->phydev, cmd);
}

static int gmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        if (!dev->phydev)
                return -ENXIO;
        return phy_ethtool_sset(dev->phydev, cmd);
}

static int gmac_nway_reset(struct net_device *dev)
{
        if (!dev->phydev)
                return -ENXIO;
        return phy_start_aneg(dev->phydev);
}

static void gmac_get_pauseparam(struct net_device *dev,
        struct ethtool_pauseparam *pparam)
{
        struct gmac_private *gmac = netdev_priv(dev);
        GMAC_CONFIG0_T config0;

        config0.bits32 = readl(gmac->ctl_iomem + GMAC_CONFIG0);

        pparam->rx_pause = config0.bits.rx_fc_en;
        pparam->tx_pause = config0.bits.tx_fc_en;
        pparam->autoneg = true;
}

static void gmac_get_ringparam(struct net_device *dev,
        struct ethtool_ringparam *rp)
{
        struct gmac_private *gmac = netdev_priv(dev);
        GMAC_CONFIG0_T config0;

        config0.bits32 = readl(gmac->ctl_iomem + GMAC_CONFIG0);

        rp->rx_max_pending = 1 << 15;
        rp->rx_mini_max_pending = 0;
        rp->rx_jumbo_max_pending = 0;
        rp->tx_max_pending = 1 << 15;

        rp->rx_pending = 1 << gmac->rxq_order;
        rp->rx_mini_pending = 0;
        rp->rx_jumbo_pending = 0;
        rp->tx_pending = 1 << gmac->txq_order;
}

static int toe_resize_freeq(struct toe_private *toe, int changing_dev_id);

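/*
 * Ring sizes are kept as powers of two (rxq_order/txq_order), so a
 * requested size is rounded up to the next power of two, capped at
 * 2^15 entries.  Illustrative use from userspace (device name assumed):
 *
 *   ethtool -G eth0 rx 512 tx 256
 *
 * The interface must be down, otherwise -EBUSY is returned.
 */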
static int gmac_set_ringparam(struct net_device *dev,
        struct ethtool_ringparam *rp)
{
        struct gmac_private *gmac = netdev_priv(dev);
        struct toe_private *toe = gmac->toe;
        int err = 0;

        if (netif_running(dev))
                return -EBUSY;

        if (rp->rx_pending) {
                gmac->rxq_order = min(15, ilog2(rp->rx_pending - 1) + 1);
                err = toe_resize_freeq(toe, dev->dev_id);
        }

        if (rp->tx_pending) {
                gmac->txq_order = min(15, ilog2(rp->tx_pending - 1) + 1);
                gmac->irq_every_tx_packets = 1 << (gmac->txq_order - 2);
        }

        return err;
}

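/*
 * Interrupt coalescing knobs: tx-frames sets how many descriptors are
 * sent between TX completion interrupts (irq_every_tx_packets), while
 * rx-usecs (stored internally in nanoseconds) arms the hrtimer used in
 * gmac_irq().  Illustrative use (device name assumed):
 *
 *   ethtool -C eth0 rx-usecs 100 tx-frames 64
 */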
static int gmac_get_coalesce(struct net_device *dev,
        struct ethtool_coalesce *ecmd)
{
        struct gmac_private *gmac = netdev_priv(dev);

        ecmd->rx_max_coalesced_frames = 1;
        ecmd->tx_max_coalesced_frames = gmac->irq_every_tx_packets;
        ecmd->rx_coalesce_usecs = gmac->rx_coalesce_nsecs / 1000;

        return 0;
}

static int gmac_set_coalesce(struct net_device *dev,
        struct ethtool_coalesce *ecmd)
{
        struct gmac_private *gmac = netdev_priv(dev);

        if (ecmd->tx_max_coalesced_frames < 1)
                return -EINVAL;
        if (ecmd->tx_max_coalesced_frames >= 1 << gmac->txq_order)
                return -EINVAL;

        gmac->irq_every_tx_packets = ecmd->tx_max_coalesced_frames;
        gmac->rx_coalesce_nsecs = ecmd->rx_coalesce_usecs * 1000;

        return 0;
}

static u32 gmac_get_msglevel(struct net_device *dev)
{
        struct gmac_private *gmac = netdev_priv(dev);

        return gmac->msg_enable;
}

static void gmac_set_msglevel(struct net_device *dev, u32 level)
{
        struct gmac_private *gmac = netdev_priv(dev);

        gmac->msg_enable = level;
}

static void gmac_get_drvinfo(struct net_device *dev,
        struct ethtool_drvinfo *info)
{
        strcpy(info->driver,  DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, dev->dev_id ? "1" : "0");
}

static const struct net_device_ops gmac_351x_ops = {
        .ndo_init               = gmac_init,
        .ndo_uninit             = gmac_uninit,
        .ndo_open               = gmac_open,
        .ndo_stop               = gmac_stop,
        .ndo_start_xmit         = gmac_start_xmit,
        .ndo_tx_timeout         = gmac_tx_timeout,
        .ndo_set_rx_mode        = gmac_set_rx_mode,
        .ndo_set_mac_address    = gmac_set_mac_address,
        .ndo_get_stats64        = gmac_get_stats64,
        .ndo_change_mtu         = gmac_change_mtu,
        .ndo_fix_features       = gmac_fix_features,
        .ndo_set_features       = gmac_set_features,
};

static const struct ethtool_ops gmac_351x_ethtool_ops = {
        .get_sset_count = gmac_get_sset_count,
        .get_strings    = gmac_get_strings,
        .get_ethtool_stats = gmac_get_ethtool_stats,
        .get_settings   = gmac_get_settings,
        .set_settings   = gmac_set_settings,
        .get_link       = ethtool_op_get_link,
        .nway_reset     = gmac_nway_reset,
        .get_pauseparam = gmac_get_pauseparam,
        .get_ringparam  = gmac_get_ringparam,
        .set_ringparam  = gmac_set_ringparam,
        .get_coalesce   = gmac_get_coalesce,
        .set_coalesce   = gmac_set_coalesce,
        .get_msglevel   = gmac_get_msglevel,
        .set_msglevel   = gmac_set_msglevel,
        .get_drvinfo    = gmac_get_drvinfo,
};

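/*
 * One net_device is created per populated GMAC port (pdata->bus_id[]
 * non-zero).  The MAC address saved from the boot loader is used when
 * valid, otherwise a random one is generated.
 */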
static int gmac_init_netdev(struct toe_private *toe, int num,
        struct platform_device *pdev)
{
        struct gemini_gmac_platform_data *pdata = pdev->dev.platform_data;
        struct gmac_private *gmac;
        struct net_device *dev;
        int irq, err;

        if (!pdata->bus_id[num])
                return 0;

        irq = platform_get_irq(pdev, num);
        if (irq < 0) {
                dev_err(toe->dev, "No IRQ for ethernet device #%d\n", num);
                return irq;
        }

        dev = alloc_etherdev_mq(sizeof(*gmac), TX_QUEUE_NUM);
        if (!dev) {
                dev_err(toe->dev, "Can't allocate ethernet device #%d\n", num);
                return -ENOMEM;
        }

        gmac = netdev_priv(dev);
        gmac->num = num;
        gmac->toe = toe;
        SET_NETDEV_DEV(dev, toe->dev);

        toe->netdev[num] = dev;
        dev->dev_id = num;

        gmac->ctl_iomem = toe->iomem + TOE_GMAC_BASE(num);
        gmac->dma_iomem = toe->iomem + TOE_GMAC_DMA_BASE(num);
        dev->irq = irq;

        dev->netdev_ops = &gmac_351x_ops;
        dev->ethtool_ops = &gmac_351x_ethtool_ops;

        spin_lock_init(&gmac->config_lock);
        gmac_clear_hw_stats(dev);

        dev->hw_features = GMAC_OFFLOAD_FEATURES;
        dev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;

        gmac->freeq_refill = 0;
        netif_napi_add(dev, &gmac->napi, gmac_napi_poll, DEFAULT_NAPI_WEIGHT);

        if (is_valid_ether_addr((void *)toe->mac_addr[num]))
                memcpy(dev->dev_addr, toe->mac_addr[num], ETH_ALEN);
        else
                random_ether_addr(dev->dev_addr);
        __gmac_set_mac_address(dev);

        err = gmac_setup_phy(dev);
        if (err)
                netif_warn(gmac, probe, dev,
                        "PHY init failed: %d, deferring to ifup time\n", err);

        err = register_netdev(dev);
        if (err) {
                toe->netdev[num] = NULL;
                free_netdev(dev);
                return err;
        }

        pr_info(DRV_NAME " %s: irq %d, dma base 0x%p, io base 0x%p\n",
                dev->name, irq, gmac->dma_iomem, gmac->ctl_iomem);
        return 0;
}

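/*
 * Free-queue refill uses a threaded IRQ: the hard handler (toe_irq)
 * only masks the "free queue empty" interrupt and wakes this thread,
 * which refills the queue with fresh buffers and then acks and
 * re-enables the interrupt under irq_lock.
 */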
static irqreturn_t toe_irq_thread(int irq, void *data)
{
        struct toe_private *toe = data;
        void __iomem *irqen_reg = toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG;
        void __iomem *irqif_reg = toe->iomem + GLOBAL_INTERRUPT_STATUS_4_REG;
        unsigned long irqmask = SWFQ_EMPTY_INT_BIT;
        unsigned long flags;

        toe_fill_freeq(toe, 0);

        /* Ack and enable interrupt */
        spin_lock_irqsave(&toe->irq_lock, flags);
        writel(irqmask, irqif_reg);
        irqmask |= readl(irqen_reg);
        writel(irqmask, irqen_reg);
        spin_unlock_irqrestore(&toe->irq_lock, flags);

        return IRQ_HANDLED;
}

static irqreturn_t toe_irq(int irq, void *data)
{
        struct toe_private *toe = data;
        void __iomem *irqif_reg = toe->iomem + GLOBAL_INTERRUPT_STATUS_4_REG;
        void __iomem *irqen_reg = toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG;
        unsigned long val, en;
        irqreturn_t ret = IRQ_NONE;

        spin_lock(&toe->irq_lock);

        val = readl(irqif_reg);
        en = readl(irqen_reg);

        if (val & en & SWFQ_EMPTY_INT_BIT) {
                en &= ~(SWFQ_EMPTY_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT
                                           | GMAC1_RX_OVERRUN_INT_BIT);
                writel(en, irqen_reg);
                ret = IRQ_WAKE_THREAD;
        }

        spin_unlock(&toe->irq_lock);
        return ret;
}

static int toe_init(struct toe_private *toe,
        struct platform_device *pdev)
{
        int err;

        writel(0, toe->iomem + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
        writel(0, toe->iomem + GLOBAL_HW_FREEQ_BASE_SIZE_REG);
        writel(0, toe->iomem + GLOBAL_SWFQ_RWPTR_REG);
        writel(0, toe->iomem + GLOBAL_HWFQ_RWPTR_REG);

        toe->freeq_frag_order = DEFAULT_RX_BUF_ORDER;
        toe->freeq_order = ~0;

        err = request_threaded_irq(toe->irq, toe_irq,
                toe_irq_thread, IRQF_SHARED, DRV_NAME " toe", toe);
        if (err)
                goto err_freeq;

        return 0;

err_freeq:
        toe_cleanup_freeq(toe);
        return err;
}

static void toe_deinit(struct toe_private *toe)
{
        free_irq(toe->irq, toe);
        toe_cleanup_freeq(toe);
}

static int toe_reset(struct toe_private *toe)
{
        unsigned int reg = 0, retry = 5;

        reg = readl((void __iomem *)(IO_ADDRESS(GEMINI_GLOBAL_BASE) +
                GLOBAL_RESET));
        reg |= RESET_GMAC1 | RESET_GMAC0;
        writel(reg, (void __iomem *)(IO_ADDRESS(GEMINI_GLOBAL_BASE) +
                GLOBAL_RESET));

        do {
                udelay(2);
                reg = readl(toe->iomem + GLOBAL_TOE_VERSION_REG);
                barrier();
        } while (!reg && --retry);

        return reg ? 0 : -EIO;
}

/*
 * Interrupt config:
 *
 *      GMAC0 intr bits ------> int0 ----> eth0
 *      GMAC1 intr bits ------> int1 ----> eth1
 *      TOE intr -------------> int1 ----> eth1
 *      Classification Intr --> int0 ----> eth0
 *      Default Q0 -----------> int0 ----> eth0
 *      Default Q1 -----------> int1 ----> eth1
 *      FreeQ intr -----------> int1 ----> eth1
 */
static void toe_init_irq(struct toe_private *toe)
{
        writel(0, toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG);
        writel(0, toe->iomem + GLOBAL_INTERRUPT_ENABLE_1_REG);
        writel(0, toe->iomem + GLOBAL_INTERRUPT_ENABLE_2_REG);
        writel(0, toe->iomem + GLOBAL_INTERRUPT_ENABLE_3_REG);
        writel(0, toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG);

        writel(0xCCFC0FC0, toe->iomem + GLOBAL_INTERRUPT_SELECT_0_REG);
        writel(0x00F00002, toe->iomem + GLOBAL_INTERRUPT_SELECT_1_REG);
        writel(0xFFFFFFFF, toe->iomem + GLOBAL_INTERRUPT_SELECT_2_REG);
        writel(0xFFFFFFFF, toe->iomem + GLOBAL_INTERRUPT_SELECT_3_REG);
        writel(0xFF000003, toe->iomem + GLOBAL_INTERRUPT_SELECT_4_REG);

        /* ack any latched edge-triggered interrupts before unmasking */
        writel(~0, toe->iomem + GLOBAL_INTERRUPT_STATUS_0_REG);
        writel(~0, toe->iomem + GLOBAL_INTERRUPT_STATUS_1_REG);
        writel(~0, toe->iomem + GLOBAL_INTERRUPT_STATUS_2_REG);
        writel(~0, toe->iomem + GLOBAL_INTERRUPT_STATUS_3_REG);
        writel(~0, toe->iomem + GLOBAL_INTERRUPT_STATUS_4_REG);
}

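/*
 * The boot loader typically programs the station address registers
 * before Linux starts; capture them here, since the subsequent
 * toe_reset() in the probe path resets the GMAC blocks.
 */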
static void toe_save_mac_addr(struct toe_private *toe,
                        struct platform_device *pdev)
{
        struct gemini_gmac_platform_data *pdata = pdev->dev.platform_data;
        void __iomem *ctl;
        int i;

        for (i = 0; i < 2; i++) {
                if (pdata->bus_id[i]) {
                        ctl = toe->iomem + TOE_GMAC_BASE(i);
                        toe->mac_addr[i][0] = cpu_to_le32(readl(ctl + GMAC_STA_ADD0));
                        toe->mac_addr[i][1] = cpu_to_le32(readl(ctl + GMAC_STA_ADD1));
                        toe->mac_addr[i][2] = cpu_to_le32(readl(ctl + GMAC_STA_ADD2));
                }
        }
}

static int gemini_gmac_probe(struct platform_device *pdev)
{
        struct resource *res;
        struct toe_private *toe;
        int irq, retval;

        if (!pdev->dev.platform_data)
                return -EINVAL;

        irq = platform_get_irq(pdev, 1);
        if (irq < 0)
                return irq;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "can't get device resources\n");
                return -ENODEV;
        }

        toe = kzalloc(sizeof(*toe), GFP_KERNEL);
        if (!toe)
                return -ENOMEM;

        platform_set_drvdata(pdev, toe);
        toe->dev = &pdev->dev;
        toe->irq = irq;

        toe->iomem = ioremap(res->start, resource_size(res));
        if (!toe->iomem) {
                dev_err(toe->dev, "ioremap failed\n");
                retval = -EIO;
                goto err_data;
        }

        toe_save_mac_addr(toe, pdev);

        retval = toe_reset(toe);
        if (retval < 0)
                goto err_unmap;

        pr_info(DRV_NAME " toe: irq %d, io base %pa, version %d\n",
                irq, &res->start,
                readl(toe->iomem + GLOBAL_TOE_VERSION_REG));

        spin_lock_init(&toe->irq_lock);
        spin_lock_init(&toe->freeq_lock);

        toe_init_irq(toe);

        retval = toe_init(toe, pdev);
        if (retval)
                goto err_unmap;

        retval = gmac_init_netdev(toe, 0, pdev);
        if (retval)
                goto err_uninit;

        retval = gmac_init_netdev(toe, 1, pdev);
        if (retval)
                goto err_uninit;

        return 0;

err_uninit:
        if (toe->netdev[0])
                unregister_netdev(toe->netdev[0]);
        toe_deinit(toe);
err_unmap:
        iounmap(toe->iomem);
err_data:
        kfree(toe);
        return retval;
}

static int gemini_gmac_remove(struct platform_device *pdev)
{
        struct toe_private *toe = platform_get_drvdata(pdev);
        int i;

        for (i = 0; i < 2; i++)
                if (toe->netdev[i])
                        unregister_netdev(toe->netdev[i]);

        toe_init_irq(toe);
        toe_deinit(toe);

        iounmap(toe->iomem);
        kfree(toe);

        return 0;
}

static struct platform_driver gemini_gmac_driver = {
        .probe          = gemini_gmac_probe,
        .remove         = gemini_gmac_remove,
        .driver.name    = DRV_NAME,
        .driver.owner   = THIS_MODULE,
};

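/*
 * If the GPIO MDIO bus driver is built as a module, load it before
 * registering the platform driver so that PHY probing in
 * gmac_init_netdev() can find the bus right away instead of deferring
 * to ifup time.
 */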
static int __init gemini_gmac_init(void)
{
#ifdef CONFIG_MDIO_GPIO_MODULE
        request_module("mdio-gpio");
#endif
        return platform_driver_register(&gemini_gmac_driver);
}

static void __exit gemini_gmac_exit(void)
{
        platform_driver_unregister(&gemini_gmac_driver);
}

module_init(gemini_gmac_init);
module_exit(gemini_gmac_exit);