1 /* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
10 * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name>
12 * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com>
15 #include <linux/module.h>
16 #include <linux/kernel.h>
17 #include <linux/types.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/init.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
22 #include <linux/ethtool.h>
23 #include <linux/platform_device.h>
24 #include <linux/of_device.h>
25 #include <linux/clk.h>
26 #include <linux/of_net.h>
27 #include <linux/of_mdio.h>
28 #include <linux/if_vlan.h>
29 #include <linux/reset.h>
30 #include <linux/tcp.h>
32 #include <linux/bug.h>
34 #include <asm/mach-ralink/ralink_regs.h>
36 #include "mtk_eth_soc.h"
40 #define MAX_RX_LENGTH 1536
41 #define FE_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
42 #define FE_RX_HLEN (NET_SKB_PAD + FE_RX_ETH_HLEN + NET_IP_ALIGN)
43 #define DMA_DUMMY_DESC 0xffffffff
44 #define FE_DEFAULT_MSG_ENABLE \
54 #define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
55 #define TX_DMA_DESP4_DEF (TX_DMA_QN(3) | TX_DMA_PN(1))
56 #define NEXT_TX_DESP_IDX(X) (((X) + 1) & (ring->tx_ring_size - 1))
57 #define NEXT_RX_DESP_IDX(X) (((X) + 1) & (ring->rx_ring_size - 1))
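/* The NEXT_*_DESP_IDX() macros above wrap the ring index with a bitwise
 * AND against (ring size - 1), which only works when the ring sizes are
 * powers of two; e.g. with tx_ring_size == 256, NEXT_TX_DESP_IDX(255)
 * evaluates to 0.
 */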
59 #define SYSC_REG_RSTCTRL 0x34
61 static int fe_msg_level = -1;
62 module_param_named(msg_level, fe_msg_level, int, 0);
63 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
65 static const u16 fe_reg_table_default[FE_REG_COUNT] = {
66 [FE_REG_PDMA_GLO_CFG] = FE_PDMA_GLO_CFG,
67 [FE_REG_PDMA_RST_CFG] = FE_PDMA_RST_CFG,
68 [FE_REG_DLY_INT_CFG] = FE_DLY_INT_CFG,
69 [FE_REG_TX_BASE_PTR0] = FE_TX_BASE_PTR0,
70 [FE_REG_TX_MAX_CNT0] = FE_TX_MAX_CNT0,
71 [FE_REG_TX_CTX_IDX0] = FE_TX_CTX_IDX0,
72 [FE_REG_TX_DTX_IDX0] = FE_TX_DTX_IDX0,
73 [FE_REG_RX_BASE_PTR0] = FE_RX_BASE_PTR0,
74 [FE_REG_RX_MAX_CNT0] = FE_RX_MAX_CNT0,
75 [FE_REG_RX_CALC_IDX0] = FE_RX_CALC_IDX0,
76 [FE_REG_RX_DRX_IDX0] = FE_RX_DRX_IDX0,
77 [FE_REG_FE_INT_ENABLE] = FE_FE_INT_ENABLE,
78 [FE_REG_FE_INT_STATUS] = FE_FE_INT_STATUS,
79 [FE_REG_FE_DMA_VID_BASE] = FE_DMA_VID0,
80 [FE_REG_FE_COUNTER_BASE] = FE_GDMA1_TX_GBCNT,
81 [FE_REG_FE_RST_GL] = FE_FE_RST_GL,
84 static const u16 *fe_reg_table = fe_reg_table_default;
88 void (*action)(struct fe_priv *);
91 static void __iomem *fe_base;
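/* fe_w32()/fe_r32() below access the frame engine MMIO window at fe_base
 * directly, while fe_reg_w32()/fe_reg_r32() translate a logical register
 * id through fe_reg_table first.  The table defaults to
 * fe_reg_table_default and can be replaced by per-SoC data (see
 * fe_probe()), so the same code paths work on SoCs with different
 * register layouts.
 */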
93 void fe_w32(u32 val, unsigned reg)
95 __raw_writel(val, fe_base + reg);
98 u32 fe_r32(unsigned reg)
100 return __raw_readl(fe_base + reg);
103 void fe_reg_w32(u32 val, enum fe_reg reg)
105 fe_w32(val, fe_reg_table[reg]);
108 u32 fe_reg_r32(enum fe_reg reg)
110 return fe_r32(fe_reg_table[reg]);
113 void fe_reset(u32 reset_bits)
117 t = rt_sysc_r32(SYSC_REG_RSTCTRL);
119 rt_sysc_w32(t, SYSC_REG_RSTCTRL);
120 usleep_range(10, 20);
123 rt_sysc_w32(t, SYSC_REG_RSTCTRL);
124 usleep_range(10, 20);
127 static inline void fe_int_disable(u32 mask)
129 fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) & ~mask,
130 FE_REG_FE_INT_ENABLE);
132 fe_reg_r32(FE_REG_FE_INT_ENABLE);
135 static inline void fe_int_enable(u32 mask)
137 fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) | mask,
138 FE_REG_FE_INT_ENABLE);
140 fe_reg_r32(FE_REG_FE_INT_ENABLE);
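/* Both interrupt mask helpers above read FE_REG_FE_INT_ENABLE back after
 * updating it; the dummy read flushes the posted MMIO write before the
 * caller continues.
 */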
143 static inline void fe_hw_set_macaddr(struct fe_priv *priv, unsigned char *mac)
147 spin_lock_irqsave(&priv->page_lock, flags);
148 fe_w32((mac[0] << 8) | mac[1], FE_GDMA1_MAC_ADRH);
149 fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
151 spin_unlock_irqrestore(&priv->page_lock, flags);
154 static int fe_set_mac_address(struct net_device *dev, void *p)
156 int ret = eth_mac_addr(dev, p);
159 struct fe_priv *priv = netdev_priv(dev);
161 if (priv->soc->set_mac)
162 priv->soc->set_mac(priv, dev->dev_addr);
164 fe_hw_set_macaddr(priv, dev->dev_addr);
170 static inline int fe_max_frag_size(int mtu)
172 /* make sure buf_size will be at least MAX_RX_LENGTH */
173 if (mtu + FE_RX_ETH_HLEN < MAX_RX_LENGTH)
174 mtu = MAX_RX_LENGTH - FE_RX_ETH_HLEN;
176 return SKB_DATA_ALIGN(FE_RX_HLEN + mtu) +
177 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
180 static inline int fe_max_buf_size(int frag_size)
182 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
183 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
185 BUG_ON(buf_size < MAX_RX_LENGTH);
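/* fe_max_frag_size()/fe_max_buf_size() size the rx fragments so that the
 * data area holds at least MAX_RX_LENGTH bytes while leaving room for
 * NET_SKB_PAD/NET_IP_ALIGN and the skb_shared_info that build_skb()
 * expects at the end of the buffer (see fe_poll_rx()).
 */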
189 static inline void fe_get_rxd(struct fe_rx_dma *rxd, struct fe_rx_dma *dma_rxd)
191 rxd->rxd1 = dma_rxd->rxd1;
192 rxd->rxd2 = dma_rxd->rxd2;
193 rxd->rxd3 = dma_rxd->rxd3;
194 rxd->rxd4 = dma_rxd->rxd4;
197 static inline void fe_set_txd(struct fe_tx_dma *txd, struct fe_tx_dma *dma_txd)
199 dma_txd->txd1 = txd->txd1;
200 dma_txd->txd3 = txd->txd3;
201 dma_txd->txd4 = txd->txd4;
202 /* clear the DMA done flag last */
203 dma_txd->txd2 = txd->txd2;
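/* txd2 holds the TX_DMA_DONE flag (see TX_DMA_DESP2_DEF), so fe_set_txd()
 * copies it last: the descriptor should not look ready to the DMA engine
 * before the address and length words are in place.
 */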
206 static void fe_clean_rx(struct fe_priv *priv)
209 struct fe_rx_ring *ring = &priv->rx_ring;
212 for (i = 0; i < ring->rx_ring_size; i++)
213 if (ring->rx_data[i]) {
214 if (ring->rx_dma && ring->rx_dma[i].rxd1)
215 dma_unmap_single(&priv->netdev->dev,
216 ring->rx_dma[i].rxd1,
219 put_page(virt_to_head_page(ring->rx_data[i]));
222 kfree(ring->rx_data);
223 ring->rx_data = NULL;
227 dma_free_coherent(&priv->netdev->dev,
228 ring->rx_ring_size * sizeof(*ring->rx_dma),
235 static int fe_alloc_rx(struct fe_priv *priv)
237 struct net_device *netdev = priv->netdev;
238 struct fe_rx_ring *ring = &priv->rx_ring;
241 ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
246 for (i = 0; i < ring->rx_ring_size; i++) {
247 ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
248 if (!ring->rx_data[i])
252 ring->rx_dma = dma_alloc_coherent(&netdev->dev,
253 ring->rx_ring_size * sizeof(*ring->rx_dma),
255 GFP_ATOMIC | __GFP_ZERO);
259 if (priv->flags & FE_FLAG_RX_2B_OFFSET)
263 for (i = 0; i < ring->rx_ring_size; i++) {
264 dma_addr_t dma_addr = dma_map_single(&netdev->dev,
265 ring->rx_data[i] + NET_SKB_PAD + pad,
268 if (unlikely(dma_mapping_error(&netdev->dev, dma_addr)))
270 ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
272 if (priv->flags & FE_FLAG_RX_SG_DMA)
273 ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
275 ring->rx_dma[i].rxd2 = RX_DMA_LSO;
277 ring->rx_calc_idx = ring->rx_ring_size - 1;
278 /* make sure that all changes to the dma ring are flushed before we continue */
283 fe_reg_w32(ring->rx_phys, FE_REG_RX_BASE_PTR0);
284 fe_reg_w32(ring->rx_ring_size, FE_REG_RX_MAX_CNT0);
285 fe_reg_w32(ring->rx_calc_idx, FE_REG_RX_CALC_IDX0);
286 fe_reg_w32(FE_PST_DRX_IDX0, FE_REG_PDMA_RST_CFG);
294 static void fe_txd_unmap(struct device *dev, struct fe_tx_buf *tx_buf)
296 if (tx_buf->flags & FE_TX_FLAGS_SINGLE0) {
297 dma_unmap_single(dev,
298 dma_unmap_addr(tx_buf, dma_addr0),
299 dma_unmap_len(tx_buf, dma_len0),
301 } else if (tx_buf->flags & FE_TX_FLAGS_PAGE0) {
303 dma_unmap_addr(tx_buf, dma_addr0),
304 dma_unmap_len(tx_buf, dma_len0),
307 if (tx_buf->flags & FE_TX_FLAGS_PAGE1)
309 dma_unmap_addr(tx_buf, dma_addr1),
310 dma_unmap_len(tx_buf, dma_len1),
314 if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
315 dev_kfree_skb_any(tx_buf->skb);
319 static void fe_clean_tx(struct fe_priv *priv)
322 struct device *dev = &priv->netdev->dev;
323 struct fe_tx_ring *ring = &priv->tx_ring;
326 for (i = 0; i < ring->tx_ring_size; i++)
327 fe_txd_unmap(dev, &ring->tx_buf[i]);
333 dma_free_coherent(dev,
334 ring->tx_ring_size * sizeof(*ring->tx_dma),
340 netdev_reset_queue(priv->netdev);
343 static int fe_alloc_tx(struct fe_priv *priv)
346 struct fe_tx_ring *ring = &priv->tx_ring;
348 ring->tx_free_idx = 0;
349 ring->tx_next_idx = 0;
350 ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
353 ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
358 ring->tx_dma = dma_alloc_coherent(&priv->netdev->dev,
359 ring->tx_ring_size * sizeof(*ring->tx_dma),
361 GFP_ATOMIC | __GFP_ZERO);
365 for (i = 0; i < ring->tx_ring_size; i++) {
366 if (priv->soc->tx_dma)
367 priv->soc->tx_dma(&ring->tx_dma[i]);
368 ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
370 /* make sure that all changes to the dma ring are flushed before we continue */
375 fe_reg_w32(ring->tx_phys, FE_REG_TX_BASE_PTR0);
376 fe_reg_w32(ring->tx_ring_size, FE_REG_TX_MAX_CNT0);
377 fe_reg_w32(0, FE_REG_TX_CTX_IDX0);
378 fe_reg_w32(FE_PST_DTX_IDX0, FE_REG_PDMA_RST_CFG);
386 static int fe_init_dma(struct fe_priv *priv)
390 err = fe_alloc_tx(priv);
394 err = fe_alloc_rx(priv);
401 static void fe_free_dma(struct fe_priv *priv)
407 void fe_stats_update(struct fe_priv *priv)
409 struct fe_hw_stats *hwstats = priv->hw_stats;
410 unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];
413 u64_stats_update_begin(&hwstats->syncp);
415 if (IS_ENABLED(CONFIG_SOC_MT7621)) {
416 hwstats->rx_bytes += fe_r32(base);
417 stats = fe_r32(base + 0x04);
419 hwstats->rx_bytes += (stats << 32);
420 hwstats->rx_packets += fe_r32(base + 0x08);
421 hwstats->rx_overflow += fe_r32(base + 0x10);
422 hwstats->rx_fcs_errors += fe_r32(base + 0x14);
423 hwstats->rx_short_errors += fe_r32(base + 0x18);
424 hwstats->rx_long_errors += fe_r32(base + 0x1c);
425 hwstats->rx_checksum_errors += fe_r32(base + 0x20);
426 hwstats->rx_flow_control_packets += fe_r32(base + 0x24);
427 hwstats->tx_skip += fe_r32(base + 0x28);
428 hwstats->tx_collisions += fe_r32(base + 0x2c);
429 hwstats->tx_bytes += fe_r32(base + 0x30);
430 stats = fe_r32(base + 0x34);
432 hwstats->tx_bytes += (stats << 32);
433 hwstats->tx_packets += fe_r32(base + 0x38);
435 hwstats->tx_bytes += fe_r32(base);
436 hwstats->tx_packets += fe_r32(base + 0x04);
437 hwstats->tx_skip += fe_r32(base + 0x08);
438 hwstats->tx_collisions += fe_r32(base + 0x0c);
439 hwstats->rx_bytes += fe_r32(base + 0x20);
440 hwstats->rx_packets += fe_r32(base + 0x24);
441 hwstats->rx_overflow += fe_r32(base + 0x28);
442 hwstats->rx_fcs_errors += fe_r32(base + 0x2c);
443 hwstats->rx_short_errors += fe_r32(base + 0x30);
444 hwstats->rx_long_errors += fe_r32(base + 0x34);
445 hwstats->rx_checksum_errors += fe_r32(base + 0x38);
446 hwstats->rx_flow_control_packets += fe_r32(base + 0x3c);
449 u64_stats_update_end(&hwstats->syncp);
452 static struct rtnl_link_stats64 *fe_get_stats64(struct net_device *dev,
453 struct rtnl_link_stats64 *storage)
455 struct fe_priv *priv = netdev_priv(dev);
456 struct fe_hw_stats *hwstats = priv->hw_stats;
457 unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];
461 netdev_stats_to_stats64(storage, &dev->stats);
465 if (netif_running(dev) && netif_device_present(dev)) {
466 if (spin_trylock(&hwstats->stats_lock)) {
467 fe_stats_update(priv);
468 spin_unlock(&hwstats->stats_lock);
473 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
474 storage->rx_packets = hwstats->rx_packets;
475 storage->tx_packets = hwstats->tx_packets;
476 storage->rx_bytes = hwstats->rx_bytes;
477 storage->tx_bytes = hwstats->tx_bytes;
478 storage->collisions = hwstats->tx_collisions;
479 storage->rx_length_errors = hwstats->rx_short_errors +
480 hwstats->rx_long_errors;
481 storage->rx_over_errors = hwstats->rx_overflow;
482 storage->rx_crc_errors = hwstats->rx_fcs_errors;
483 storage->rx_errors = hwstats->rx_checksum_errors;
484 storage->tx_aborted_errors = hwstats->tx_skip;
485 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
487 storage->tx_errors = priv->netdev->stats.tx_errors;
488 storage->rx_dropped = priv->netdev->stats.rx_dropped;
489 storage->tx_dropped = priv->netdev->stats.tx_dropped;
494 static int fe_vlan_rx_add_vid(struct net_device *dev,
495 __be16 proto, u16 vid)
497 struct fe_priv *priv = netdev_priv(dev);
498 u32 idx = (vid & 0xf);
501 if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) &&
502 (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
505 if (test_bit(idx, &priv->vlan_map)) {
506 netdev_warn(dev, "disable tx vlan offload\n");
507 dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
508 netdev_update_features(dev);
510 vlan_cfg = fe_r32(fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
514 vlan_cfg |= (vid << 16);
516 vlan_cfg &= 0xffff0000;
519 fe_w32(vlan_cfg, fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
521 set_bit(idx, &priv->vlan_map);
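/* Only the low four VID bits index the hardware VLAN table, so at most 16
 * distinct VIDs can use hardware tag insertion.  When two VIDs collide on
 * the same slot, the code above falls back to software tagging by
 * clearing NETIF_F_HW_VLAN_CTAG_TX.
 */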
527 static int fe_vlan_rx_kill_vid(struct net_device *dev,
528 __be16 proto, u16 vid)
530 struct fe_priv *priv = netdev_priv(dev);
531 u32 idx = (vid & 0xf);
533 if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) &&
534 (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
537 clear_bit(idx, &priv->vlan_map);
542 static inline u32 fe_empty_txd(struct fe_tx_ring *ring)
545 return (u32)(ring->tx_ring_size -
546 ((ring->tx_next_idx - ring->tx_free_idx) &
547 (ring->tx_ring_size - 1)));
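/* fe_empty_txd() returns the number of unused tx descriptors: the
 * in-flight count is (tx_next_idx - tx_free_idx) taken modulo the
 * (power of two) ring size, and the remainder of the ring is free.
 */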
550 static int fe_tx_map_dma(struct sk_buff *skb, struct net_device *dev,
551 int tx_num, struct fe_tx_ring *ring)
553 struct fe_priv *priv = netdev_priv(dev);
554 struct skb_frag_struct *frag;
555 struct fe_tx_dma txd, *ptxd;
556 struct fe_tx_buf *tx_buf;
557 dma_addr_t mapped_addr;
558 unsigned int nr_frags;
560 int i, j, k, frag_size, frag_map_size, offset;
562 tx_buf = &ring->tx_buf[ring->tx_next_idx];
563 memset(tx_buf, 0, sizeof(*tx_buf));
564 memset(&txd, 0, sizeof(txd));
565 nr_frags = skb_shinfo(skb)->nr_frags;
567 /* init tx descriptor */
568 if (priv->soc->tx_dma)
569 priv->soc->tx_dma(&txd);
571 txd.txd4 = TX_DMA_DESP4_DEF;
574 /* TX Checksum offload */
575 if (skb->ip_summed == CHECKSUM_PARTIAL)
576 txd.txd4 |= TX_DMA_CHKSUM;
578 /* VLAN header offload */
579 if (skb_vlan_tag_present(skb)) {
580 u16 tag = skb_vlan_tag_get(skb);
582 if (IS_ENABLED(CONFIG_SOC_MT7621))
583 txd.txd4 |= TX_DMA_INS_VLAN_MT7621 | tag;
585 txd.txd4 |= TX_DMA_INS_VLAN |
586 ((tag >> VLAN_PRIO_SHIFT) << 4) |
590 /* TSO: fill MSS info in tcp checksum field */
591 if (skb_is_gso(skb)) {
592 if (skb_cow_head(skb, 0)) {
593 netif_warn(priv, tx_err, dev,
594 "GSO expand head fail.\n");
597 if (skb_shinfo(skb)->gso_type &
598 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
599 txd.txd4 |= TX_DMA_TSO;
600 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
604 mapped_addr = dma_map_single(&dev->dev, skb->data,
605 skb_headlen(skb), DMA_TO_DEVICE);
606 if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
608 txd.txd1 = mapped_addr;
609 txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));
611 tx_buf->flags |= FE_TX_FLAGS_SINGLE0;
612 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
613 dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
616 j = ring->tx_next_idx;
618 for (i = 0; i < nr_frags; i++) {
620 frag = &skb_shinfo(skb)->frags[i];
621 frag_size = skb_frag_size(frag);
623 while (frag_size > 0) {
624 frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
625 mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
628 if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
632 j = NEXT_TX_DESP_IDX(j);
633 txd.txd1 = mapped_addr;
634 txd.txd2 = TX_DMA_PLEN0(frag_map_size);
637 tx_buf = &ring->tx_buf[j];
638 memset(tx_buf, 0, sizeof(*tx_buf));
640 tx_buf->flags |= FE_TX_FLAGS_PAGE0;
641 dma_unmap_addr_set(tx_buf, dma_addr0,
643 dma_unmap_len_set(tx_buf, dma_len0,
646 txd.txd3 = mapped_addr;
647 txd.txd2 |= TX_DMA_PLEN1(frag_map_size);
649 tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
650 tx_buf->flags |= FE_TX_FLAGS_PAGE1;
651 dma_unmap_addr_set(tx_buf, dma_addr1,
653 dma_unmap_len_set(tx_buf, dma_len1,
656 if (!((i == (nr_frags - 1)) &&
657 (frag_map_size == frag_size))) {
658 fe_set_txd(&txd, &ring->tx_dma[j]);
659 memset(&txd, 0, sizeof(txd));
662 frag_size -= frag_map_size;
663 offset += frag_map_size;
668 /* set last segment */
670 txd.txd2 |= TX_DMA_LS1;
672 txd.txd2 |= TX_DMA_LS0;
673 fe_set_txd(&txd, &ring->tx_dma[j]);
675 /* store the skb for later cleanup */
678 netdev_sent_queue(dev, skb->len);
679 skb_tx_timestamp(skb);
681 ring->tx_next_idx = NEXT_TX_DESP_IDX(j);
682 /* make sure that all changes to the dma ring are flushed before we continue */
686 if (unlikely(fe_empty_txd(ring) <= ring->tx_thresh)) {
687 netif_stop_queue(dev);
689 if (unlikely(fe_empty_txd(ring) > ring->tx_thresh))
690 netif_wake_queue(dev);
693 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
694 fe_reg_w32(ring->tx_next_idx, FE_REG_TX_CTX_IDX0);
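/* The write to FE_REG_TX_CTX_IDX0 above is the doorbell that tells the
 * DMA engine about new descriptors.  It is skipped while skb->xmit_more
 * is set, so a burst of packets is kicked with a single register write,
 * unless the queue has just been stopped.
 */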
699 j = ring->tx_next_idx;
700 for (i = 0; i < tx_num; i++) {
701 ptxd = &ring->tx_dma[j];
702 tx_buf = &ring->tx_buf[j];
705 fe_txd_unmap(&dev->dev, tx_buf);
707 ptxd->txd2 = TX_DMA_DESP2_DEF;
708 j = NEXT_TX_DESP_IDX(j);
710 /* make sure that all changes to the dma ring are flushed before we continue */
719 static inline int fe_skb_padto(struct sk_buff *skb, struct fe_priv *priv)
725 if (unlikely(skb->len < VLAN_ETH_ZLEN)) {
726 if ((priv->flags & FE_FLAG_PADDING_64B) &&
727 !(priv->flags & FE_FLAG_PADDING_BUG))
730 if (skb_vlan_tag_present(skb))
732 else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
734 else if (!(priv->flags & FE_FLAG_PADDING_64B))
739 if (skb->len < len) {
740 ret = skb_pad(skb, len - skb->len);
744 skb_set_tail_pointer(skb, len);
751 static inline int fe_cal_txd_req(struct sk_buff *skb)
754 struct skb_frag_struct *frag;
757 if (skb_is_gso(skb)) {
758 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
759 frag = &skb_shinfo(skb)->frags[i];
760 nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
763 nfrags += skb_shinfo(skb)->nr_frags;
766 return DIV_ROUND_UP(nfrags, 2);
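/* Each tx descriptor carries up to two buffers (PLEN0/PLEN1), hence the
 * DIV_ROUND_UP(nfrags, 2) above.  For GSO skbs every fragment is counted
 * in TX_DMA_BUF_LEN sized chunks, because fe_tx_map_dma() splits
 * oversized fragments the same way.
 */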
769 static int fe_start_xmit(struct sk_buff *skb, struct net_device *dev)
771 struct fe_priv *priv = netdev_priv(dev);
772 struct fe_tx_ring *ring = &priv->tx_ring;
773 struct net_device_stats *stats = &dev->stats;
777 if (fe_skb_padto(skb, priv)) {
778 netif_warn(priv, tx_err, dev, "tx padding failed!\n");
782 tx_num = fe_cal_txd_req(skb);
783 if (unlikely(fe_empty_txd(ring) <= tx_num)) {
784 netif_stop_queue(dev);
785 netif_err(priv, tx_queued, dev,
786 "Tx Ring full when queue awake!\n");
787 return NETDEV_TX_BUSY;
790 if (fe_tx_map_dma(skb, dev, tx_num, ring) < 0) {
794 stats->tx_bytes += len;
800 static int fe_poll_rx(struct napi_struct *napi, int budget,
801 struct fe_priv *priv, u32 rx_intr)
803 struct net_device *netdev = priv->netdev;
804 struct net_device_stats *stats = &netdev->stats;
805 struct fe_soc_data *soc = priv->soc;
806 struct fe_rx_ring *ring = &priv->rx_ring;
807 int idx = ring->rx_calc_idx;
811 struct fe_rx_dma *rxd, trxd;
814 if (netdev->features & NETIF_F_RXCSUM)
815 checksum_bit = soc->checksum_bit;
819 if (priv->flags & FE_FLAG_RX_2B_OFFSET)
824 while (done < budget) {
828 idx = NEXT_RX_DESP_IDX(idx);
829 rxd = &ring->rx_dma[idx];
830 data = ring->rx_data[idx];
832 fe_get_rxd(&trxd, rxd);
833 if (!(trxd.rxd2 & RX_DMA_DONE))
836 /* alloc new buffer */
837 new_data = netdev_alloc_frag(ring->frag_size);
838 if (unlikely(!new_data)) {
842 dma_addr = dma_map_single(&netdev->dev,
843 new_data + NET_SKB_PAD + pad,
846 if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
847 put_page(virt_to_head_page(new_data));
852 skb = build_skb(data, ring->frag_size);
853 if (unlikely(!skb)) {
854 put_page(virt_to_head_page(new_data));
857 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
859 dma_unmap_single(&netdev->dev, trxd.rxd1,
860 ring->rx_buf_size, DMA_FROM_DEVICE);
861 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
863 skb_put(skb, pktlen);
864 if (trxd.rxd4 & checksum_bit)
865 skb->ip_summed = CHECKSUM_UNNECESSARY;
867 skb_checksum_none_assert(skb);
868 skb->protocol = eth_type_trans(skb, netdev);
871 stats->rx_bytes += pktlen;
873 napi_gro_receive(napi, skb);
875 ring->rx_data[idx] = new_data;
876 rxd->rxd1 = (unsigned int)dma_addr;
879 if (priv->flags & FE_FLAG_RX_SG_DMA)
880 rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
882 rxd->rxd2 = RX_DMA_LSO;
884 ring->rx_calc_idx = idx;
885 /* make sure that all changes to the dma ring are flushed before we continue */
889 fe_reg_w32(ring->rx_calc_idx, FE_REG_RX_CALC_IDX0);
894 fe_reg_w32(rx_intr, FE_REG_FE_INT_STATUS);
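/* Buffer management in fe_poll_rx(): a replacement page fragment is
 * allocated before the received buffer is handed to the stack via
 * build_skb(), and only then is the new fragment mapped and written back
 * into the descriptor, so the ring is not left without a buffer when an
 * allocation fails.
 */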
899 static int fe_poll_tx(struct fe_priv *priv, int budget, u32 tx_intr,
902 struct net_device *netdev = priv->netdev;
903 struct device *dev = &netdev->dev;
904 unsigned int bytes_compl = 0;
906 struct fe_tx_buf *tx_buf;
909 struct fe_tx_ring *ring = &priv->tx_ring;
911 idx = ring->tx_free_idx;
912 hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0);
914 while ((idx != hwidx) && budget) {
915 tx_buf = &ring->tx_buf[idx];
921 if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
922 bytes_compl += skb->len;
926 fe_txd_unmap(dev, tx_buf);
927 idx = NEXT_TX_DESP_IDX(idx);
929 ring->tx_free_idx = idx;
932 /* read the hw index again to make sure no new tx packets completed in the meantime */
933 hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0);
935 fe_reg_w32(tx_intr, FE_REG_FE_INT_STATUS);
943 netdev_completed_queue(netdev, done, bytes_compl);
945 if (unlikely(netif_queue_stopped(netdev) &&
946 (fe_empty_txd(ring) > ring->tx_thresh)))
947 netif_wake_queue(netdev);
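/* fe_poll_tx() reclaims descriptors up to the hardware dispatch index
 * (FE_REG_TX_DTX_IDX0), reports the freed bytes/packets to the BQL layer
 * via netdev_completed_queue() and wakes the queue once enough
 * descriptors are free again.
 */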
953 static int fe_poll(struct napi_struct *napi, int budget)
955 struct fe_priv *priv = container_of(napi, struct fe_priv, rx_napi);
956 struct fe_hw_stats *hwstat = priv->hw_stats;
957 int tx_done, rx_done, tx_again;
958 u32 status, fe_status, status_reg, mask;
959 u32 tx_intr, rx_intr, status_intr;
961 status = fe_reg_r32(FE_REG_FE_INT_STATUS);
963 tx_intr = priv->soc->tx_int;
964 rx_intr = priv->soc->rx_int;
965 status_intr = priv->soc->status_int;
970 if (fe_reg_table[FE_REG_FE_INT_STATUS2]) {
971 fe_status = fe_reg_r32(FE_REG_FE_INT_STATUS2);
972 status_reg = FE_REG_FE_INT_STATUS2;
974 status_reg = FE_REG_FE_INT_STATUS;
977 if (status & tx_intr)
978 tx_done = fe_poll_tx(priv, budget, tx_intr, &tx_again);
980 if (status & rx_intr)
981 rx_done = fe_poll_rx(napi, budget, priv, rx_intr);
983 if (unlikely(fe_status & status_intr)) {
984 if (hwstat && spin_trylock(&hwstat->stats_lock)) {
985 fe_stats_update(priv);
986 spin_unlock(&hwstat->stats_lock);
988 fe_reg_w32(status_intr, status_reg);
991 if (unlikely(netif_msg_intr(priv))) {
992 mask = fe_reg_r32(FE_REG_FE_INT_ENABLE);
993 netdev_info(priv->netdev,
994 "done tx %d, rx %d, intr 0x%08x/0x%x\n",
995 tx_done, rx_done, status, mask);
998 if (!tx_again && (rx_done < budget)) {
999 status = fe_reg_r32(FE_REG_FE_INT_STATUS);
1000 if (status & (tx_intr | rx_intr)) {
1001 /* let napi poll again */
1006 napi_complete(napi);
1007 fe_int_enable(tx_intr | rx_intr);
1016 static void fe_tx_timeout(struct net_device *dev)
1018 struct fe_priv *priv = netdev_priv(dev);
1019 struct fe_tx_ring *ring = &priv->tx_ring;
1021 priv->netdev->stats.tx_errors++;
1022 netif_err(priv, tx_err, dev,
1023 "transmit timed out\n");
1024 netif_info(priv, drv, dev, "dma_cfg:%08x\n",
1025 fe_reg_r32(FE_REG_PDMA_GLO_CFG));
1026 netif_info(priv, drv, dev, "tx_ring=%d, "
1027 "base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
1028 0, fe_reg_r32(FE_REG_TX_BASE_PTR0),
1029 fe_reg_r32(FE_REG_TX_MAX_CNT0),
1030 fe_reg_r32(FE_REG_TX_CTX_IDX0),
1031 fe_reg_r32(FE_REG_TX_DTX_IDX0),
1034 netif_info(priv, drv, dev,
1035 "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n",
1036 0, fe_reg_r32(FE_REG_RX_BASE_PTR0),
1037 fe_reg_r32(FE_REG_RX_MAX_CNT0),
1038 fe_reg_r32(FE_REG_RX_CALC_IDX0),
1039 fe_reg_r32(FE_REG_RX_DRX_IDX0));
1041 if (!test_and_set_bit(FE_FLAG_RESET_PENDING, priv->pending_flags))
1042 schedule_work(&priv->pending_work);
1045 static irqreturn_t fe_handle_irq(int irq, void *dev)
1047 struct fe_priv *priv = netdev_priv(dev);
1048 u32 status, int_mask;
1050 status = fe_reg_r32(FE_REG_FE_INT_STATUS);
1052 if (unlikely(!status))
1055 int_mask = (priv->soc->rx_int | priv->soc->tx_int);
1056 if (likely(status & int_mask)) {
1057 if (likely(napi_schedule_prep(&priv->rx_napi))) {
1058 fe_int_disable(int_mask);
1059 __napi_schedule(&priv->rx_napi);
1062 fe_reg_w32(status, FE_REG_FE_INT_STATUS);
1068 #ifdef CONFIG_NET_POLL_CONTROLLER
1069 static void fe_poll_controller(struct net_device *dev)
1071 struct fe_priv *priv = netdev_priv(dev);
1072 u32 int_mask = priv->soc->tx_int | priv->soc->rx_int;
1074 fe_int_disable(int_mask);
1075 fe_handle_irq(dev->irq, dev);
1076 fe_int_enable(int_mask);
1080 int fe_set_clock_cycle(struct fe_priv *priv)
1082 unsigned long sysclk = priv->sysclk;
1084 sysclk /= FE_US_CYC_CNT_DIVISOR;
1085 sysclk <<= FE_US_CYC_CNT_SHIFT;
1087 fe_w32((fe_r32(FE_FE_GLO_CFG) &
1088 ~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) |
1094 void fe_fwd_config(struct fe_priv *priv)
1098 fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG);
1100 /* disable jumbo frame */
1101 if (priv->flags & FE_FLAG_JUMBO_FRAME)
1102 fwd_cfg &= ~FE_GDM1_JMB_EN;
1104 /* forward unicast/multicast/broadcast frames to the cpu */
1107 fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG);
1110 static void fe_rxcsum_config(bool enable)
1113 fe_w32(fe_r32(FE_GDMA1_FWD_CFG) | (FE_GDM1_ICS_EN |
1114 FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
1117 fe_w32(fe_r32(FE_GDMA1_FWD_CFG) & ~(FE_GDM1_ICS_EN |
1118 FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
1122 static void fe_txcsum_config(bool enable)
1125 fe_w32(fe_r32(FE_CDMA_CSG_CFG) | (FE_ICS_GEN_EN |
1126 FE_TCS_GEN_EN | FE_UCS_GEN_EN),
1129 fe_w32(fe_r32(FE_CDMA_CSG_CFG) & ~(FE_ICS_GEN_EN |
1130 FE_TCS_GEN_EN | FE_UCS_GEN_EN),
1134 void fe_csum_config(struct fe_priv *priv)
1136 struct net_device *dev = priv_netdev(priv);
1138 fe_txcsum_config((dev->features & NETIF_F_IP_CSUM));
1139 fe_rxcsum_config((dev->features & NETIF_F_RXCSUM));
1142 static int fe_hw_init(struct net_device *dev)
1144 struct fe_priv *priv = netdev_priv(dev);
1147 err = devm_request_irq(priv->device, dev->irq, fe_handle_irq, 0,
1148 dev_name(priv->device), dev);
1152 if (priv->soc->set_mac)
1153 priv->soc->set_mac(priv, dev->dev_addr);
1155 fe_hw_set_macaddr(priv, dev->dev_addr);
1157 /* disable delay interrupt */
1158 fe_reg_w32(0, FE_REG_DLY_INT_CFG);
1160 fe_int_disable(priv->soc->tx_int | priv->soc->rx_int);
1162 /* the frame engine will push the VLAN tag according to the VIDX field in the tx descriptor */
1163 if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
1164 for (i = 0; i < 16; i += 2)
1165 fe_w32(((i + 1) << 16) + i,
1166 fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
1169 if (priv->soc->fwd_config(priv))
1170 netdev_err(dev, "unable to get clock\n");
1172 if (fe_reg_table[FE_REG_FE_RST_GL]) {
1173 fe_reg_w32(1, FE_REG_FE_RST_GL);
1174 fe_reg_w32(0, FE_REG_FE_RST_GL);
1180 static int fe_open(struct net_device *dev)
1182 struct fe_priv *priv = netdev_priv(dev);
1183 unsigned long flags;
1187 err = fe_init_dma(priv);
1193 spin_lock_irqsave(&priv->page_lock, flags);
1195 val = FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN;
1196 if (priv->flags & FE_FLAG_RX_2B_OFFSET)
1197 val |= FE_RX_2B_OFFSET;
1198 val |= priv->soc->pdma_glo_cfg;
1199 fe_reg_w32(val, FE_REG_PDMA_GLO_CFG);
1201 spin_unlock_irqrestore(&priv->page_lock, flags);
1204 priv->phy->start(priv);
1206 if (priv->soc->has_carrier && priv->soc->has_carrier(priv))
1207 netif_carrier_on(dev);
1209 napi_enable(&priv->rx_napi);
1210 fe_int_enable(priv->soc->tx_int | priv->soc->rx_int);
1211 netif_start_queue(dev);
1216 static int fe_stop(struct net_device *dev)
1218 struct fe_priv *priv = netdev_priv(dev);
1219 unsigned long flags;
1222 netif_tx_disable(dev);
1223 fe_int_disable(priv->soc->tx_int | priv->soc->rx_int);
1224 napi_disable(&priv->rx_napi);
1227 priv->phy->stop(priv);
1229 spin_lock_irqsave(&priv->page_lock, flags);
1231 fe_reg_w32(fe_reg_r32(FE_REG_PDMA_GLO_CFG) &
1232 ~(FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN),
1233 FE_REG_PDMA_GLO_CFG);
1234 spin_unlock_irqrestore(&priv->page_lock, flags);
1237 for (i = 0; i < 10; i++) {
1238 if (fe_reg_r32(FE_REG_PDMA_GLO_CFG) &
1239 (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)) {
1251 static int __init fe_init(struct net_device *dev)
1253 struct fe_priv *priv = netdev_priv(dev);
1254 struct device_node *port;
1255 const char *mac_addr;
1258 priv->soc->reset_fe();
1260 if (priv->soc->switch_init)
1261 if (priv->soc->switch_init(priv)) {
1262 netdev_err(dev, "failed to initialize switch core\n");
1266 mac_addr = of_get_mac_address(priv->device->of_node);
1268 ether_addr_copy(dev->dev_addr, mac_addr);
1270 /* If the mac address is invalid, use random mac address */
1271 if (!is_valid_ether_addr(dev->dev_addr)) {
1272 random_ether_addr(dev->dev_addr);
1273 dev_err(priv->device, "generated random MAC address %pM\n",
1277 err = fe_mdio_init(priv);
1281 if (priv->soc->port_init)
1282 for_each_child_of_node(priv->device->of_node, port)
1283 if (of_device_is_compatible(port, "mediatek,eth-port") &&
1284 of_device_is_available(port))
1285 priv->soc->port_init(priv, port);
1288 err = priv->phy->connect(priv);
1290 goto err_phy_disconnect;
1293 err = fe_hw_init(dev);
1295 goto err_phy_disconnect;
1297 if ((priv->flags & FE_FLAG_HAS_SWITCH) && priv->soc->switch_config)
1298 priv->soc->switch_config(priv);
1304 priv->phy->disconnect(priv);
1305 fe_mdio_cleanup(priv);
1310 static void fe_uninit(struct net_device *dev)
1312 struct fe_priv *priv = netdev_priv(dev);
1315 priv->phy->disconnect(priv);
1316 fe_mdio_cleanup(priv);
1318 fe_reg_w32(0, FE_REG_FE_INT_ENABLE);
1319 free_irq(dev->irq, dev);
1322 static int fe_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1324 struct fe_priv *priv = netdev_priv(dev);
1331 return phy_ethtool_ioctl(priv->phy_dev,
1332 (void *) ifr->ifr_data);
1336 return phy_mii_ioctl(priv->phy_dev, ifr, cmd);
1344 static int fe_change_mtu(struct net_device *dev, int new_mtu)
1346 struct fe_priv *priv = netdev_priv(dev);
1347 int frag_size, old_mtu;
1350 if (!(priv->flags & FE_FLAG_JUMBO_FRAME))
1351 return eth_change_mtu(dev, new_mtu);
1353 frag_size = fe_max_frag_size(new_mtu);
1354 if (new_mtu < 68 || frag_size > PAGE_SIZE)
1360 /* return early if the buffer sizes will not change */
1361 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
1363 if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
1366 if (new_mtu <= ETH_DATA_LEN)
1367 priv->rx_ring.frag_size = fe_max_frag_size(ETH_DATA_LEN);
1369 priv->rx_ring.frag_size = PAGE_SIZE;
1370 priv->rx_ring.rx_buf_size = fe_max_buf_size(priv->rx_ring.frag_size);
1372 if (!netif_running(dev))
1376 fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG);
1377 if (new_mtu <= ETH_DATA_LEN) {
1378 fwd_cfg &= ~FE_GDM1_JMB_EN;
1380 fwd_cfg &= ~(FE_GDM1_JMB_LEN_MASK << FE_GDM1_JMB_LEN_SHIFT);
1381 fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
1382 FE_GDM1_JMB_LEN_SHIFT) | FE_GDM1_JMB_EN;
1384 fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG);
1386 return fe_open(dev);
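/* Crossing the standard/jumbo MTU boundary changes frag_size and
 * rx_buf_size, so the device is re-opened here to rebuild the rx ring
 * with the new buffer size and to reprogram the jumbo length in
 * FE_GDMA1_FWD_CFG.
 */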
1389 static const struct net_device_ops fe_netdev_ops = {
1390 .ndo_init = fe_init,
1391 .ndo_uninit = fe_uninit,
1392 .ndo_open = fe_open,
1393 .ndo_stop = fe_stop,
1394 .ndo_start_xmit = fe_start_xmit,
1395 .ndo_set_mac_address = fe_set_mac_address,
1396 .ndo_validate_addr = eth_validate_addr,
1397 .ndo_do_ioctl = fe_do_ioctl,
1398 .ndo_change_mtu = fe_change_mtu,
1399 .ndo_tx_timeout = fe_tx_timeout,
1400 .ndo_get_stats64 = fe_get_stats64,
1401 .ndo_vlan_rx_add_vid = fe_vlan_rx_add_vid,
1402 .ndo_vlan_rx_kill_vid = fe_vlan_rx_kill_vid,
1403 #ifdef CONFIG_NET_POLL_CONTROLLER
1404 .ndo_poll_controller = fe_poll_controller,
1408 static void fe_reset_pending(struct fe_priv *priv)
1410 struct net_device *dev = priv->netdev;
1418 netif_alert(priv, ifup, dev,
1419 "Driver up/down cycle failed, closing device.\n");
1425 static const struct fe_work_t fe_work[] = {
1426 {FE_FLAG_RESET_PENDING, fe_reset_pending},
1429 static void fe_pending_work(struct work_struct *work)
1431 struct fe_priv *priv = container_of(work, struct fe_priv, pending_work);
1435 for (i = 0; i < ARRAY_SIZE(fe_work); i++) {
1436 pending = test_and_clear_bit(fe_work[i].bitnr,
1437 priv->pending_flags);
1439 fe_work[i].action(priv);
1443 static int fe_probe(struct platform_device *pdev)
1445 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1446 const struct of_device_id *match;
1447 struct fe_soc_data *soc;
1448 struct net_device *netdev;
1449 struct fe_priv *priv;
1451 int err, napi_weight;
1453 device_reset(&pdev->dev);
1455 match = of_match_device(of_fe_match, &pdev->dev);
1456 soc = (struct fe_soc_data *)match->data;
1459 fe_reg_table = soc->reg_table;
1461 soc->reg_table = fe_reg_table;
1463 fe_base = devm_ioremap_resource(&pdev->dev, res);
1465 err = -EADDRNOTAVAIL;
1469 netdev = alloc_etherdev(sizeof(*priv));
1471 dev_err(&pdev->dev, "alloc_etherdev failed\n");
1476 SET_NETDEV_DEV(netdev, &pdev->dev);
1477 netdev->netdev_ops = &fe_netdev_ops;
1478 netdev->base_addr = (unsigned long)fe_base;
1480 netdev->irq = platform_get_irq(pdev, 0);
1481 if (netdev->irq < 0) {
1482 dev_err(&pdev->dev, "no IRQ resource found\n");
1488 soc->init_data(soc, netdev);
1489 netdev->vlan_features = netdev->hw_features & ~NETIF_F_HW_VLAN_CTAG_TX;
1490 netdev->features |= netdev->hw_features;
1492 /* advertise a fake rx vlan filter feature to support the tx vlan offload path */
1493 if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
1494 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1496 priv = netdev_priv(netdev);
1497 spin_lock_init(&priv->page_lock);
1498 if (fe_reg_table[FE_REG_FE_COUNTER_BASE]) {
1499 priv->hw_stats = kzalloc(sizeof(*priv->hw_stats), GFP_KERNEL);
1500 if (!priv->hw_stats) {
1504 spin_lock_init(&priv->hw_stats->stats_lock);
1507 sysclk = devm_clk_get(&pdev->dev, NULL);
1508 if (!IS_ERR(sysclk)) {
1509 priv->sysclk = clk_get_rate(sysclk);
1510 } else if ((priv->flags & FE_FLAG_CALIBRATE_CLK)) {
1511 dev_err(&pdev->dev, "this soc needs a clk for calibration\n");
1516 priv->switch_np = of_parse_phandle(pdev->dev.of_node, "mediatek,switch", 0);
1517 if ((priv->flags & FE_FLAG_HAS_SWITCH) && !priv->switch_np) {
1518 dev_err(&pdev->dev, "failed to read switch phandle\n");
1523 priv->netdev = netdev;
1524 priv->device = &pdev->dev;
1526 priv->msg_enable = netif_msg_init(fe_msg_level, FE_DEFAULT_MSG_ENABLE);
1527 priv->rx_ring.frag_size = fe_max_frag_size(ETH_DATA_LEN);
1528 priv->rx_ring.rx_buf_size = fe_max_buf_size(priv->rx_ring.frag_size);
1529 priv->tx_ring.tx_ring_size = NUM_DMA_DESC;
1530 priv->rx_ring.rx_ring_size = NUM_DMA_DESC;
1531 INIT_WORK(&priv->pending_work, fe_pending_work);
1534 if (priv->flags & FE_FLAG_NAPI_WEIGHT) {
1536 priv->tx_ring.tx_ring_size *= 4;
1537 priv->rx_ring.rx_ring_size *= 4;
1539 netif_napi_add(netdev, &priv->rx_napi, fe_poll, napi_weight);
1540 fe_set_ethtool_ops(netdev);
1542 err = register_netdev(netdev);
1544 dev_err(&pdev->dev, "error bringing up device\n");
1548 platform_set_drvdata(pdev, netdev);
1550 netif_info(priv, probe, netdev, "mediatek frame engine at 0x%08lx, irq %d\n",
1551 netdev->base_addr, netdev->irq);
1556 free_netdev(netdev);
1558 devm_iounmap(&pdev->dev, fe_base);
1563 static int fe_remove(struct platform_device *pdev)
1565 struct net_device *dev = platform_get_drvdata(pdev);
1566 struct fe_priv *priv = netdev_priv(dev);
1568 netif_napi_del(&priv->rx_napi);
1569 kfree(priv->hw_stats);
1571 cancel_work_sync(&priv->pending_work);
1573 unregister_netdev(dev);
1575 platform_set_drvdata(pdev, NULL);
1580 static struct platform_driver fe_driver = {
1582 .remove = fe_remove,
1584 .name = "mtk_soc_eth",
1585 .owner = THIS_MODULE,
1586 .of_match_table = of_fe_match,
1590 module_platform_driver(fe_driver);
1592 MODULE_LICENSE("GPL");
1593 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
1594 MODULE_DESCRIPTION("Ethernet driver for Ralink SoC");
1595 MODULE_VERSION(MTK_FE_DRV_VERSION);