/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009-2013 John Crispin <blogic@openwrt.org>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>

#include <asm/mach-ralink/ralink_regs.h>

#include "ralink_soc_eth.h"
#include "esw_rt3052.h"
#include "ralink_ethtool.h"

#define TX_TIMEOUT		(2 * HZ)
#define MAX_RX_LENGTH		1536
#define FE_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define FE_RX_HLEN		(FE_RX_OFFSET + VLAN_ETH_HLEN + VLAN_HLEN + \
				 ETH_FCS_LEN)
#define DMA_DUMMY_DESC		0xffffffff
#define FE_DEFAULT_MSG_ENABLE \
		(NETIF_MSG_DRV | \
		 NETIF_MSG_PROBE | \
		 NETIF_MSG_LINK | \
		 NETIF_MSG_TIMER | \
		 NETIF_MSG_IFDOWN | \
		 NETIF_MSG_IFUP | \
		 NETIF_MSG_RX_ERR | \
		 NETIF_MSG_TX_ERR)

#define TX_DMA_DESP2_DEF	(TX_DMA_LS0 | TX_DMA_DONE)
#define TX_DMA_DESP4_DEF	(TX_DMA_QN(3) | TX_DMA_PN(1))
#define NEXT_TX_DESP_IDX(X)	(((X) + 1) & (NUM_DMA_DESC - 1))
#define NEXT_RX_DESP_IDX(X)	(((X) + 1) & (NUM_DMA_DESC - 1))

static int fe_msg_level = -1;
module_param_named(msg_level, fe_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

static const u32 fe_reg_table_default[FE_REG_COUNT] = {
        [FE_REG_PDMA_GLO_CFG] = FE_PDMA_GLO_CFG,
        [FE_REG_PDMA_RST_CFG] = FE_PDMA_RST_CFG,
        [FE_REG_DLY_INT_CFG] = FE_DLY_INT_CFG,
        [FE_REG_TX_BASE_PTR0] = FE_TX_BASE_PTR0,
        [FE_REG_TX_MAX_CNT0] = FE_TX_MAX_CNT0,
        [FE_REG_TX_CTX_IDX0] = FE_TX_CTX_IDX0,
        [FE_REG_RX_BASE_PTR0] = FE_RX_BASE_PTR0,
        [FE_REG_RX_MAX_CNT0] = FE_RX_MAX_CNT0,
        [FE_REG_RX_CALC_IDX0] = FE_RX_CALC_IDX0,
        [FE_REG_FE_INT_ENABLE] = FE_FE_INT_ENABLE,
        [FE_REG_FE_INT_STATUS] = FE_FE_INT_STATUS,
        [FE_REG_FE_DMA_VID_BASE] = FE_DMA_VID0,
        [FE_REG_FE_COUNTER_BASE] = FE_GDMA1_TX_GBCNT,
        [FE_REG_FE_RST_GL] = FE_FE_RST_GL,
};

static const u32 *fe_reg_table = fe_reg_table_default;

static void __iomem *fe_base;

void fe_w32(u32 val, unsigned reg)
        __raw_writel(val, fe_base + reg);

u32 fe_r32(unsigned reg)
        return __raw_readl(fe_base + reg);

void fe_reg_w32(u32 val, enum fe_reg reg)
        fe_w32(val, fe_reg_table[reg]);

u32 fe_reg_r32(enum fe_reg reg)
        return fe_r32(fe_reg_table[reg]);

static inline void fe_int_disable(u32 mask)
        fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) & ~mask,
                        FE_REG_FE_INT_ENABLE);
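        /* read back to flush the write */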
        fe_reg_r32(FE_REG_FE_INT_ENABLE);

static inline void fe_int_enable(u32 mask)
        fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) | mask,
                        FE_REG_FE_INT_ENABLE);
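        /* read back to flush the write */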
        fe_reg_r32(FE_REG_FE_INT_ENABLE);

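/* Program the station address into the GDMA1 MAC address registers; the two
 * register writes are done under page_lock so the address updates atomically. */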
static inline void fe_hw_set_macaddr(struct fe_priv *priv, unsigned char *mac)
        spin_lock_irqsave(&priv->page_lock, flags);
        fe_w32((mac[0] << 8) | mac[1], FE_GDMA1_MAC_ADRH);
        fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
                        FE_GDMA1_MAC_ADRL);
        spin_unlock_irqrestore(&priv->page_lock, flags);

static int fe_set_mac_address(struct net_device *dev, void *p)
        int ret = eth_mac_addr(dev, p);

        struct fe_priv *priv = netdev_priv(dev);

        if (priv->soc->set_mac)
                priv->soc->set_mac(priv, dev->dev_addr);

        fe_hw_set_macaddr(priv, dev->dev_addr);

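/* RX buffers are page fragments: headroom plus the largest accepted frame plus
 * the skb_shared_info that build_skb() expects at the end of the buffer. */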
static inline int fe_max_frag_size(int mtu)
        return SKB_DATA_ALIGN(FE_RX_HLEN + mtu) +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

static inline int fe_max_buf_size(int frag_size)
        return frag_size - FE_RX_HLEN -
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

static void fe_clean_rx(struct fe_priv *priv)
        for (i = 0; i < NUM_DMA_DESC; i++)
                if (priv->rx_data[i]) {
                        if (priv->rx_dma && priv->rx_dma[i].rxd1)
                                dma_unmap_single(&priv->netdev->dev,
                                                priv->rx_dma[i].rxd1,
                                                priv->rx_buf_size,
                                                DMA_FROM_DEVICE);
                        put_page(virt_to_head_page(priv->rx_data[i]));

        kfree(priv->rx_data);
        priv->rx_data = NULL;

        dma_free_coherent(&priv->netdev->dev,
                        NUM_DMA_DESC * sizeof(*priv->rx_dma),
                        priv->rx_dma, priv->rx_phys);

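/* Set up the RX ring: one page fragment per descriptor plus a DMA-coherent
 * descriptor array, then point the PDMA ring registers at it. */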
static int fe_alloc_rx(struct fe_priv *priv)
        struct net_device *netdev = priv->netdev;

        priv->rx_data = kcalloc(NUM_DMA_DESC, sizeof(*priv->rx_data),
                        GFP_KERNEL);

        for (i = 0; i < NUM_DMA_DESC; i++) {
                priv->rx_data[i] = netdev_alloc_frag(priv->frag_size);
                if (!priv->rx_data[i])

        priv->rx_dma = dma_alloc_coherent(&netdev->dev,
                        NUM_DMA_DESC * sizeof(*priv->rx_dma),
                        &priv->rx_phys,
                        GFP_ATOMIC | __GFP_ZERO);

        for (i = 0; i < NUM_DMA_DESC; i++) {
                dma_addr_t dma_addr = dma_map_single(&netdev->dev,
                                priv->rx_data[i] + FE_RX_OFFSET,
                                priv->rx_buf_size,
                                DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(&netdev->dev, dma_addr)))

                priv->rx_dma[i].rxd1 = (unsigned int) dma_addr;

                if (priv->soc->rx_dma)
                        priv->soc->rx_dma(priv, i, priv->rx_buf_size);

                        priv->rx_dma[i].rxd2 = RX_DMA_LSO;

        fe_reg_w32(priv->rx_phys, FE_REG_RX_BASE_PTR0);
        fe_reg_w32(NUM_DMA_DESC, FE_REG_RX_MAX_CNT0);
        fe_reg_w32((NUM_DMA_DESC - 1), FE_REG_RX_CALC_IDX0);
        fe_reg_w32(FE_PST_DRX_IDX0, FE_REG_PDMA_RST_CFG);

static void fe_clean_tx(struct fe_priv *priv)
        for (i = 0; i < NUM_DMA_DESC; i++) {
                dev_kfree_skb_any(priv->tx_skb[i]);

        dma_free_coherent(&priv->netdev->dev,
                        NUM_DMA_DESC * sizeof(*priv->tx_dma),
                        priv->tx_dma, priv->tx_phys);

static int fe_alloc_tx(struct fe_priv *priv)
        priv->tx_free_idx = 0;

        priv->tx_skb = kcalloc(NUM_DMA_DESC, sizeof(*priv->tx_skb),
                        GFP_KERNEL);

        priv->tx_dma = dma_alloc_coherent(&priv->netdev->dev,
                        NUM_DMA_DESC * sizeof(*priv->tx_dma),
                        &priv->tx_phys,
                        GFP_ATOMIC | __GFP_ZERO);

        for (i = 0; i < NUM_DMA_DESC; i++) {
                if (priv->soc->tx_dma) {
                        priv->soc->tx_dma(priv, i, NULL);

                priv->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;

        fe_reg_w32(priv->tx_phys, FE_REG_TX_BASE_PTR0);
        fe_reg_w32(NUM_DMA_DESC, FE_REG_TX_MAX_CNT0);
        fe_reg_w32(0, FE_REG_TX_CTX_IDX0);
        fe_reg_w32(FE_PST_DTX_IDX0, FE_REG_PDMA_RST_CFG);

static int fe_init_dma(struct fe_priv *priv)
        err = fe_alloc_tx(priv);

        err = fe_alloc_rx(priv);

static void fe_free_dma(struct fe_priv *priv)
        netdev_reset_queue(priv->netdev);

static inline void txd_unmap_single(struct device *dev, struct fe_tx_dma *txd)
        if (txd->txd1 && TX_DMA_GET_PLEN0(txd->txd2))
                dma_unmap_single(dev, txd->txd1,
                                TX_DMA_GET_PLEN0(txd->txd2),
                                DMA_TO_DEVICE);

static inline void txd_unmap_page0(struct device *dev, struct fe_tx_dma *txd)
        if (txd->txd1 && TX_DMA_GET_PLEN0(txd->txd2))
                dma_unmap_page(dev, txd->txd1,
                                TX_DMA_GET_PLEN0(txd->txd2),
                                DMA_TO_DEVICE);

static inline void txd_unmap_page1(struct device *dev, struct fe_tx_dma *txd)
        if (txd->txd3 && TX_DMA_GET_PLEN1(txd->txd2))
                dma_unmap_page(dev, txd->txd3,
                                TX_DMA_GET_PLEN1(txd->txd2),
                                DMA_TO_DEVICE);

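/* Fold the hardware MIB counters at FE_REG_FE_COUNTER_BASE into the 64-bit
 * software counters; callers serialise with stats_lock and readers use the
 * u64_stats syncp sequence. */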
void fe_stats_update(struct fe_priv *priv)
        struct fe_hw_stats *hwstats = priv->hw_stats;
        unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];

        u64_stats_update_begin(&hwstats->syncp);

        hwstats->tx_bytes += fe_r32(base);
        hwstats->tx_packets += fe_r32(base + 0x04);
        hwstats->tx_skip += fe_r32(base + 0x08);
        hwstats->tx_collisions += fe_r32(base + 0x0c);
        hwstats->rx_bytes += fe_r32(base + 0x20);
        hwstats->rx_packets += fe_r32(base + 0x24);
        hwstats->rx_overflow += fe_r32(base + 0x28);
        hwstats->rx_fcs_errors += fe_r32(base + 0x2c);
        hwstats->rx_short_errors += fe_r32(base + 0x30);
        hwstats->rx_long_errors += fe_r32(base + 0x34);
        hwstats->rx_checksum_errors += fe_r32(base + 0x38);
        hwstats->rx_flow_control_packets += fe_r32(base + 0x3c);

        u64_stats_update_end(&hwstats->syncp);

static struct rtnl_link_stats64 *fe_get_stats64(struct net_device *dev,
                struct rtnl_link_stats64 *storage)
        struct fe_priv *priv = netdev_priv(dev);
        struct fe_hw_stats *hwstats = priv->hw_stats;
        unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];

        netdev_stats_to_stats64(storage, &dev->stats);

        if (netif_running(dev) && netif_device_present(dev)) {
                if (spin_trylock(&hwstats->stats_lock)) {
                        fe_stats_update(priv);
                        spin_unlock(&hwstats->stats_lock);

                start = u64_stats_fetch_begin_bh(&hwstats->syncp);
                if (IS_ENABLED(CONFIG_SOC_MT7621)) {
                        storage->rx_packets = dev->stats.rx_packets;
                        storage->tx_packets = dev->stats.tx_packets;
                        storage->rx_bytes = dev->stats.rx_bytes;
                        storage->tx_bytes = dev->stats.tx_bytes;
                } else {
                        storage->rx_packets = hwstats->rx_packets;
                        storage->tx_packets = hwstats->tx_packets;
                        storage->rx_bytes = hwstats->rx_bytes;
                        storage->tx_bytes = hwstats->tx_bytes;
                }
                storage->collisions = hwstats->tx_collisions;
                storage->rx_length_errors = hwstats->rx_short_errors +
                        hwstats->rx_long_errors;
                storage->rx_over_errors = hwstats->rx_overflow;
                storage->rx_crc_errors = hwstats->rx_fcs_errors;
                storage->rx_errors = hwstats->rx_checksum_errors;
                storage->tx_aborted_errors = hwstats->tx_skip;
        } while (u64_stats_fetch_retry_bh(&hwstats->syncp, start));

        storage->tx_errors = priv->netdev->stats.tx_errors;
        storage->rx_dropped = priv->netdev->stats.rx_dropped;
        storage->tx_dropped = priv->netdev->stats.tx_dropped;

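/* Map an skb onto TX descriptors: the linear head goes into txd1/PLEN0 of the
 * first descriptor and page fragments are packed two per descriptor
 * (txd1/PLEN0 and txd3/PLEN1), so a frame needs 1 + nr_frags/2 slots. */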
static int fe_tx_map_dma(struct sk_buff *skb, struct net_device *dev,
                int idx)
        struct fe_priv *priv = netdev_priv(dev);
        struct skb_frag_struct *frag;
        struct fe_tx_dma *txd;
        dma_addr_t mapped_addr;
        unsigned int nr_frags;
        int i, j, unmap_idx, tx_num;

        txd = &priv->tx_dma[idx];
        nr_frags = skb_shinfo(skb)->nr_frags;
        tx_num = 1 + (nr_frags >> 1);

        /* init tx descriptor */
        if (priv->soc->tx_dma)
                priv->soc->tx_dma(priv, idx, skb);

        txd->txd4 = TX_DMA_DESP4_DEF;
        def_txd4 = txd->txd4;

        /* use dma_unmap_single to free it */
        txd->txd4 |= priv->soc->tx_udf_bit;

        /* TX Checksum offload */
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                txd->txd4 |= TX_DMA_CHKSUM;

        /* VLAN header offload */
        if (vlan_tx_tag_present(skb)) {
                if (IS_ENABLED(CONFIG_SOC_MT7620))
                        txd->txd4 |= TX_DMA_INS_VLAN |
                                ((vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT) << 4) |
                                (vlan_tx_tag_get(skb) & 0xF);

                        txd->txd4 |= TX_DMA_INS_VLAN_MT7621 | vlan_tx_tag_get(skb);

        /* TSO: fill MSS info in tcp checksum field */
        if (skb_is_gso(skb)) {
                if (skb_cow_head(skb, 0)) {
                        netif_warn(priv, tx_err, dev,
                                        "GSO expand head fail.\n");

                if (skb_shinfo(skb)->gso_type &
                                (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
                        txd->txd4 |= TX_DMA_TSO;
                        tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);

        mapped_addr = dma_map_single(&dev->dev, skb->data,
                        skb_headlen(skb), DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))

        txd->txd1 = mapped_addr;
        txd2 = TX_DMA_PLEN0(skb_headlen(skb));

        for (i = 0; i < nr_frags; i++) {

                frag = &skb_shinfo(skb)->frags[i];
                mapped_addr = skb_frag_dma_map(&dev->dev, frag, 0,
                                skb_frag_size(frag), DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))

                        j = NEXT_TX_DESP_IDX(j);
                        txd = &priv->tx_dma[j];
                        txd->txd1 = mapped_addr;
                        txd2 = TX_DMA_PLEN0(frag->size);
                        txd->txd4 = def_txd4;

                        txd->txd3 = mapped_addr;
                        txd2 |= TX_DMA_PLEN1(frag->size);
                        if (i != (nr_frags - 1))

                priv->tx_skb[j] = (struct sk_buff *) DMA_DUMMY_DESC;

        /* set last segment */
                txd->txd2 = (txd2 | TX_DMA_LS1);

                txd->txd2 = (txd2 | TX_DMA_LS0);

        /* store skb to cleanup */
        priv->tx_skb[j] = skb;

        j = NEXT_TX_DESP_IDX(j);
        fe_reg_w32(j, FE_REG_TX_CTX_IDX0);

        txd = &priv->tx_dma[idx];
        txd_unmap_single(&dev->dev, txd);

        for (i = 0; i < unmap_idx; i++) {

                j = NEXT_TX_DESP_IDX(j);
                txd = &priv->tx_dma[j];
                txd_unmap_page0(&dev->dev, txd);

                txd_unmap_page1(&dev->dev, txd);

        /* reinit descriptors and skb */
        for (i = 0; i < tx_num; i++) {
                priv->tx_dma[j].txd2 = TX_DMA_DESP2_DEF;
                priv->tx_skb[j] = NULL;
                j = NEXT_TX_DESP_IDX(j);

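/* Pad runt frames in software: depending on VLAN tagging and the
 * FE_FLAG_PADDING_64B / FE_FLAG_PADDING_BUG quirks, frames shorter than
 * ETH_ZLEN / VLAN_ETH_ZLEN are extended before they are mapped for DMA. */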
static inline int fe_skb_padto(struct sk_buff *skb, struct fe_priv *priv) {
        if (unlikely(skb->len < VLAN_ETH_ZLEN)) {
                if ((priv->flags & FE_FLAG_PADDING_64B) &&
                                !(priv->flags & FE_FLAG_PADDING_BUG))

                if (vlan_tx_tag_present(skb))

                else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))

                else if (!(priv->flags & FE_FLAG_PADDING_64B))

                if (skb->len < len) {
                        if ((ret = skb_pad(skb, len - skb->len)) < 0)

                        skb_set_tail_pointer(skb, len);

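/* Free TX descriptors left between the fill index and the last reclaimed
 * index; the subtraction is masked with NUM_DMA_DESC - 1 to handle ring
 * wrap-around. */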
static inline u32 fe_empty_txd(struct fe_priv *priv, u32 tx_fill_idx)
        return (u32)(NUM_DMA_DESC - ((tx_fill_idx - priv->tx_free_idx) &
                        (NUM_DMA_DESC - 1)));

static int fe_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct fe_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;

        if (fe_skb_padto(skb, priv)) {
                netif_warn(priv, tx_err, dev, "tx padding failed!\n");

        spin_lock(&priv->page_lock);
        tx_num = 1 + (skb_shinfo(skb)->nr_frags >> 1);
        tx = fe_reg_r32(FE_REG_TX_CTX_IDX0);
        if (unlikely(fe_empty_txd(priv, tx) <= tx_num))
                netif_stop_queue(dev);
                spin_unlock(&priv->page_lock);
                netif_err(priv, tx_queued, dev,
                                "Tx Ring full when queue awake!\n");
                return NETDEV_TX_BUSY;

        if (fe_tx_map_dma(skb, dev, tx) < 0) {

        netdev_sent_queue(dev, skb->len);
        skb_tx_timestamp(skb);

        stats->tx_bytes += skb->len;

        spin_unlock(&priv->page_lock);

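/* VLAN tags still sit in the received frame at this point; strip them in
 * software and hand them to the stack as accelerated tags so GRO can merge
 * the flow (see the "fake NETIF_F_HW_VLAN_CTAG_RX" note in fe_probe). */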
static inline void fe_rx_vlan(struct sk_buff *skb)
        if (!__vlan_get_tag(skb, &vlanid)) {
                /* pop the vlan tag */
                ehdr = (struct ethhdr *)skb->data;
                memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
                skb_pull(skb, VLAN_HLEN);
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);

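/* NAPI RX: for every completed descriptor, allocate a replacement page
 * fragment first, then wrap the old buffer with build_skb() and pass it up
 * via napi_gro_receive() before the slot is handed back to the hardware. */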
static int fe_poll_rx(struct napi_struct *napi, int budget,
                struct fe_priv *priv)
        struct net_device *netdev = priv->netdev;
        struct net_device_stats *stats = &netdev->stats;
        struct fe_soc_data *soc = priv->soc;
        int idx = fe_reg_r32(FE_REG_RX_CALC_IDX0);
        struct fe_rx_dma *rxd;
        bool rx_vlan = netdev->features & NETIF_F_HW_VLAN_CTAG_RX;

        if (netdev->features & NETIF_F_RXCSUM)
                checksum_bit = soc->checksum_bit;

        while (done < budget) {

                idx = NEXT_RX_DESP_IDX(idx);
                rxd = &priv->rx_dma[idx];
                data = priv->rx_data[idx];

                if (!(rxd->rxd2 & RX_DMA_DONE))

                /* alloc new buffer */
                new_data = netdev_alloc_frag(priv->frag_size);
                if (unlikely(!new_data)) {

                dma_addr = dma_map_single(&netdev->dev,
                                new_data + FE_RX_OFFSET,
                                priv->rx_buf_size,
                                DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
                        put_page(virt_to_head_page(new_data));

                skb = build_skb(data, priv->frag_size);
                if (unlikely(!skb)) {
                        put_page(virt_to_head_page(new_data));

                skb_reserve(skb, FE_RX_OFFSET);

                dma_unmap_single(&netdev->dev, rxd->rxd1,
                                priv->rx_buf_size, DMA_FROM_DEVICE);
                pktlen = RX_DMA_PLEN0(rxd->rxd2);
                skb_put(skb, pktlen);

                if (rxd->rxd4 & checksum_bit) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                        skb_checksum_none_assert(skb);

                skb->protocol = eth_type_trans(skb, netdev);

                stats->rx_bytes += pktlen;

                napi_gro_receive(napi, skb);

                priv->rx_data[idx] = new_data;
                rxd->rxd1 = (unsigned int) dma_addr;

                        soc->rx_dma(priv, idx, priv->rx_buf_size);

                        rxd->rxd2 = RX_DMA_LSO;

                fe_reg_w32(idx, FE_REG_RX_CALC_IDX0);

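/* Reclaim completed TX descriptors: unmap the buffers, free the skbs (slots
 * marked DMA_DUMMY_DESC only carried extra fragments), and wake the queue
 * once room is available again. */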
static int fe_poll_tx(struct fe_priv *priv, int budget)
        struct net_device *netdev = priv->netdev;
        struct device *dev = &netdev->dev;
        unsigned int bytes_compl = 0;
        struct fe_tx_dma *txd;
        u32 udf_bit = priv->soc->tx_udf_bit;

        idx = priv->tx_free_idx;
        while (done < budget) {
                txd = &priv->tx_dma[idx];
                skb = priv->tx_skb[idx];

                if (!(txd->txd2 & TX_DMA_DONE) || !skb)

                txd_unmap_page1(dev, txd);

                if (txd->txd4 & udf_bit)
                        txd_unmap_single(dev, txd);

                        txd_unmap_page0(dev, txd);

                if (skb != (struct sk_buff *) DMA_DUMMY_DESC) {
                        bytes_compl += skb->len;
                        dev_kfree_skb_any(skb);

                priv->tx_skb[idx] = NULL;
                idx = NEXT_TX_DESP_IDX(idx);

        priv->tx_free_idx = idx;

        netdev_completed_queue(netdev, done, bytes_compl);
        if (unlikely(netif_queue_stopped(netdev) &&
                        netif_carrier_ok(netdev))) {
                netif_wake_queue(netdev);

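/* NAPI poll handler: drain the TX then the RX delayed interrupts, ack them in
 * FE_REG_FE_INT_STATUS, and only re-enable the interrupt sources once both
 * directions stayed under budget. */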
static int fe_poll(struct napi_struct *napi, int budget)
        struct fe_priv *priv = container_of(napi, struct fe_priv, rx_napi);
        struct fe_hw_stats *hwstat = priv->hw_stats;
        int tx_done, rx_done;
        u32 tx_intr, rx_intr;

        status = fe_reg_r32(FE_REG_FE_INT_STATUS);
        tx_intr = priv->soc->tx_dly_int;
        rx_intr = priv->soc->rx_dly_int;
        tx_done = rx_done = 0;

        if (status & tx_intr) {
                tx_done += fe_poll_tx(priv, budget - tx_done);
                if (tx_done < budget) {
                        fe_reg_w32(tx_intr, FE_REG_FE_INT_STATUS);

                status = fe_reg_r32(FE_REG_FE_INT_STATUS);

        if (status & rx_intr) {
                rx_done += fe_poll_rx(napi, budget - rx_done, priv);
                if (rx_done < budget) {
                        fe_reg_w32(rx_intr, FE_REG_FE_INT_STATUS);

        if (unlikely(hwstat && (status & FE_CNT_GDM_AF))) {
                if (spin_trylock(&hwstat->stats_lock)) {
                        fe_stats_update(priv);
                        spin_unlock(&hwstat->stats_lock);

                fe_reg_w32(FE_CNT_GDM_AF, FE_REG_FE_INT_STATUS);

        if (unlikely(netif_msg_intr(priv))) {
                mask = fe_reg_r32(FE_REG_FE_INT_ENABLE);
                netdev_info(priv->netdev,
                                "done tx %d, rx %d, intr 0x%x/0x%x\n",
                                tx_done, rx_done, status, mask);

        if ((tx_done < budget) && (rx_done < budget)) {
                status = fe_reg_r32(FE_REG_FE_INT_STATUS);
                if (status & (tx_intr | rx_intr)) {

                fe_int_enable(tx_intr | rx_intr);

static void fe_tx_timeout(struct net_device *dev)
        struct fe_priv *priv = netdev_priv(dev);

        priv->netdev->stats.tx_errors++;
        netif_err(priv, tx_err, dev,
                        "transmit timed out, waking up the queue\n");
        netif_info(priv, drv, dev, ": dma_cfg:%08x, free_idx:%d, "
                        "dma_ctx_idx=%u, dma_crx_idx=%u\n",
                        fe_reg_r32(FE_REG_PDMA_GLO_CFG), priv->tx_free_idx,
                        fe_reg_r32(FE_REG_TX_CTX_IDX0),
                        fe_reg_r32(FE_REG_RX_CALC_IDX0));
        netif_wake_queue(dev);

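/* Hard interrupt handler: mask the delayed TX/RX interrupts and hand the work
 * to NAPI; any other pending status bits are acknowledged directly. */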
static irqreturn_t fe_handle_irq(int irq, void *dev)
        struct fe_priv *priv = netdev_priv(dev);

        status = fe_reg_r32(FE_REG_FE_INT_STATUS);

        if (unlikely(!status))

        dly_int = (priv->soc->rx_dly_int | priv->soc->tx_dly_int);
        if (likely(status & dly_int)) {
                fe_int_disable(dly_int);
                napi_schedule(&priv->rx_napi);

                fe_reg_w32(status, FE_REG_FE_INT_STATUS);

int fe_set_clock_cycle(struct fe_priv *priv)
        unsigned long sysclk = priv->sysclk;

        sysclk /= FE_US_CYC_CNT_DIVISOR;
        sysclk <<= FE_US_CYC_CNT_SHIFT;

        fe_w32((fe_r32(FE_FE_GLO_CFG) &
                        ~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) |
                        sysclk, FE_FE_GLO_CFG);

void fe_fwd_config(struct fe_priv *priv)
        fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG);

        /* disable jumbo frame */
        if (priv->flags & FE_FLAG_JUMBO_FRAME)
                fwd_cfg &= ~FE_GDM1_JMB_EN;

        /* set unicast/multicast/broadcast frame to cpu */

        fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG);

static void fe_rxcsum_config(bool enable)
        if (enable)
                fe_w32(fe_r32(FE_GDMA1_FWD_CFG) | (FE_GDM1_ICS_EN |
                                FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
                                FE_GDMA1_FWD_CFG);
        else
                fe_w32(fe_r32(FE_GDMA1_FWD_CFG) & ~(FE_GDM1_ICS_EN |
                                FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
                                FE_GDMA1_FWD_CFG);

static void fe_txcsum_config(bool enable)
        if (enable)
                fe_w32(fe_r32(FE_CDMA_CSG_CFG) | (FE_ICS_GEN_EN |
                                FE_TCS_GEN_EN | FE_UCS_GEN_EN),
                                FE_CDMA_CSG_CFG);
        else
                fe_w32(fe_r32(FE_CDMA_CSG_CFG) & ~(FE_ICS_GEN_EN |
                                FE_TCS_GEN_EN | FE_UCS_GEN_EN),
                                FE_CDMA_CSG_CFG);

void fe_csum_config(struct fe_priv *priv)
        struct net_device *dev = priv_netdev(priv);

        fe_txcsum_config((dev->features & NETIF_F_IP_CSUM));
        fe_rxcsum_config((dev->features & NETIF_F_RXCSUM));

static int fe_hw_init(struct net_device *dev)
        struct fe_priv *priv = netdev_priv(dev);

        err = devm_request_irq(priv->device, dev->irq, fe_handle_irq, 0,
                        dev_name(priv->device), dev);

        if (priv->soc->set_mac)
                priv->soc->set_mac(priv, dev->dev_addr);

                fe_hw_set_macaddr(priv, dev->dev_addr);

        fe_reg_w32(FE_DELAY_INIT, FE_REG_DLY_INT_CFG);

        fe_int_disable(priv->soc->tx_dly_int | priv->soc->rx_dly_int);

        /* frame engine will push VLAN tag according to the VIDX field in the Tx descriptor */
        if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
                for (i = 0; i < 16; i += 2)
                        fe_w32(((i + 1) << 16) + i,
                                        fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
                                        (i * 2));

        BUG_ON(!priv->soc->fwd_config);
        if (priv->soc->fwd_config(priv))
                netdev_err(dev, "unable to get clock\n");

        if (fe_reg_table[FE_REG_FE_RST_GL]) {
                fe_reg_w32(1, FE_REG_FE_RST_GL);
                fe_reg_w32(0, FE_REG_FE_RST_GL);

static int fe_open(struct net_device *dev)
        struct fe_priv *priv = netdev_priv(dev);

        err = fe_init_dma(priv);

        spin_lock_irqsave(&priv->page_lock, flags);
        napi_enable(&priv->rx_napi);

        val = FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN;
        val |= priv->soc->pdma_glo_cfg;
        fe_reg_w32(val, FE_REG_PDMA_GLO_CFG);

        spin_unlock_irqrestore(&priv->page_lock, flags);

        priv->phy->start(priv);

        if (priv->soc->has_carrier && priv->soc->has_carrier(priv))
                netif_carrier_on(dev);

        netif_start_queue(dev);
        fe_int_enable(priv->soc->tx_dly_int | priv->soc->rx_dly_int);

static int fe_stop(struct net_device *dev)
        struct fe_priv *priv = netdev_priv(dev);
        unsigned long flags;

        fe_int_disable(priv->soc->tx_dly_int | priv->soc->rx_dly_int);

        netif_tx_disable(dev);

        priv->phy->stop(priv);

        spin_lock_irqsave(&priv->page_lock, flags);
        napi_disable(&priv->rx_napi);

        fe_reg_w32(fe_reg_r32(FE_REG_PDMA_GLO_CFG) &
                        ~(FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN),
                        FE_REG_PDMA_GLO_CFG);
        spin_unlock_irqrestore(&priv->page_lock, flags);

        for (i = 0; i < 10; i++) {
                if (fe_reg_r32(FE_REG_PDMA_GLO_CFG) &
                                (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)) {

static int __init fe_init(struct net_device *dev)
        struct fe_priv *priv = netdev_priv(dev);
        struct device_node *port;

        BUG_ON(!priv->soc->reset_fe);
        priv->soc->reset_fe();

        if (priv->soc->switch_init)
                priv->soc->switch_init(priv);

        memcpy(dev->dev_addr, priv->soc->mac, ETH_ALEN);
        of_get_mac_address_mtd(priv->device->of_node, dev->dev_addr);

        err = fe_mdio_init(priv);

        if (priv->soc->port_init)
                for_each_child_of_node(priv->device->of_node, port)
                        if (of_device_is_compatible(port, "ralink,eth-port") &&
                            of_device_is_available(port))
                                priv->soc->port_init(priv, port);

        err = priv->phy->connect(priv);
        if (err)
                goto err_phy_disconnect;

        err = fe_hw_init(dev);
        if (err)
                goto err_phy_disconnect;

        if (priv->soc->switch_config)
                priv->soc->switch_config(priv);

        priv->phy->disconnect(priv);
        fe_mdio_cleanup(priv);

static void fe_uninit(struct net_device *dev)
        struct fe_priv *priv = netdev_priv(dev);

        priv->phy->disconnect(priv);
        fe_mdio_cleanup(priv);

        fe_reg_w32(0, FE_REG_FE_INT_ENABLE);
        free_irq(dev->irq, dev);

static int fe_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        struct fe_priv *priv = netdev_priv(dev);

                return phy_ethtool_ioctl(priv->phy_dev,
                                (void *) ifr->ifr_data);

                return phy_mii_ioctl(priv->phy_dev, ifr, cmd);

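/* MTU changes only need work when jumbo frames are supported: recompute the
 * fragment/buffer sizes and, if the interface is up, update the GDMA1 jumbo
 * frame settings and bring the rings back up via fe_open(). */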
static int fe_change_mtu(struct net_device *dev, int new_mtu)
        struct fe_priv *priv = netdev_priv(dev);
        int frag_size, old_mtu;

        if (!(priv->flags & FE_FLAG_JUMBO_FRAME))
                return eth_change_mtu(dev, new_mtu);

        frag_size = fe_max_frag_size(new_mtu);
        if (new_mtu < 68 || frag_size > PAGE_SIZE)

        /* return early if the buffer sizes will not change */
        if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)

        if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)

        if (new_mtu <= ETH_DATA_LEN) {
                priv->frag_size = fe_max_frag_size(ETH_DATA_LEN);
                priv->rx_buf_size = fe_max_buf_size(ETH_DATA_LEN);

                priv->frag_size = PAGE_SIZE;
                priv->rx_buf_size = fe_max_buf_size(PAGE_SIZE);

        if (!netif_running(dev))

        fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG);
        if (new_mtu <= ETH_DATA_LEN)
                fwd_cfg &= ~FE_GDM1_JMB_EN;

                fwd_cfg &= ~(FE_GDM1_JMB_LEN_MASK << FE_GDM1_JMB_LEN_SHIFT);
                fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
                                FE_GDM1_JMB_LEN_SHIFT) | FE_GDM1_JMB_EN;

        fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG);

        return fe_open(dev);

static const struct net_device_ops fe_netdev_ops = {
        .ndo_init = fe_init,
        .ndo_uninit = fe_uninit,
        .ndo_open = fe_open,
        .ndo_stop = fe_stop,
        .ndo_start_xmit = fe_start_xmit,
        .ndo_set_mac_address = fe_set_mac_address,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_do_ioctl = fe_do_ioctl,
        .ndo_change_mtu = fe_change_mtu,
        .ndo_tx_timeout = fe_tx_timeout,
        .ndo_get_stats64 = fe_get_stats64,
};

static int fe_probe(struct platform_device *pdev)
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        const struct of_device_id *match;
        struct fe_soc_data *soc;
        struct net_device *netdev;
        struct fe_priv *priv;

        device_reset(&pdev->dev);

        match = of_match_device(of_fe_match, &pdev->dev);
        soc = (struct fe_soc_data *) match->data;

                fe_reg_table = soc->reg_table;

                soc->reg_table = fe_reg_table;

        fe_base = devm_request_and_ioremap(&pdev->dev, res);
                err = -EADDRNOTAVAIL;

        netdev = alloc_etherdev(sizeof(*priv));
                dev_err(&pdev->dev, "alloc_etherdev failed\n");

        SET_NETDEV_DEV(netdev, &pdev->dev);
        netdev->netdev_ops = &fe_netdev_ops;
        netdev->base_addr = (unsigned long) fe_base;
        netdev->watchdog_timeo = TX_TIMEOUT;

        netdev->irq = platform_get_irq(pdev, 0);
        if (netdev->irq < 0) {
                dev_err(&pdev->dev, "no IRQ resource found\n");

        soc->init_data(soc, netdev);
        /* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
        netdev->vlan_features = netdev->hw_features &
                        ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
        netdev->features |= netdev->hw_features;

        priv = netdev_priv(netdev);
        spin_lock_init(&priv->page_lock);
        if (fe_reg_table[FE_REG_FE_COUNTER_BASE]) {
                priv->hw_stats = kzalloc(sizeof(*priv->hw_stats), GFP_KERNEL);
                if (!priv->hw_stats) {

                spin_lock_init(&priv->hw_stats->stats_lock);

        sysclk = devm_clk_get(&pdev->dev, NULL);
        if (!IS_ERR(sysclk))
                priv->sysclk = clk_get_rate(sysclk);

        priv->netdev = netdev;
        priv->device = &pdev->dev;

        priv->msg_enable = netif_msg_init(fe_msg_level, FE_DEFAULT_MSG_ENABLE);
        priv->frag_size = fe_max_frag_size(ETH_DATA_LEN);
        priv->rx_buf_size = fe_max_buf_size(ETH_DATA_LEN);
        if (priv->frag_size > PAGE_SIZE) {
                dev_err(&pdev->dev, "error frag size.\n");

        netif_napi_add(netdev, &priv->rx_napi, fe_poll, 32);
        fe_set_ethtool_ops(netdev);

        err = register_netdev(netdev);
                dev_err(&pdev->dev, "error bringing up device\n");

        platform_set_drvdata(pdev, netdev);

        netif_info(priv, probe, netdev, "ralink at 0x%08lx, irq %d\n",
                        netdev->base_addr, netdev->irq);

        free_netdev(netdev);

        devm_iounmap(&pdev->dev, fe_base);

static int fe_remove(struct platform_device *pdev)
        struct net_device *dev = platform_get_drvdata(pdev);
        struct fe_priv *priv = netdev_priv(dev);

        netif_napi_del(&priv->rx_napi);

        kfree(priv->hw_stats);

        unregister_netdev(dev);

        platform_set_drvdata(pdev, NULL);

static struct platform_driver fe_driver = {
        .remove = fe_remove,
                .name = "ralink_soc_eth",
                .owner = THIS_MODULE,
                .of_match_table = of_fe_match,

static int __init init_rtfe(void)
        ret = platform_driver_register(&fe_driver);

static void __exit exit_rtfe(void)
        platform_driver_unregister(&fe_driver);

module_init(init_rtfe);
module_exit(exit_rtfe);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for Ralink SoC");
MODULE_VERSION(FE_DRV_VERSION);