/*   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; version 2 of the License
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 *   Copyright (C) 2009-2013 John Crispin <blogic@openwrt.org>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>

#include <asm/mach-ralink/ralink_regs.h>

#include "ralink_soc_eth.h"
#include "esw_rt3052.h"
#include "ralink_ethtool.h"
#define MAX_RX_LENGTH		1536
#define FE_RX_HLEN		(NET_SKB_PAD + VLAN_ETH_HLEN + VLAN_HLEN + \
				 NET_IP_ALIGN + ETH_FCS_LEN)
#define DMA_DUMMY_DESC		0xffffffff
#define FE_DEFAULT_MSG_ENABLE \
		(NETIF_MSG_DRV | \
		NETIF_MSG_PROBE | \
		NETIF_MSG_LINK | \
		NETIF_MSG_TIMER | \
		NETIF_MSG_IFDOWN | \
		NETIF_MSG_IFUP | \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)
#define TX_DMA_DESP2_DEF	(TX_DMA_LS0 | TX_DMA_DONE)
#define TX_DMA_DESP4_DEF	(TX_DMA_QN(3) | TX_DMA_PN(1))
#define NEXT_TX_DESP_IDX(X)	(((X) + 1) & (NUM_DMA_DESC - 1))
#define NEXT_RX_DESP_IDX(X)	(((X) + 1) & (NUM_DMA_DESC - 1))
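
/* Both rings have NUM_DMA_DESC entries; the index macros above rely on
 * NUM_DMA_DESC being a power of two so the increment can wrap with a mask.
 */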
62 #define SYSC_REG_RSTCTRL 0x34
64 static int fe_msg_level = -1;
65 module_param_named(msg_level, fe_msg_level, int, 0);
66 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
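
/* Default frame engine register map. SoCs whose frame engine uses a
 * different layout provide their own table via soc->reg_table (see
 * fe_probe()).
 */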
static const u32 fe_reg_table_default[FE_REG_COUNT] = {
	[FE_REG_PDMA_GLO_CFG] = FE_PDMA_GLO_CFG,
	[FE_REG_PDMA_RST_CFG] = FE_PDMA_RST_CFG,
	[FE_REG_DLY_INT_CFG] = FE_DLY_INT_CFG,
	[FE_REG_TX_BASE_PTR0] = FE_TX_BASE_PTR0,
	[FE_REG_TX_MAX_CNT0] = FE_TX_MAX_CNT0,
	[FE_REG_TX_CTX_IDX0] = FE_TX_CTX_IDX0,
	[FE_REG_TX_DTX_IDX0] = FE_TX_DTX_IDX0,
	[FE_REG_RX_BASE_PTR0] = FE_RX_BASE_PTR0,
	[FE_REG_RX_MAX_CNT0] = FE_RX_MAX_CNT0,
	[FE_REG_RX_CALC_IDX0] = FE_RX_CALC_IDX0,
	[FE_REG_RX_DRX_IDX0] = FE_RX_DRX_IDX0,
	[FE_REG_FE_INT_ENABLE] = FE_FE_INT_ENABLE,
	[FE_REG_FE_INT_STATUS] = FE_FE_INT_STATUS,
	[FE_REG_FE_DMA_VID_BASE] = FE_DMA_VID0,
	[FE_REG_FE_COUNTER_BASE] = FE_GDMA1_TX_GBCNT,
	[FE_REG_FE_RST_GL] = FE_FE_RST_GL,
};

static const u32 *fe_reg_table = fe_reg_table_default;
struct fe_work_t {
	int bitnr;
	void (*action)(struct fe_priv *);
};
static void __iomem *fe_base;
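
/* Raw MMIO accessors for the frame engine; the fe_reg_*() variants go
 * through the per-SoC register table above.
 */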
void fe_w32(u32 val, unsigned reg)
{
	__raw_writel(val, fe_base + reg);
}

u32 fe_r32(unsigned reg)
{
	return __raw_readl(fe_base + reg);
}

void fe_reg_w32(u32 val, enum fe_reg reg)
{
	fe_w32(val, fe_reg_table[reg]);
}

u32 fe_reg_r32(enum fe_reg reg)
{
	return fe_r32(fe_reg_table[reg]);
}
void fe_reset(u32 reset_bits)
{
	u32 t;

	t = rt_sysc_r32(SYSC_REG_RSTCTRL);
	t |= reset_bits;
	rt_sysc_w32(t, SYSC_REG_RSTCTRL);
	udelay(10);

	t &= ~reset_bits;
	rt_sysc_w32(t, SYSC_REG_RSTCTRL);
	udelay(10);
}
static inline void fe_int_disable(u32 mask)
{
	fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) & ~mask,
			FE_REG_FE_INT_ENABLE);
	/* flush write */
	fe_reg_r32(FE_REG_FE_INT_ENABLE);
}

static inline void fe_int_enable(u32 mask)
{
	fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) | mask,
			FE_REG_FE_INT_ENABLE);
	/* flush write */
	fe_reg_r32(FE_REG_FE_INT_ENABLE);
}
static inline void fe_hw_set_macaddr(struct fe_priv *priv, unsigned char *mac)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->page_lock, flags);
	fe_w32((mac[0] << 8) | mac[1], FE_GDMA1_MAC_ADRH);
	fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
			FE_GDMA1_MAC_ADRL);
	spin_unlock_irqrestore(&priv->page_lock, flags);
}
static int fe_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);

	if (!ret) {
		struct fe_priv *priv = netdev_priv(dev);

		if (priv->soc->set_mac)
			priv->soc->set_mac(priv, dev->dev_addr);
		else
			fe_hw_set_macaddr(priv, p);
	}

	return ret;
}
static inline int fe_max_frag_size(int mtu)
{
	return SKB_DATA_ALIGN(FE_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int fe_max_buf_size(int frag_size)
{
	return frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
static inline void fe_get_rxd(struct fe_rx_dma *rxd, struct fe_rx_dma *dma_rxd)
{
	rxd->rxd1 = dma_rxd->rxd1;
	rxd->rxd2 = dma_rxd->rxd2;
	rxd->rxd3 = dma_rxd->rxd3;
	rxd->rxd4 = dma_rxd->rxd4;
}

static inline void fe_set_txd(struct fe_tx_dma *txd, struct fe_tx_dma *dma_txd)
{
	dma_txd->txd1 = txd->txd1;
	dma_txd->txd3 = txd->txd3;
	dma_txd->txd4 = txd->txd4;
	/* clean dma done flag last */
	dma_txd->txd2 = txd->txd2;
}
static void fe_clean_rx(struct fe_priv *priv)
{
	int i;

	if (priv->rx_data) {
		for (i = 0; i < NUM_DMA_DESC; i++)
			if (priv->rx_data[i]) {
				if (priv->rx_dma && priv->rx_dma[i].rxd1)
					dma_unmap_single(&priv->netdev->dev,
							priv->rx_dma[i].rxd1,
							priv->rx_buf_size,
							DMA_FROM_DEVICE);
				put_page(virt_to_head_page(priv->rx_data[i]));
			}

		kfree(priv->rx_data);
		priv->rx_data = NULL;
	}

	if (priv->rx_dma) {
		dma_free_coherent(&priv->netdev->dev,
				NUM_DMA_DESC * sizeof(*priv->rx_dma),
				priv->rx_dma,
				priv->rx_phys);
		priv->rx_dma = NULL;
	}
}
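
/* Allocate the rx ring: page-fragment data buffers plus a coherent
 * descriptor array, then point the PDMA at it. Buffers are mapped with a
 * NET_IP_ALIGN pad unless the hardware applies its own 2-byte offset
 * (FE_FLAG_RX_2B_OFFSET).
 */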
static int fe_alloc_rx(struct fe_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	int i, pad;

	priv->rx_data = kcalloc(NUM_DMA_DESC, sizeof(*priv->rx_data),
			GFP_KERNEL);
	if (!priv->rx_data)
		goto no_rx_mem;

	for (i = 0; i < NUM_DMA_DESC; i++) {
		priv->rx_data[i] = netdev_alloc_frag(priv->frag_size);
		if (!priv->rx_data[i])
			goto no_rx_mem;
	}

	priv->rx_dma = dma_alloc_coherent(&netdev->dev,
			NUM_DMA_DESC * sizeof(*priv->rx_dma),
			&priv->rx_phys,
			GFP_ATOMIC | __GFP_ZERO);
	if (!priv->rx_dma)
		goto no_rx_mem;

	if (priv->flags & FE_FLAG_RX_2B_OFFSET)
		pad = 0;
	else
		pad = NET_IP_ALIGN;
	for (i = 0; i < NUM_DMA_DESC; i++) {
		dma_addr_t dma_addr = dma_map_single(&netdev->dev,
				priv->rx_data[i] + NET_SKB_PAD + pad,
				priv->rx_buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr)))
			goto no_rx_mem;
		priv->rx_dma[i].rxd1 = (unsigned int) dma_addr;

		if (priv->flags & FE_FLAG_RX_SG_DMA)
			priv->rx_dma[i].rxd2 = RX_DMA_PLEN0(priv->rx_buf_size);
		else
			priv->rx_dma[i].rxd2 = RX_DMA_LSO;
	}
	wmb();

	fe_reg_w32(priv->rx_phys, FE_REG_RX_BASE_PTR0);
	fe_reg_w32(NUM_DMA_DESC, FE_REG_RX_MAX_CNT0);
	fe_reg_w32((NUM_DMA_DESC - 1), FE_REG_RX_CALC_IDX0);
	fe_reg_w32(FE_PST_DRX_IDX0, FE_REG_PDMA_RST_CFG);

	return 0;

no_rx_mem:
	return -ENOMEM;
}
static void fe_txd_unmap(struct device *dev, struct fe_tx_buf *tx_buf)
{
	if (tx_buf->flags & FE_TX_FLAGS_SINGLE0) {
		dma_unmap_single(dev,
				dma_unmap_addr(tx_buf, dma_addr0),
				dma_unmap_len(tx_buf, dma_len0),
				DMA_TO_DEVICE);
	} else if (tx_buf->flags & FE_TX_FLAGS_PAGE0) {
		dma_unmap_page(dev,
				dma_unmap_addr(tx_buf, dma_addr0),
				dma_unmap_len(tx_buf, dma_len0),
				DMA_TO_DEVICE);
	}
	if (tx_buf->flags & FE_TX_FLAGS_PAGE1)
		dma_unmap_page(dev,
				dma_unmap_addr(tx_buf, dma_addr1),
				dma_unmap_len(tx_buf, dma_len1),
				DMA_TO_DEVICE);

	tx_buf->flags = 0;
	if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *) DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}
static void fe_clean_tx(struct fe_priv *priv)
{
	int i;

	if (priv->tx_buf) {
		for (i = 0; i < NUM_DMA_DESC; i++)
			fe_txd_unmap(&priv->netdev->dev, &priv->tx_buf[i]);
		kfree(priv->tx_buf);
		priv->tx_buf = NULL;
	}

	if (priv->tx_dma) {
		dma_free_coherent(&priv->netdev->dev,
				NUM_DMA_DESC * sizeof(*priv->tx_dma),
				priv->tx_dma,
				priv->tx_phys);
		priv->tx_dma = NULL;
	}
}
static int fe_alloc_tx(struct fe_priv *priv)
{
	int i;

	priv->tx_free_idx = 0;

	priv->tx_buf = kcalloc(NUM_DMA_DESC, sizeof(*priv->tx_buf),
			GFP_KERNEL);
	if (!priv->tx_buf)
		goto no_tx_mem;

	priv->tx_dma = dma_alloc_coherent(&priv->netdev->dev,
			NUM_DMA_DESC * sizeof(*priv->tx_dma),
			&priv->tx_phys,
			GFP_ATOMIC | __GFP_ZERO);
	if (!priv->tx_dma)
		goto no_tx_mem;

	for (i = 0; i < NUM_DMA_DESC; i++) {
		if (priv->soc->tx_dma) {
			priv->soc->tx_dma(&priv->tx_dma[i]);
			continue;
		}
		priv->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
	}
	wmb();

	fe_reg_w32(priv->tx_phys, FE_REG_TX_BASE_PTR0);
	fe_reg_w32(NUM_DMA_DESC, FE_REG_TX_MAX_CNT0);
	fe_reg_w32(0, FE_REG_TX_CTX_IDX0);
	fe_reg_w32(FE_PST_DTX_IDX0, FE_REG_PDMA_RST_CFG);

	return 0;

no_tx_mem:
	return -ENOMEM;
}
static int fe_init_dma(struct fe_priv *priv)
{
	int err;

	err = fe_alloc_tx(priv);
	if (err)
		return err;

	err = fe_alloc_rx(priv);
	if (err)
		return err;

	return 0;
}

static void fe_free_dma(struct fe_priv *priv)
{
	fe_clean_tx(priv);
	fe_clean_rx(priv);

	netdev_reset_queue(priv->netdev);
}
void fe_stats_update(struct fe_priv *priv)
{
	struct fe_hw_stats *hwstats = priv->hw_stats;
	unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];
	u64 stats;

	u64_stats_update_begin(&hwstats->syncp);

	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
		hwstats->rx_bytes += fe_r32(base);
		stats = fe_r32(base + 0x04);
		if (stats)
			hwstats->rx_bytes += (stats << 32);
		hwstats->rx_packets += fe_r32(base + 0x08);
		hwstats->rx_overflow += fe_r32(base + 0x10);
		hwstats->rx_fcs_errors += fe_r32(base + 0x14);
		hwstats->rx_short_errors += fe_r32(base + 0x18);
		hwstats->rx_long_errors += fe_r32(base + 0x1c);
		hwstats->rx_checksum_errors += fe_r32(base + 0x20);
		hwstats->rx_flow_control_packets += fe_r32(base + 0x24);
		hwstats->tx_skip += fe_r32(base + 0x28);
		hwstats->tx_collisions += fe_r32(base + 0x2c);
		hwstats->tx_bytes += fe_r32(base + 0x30);
		stats = fe_r32(base + 0x34);
		if (stats)
			hwstats->tx_bytes += (stats << 32);
		hwstats->tx_packets += fe_r32(base + 0x38);
	} else {
		hwstats->tx_bytes += fe_r32(base);
		hwstats->tx_packets += fe_r32(base + 0x04);
		hwstats->tx_skip += fe_r32(base + 0x08);
		hwstats->tx_collisions += fe_r32(base + 0x0c);
		hwstats->rx_bytes += fe_r32(base + 0x20);
		hwstats->rx_packets += fe_r32(base + 0x24);
		hwstats->rx_overflow += fe_r32(base + 0x28);
		hwstats->rx_fcs_errors += fe_r32(base + 0x2c);
		hwstats->rx_short_errors += fe_r32(base + 0x30);
		hwstats->rx_long_errors += fe_r32(base + 0x34);
		hwstats->rx_checksum_errors += fe_r32(base + 0x38);
		hwstats->rx_flow_control_packets += fe_r32(base + 0x3c);
	}

	u64_stats_update_end(&hwstats->syncp);
}
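
/* Snapshot the 64-bit counters under the u64_stats seqcount; the loop below
 * retries if fe_stats_update() raced with the read.
 */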
static struct rtnl_link_stats64 *fe_get_stats64(struct net_device *dev,
		struct rtnl_link_stats64 *storage)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct fe_hw_stats *hwstats = priv->hw_stats;
	unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];
	unsigned int start;

	if (!base) {
		netdev_stats_to_stats64(storage, &dev->stats);
		return storage;
	}

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hwstats->stats_lock)) {
			fe_stats_update(priv);
			spin_unlock(&hwstats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_bh(&hwstats->syncp);
		storage->rx_packets = hwstats->rx_packets;
		storage->tx_packets = hwstats->tx_packets;
		storage->rx_bytes = hwstats->rx_bytes;
		storage->tx_bytes = hwstats->tx_bytes;
		storage->collisions = hwstats->tx_collisions;
		storage->rx_length_errors = hwstats->rx_short_errors +
			hwstats->rx_long_errors;
		storage->rx_over_errors = hwstats->rx_overflow;
		storage->rx_crc_errors = hwstats->rx_fcs_errors;
		storage->rx_errors = hwstats->rx_checksum_errors;
		storage->tx_aborted_errors = hwstats->tx_skip;
	} while (u64_stats_fetch_retry_bh(&hwstats->syncp, start));

	storage->tx_errors = priv->netdev->stats.tx_errors;
	storage->rx_dropped = priv->netdev->stats.rx_dropped;
	storage->tx_dropped = priv->netdev->stats.tx_dropped;

	return storage;
}
static int fe_vlan_rx_add_vid(struct net_device *dev,
		__be16 proto, u16 vid)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 idx = (vid & 0xf);
	u32 vlan_cfg;

	if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) &&
			(dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
		return 0;

	if (test_bit(idx, &priv->vlan_map)) {
		netdev_warn(dev, "disable tx vlan offload\n");
		dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		netdev_update_features(dev);
	} else {
		vlan_cfg = fe_r32(fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
				((idx >> 1) << 2));
		if (idx & 0x1) {
			vlan_cfg &= 0xffff;
			vlan_cfg |= (vid << 16);
		} else {
			vlan_cfg &= 0xffff0000;
			vlan_cfg |= vid;
		}
		fe_w32(vlan_cfg, fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
				((idx >> 1) << 2));
		set_bit(idx, &priv->vlan_map);
	}

	return 0;
}
static int fe_vlan_rx_kill_vid(struct net_device *dev,
		__be16 proto, u16 vid)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 idx = (vid & 0xf);

	if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) &&
			(dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
		return 0;

	clear_bit(idx, &priv->vlan_map);

	return 0;
}
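
/* Map one skb onto the tx ring. Each descriptor carries up to two buffers
 * (txd1/PLEN0 and txd3/PLEN1): the linear head always takes the first slot,
 * and the segment counter k alternates between filling the second slot of
 * the current descriptor and advancing to a fresh one.
 */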
static int fe_tx_map_dma(struct sk_buff *skb, struct net_device *dev,
		int idx, int tx_num)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct skb_frag_struct *frag;
	struct fe_tx_dma txd, *ptxd;
	struct fe_tx_buf *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	u32 def_txd4;
	int i, j, k, frag_size, frag_map_size, offset;

	tx_buf = &priv->tx_buf[idx];
	memset(tx_buf, 0, sizeof(*tx_buf));
	memset(&txd, 0, sizeof(txd));
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* init tx descriptor */
	if (priv->soc->tx_dma)
		priv->soc->tx_dma(&txd);
	else
		txd.txd4 = TX_DMA_DESP4_DEF;
	def_txd4 = txd.txd4;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd.txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (vlan_tx_tag_present(skb)) {
		if (IS_ENABLED(CONFIG_SOC_MT7621))
			txd.txd4 |= TX_DMA_INS_VLAN_MT7621 | vlan_tx_tag_get(skb);
		else
			txd.txd4 |= TX_DMA_INS_VLAN |
				((vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT) << 4) |
				(vlan_tx_tag_get(skb) & 0xF);
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(priv, tx_err, dev,
					"GSO expand head fail.\n");
			goto err_out;
		}
		if (skb_shinfo(skb)->gso_type &
				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			txd.txd4 |= TX_DMA_TSO;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	mapped_addr = dma_map_single(&dev->dev, skb->data,
			skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
		goto err_out;
	txd.txd1 = mapped_addr;
	txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));

	tx_buf->flags |= FE_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
	/* TX SG offload */
	j = idx;
	k = 0;
	for (i = 0; i < nr_frags; i++) {
		offset = 0;
		frag = &skb_shinfo(skb)->frags[i];
		frag_size = skb_frag_size(frag);

		while (frag_size > 0) {
			frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
					frag_map_size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
				goto err_dma;

			if (k & 0x1) {
				j = NEXT_TX_DESP_IDX(j);
				txd.txd1 = mapped_addr;
				txd.txd2 = TX_DMA_PLEN0(frag_map_size);
				txd.txd4 = def_txd4;

				tx_buf = &priv->tx_buf[j];
				memset(tx_buf, 0, sizeof(*tx_buf));

				tx_buf->flags |= FE_TX_FLAGS_PAGE0;
				dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
				dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
			} else {
				txd.txd3 = mapped_addr;
				txd.txd2 |= TX_DMA_PLEN1(frag_map_size);

				tx_buf->skb = (struct sk_buff *) DMA_DUMMY_DESC;
				tx_buf->flags |= FE_TX_FLAGS_PAGE1;
				dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
				dma_unmap_len_set(tx_buf, dma_len1, frag_map_size);
			}

			if (!((i == (nr_frags - 1)) &&
					(frag_map_size == frag_size))) {
				fe_set_txd(&txd, &priv->tx_dma[j]);
				memset(&txd, 0, sizeof(txd));
			}
			frag_size -= frag_map_size;
			offset += frag_map_size;
			k++;
		}
	}

	/* set last segment */
	if (k & 0x1)
		txd.txd2 |= TX_DMA_LS1;
	else
		txd.txd2 |= TX_DMA_LS0;
	fe_set_txd(&txd, &priv->tx_dma[j]);

	/* store skb to cleanup */
	tx_buf->skb = skb;

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	j = NEXT_TX_DESP_IDX(j);
	wmb();
	fe_reg_w32(j, FE_REG_TX_CTX_IDX0);

	return 0;

err_dma:
	j = idx;
	for (i = 0; i < tx_num; i++) {
		ptxd = &priv->tx_dma[j];
		tx_buf = &priv->tx_buf[j];

		/* unmap dma */
		fe_txd_unmap(&dev->dev, tx_buf);

		ptxd->txd2 = TX_DMA_DESP2_DEF;
		j = NEXT_TX_DESP_IDX(j);
	}
	wmb();

err_out:
	return -1;
}
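
/* Pad runt frames in software when the hardware either cannot pad to the
 * minimum frame size (!FE_FLAG_PADDING_64B) or does so incorrectly
 * (FE_FLAG_PADDING_BUG). The target length depends on whether a VLAN tag
 * will be inserted on transmit.
 */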
static inline int fe_skb_padto(struct sk_buff *skb, struct fe_priv *priv)
{
	unsigned int len;
	int ret;

	ret = 0;
	if (unlikely(skb->len < VLAN_ETH_ZLEN)) {
		if ((priv->flags & FE_FLAG_PADDING_64B) &&
				!(priv->flags & FE_FLAG_PADDING_BUG))
			return ret;

		if (vlan_tx_tag_present(skb))
			len = ETH_ZLEN;
		else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
			len = VLAN_ETH_ZLEN;
		else if (!(priv->flags & FE_FLAG_PADDING_64B))
			len = ETH_ZLEN;
		else
			return ret;

		if (skb->len < len) {
			ret = skb_pad(skb, len - skb->len);
			if (ret < 0)
				return ret;
			skb->len = len;
			skb_set_tail_pointer(skb, len);
		}
	}

	return ret;
}
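
/* Number of free tx descriptors: ring size minus the distance from the last
 * freed slot to the current fill index, computed modulo the ring size.
 */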
static inline u32 fe_empty_txd(struct fe_priv *priv, u32 tx_fill_idx)
{
	return (u32)(NUM_DMA_DESC - ((tx_fill_idx - priv->tx_free_idx) &
			(NUM_DMA_DESC - 1)));
}
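
/* Worst-case descriptor count for an skb: one buffer for the linear head
 * plus one per fragment segment, packed two buffers per descriptor.
 */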
static inline int fe_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	struct skb_frag_struct *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return DIV_ROUND_UP(nfrags, 2);
}
static int fe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	u32 tx;
	int tx_num;
	int len = skb->len;

	if (fe_skb_padto(skb, priv)) {
		netif_warn(priv, tx_err, dev, "tx padding failed!\n");
		return NETDEV_TX_OK;
	}

	tx_num = fe_cal_txd_req(skb);
	tx = fe_reg_r32(FE_REG_TX_CTX_IDX0);
	if (unlikely(fe_empty_txd(priv, tx) <= tx_num)) {
		netif_stop_queue(dev);
		netif_err(priv, tx_queued, dev,
				"Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	if (fe_tx_map_dma(skb, dev, tx, tx_num) < 0) {
		stats->tx_dropped++;
	} else {
		stats->tx_packets++;
		stats->tx_bytes += len;
	}

	return NETDEV_TX_OK;
}
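
/* The hardware leaves the VLAN tag in the rx buffer; pop it into the skb so
 * the stack (and GRO) see an untagged frame.
 */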
static inline void fe_rx_vlan(struct sk_buff *skb)
{
	struct ethhdr *ehdr;
	u16 vlanid;

	if (!__vlan_get_tag(skb, &vlanid)) {
		/* pop the vlan tag */
		ehdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
	}
}
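
/* rx NAPI loop. A replacement fragment is allocated before the received
 * buffer is handed to the stack, so a ring slot is never left without a
 * buffer; on any allocation or mapping error the old buffer is simply given
 * back to the hardware (release_desc).
 */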
static int fe_poll_rx(struct napi_struct *napi, int budget,
		struct fe_priv *priv, u32 rx_intr)
{
	struct net_device *netdev = priv->netdev;
	struct net_device_stats *stats = &netdev->stats;
	struct fe_soc_data *soc = priv->soc;
	u32 checksum_bit;
	int idx = fe_reg_r32(FE_REG_RX_CALC_IDX0);
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct fe_rx_dma *rxd, trxd;
	int done = 0, pad;
	bool rx_vlan = netdev->features & NETIF_F_HW_VLAN_CTAG_RX;

	if (netdev->features & NETIF_F_RXCSUM)
		checksum_bit = soc->checksum_bit;
	else
		checksum_bit = 0;

	if (priv->flags & FE_FLAG_RX_2B_OFFSET)
		pad = 0;
	else
		pad = NET_IP_ALIGN;

	while (done < budget) {
		unsigned int pktlen;
		dma_addr_t dma_addr;

		idx = NEXT_RX_DESP_IDX(idx);
		rxd = &priv->rx_dma[idx];
		data = priv->rx_data[idx];

		fe_get_rxd(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* alloc new buffer */
		new_data = netdev_alloc_frag(priv->frag_size);
		if (unlikely(!new_data)) {
			stats->rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(&netdev->dev,
				new_data + NET_SKB_PAD + pad,
				priv->rx_buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
			put_page(virt_to_head_page(new_data));
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, priv->frag_size);
		if (unlikely(!skb)) {
			put_page(virt_to_head_page(new_data));
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(&netdev->dev, trxd.rxd1,
				priv->rx_buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & checksum_bit)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		if (rx_vlan)
			fe_rx_vlan(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		stats->rx_packets++;
		stats->rx_bytes += pktlen;

		napi_gro_receive(napi, skb);

		priv->rx_data[idx] = new_data;
		rxd->rxd1 = (unsigned int) dma_addr;

release_desc:
		if (priv->flags & FE_FLAG_RX_SG_DMA)
			rxd->rxd2 = RX_DMA_PLEN0(priv->rx_buf_size);
		else
			rxd->rxd2 = RX_DMA_LSO;

		wmb();
		fe_reg_w32(idx, FE_REG_RX_CALC_IDX0);
		done++;
	}

	if (done < budget)
		fe_reg_w32(rx_intr, FE_REG_FE_INT_STATUS);

	return done;
}
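
/* Reclaim completed tx descriptors up to the hardware's DTX index. Slots
 * holding the DMA_DUMMY_DESC marker belong to multi-descriptor packets and
 * carry no skb of their own.
 */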
static int fe_poll_tx(struct fe_priv *priv, int budget, u32 tx_intr)
{
	struct net_device *netdev = priv->netdev;
	struct device *dev = &netdev->dev;
	unsigned int bytes_compl = 0;
	unsigned int done = 0;
	struct fe_tx_buf *tx_buf;
	struct sk_buff *skb;
	int idx, hwidx;

	hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0);
	idx = priv->tx_free_idx;

	while ((idx != hwidx) && budget) {
		tx_buf = &priv->tx_buf[idx];
		skb = tx_buf->skb;

		if (!skb)
			break;

		if (skb != (struct sk_buff *) DMA_DUMMY_DESC) {
			bytes_compl += skb->len;
			done++;
			budget--;
		}
		fe_txd_unmap(dev, tx_buf);
		idx = NEXT_TX_DESP_IDX(idx);
	}
	priv->tx_free_idx = idx;

	if (idx == hwidx) {
		/* re-read the hw index; only ack the irq if nothing new completed */
		hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0);
		if (idx == hwidx)
			fe_reg_w32(tx_intr, FE_REG_FE_INT_STATUS);
	}

	netdev_completed_queue(netdev, done, bytes_compl);
	if (unlikely(netif_queue_stopped(netdev) &&
			netif_carrier_ok(netdev))) {
		netif_wake_queue(netdev);
	}

	return done;
}
static int fe_poll(struct napi_struct *napi, int budget)
{
	struct fe_priv *priv = container_of(napi, struct fe_priv, rx_napi);
	struct fe_hw_stats *hwstat = priv->hw_stats;
	int tx_done, rx_done;
	u32 status, mask;
	u32 tx_intr, rx_intr;

	status = fe_reg_r32(FE_REG_FE_INT_STATUS);
	tx_intr = priv->soc->tx_int;
	rx_intr = priv->soc->rx_int;
	tx_done = rx_done = 0;

	if (status & tx_intr)
		tx_done = fe_poll_tx(priv, budget, tx_intr);

	if (status & rx_intr)
		rx_done = fe_poll_rx(napi, budget, priv, rx_intr);

	if (unlikely(hwstat && (status & FE_CNT_GDM_AF))) {
		if (spin_trylock(&hwstat->stats_lock)) {
			fe_stats_update(priv);
			spin_unlock(&hwstat->stats_lock);
		}
		fe_reg_w32(FE_CNT_GDM_AF, FE_REG_FE_INT_STATUS);
	}

	if (unlikely(netif_msg_intr(priv))) {
		mask = fe_reg_r32(FE_REG_FE_INT_ENABLE);
		netdev_info(priv->netdev,
				"done tx %d, rx %d, intr 0x%08x/0x%x\n",
				tx_done, rx_done, status, mask);
	}

	if ((tx_done < budget) && (rx_done < budget)) {
		status = fe_reg_r32(FE_REG_FE_INT_STATUS);
		if (status & (tx_intr | rx_intr)) {
			rx_done = budget;
			goto poll_again;
		}

		napi_complete(napi);
		fe_int_enable(tx_intr | rx_intr);
	}

poll_again:
	return rx_done;
}
static void fe_tx_timeout(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);

	priv->netdev->stats.tx_errors++;
	netif_err(priv, tx_err, dev,
			"transmit timed out\n");
	netif_info(priv, drv, dev, "dma_cfg:%08x\n",
			fe_reg_r32(FE_REG_PDMA_GLO_CFG));
	netif_info(priv, drv, dev, "tx_ring=%d, "
			"base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%d\n", 0,
			fe_reg_r32(FE_REG_TX_BASE_PTR0),
			fe_reg_r32(FE_REG_TX_MAX_CNT0),
			fe_reg_r32(FE_REG_TX_CTX_IDX0),
			fe_reg_r32(FE_REG_TX_DTX_IDX0),
			priv->tx_free_idx);
	netif_info(priv, drv, dev, "rx_ring=%d, "
			"base=%08x, max=%u, calc=%u, drx=%u\n", 0,
			fe_reg_r32(FE_REG_RX_BASE_PTR0),
			fe_reg_r32(FE_REG_RX_MAX_CNT0),
			fe_reg_r32(FE_REG_RX_CALC_IDX0),
			fe_reg_r32(FE_REG_RX_DRX_IDX0));

	if (!test_and_set_bit(FE_FLAG_RESET_PENDING, priv->pending_flags))
		schedule_work(&priv->pending_work);
}
static irqreturn_t fe_handle_irq(int irq, void *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 status, int_mask;

	status = fe_reg_r32(FE_REG_FE_INT_STATUS);

	if (unlikely(!status))
		return IRQ_NONE;

	int_mask = (priv->soc->rx_int | priv->soc->tx_int);
	if (likely(status & int_mask)) {
		if (likely(napi_schedule_prep(&priv->rx_napi))) {
			fe_int_disable(int_mask);
			__napi_schedule(&priv->rx_napi);
		}
	} else {
		fe_reg_w32(status, FE_REG_FE_INT_STATUS);
	}

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fe_poll_controller(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	u32 int_mask = priv->soc->tx_int | priv->soc->rx_int;

	fe_int_disable(int_mask);
	fe_handle_irq(dev->irq, dev);
	fe_int_enable(int_mask);
}
#endif
int fe_set_clock_cycle(struct fe_priv *priv)
{
	unsigned long sysclk = priv->sysclk;

	if (!sysclk)
		return -EINVAL;

	sysclk /= FE_US_CYC_CNT_DIVISOR;
	sysclk <<= FE_US_CYC_CNT_SHIFT;

	fe_w32((fe_r32(FE_FE_GLO_CFG) &
			~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) |
			sysclk,
			FE_FE_GLO_CFG);
	return 0;
}
void fe_fwd_config(struct fe_priv *priv)
{
	u32 fwd_cfg;

	fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG);

	/* disable jumbo frame */
	if (priv->flags & FE_FLAG_JUMBO_FRAME)
		fwd_cfg &= ~FE_GDM1_JMB_EN;

	/* set unicast/multicast/broadcast frame to cpu */
	fwd_cfg &= ~0xffff;

	fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG);
}
static void fe_rxcsum_config(bool enable)
{
	if (enable)
		fe_w32(fe_r32(FE_GDMA1_FWD_CFG) | (FE_GDM1_ICS_EN |
					FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
				FE_GDMA1_FWD_CFG);
	else
		fe_w32(fe_r32(FE_GDMA1_FWD_CFG) & ~(FE_GDM1_ICS_EN |
					FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
				FE_GDMA1_FWD_CFG);
}

static void fe_txcsum_config(bool enable)
{
	if (enable)
		fe_w32(fe_r32(FE_CDMA_CSG_CFG) | (FE_ICS_GEN_EN |
					FE_TCS_GEN_EN | FE_UCS_GEN_EN),
				FE_CDMA_CSG_CFG);
	else
		fe_w32(fe_r32(FE_CDMA_CSG_CFG) & ~(FE_ICS_GEN_EN |
					FE_TCS_GEN_EN | FE_UCS_GEN_EN),
				FE_CDMA_CSG_CFG);
}
void fe_csum_config(struct fe_priv *priv)
{
	struct net_device *dev = priv_netdev(priv);

	fe_txcsum_config((dev->features & NETIF_F_IP_CSUM));
	fe_rxcsum_config((dev->features & NETIF_F_RXCSUM));
}
static int fe_hw_init(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	int i, err;

	err = devm_request_irq(priv->device, dev->irq, fe_handle_irq, 0,
			dev_name(priv->device), dev);
	if (err)
		return err;

	if (priv->soc->set_mac)
		priv->soc->set_mac(priv, dev->dev_addr);
	else
		fe_hw_set_macaddr(priv, dev->dev_addr);

	/* disable delay interrupt */
	fe_reg_w32(0, FE_REG_DLY_INT_CFG);

	fe_int_disable(priv->soc->tx_int | priv->soc->rx_int);

	/* the frame engine inserts the VLAN tag according to the VIDX field
	 * in the tx descriptor; prefill the table with VIDs 0..15
	 */
	if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
		for (i = 0; i < 16; i += 2)
			fe_w32(((i + 1) << 16) + i,
					fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
					(i * 2));

	BUG_ON(!priv->soc->fwd_config);
	if (priv->soc->fwd_config(priv))
		netdev_err(dev, "unable to get clock\n");

	if (fe_reg_table[FE_REG_FE_RST_GL]) {
		fe_reg_w32(1, FE_REG_FE_RST_GL);
		fe_reg_w32(0, FE_REG_FE_RST_GL);
	}

	return 0;
}
static int fe_open(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	unsigned long flags;
	u32 val;
	int err;

	err = fe_init_dma(priv);
	if (err)
		goto err_out;

	spin_lock_irqsave(&priv->page_lock, flags);
	napi_enable(&priv->rx_napi);

	val = FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN;
	if (priv->flags & FE_FLAG_RX_2B_OFFSET)
		val |= FE_RX_2B_OFFSET;
	val |= priv->soc->pdma_glo_cfg;
	fe_reg_w32(val, FE_REG_PDMA_GLO_CFG);

	spin_unlock_irqrestore(&priv->page_lock, flags);

	if (priv->phy)
		priv->phy->start(priv);

	if (priv->soc->has_carrier && priv->soc->has_carrier(priv))
		netif_carrier_on(dev);

	netif_start_queue(dev);
	fe_int_enable(priv->soc->tx_int | priv->soc->rx_int);

	return 0;

err_out:
	fe_free_dma(priv);
	return err;
}
static int fe_stop(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	fe_int_disable(priv->soc->tx_int | priv->soc->rx_int);

	netif_tx_disable(dev);

	if (priv->phy)
		priv->phy->stop(priv);

	spin_lock_irqsave(&priv->page_lock, flags);
	napi_disable(&priv->rx_napi);

	fe_reg_w32(fe_reg_r32(FE_REG_PDMA_GLO_CFG) &
			~(FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN),
			FE_REG_PDMA_GLO_CFG);
	spin_unlock_irqrestore(&priv->page_lock, flags);

	/* wait for the dma engine to go idle */
	for (i = 0; i < 10; i++) {
		if (fe_reg_r32(FE_REG_PDMA_GLO_CFG) &
				(FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}

	fe_free_dma(priv);

	return 0;
}
static int __init fe_init(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct device_node *port;
	int err;

	BUG_ON(!priv->soc->reset_fe);
	priv->soc->reset_fe();

	if (priv->soc->switch_init)
		priv->soc->switch_init(priv);

	of_get_mac_address_mtd(priv->device->of_node, dev->dev_addr);
	/* if the mac address is invalid, use the default mac address */
	if (!is_valid_ether_addr(dev->dev_addr))
		memcpy(dev->dev_addr, priv->soc->mac, ETH_ALEN);

	err = fe_mdio_init(priv);
	if (err)
		return err;

	if (priv->soc->port_init)
		for_each_child_of_node(priv->device->of_node, port)
			if (of_device_is_compatible(port, "ralink,eth-port") &&
					of_device_is_available(port))
				priv->soc->port_init(priv, port);

	if (priv->phy) {
		err = priv->phy->connect(priv);
		if (err)
			goto err_phy_disconnect;
	}

	err = fe_hw_init(dev);
	if (err)
		goto err_phy_disconnect;

	if (priv->soc->switch_config)
		priv->soc->switch_config(priv);

	return 0;

err_phy_disconnect:
	if (priv->phy)
		priv->phy->disconnect(priv);
	fe_mdio_cleanup(priv);

	return err;
}
static void fe_uninit(struct net_device *dev)
{
	struct fe_priv *priv = netdev_priv(dev);

	if (priv->phy)
		priv->phy->disconnect(priv);
	fe_mdio_cleanup(priv);

	fe_reg_w32(0, FE_REG_FE_INT_ENABLE);
	free_irq(dev->irq, dev);
}
static int fe_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct fe_priv *priv = netdev_priv(dev);

	if (!priv->phy_dev)
		return -ENODEV;

	switch (cmd) {
	case SIOCETHTOOL:
		return phy_ethtool_ioctl(priv->phy_dev,
				(void *) ifr->ifr_data);
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phy_mii_ioctl(priv->phy_dev, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}
static int fe_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *priv = netdev_priv(dev);
	int frag_size, old_mtu;
	u32 fwd_cfg;

	if (!(priv->flags & FE_FLAG_JUMBO_FRAME))
		return eth_change_mtu(dev, new_mtu);

	frag_size = fe_max_frag_size(new_mtu);
	if (new_mtu < 68 || frag_size > PAGE_SIZE)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
		return 0;

	if (new_mtu <= ETH_DATA_LEN)
		priv->frag_size = fe_max_frag_size(ETH_DATA_LEN);
	else
		priv->frag_size = PAGE_SIZE;
	priv->rx_buf_size = fe_max_buf_size(priv->frag_size);

	if (!netif_running(dev))
		return 0;

	fe_stop(dev);
	fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG);
	if (new_mtu <= ETH_DATA_LEN) {
		fwd_cfg &= ~FE_GDM1_JMB_EN;
	} else {
		fwd_cfg &= ~(FE_GDM1_JMB_LEN_MASK << FE_GDM1_JMB_LEN_SHIFT);
		fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
				FE_GDM1_JMB_LEN_SHIFT) | FE_GDM1_JMB_EN;
	}
	fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG);

	return fe_open(dev);
}
static const struct net_device_ops fe_netdev_ops = {
	.ndo_init		= fe_init,
	.ndo_uninit		= fe_uninit,
	.ndo_open		= fe_open,
	.ndo_stop		= fe_stop,
	.ndo_start_xmit		= fe_start_xmit,
	.ndo_set_mac_address	= fe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= fe_do_ioctl,
	.ndo_change_mtu		= fe_change_mtu,
	.ndo_tx_timeout		= fe_tx_timeout,
	.ndo_get_stats64	= fe_get_stats64,
	.ndo_vlan_rx_add_vid	= fe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fe_poll_controller,
#endif
};
static void fe_reset_pending(struct fe_priv *priv)
{
	struct net_device *dev = priv->netdev;
	int err;

	rtnl_lock();
	fe_stop(dev);

	err = fe_open(dev);
	if (err)
		goto error;
	rtnl_unlock();

	return;
error:
	netif_alert(priv, ifup, dev,
			"Driver up/down cycle failed, closing device.\n");
	dev_close(dev);
	rtnl_unlock();
}
static const struct fe_work_t fe_work[] = {
	{FE_FLAG_RESET_PENDING, fe_reset_pending},
};
static void fe_pending_work(struct work_struct *work)
{
	struct fe_priv *priv = container_of(work, struct fe_priv, pending_work);
	int i;
	bool pending;

	for (i = 0; i < ARRAY_SIZE(fe_work); i++) {
		pending = test_and_clear_bit(fe_work[i].bitnr,
				priv->pending_flags);
		if (pending)
			fe_work[i].action(priv);
	}
}
static int fe_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	const struct of_device_id *match;
	struct fe_soc_data *soc;
	struct net_device *netdev;
	struct fe_priv *priv;
	struct clk *sysclk;
	int err, napi_weight;

	device_reset(&pdev->dev);

	match = of_match_device(of_fe_match, &pdev->dev);
	soc = (struct fe_soc_data *) match->data;

	if (soc->reg_table)
		fe_reg_table = soc->reg_table;
	else
		soc->reg_table = fe_reg_table;

	fe_base = devm_request_and_ioremap(&pdev->dev, res);
	if (!fe_base) {
		err = -EADDRNOTAVAIL;
		goto err_out;
	}

	netdev = alloc_etherdev(sizeof(*priv));
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_iounmap;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	netdev->netdev_ops = &fe_netdev_ops;
	netdev->base_addr = (unsigned long) fe_base;

	netdev->irq = platform_get_irq(pdev, 0);
	if (netdev->irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource found\n");
		err = -ENXIO;
		goto err_free_dev;
	}

	if (soc->init_data)
		soc->init_data(soc, netdev);
	/* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->vlan_features = netdev->hw_features &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	netdev->features |= netdev->hw_features;

	/* fake rx vlan filter func. to support tx vlan offload func */
	if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	priv = netdev_priv(netdev);
	spin_lock_init(&priv->page_lock);
	if (fe_reg_table[FE_REG_FE_COUNTER_BASE]) {
		priv->hw_stats = kzalloc(sizeof(*priv->hw_stats), GFP_KERNEL);
		if (!priv->hw_stats) {
			err = -ENOMEM;
			goto err_free_dev;
		}
		spin_lock_init(&priv->hw_stats->stats_lock);
	}

	sysclk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(sysclk))
		priv->sysclk = clk_get_rate(sysclk);

	priv->netdev = netdev;
	priv->device = &pdev->dev;
	priv->soc = soc;
	priv->msg_enable = netif_msg_init(fe_msg_level, FE_DEFAULT_MSG_ENABLE);
	priv->frag_size = fe_max_frag_size(ETH_DATA_LEN);
	priv->rx_buf_size = fe_max_buf_size(priv->frag_size);
	if (priv->frag_size > PAGE_SIZE) {
		dev_err(&pdev->dev, "error frag size.\n");
		err = -EINVAL;
		goto err_free_dev;
	}
	INIT_WORK(&priv->pending_work, fe_pending_work);

	napi_weight = 32;
	if (priv->flags & FE_FLAG_NAPI_WEIGHT)
		napi_weight = 64;
	netif_napi_add(netdev, &priv->rx_napi, fe_poll, napi_weight);
	fe_set_ethtool_ops(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "error bringing up device\n");
		goto err_free_dev;
	}

	platform_set_drvdata(pdev, netdev);

	netif_info(priv, probe, netdev, "ralink at 0x%08lx, irq %d\n",
			netdev->base_addr, netdev->irq);

	return 0;

err_free_dev:
	free_netdev(netdev);
err_iounmap:
	devm_iounmap(&pdev->dev, fe_base);
err_out:
	return err;
}
static int fe_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct fe_priv *priv = netdev_priv(dev);

	netif_napi_del(&priv->rx_napi);
	kfree(priv->hw_stats);

	cancel_work_sync(&priv->pending_work);

	unregister_netdev(dev);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static struct platform_driver fe_driver = {
	.probe = fe_probe,
	.remove = fe_remove,
	.driver = {
		.name = "ralink_soc_eth",
		.owner = THIS_MODULE,
		.of_match_table = of_fe_match,
	},
};
static int __init init_rtfe(void)
{
	int ret;

	ret = rtesw_init();
	if (ret)
		return ret;

	ret = platform_driver_register(&fe_driver);
	if (ret)
		rtesw_exit();

	return ret;
}

static void __exit exit_rtfe(void)
{
	platform_driver_unregister(&fe_driver);
	rtesw_exit();
}
module_init(init_rtfe);
module_exit(exit_rtfe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for Ralink SoC");
MODULE_VERSION(FE_DRV_VERSION);