/*
 * Cavium CNS3xxx Gigabit driver for Linux
 *
 * Copyright 2011 Gateworks Corporation
 * Chris Lang <clang@gateworks.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <mach/irqs.h>
#include <mach/platform.h>

#define DRV_NAME "cns3xxx_eth"

#define TX_DESC_RESERVE 20

#define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS)
#define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS)

#define RX_BUFFER_ALIGN 64
#define RX_BUFFER_ALIGN_MASK (~(RX_BUFFER_ALIGN - 1))

#define SKB_HEAD_ALIGN (((PAGE_SIZE - NET_SKB_PAD) % RX_BUFFER_ALIGN) + NET_SKB_PAD + NET_IP_ALIGN)
#define RX_SEGMENT_ALLOC_SIZE 2048
#define RX_SEGMENT_BUFSIZE (SKB_WITH_OVERHEAD(RX_SEGMENT_ALLOC_SIZE))
#define RX_SEGMENT_MRU (((RX_SEGMENT_BUFSIZE - SKB_HEAD_ALIGN) & RX_BUFFER_ALIGN_MASK) - NET_IP_ALIGN)
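
/*
 * Worked example of the sizing above (illustrative; assumes the common
 * values PAGE_SIZE = 4096, NET_SKB_PAD = 64 and NET_IP_ALIGN = 2):
 *
 *   SKB_HEAD_ALIGN = ((4096 - 64) % 64) + 64 + 2 = 66
 *
 * Each 2048-byte kmalloc'd segment gives up 66 bytes of headroom for the
 * future skb head, plus the skb_shared_info overhead accounted for by
 * SKB_WITH_OVERHEAD(). What remains, rounded down to the 64-byte boundary
 * the switch requires and minus NET_IP_ALIGN, is the per-segment MRU.
 */
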
#define NAPI_WEIGHT 64

#define MDIO_CMD_COMPLETE 0x00008000
#define MDIO_WRITE_COMMAND 0x00002000
#define MDIO_READ_COMMAND 0x00004000
#define MDIO_REG_OFFSET 8
#define MDIO_VALUE_OFFSET 16
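
/*
 * The phy_control register packs a complete MDIO transaction into one
 * 32-bit word: bits 4:0 select the PHY address, bits 12:8 the register
 * (MDIO_REG_OFFSET), bit 13 starts a write, bit 14 a read, bit 15 reports
 * (and is written back to clear) command completion, and bits 31:16 carry
 * the data (MDIO_VALUE_OFFSET). Illustrative only -- a read of MII_BMSR
 * (register 1) on PHY 2 would be composed as:
 *
 *   u32 cmd = MDIO_READ_COMMAND | (1 << MDIO_REG_OFFSET) | (2 & 0x1f);
 */
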
/* Descriptor Defines */
#define END_OF_RING 0x40000000
#define FIRST_SEGMENT 0x20000000
#define LAST_SEGMENT 0x10000000
#define FORCE_ROUTE 0x04000000
#define IP_CHECKSUM 0x00040000
#define UDP_CHECKSUM 0x00020000
#define TCP_CHECKSUM 0x00010000
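
/*
 * These bits live in the TX descriptor's config0 word. eth_xmit() ORs
 * UDP_CHECKSUM | TCP_CHECKSUM into every descriptor of a frame handed to
 * us with CHECKSUM_PARTIAL, so the switch inserts the L4 checksum on the
 * way out.
 */
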
/* Port Config Defines */
#define PORT_BP_ENABLE 0x00020000
#define PORT_DISABLE 0x00040000
#define PORT_LEARN_DIS 0x00080000
#define PORT_BLOCK_STATE 0x00100000
#define PORT_BLOCK_MODE 0x00200000

#define PROMISC_OFFSET 29

/* Global Config Defines */
#define UNKNOWN_VLAN_TO_CPU 0x02000000
#define ACCEPT_CRC_PACKET 0x00200000
#define CRC_STRIPPING 0x00100000

/* VLAN Config Defines */
#define NIC_MODE 0x00008000
#define VLAN_UNAWARE 0x00000001

/* DMA AUTO Poll Defines */
#define TS_POLL_EN 0x00000020
#define TS_SUSPEND 0x00000010
#define FS_POLL_EN 0x00000002
#define FS_SUSPEND 0x00000001
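
/*
 * The TS registers appear to drive the TX descriptor ring and the FS
 * registers the RX ring (the expansion of the two abbreviations is an
 * assumption from usage): enable_tx_dma()/enable_rx_dma() kick
 * ts_dma_ctrl0/fs_dma_ctrl0, and suspending both parks the DMA engines
 * while the rings are (re)initialized.
 */
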
/* DMA Ring Control Defines */
#define QUEUE_THRESHOLD 0x000000f0
#define CLR_FS_STATE 0x80000000

/* Interrupt Status Defines */
#define MAC0_STATUS_CHANGE 0x00004000
#define MAC1_STATUS_CHANGE 0x00008000
#define MAC2_STATUS_CHANGE 0x00010000
#define MAC0_RX_ERROR 0x00100000
#define MAC1_RX_ERROR 0x00200000
#define MAC2_RX_ERROR 0x00400000
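
/*
 * TX and RX DMA descriptor layouts, followed by the switch register map
 * and the driver's ring and port bookkeeping structures.
 */
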
	u32 sdp; /* segment data pointer */

	u32 sdl:16; /* segment data length */

	u32 rsv_1:3; /* reserved */

	u32 fp:1; /* force priority */

	u8 alignment[16]; /* for 32 byte alignment */

	u32 sdp; /* segment data pointer */

	u32 sdl:16; /* segment data length */

	u8 alignment[16]; /* for 32 byte alignment */

	u32 mac_pri_ctrl[5], __res;

	u32 fc_input_thrs, __res1[2];

	u32 mac_glob_cfg_ext, __res2[2];
	u32 dma_auto_poll_cfg;
	u32 delay_intr_cfg, __res3;

	u32 ts_desc_base_addr0, __res4;

	u32 fs_desc_base_addr0, __res5;

	u32 ts_desc_base_addr1, __res6;

	u32 fs_desc_base_addr1;

	u32 mac_counter0[13];

struct _tx_ring {
	struct tx_desc *desc;
	dma_addr_t phys_addr;
	struct tx_desc *cur_addr;
	struct sk_buff *buff_tab[TX_DESCS];
	unsigned int phys_tab[TX_DESCS];
	u32 free_index;
	u32 cur_index;
	u32 num_used;
	bool stopped;
};

struct _rx_ring {
	struct rx_desc *desc;
	dma_addr_t phys_addr;
	struct rx_desc *cur_addr;
	void *buff_tab[RX_DESCS];
	unsigned int phys_tab[RX_DESCS];
	u32 cur_index;
	u32 alloc_index;
	u32 alloc_count;
};

struct sw {
	struct resource *mem_res;
	struct switch_regs __iomem *regs;
	struct napi_struct napi;
	struct cns3xxx_plat_info *plat;
	struct _tx_ring tx_ring;
	struct _rx_ring rx_ring;
	struct sk_buff *frag_first;
	struct sk_buff *frag_last;
};

struct port {
	struct net_device *netdev;
	struct phy_device *phydev;
	struct sw *sw;
	int id;			/* logical port ID */
	int speed, duplex;
};

static spinlock_t mdio_lock;
static DEFINE_SPINLOCK(tx_lock);
static struct switch_regs __iomem *mdio_regs; /* mdio command and status only */
struct mii_bus *mdio_bus;
static int ports_open;
static struct port *switch_port_tab[4];
static struct dma_pool *rx_dma_pool;
static struct dma_pool *tx_dma_pool;
struct net_device *napi_dev;

static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
			    int write, u16 cmd)
{
	int cycles = 0;
	u32 temp;

	temp = __raw_readl(&mdio_regs->phy_control);
	temp |= MDIO_CMD_COMPLETE;
	__raw_writel(temp, &mdio_regs->phy_control);

	if (write) {
		temp = (cmd << MDIO_VALUE_OFFSET);
		temp |= MDIO_WRITE_COMMAND;
	} else {
		temp = MDIO_READ_COMMAND;
	}
	temp |= ((location & 0x1f) << MDIO_REG_OFFSET);
	temp |= (phy_id & 0x1f);

	__raw_writel(temp, &mdio_regs->phy_control);

	while (((__raw_readl(&mdio_regs->phy_control) & MDIO_CMD_COMPLETE) == 0)
			&& cycles < 5000) {
		udelay(1);
		cycles++;
	}

	if (cycles == 5000) {
		printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name,
		       phy_id);
		return -1;
	}

	temp = __raw_readl(&mdio_regs->phy_control);
	temp |= MDIO_CMD_COMPLETE;
	__raw_writel(temp, &mdio_regs->phy_control);

	if (write)
		return 0;
	return ((temp >> MDIO_VALUE_OFFSET) & 0xFFFF);
}

static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = cns3xxx_mdio_cmd(bus, phy_id, location, 0, 0);
	spin_unlock_irqrestore(&mdio_lock, flags);
	return ret;
}

static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location,
			      u16 val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = cns3xxx_mdio_cmd(bus, phy_id, location, 1, val);
	spin_unlock_irqrestore(&mdio_lock, flags);
	return ret;
}

static int cns3xxx_mdio_register(void)
{
	int err;

	if (!(mdio_bus = mdiobus_alloc()))
		return -ENOMEM;

	mdio_regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;

	spin_lock_init(&mdio_lock);
	mdio_bus->name = "CNS3xxx MII Bus";
	mdio_bus->read = &cns3xxx_mdio_read;
	mdio_bus->write = &cns3xxx_mdio_write;
	strcpy(mdio_bus->id, "0");

	if ((err = mdiobus_register(mdio_bus)))
		mdiobus_free(mdio_bus);
	return err;
}

static void cns3xxx_mdio_remove(void)
{
	mdiobus_unregister(mdio_bus);
	mdiobus_free(mdio_bus);
}

static void enable_tx_dma(struct sw *sw)
{
	__raw_writel(0x1, &sw->regs->ts_dma_ctrl0);
}

static void enable_rx_dma(struct sw *sw)
{
	__raw_writel(0x1, &sw->regs->fs_dma_ctrl0);
}

static void cns3xxx_adjust_link(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct phy_device *phydev = port->phydev;

	if (!phydev->link) {
		port->speed = 0;
		printk(KERN_INFO "%s: link down\n", dev->name);
		return;
	}

	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
		return;

	port->speed = phydev->speed;
	port->duplex = phydev->duplex;

	printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
	       dev->name, port->speed, port->duplex ? "full" : "half");
}

irqreturn_t eth_rx_irq(int irq, void *pdev)
{
	struct net_device *dev = pdev;
	struct sw *sw = netdev_priv(dev);

	if (likely(napi_schedule_prep(&sw->napi))) {
		disable_irq_nosync(IRQ_CNS3XXX_SW_R0RXC);
		__napi_schedule(&sw->napi);
	}
	return (IRQ_HANDLED);
}

irqreturn_t eth_stat_irq(int irq, void *pdev)
{
	struct net_device *dev = pdev;
	struct sw *sw = netdev_priv(dev);
	u32 cfg;
	u32 stat = __raw_readl(&sw->regs->intr_stat);

	__raw_writel(0xffffffff, &sw->regs->intr_stat);

	if (stat & MAC2_RX_ERROR)
		switch_port_tab[3]->netdev->stats.rx_dropped++;
	if (stat & MAC1_RX_ERROR)
		switch_port_tab[1]->netdev->stats.rx_dropped++;
	if (stat & MAC0_RX_ERROR)
		switch_port_tab[0]->netdev->stats.rx_dropped++;
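
	/*
	 * mac_cfg link status layout, as decoded below: bit 0 = link up,
	 * bits 3:2 = speed (0 = 10, 1 = 100, 2 = 1000 Mb/s), bit 4 = duplex.
	 * MAC2 is wired to logical port 3; mac_cfg[2] apparently belongs to
	 * the CPU-facing port.
	 */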
	if (stat & MAC0_STATUS_CHANGE) {
		cfg = __raw_readl(&sw->regs->mac_cfg[0]);
		switch_port_tab[0]->phydev->link = (cfg & 0x1);
		switch_port_tab[0]->phydev->duplex = ((cfg >> 4) & 0x1);
		if (((cfg >> 2) & 0x3) == 2)
			switch_port_tab[0]->phydev->speed = 1000;
		else if (((cfg >> 2) & 0x3) == 1)
			switch_port_tab[0]->phydev->speed = 100;
		else
			switch_port_tab[0]->phydev->speed = 10;
		cns3xxx_adjust_link(switch_port_tab[0]->netdev);
	}

	if (stat & MAC1_STATUS_CHANGE) {
		cfg = __raw_readl(&sw->regs->mac_cfg[1]);
		switch_port_tab[1]->phydev->link = (cfg & 0x1);
		switch_port_tab[1]->phydev->duplex = ((cfg >> 4) & 0x1);
		if (((cfg >> 2) & 0x3) == 2)
			switch_port_tab[1]->phydev->speed = 1000;
		else if (((cfg >> 2) & 0x3) == 1)
			switch_port_tab[1]->phydev->speed = 100;
		else
			switch_port_tab[1]->phydev->speed = 10;
		cns3xxx_adjust_link(switch_port_tab[1]->netdev);
	}

	if (stat & MAC2_STATUS_CHANGE) {
		cfg = __raw_readl(&sw->regs->mac_cfg[3]);
		switch_port_tab[3]->phydev->link = (cfg & 0x1);
		switch_port_tab[3]->phydev->duplex = ((cfg >> 4) & 0x1);
		if (((cfg >> 2) & 0x3) == 2)
			switch_port_tab[3]->phydev->speed = 1000;
		else if (((cfg >> 2) & 0x3) == 1)
			switch_port_tab[3]->phydev->speed = 100;
		else
			switch_port_tab[3]->phydev->speed = 10;
		cns3xxx_adjust_link(switch_port_tab[3]->netdev);
	}

	return (IRQ_HANDLED);
}
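
/*
 * Refill RX descriptors drained by eth_poll(). Buffers are raw kmalloc'd
 * segments rather than skbs; build_skb() wraps them on receive, which is
 * why the DMA mapping starts SKB_HEAD_ALIGN bytes in -- that headroom is
 * reserved for the eventual skb head.
 */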
static void cns3xxx_alloc_rx_buf(struct sw *sw, int received)
{
	struct _rx_ring *rx_ring = &sw->rx_ring;
	unsigned int i = rx_ring->alloc_index;
	struct rx_desc *desc = &(rx_ring)->desc[i];
	void *buf;
	unsigned int phys;

	for (received += rx_ring->alloc_count; received > 0; received--) {
		buf = kmalloc(RX_SEGMENT_ALLOC_SIZE, GFP_ATOMIC);
		if (!buf)
			break;

		phys = dma_map_single(NULL, buf + SKB_HEAD_ALIGN,
				      RX_SEGMENT_MRU, DMA_FROM_DEVICE);
		if (dma_mapping_error(NULL, phys)) {
			kfree(buf);
			break;
		}

		desc->sdl = RX_SEGMENT_MRU;
		desc->sdp = phys;

		/* put the new buffer on RX-free queue */
		rx_ring->buff_tab[i] = buf;
		rx_ring->phys_tab[i] = phys;
		if (i == RX_DESCS - 1) {
			i = 0;
			desc->config0 = END_OF_RING | FIRST_SEGMENT |
					LAST_SEGMENT | RX_SEGMENT_MRU;
			desc = &(rx_ring)->desc[i];
		} else {
			desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
					RX_SEGMENT_MRU;
			i++;
			desc++;
		}
	}

	rx_ring->alloc_count = received;
	rx_ring->alloc_index = i;
}

static void eth_check_num_used(struct _tx_ring *tx_ring)
{
	bool stop = false;
	int i;

	if (tx_ring->num_used >= TX_DESCS - TX_DESC_RESERVE)
		stop = true;

	if (tx_ring->stopped == stop)
		return;

	tx_ring->stopped = stop;
	for (i = 0; i < 4; i++) {
		struct port *port = switch_port_tab[i];
		struct net_device *dev;

		if (!port)
			continue;

		dev = port->netdev;
		if (stop)
			netif_stop_queue(dev);
		else
			netif_wake_queue(dev);
	}
}
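
/*
 * Reclaim TX descriptors the switch has finished with: free the skb,
 * unmap its DMA buffer and advance free_index. Runs under tx_lock from
 * the NAPI poll loop, then lets eth_check_num_used() re-wake any stopped
 * queues.
 */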
static void eth_complete_tx(struct sw *sw)
{
	struct _tx_ring *tx_ring = &sw->tx_ring;
	struct tx_desc *desc;
	int i;
	int index;
	int num_used = tx_ring->num_used;
	struct sk_buff *skb;

	index = tx_ring->free_index;
	desc = &(tx_ring)->desc[index];
	for (i = 0; i < num_used; i++) {
		if (!desc->cown)
			break;
		skb = tx_ring->buff_tab[index];
		tx_ring->buff_tab[index] = 0;
		if (skb)
			dev_kfree_skb_any(skb);
		dma_unmap_single(NULL, tx_ring->phys_tab[index],
				 desc->sdl, DMA_TO_DEVICE);
		if (++index == TX_DESCS) {
			index = 0;
			desc = &(tx_ring)->desc[index];
		} else {
			desc++;
		}
	}

	tx_ring->free_index = index;
	tx_ring->num_used -= i;
	eth_check_num_used(tx_ring);
}
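
/*
 * NAPI poll: walk the RX ring while descriptors have been handed back to
 * the CPU, turn completed segments into skbs (multi-segment frames are
 * chained through frag_first/frag_last), replenish the ring, and finally
 * reclaim TX descriptors under tx_lock. The RX interrupt was disabled in
 * eth_rx_irq() and is re-enabled here once a poll completes under budget.
 */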
static int eth_poll(struct napi_struct *napi, int budget)
{
	struct sw *sw = container_of(napi, struct sw, napi);
	struct _rx_ring *rx_ring = &sw->rx_ring;
	int received = 0;
	unsigned int length;
	unsigned int i = rx_ring->cur_index;
	struct rx_desc *desc = &(rx_ring)->desc[i];

	while (desc->cown) {
		struct sk_buff *skb;
		int reserve = SKB_HEAD_ALIGN;

		if (received >= budget)
			break;

		/* process received frame */
		dma_unmap_single(NULL, rx_ring->phys_tab[i],
				 RX_SEGMENT_MRU, DMA_FROM_DEVICE);

		skb = build_skb(rx_ring->buff_tab[i], 0);
		if (!skb)
			break;

		skb->dev = switch_port_tab[desc->sp]->netdev;

		length = desc->sdl;
		if (desc->fsd && !desc->lsd)
			length = RX_SEGMENT_MRU;

		if (!desc->fsd) {
			reserve -= NET_IP_ALIGN;
			if (!desc->lsd)
				length += NET_IP_ALIGN;
		}

		skb_reserve(skb, reserve);
		skb_put(skb, length);

		if (!sw->frag_first)
			sw->frag_first = skb;
		else {
			if (sw->frag_first == sw->frag_last)
				skb_frag_add_head(sw->frag_first, skb);
			else
				sw->frag_last->next = skb;
			sw->frag_first->len += skb->len;
			sw->frag_first->data_len += skb->len;
			sw->frag_first->truesize += skb->truesize;
		}
		sw->frag_last = skb;

		if (desc->lsd) {
			struct net_device *dev;

			skb = sw->frag_first;
			dev = skb->dev;
			skb->protocol = eth_type_trans(skb, dev);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += skb->len;

			/* RX Hardware checksum offload */
			skb->ip_summed = CHECKSUM_NONE;
			switch (desc->prot) {
			case 1:
			case 2:
			case 5:
			case 6:
			case 13:
			case 15:
				if (!desc->l4f) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
					napi_gro_receive(napi, skb);
					break;
				}
				/* fall through */
			default:
				netif_receive_skb(skb);
				break;
			}

			sw->frag_first = NULL;
			sw->frag_last = NULL;
		}

		received++;
		if (++i == RX_DESCS) {
			i = 0;
			desc = &(rx_ring)->desc[i];
		} else {
			desc++;
		}
	}

	if (received != budget) {
		napi_complete(napi);
		enable_irq(IRQ_CNS3XXX_SW_R0RXC);
	}

	cns3xxx_alloc_rx_buf(sw, received);

	rx_ring->cur_index = i;

	spin_lock_bh(&tx_lock);
	eth_complete_tx(sw);
	spin_unlock_bh(&tx_lock);

	return received;
}

static void eth_set_desc(struct _tx_ring *tx_ring, int index, int index_last,
			 void *data, int len, u32 config0, u32 pmap)
{
	struct tx_desc *tx_desc = &(tx_ring)->desc[index];
	unsigned int phys;

	phys = dma_map_single(NULL, data, len, DMA_TO_DEVICE);
	tx_desc->sdp = phys;
	tx_desc->pmap = pmap;
	tx_ring->phys_tab[index] = phys;

	config0 |= len;
	if (index == TX_DESCS - 1)
		config0 |= END_OF_RING;
	if (index == index_last)
		config0 |= LAST_SEGMENT;

	mb();
	tx_desc->config0 = config0;
}
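
/*
 * Queue a frame for transmit: one descriptor for the linear head plus one
 * per page fragment or fraglist skb. The head descriptor (index0) is
 * filled in last, and only it carries FIRST_SEGMENT, so the switch never
 * sees a partially built chain.
 */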
static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct sw *sw = port->sw;
	struct _tx_ring *tx_ring = &sw->tx_ring;
	struct sk_buff *skb1;
	char pmap = (1 << port->id);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int nr_desc = nr_frags;
	int index0, index, index_last;
	int len0;
	unsigned int i;
	u32 config0;

	if (pmap == 8)
		pmap = (1 << 4);

	skb_walk_frags(skb, skb1)
		nr_desc++;

	spin_lock_bh(&tx_lock);

	if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) {
		spin_unlock_bh(&tx_lock);
		return NETDEV_TX_BUSY;
	}

	index = index0 = tx_ring->cur_index;
	index_last = (index0 + nr_desc) % TX_DESCS;
	tx_ring->cur_index = (index_last + 1) % TX_DESCS;

	spin_unlock_bh(&tx_lock);

	config0 = FORCE_ROUTE;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		config0 |= UDP_CHECKSUM | TCP_CHECKSUM;

	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag;
		void *addr;

		index = (index + 1) % TX_DESCS;

		frag = &skb_shinfo(skb)->frags[i];
		addr = page_address(skb_frag_page(frag)) + frag->page_offset;

		eth_set_desc(tx_ring, index, index_last, addr, frag->size,
			     config0, pmap);
	}

	len0 = skb->len - skb->data_len;

	skb_walk_frags(skb, skb1) {
		index = (index + 1) % TX_DESCS;

		eth_set_desc(tx_ring, index, index_last, skb1->data, skb1->len,
			     config0, pmap);
	}

	tx_ring->buff_tab[index0] = skb;
	eth_set_desc(tx_ring, index0, index_last, skb->data, len0,
		     config0 | FIRST_SEGMENT, pmap);

	wmb();

	spin_lock(&tx_lock);
	tx_ring->num_used += nr_desc + 1;
	spin_unlock(&tx_lock);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	enable_tx_dma(sw);

	return NETDEV_TX_OK;
}

static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct port *port = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;
	return phy_mii_ioctl(port->phydev, req, cmd);
}

/* ethtool support */

static void cns3xxx_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_NAME);
	strcpy(info->bus_info, "internal");
}

static int cns3xxx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port *port = netdev_priv(dev);

	return phy_ethtool_gset(port->phydev, cmd);
}

static int cns3xxx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port *port = netdev_priv(dev);

	return phy_ethtool_sset(port->phydev, cmd);
}

static int cns3xxx_nway_reset(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);

	return phy_start_aneg(port->phydev);
}

static struct ethtool_ops cns3xxx_ethtool_ops = {
	.get_drvinfo = cns3xxx_get_drvinfo,
	.get_settings = cns3xxx_get_settings,
	.set_settings = cns3xxx_set_settings,
	.nway_reset = cns3xxx_nway_reset,
	.get_link = ethtool_op_get_link,
};
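
/*
 * Allocate both descriptor rings from 32-byte-aligned DMA pools, pre-post
 * an RX buffer per descriptor, and point the switch at the rings. The
 * current-descriptor pointer and the ring base register get the same
 * address because each ring starts at its first entry.
 */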
static int init_rings(struct sw *sw)
{
	int i;
	struct _rx_ring *rx_ring = &sw->rx_ring;
	struct _tx_ring *tx_ring = &sw->tx_ring;

	__raw_writel(0, &sw->regs->fs_dma_ctrl0);
	__raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
	__raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
	__raw_writel(CLR_FS_STATE | QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);

	__raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);

	if (!(rx_dma_pool = dma_pool_create(DRV_NAME, NULL,
					    RX_POOL_ALLOC_SIZE, 32, 0)))
		return -ENOMEM;

	if (!(rx_ring->desc = dma_pool_alloc(rx_dma_pool, GFP_KERNEL,
					     &rx_ring->phys_addr)))
		return -ENOMEM;
	memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE);

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct rx_desc *desc = &(rx_ring)->desc[i];
		void *buf;

		buf = kzalloc(RX_SEGMENT_ALLOC_SIZE, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		desc->sdl = RX_SEGMENT_MRU;
		if (i == (RX_DESCS - 1))
			desc->eor = 1;
		desc->fsd = 1;
		desc->lsd = 1;

		desc->sdp = dma_map_single(NULL, buf + SKB_HEAD_ALIGN,
					   RX_SEGMENT_MRU, DMA_FROM_DEVICE);
		if (dma_mapping_error(NULL, desc->sdp))
			return -EIO;

		rx_ring->buff_tab[i] = buf;
		rx_ring->phys_tab[i] = desc->sdp;
		desc->cown = 0;
	}
	__raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_ptr0);
	__raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_base_addr0);

	if (!(tx_dma_pool = dma_pool_create(DRV_NAME, NULL,
					    TX_POOL_ALLOC_SIZE, 32, 0)))
		return -ENOMEM;

	if (!(tx_ring->desc = dma_pool_alloc(tx_dma_pool, GFP_KERNEL,
					     &tx_ring->phys_addr)))
		return -ENOMEM;
	memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE);

	/* Setup TX buffers */
	for (i = 0; i < TX_DESCS; i++) {
		struct tx_desc *desc = &(tx_ring)->desc[i];

		tx_ring->buff_tab[i] = 0;

		if (i == (TX_DESCS - 1))
			desc->eor = 1;
		desc->cown = 1;
	}
	__raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0);
	__raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_base_addr0);

	return 0;
}

static void destroy_rings(struct sw *sw)
{
	int i;

	if (sw->rx_ring.desc) {
		for (i = 0; i < RX_DESCS; i++) {
			struct _rx_ring *rx_ring = &sw->rx_ring;
			struct rx_desc *desc = &(rx_ring)->desc[i];
			void *buf = sw->rx_ring.buff_tab[i];

			if (!buf)
				continue;

			dma_unmap_single(NULL, desc->sdp, RX_SEGMENT_MRU,
					 DMA_FROM_DEVICE);
			kfree(buf);
		}
		dma_pool_free(rx_dma_pool, sw->rx_ring.desc, sw->rx_ring.phys_addr);
		dma_pool_destroy(rx_dma_pool);
		rx_dma_pool = 0;
		sw->rx_ring.desc = 0;
	}
	if (sw->tx_ring.desc) {
		for (i = 0; i < TX_DESCS; i++) {
			struct _tx_ring *tx_ring = &sw->tx_ring;
			struct tx_desc *desc = &(tx_ring)->desc[i];
			struct sk_buff *skb = sw->tx_ring.buff_tab[i];

			if (skb) {
				dma_unmap_single(NULL, desc->sdp,
						 skb->len, DMA_TO_DEVICE);
				dev_kfree_skb(skb);
			}
		}
		dma_pool_free(tx_dma_pool, sw->tx_ring.desc, sw->tx_ring.phys_addr);
		dma_pool_destroy(tx_dma_pool);
		tx_dma_pool = 0;
		sw->tx_ring.desc = 0;
	}
}

static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct sw *sw = port->sw;
	u32 temp;

	port->speed = 0;	/* force "link up" message */
	phy_start(port->phydev);

	netif_start_queue(dev);

	if (!ports_open) {
		request_irq(IRQ_CNS3XXX_SW_R0RXC, eth_rx_irq, IRQF_SHARED, "gig_switch", napi_dev);
		request_irq(IRQ_CNS3XXX_SW_STATUS, eth_stat_irq, IRQF_SHARED, "gig_stat", napi_dev);
		napi_enable(&sw->napi);
		netif_start_queue(napi_dev);

		__raw_writel(~(MAC0_STATUS_CHANGE | MAC1_STATUS_CHANGE | MAC2_STATUS_CHANGE |
			       MAC0_RX_ERROR | MAC1_RX_ERROR | MAC2_RX_ERROR), &sw->regs->intr_mask);
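
		/*
		 * mac_cfg[2] is (apparently) the CPU-facing switch port --
		 * the three physical MACs use mac_cfg[0], [1] and [3] -- so
		 * bring it up once, when the first port opens, before
		 * unsuspending the DMA engines.
		 */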
		temp = __raw_readl(&sw->regs->mac_cfg[2]);
		temp &= ~(PORT_DISABLE);
		__raw_writel(temp, &sw->regs->mac_cfg[2]);

		temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
		temp &= ~(TS_SUSPEND | FS_SUSPEND);
		__raw_writel(temp, &sw->regs->dma_auto_poll_cfg);

		enable_rx_dma(sw);
	}

	temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
	temp &= ~(PORT_DISABLE);
	__raw_writel(temp, &sw->regs->mac_cfg[port->id]);

	ports_open++;
	netif_carrier_on(dev);

	return 0;
}

static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct sw *sw = port->sw;
	u32 temp;

	ports_open--;

	temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
	temp |= (PORT_DISABLE);
	__raw_writel(temp, &sw->regs->mac_cfg[port->id]);

	netif_stop_queue(dev);

	phy_stop(port->phydev);

	if (!ports_open) {
		disable_irq(IRQ_CNS3XXX_SW_R0RXC);
		free_irq(IRQ_CNS3XXX_SW_R0RXC, napi_dev);
		disable_irq(IRQ_CNS3XXX_SW_STATUS);
		free_irq(IRQ_CNS3XXX_SW_STATUS, napi_dev);
		napi_disable(&sw->napi);
		netif_stop_queue(napi_dev);

		temp = __raw_readl(&sw->regs->mac_cfg[2]);
		temp |= (PORT_DISABLE);
		__raw_writel(temp, &sw->regs->mac_cfg[2]);

		__raw_writel(TS_SUSPEND | FS_SUSPEND,
			     &sw->regs->dma_auto_poll_cfg);
	}

	netif_carrier_off(dev);
	return 0;
}

static void eth_rx_mode(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct sw *sw = port->sw;
	u32 temp;

	temp = __raw_readl(&sw->regs->mac_glob_cfg);

	if (dev->flags & IFF_PROMISC) {
		if (port->id == 3)
			temp |= ((1 << 2) << PROMISC_OFFSET);
		else
			temp |= ((1 << port->id) << PROMISC_OFFSET);
	} else {
		if (port->id == 3)
			temp &= ~((1 << 2) << PROMISC_OFFSET);
		else
			temp &= ~((1 << port->id) << PROMISC_OFFSET);
	}
	__raw_writel(temp, &sw->regs->mac_glob_cfg);
}

static int eth_set_mac(struct net_device *netdev, void *p)
{
	struct port *port = netdev_priv(netdev);
	struct sw *sw = port->sw;
	struct sockaddr *addr = p;
	int cycles = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
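
	/*
	 * ARL (address resolution/lookup) entries are programmed by loading
	 * the entry into arl_ctrl[0..2], kicking the command with bit 19 of
	 * arl_vlan_cmd, and polling bit 21 for completion -- first below to
	 * invalidate the entry for the old MAC, then to install the new one.
	 */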
	/* Invalidate old ARL Entry */
	if (port->id == 3)
		__raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
	else
		__raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
	__raw_writel(((netdev->dev_addr[0] << 24) | (netdev->dev_addr[1] << 16) |
		      (netdev->dev_addr[2] << 8) | (netdev->dev_addr[3])),
		     &sw->regs->arl_ctrl[1]);

	__raw_writel(((netdev->dev_addr[4] << 24) | (netdev->dev_addr[5] << 16) |
		      (1 << 1)),
		     &sw->regs->arl_ctrl[2]);
	__raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);

	while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
			&& cycles < 5000) {
		udelay(1);
		cycles++;
	}
	cycles = 0;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	if (port->id == 3)
		__raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
	else
		__raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
	__raw_writel(((addr->sa_data[0] << 24) | (addr->sa_data[1] << 16) |
		      (addr->sa_data[2] << 8) | (addr->sa_data[3])),
		     &sw->regs->arl_ctrl[1]);

	__raw_writel(((addr->sa_data[4] << 24) | (addr->sa_data[5] << 16) |
		      (7 << 4) | (1 << 1)), &sw->regs->arl_ctrl[2]);
	__raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);

	while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
			&& cycles < 5000) {
		udelay(1);
		cycles++;
	}

	return 0;
}
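
/*
 * MAX_MTU is defined elsewhere in the driver (not shown here); it is
 * presumably sized against the 9600-byte maximum frame length programmed
 * into phy_auto_addr in eth_init_one() below.
 */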
static int cns3xxx_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu > MAX_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops cns3xxx_netdev_ops = {
	.ndo_open = eth_open,
	.ndo_stop = eth_close,
	.ndo_start_xmit = eth_xmit,
	.ndo_set_rx_mode = eth_rx_mode,
	.ndo_do_ioctl = eth_ioctl,
	.ndo_change_mtu = cns3xxx_change_mtu,
	.ndo_set_mac_address = eth_set_mac,
	.ndo_validate_addr = eth_validate_addr,
};

static int eth_init_one(struct platform_device *pdev)
{
	int i;
	struct port *port;
	struct sw *sw;
	struct net_device *dev;
	struct cns3xxx_plat_info *plat = pdev->dev.platform_data;
	u32 regs_phys;
	char phy_id[MII_BUS_ID_SIZE + 3];
	int err = 0;
	u32 temp;

	if (!(napi_dev = alloc_etherdev(sizeof(struct sw))))
		return -ENOMEM;
	strcpy(napi_dev->name, "switch%d");
	napi_dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST;

	SET_NETDEV_DEV(napi_dev, &pdev->dev);
	sw = netdev_priv(napi_dev);
	memset(sw, 0, sizeof(struct sw));
	sw->regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
	regs_phys = CNS3XXX_SWITCH_BASE;
	sw->mem_res = request_mem_region(regs_phys, REGS_SIZE, napi_dev->name);
	if (!sw->mem_res) {
		err = -EBUSY;
		goto err_free;
	}

	temp = __raw_readl(&sw->regs->phy_auto_addr);
	temp |= (3 << 30); /* maximum frame length: 9600 bytes */
	__raw_writel(temp, &sw->regs->phy_auto_addr);

	for (i = 0; i < 4; i++) {
		temp = __raw_readl(&sw->regs->mac_cfg[i]);
		temp |= (PORT_DISABLE);
		__raw_writel(temp, &sw->regs->mac_cfg[i]);
	}

	temp = PORT_DISABLE;
	__raw_writel(temp, &sw->regs->mac_cfg[2]);

	temp = __raw_readl(&sw->regs->vlan_cfg);
	temp |= NIC_MODE | VLAN_UNAWARE;
	__raw_writel(temp, &sw->regs->vlan_cfg);

	__raw_writel(UNKNOWN_VLAN_TO_CPU |
		     CRC_STRIPPING, &sw->regs->mac_glob_cfg);

	if ((err = init_rings(sw)) != 0) {
		destroy_rings(sw);
		err = -ENOMEM;
		goto err_free;
	}
	platform_set_drvdata(pdev, napi_dev);

	netif_napi_add(napi_dev, &sw->napi, eth_poll, NAPI_WEIGHT);

	for (i = 0; i < 3; i++) {
		if (!(plat->ports & (1 << i))) {
			continue;
		}

		if (!(dev = alloc_etherdev(sizeof(struct port)))) {
			goto free_ports;
		}

		port = netdev_priv(dev);
		port->netdev = dev;
		if (i == 2)
			port->id = 3;
		else
			port->id = i;
		port->sw = sw;

		temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
		temp |= (PORT_DISABLE | PORT_BLOCK_STATE | PORT_LEARN_DIS);
		__raw_writel(temp, &sw->regs->mac_cfg[port->id]);

		dev->netdev_ops = &cns3xxx_netdev_ops;
		dev->ethtool_ops = &cns3xxx_ethtool_ops;
		dev->tx_queue_len = 1000;
		dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST;

		switch_port_tab[port->id] = port;
		memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN);

		snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy[i]);
		port->phydev = phy_connect(dev, phy_id, &cns3xxx_adjust_link, 0,
					   PHY_INTERFACE_MODE_RGMII);
		if ((err = IS_ERR(port->phydev))) {
			switch_port_tab[port->id] = 0;
			free_netdev(dev);
			goto free_ports;
		}

		port->phydev->irq = PHY_IGNORE_INTERRUPT;

		if ((err = register_netdev(dev))) {
			phy_disconnect(port->phydev);
			switch_port_tab[port->id] = 0;
			free_netdev(dev);
			goto free_ports;
		}

		printk(KERN_INFO "%s: RGMII PHY %i on cns3xxx Switch\n", dev->name, plat->phy[i]);
		netif_carrier_off(dev);
	}

	return 0;

free_ports:
	err = -ENOMEM;
	for (--i; i >= 0; i--) {
		if (switch_port_tab[i]) {
			port = switch_port_tab[i];
			dev = port->netdev;
			unregister_netdev(dev);
			phy_disconnect(port->phydev);
			switch_port_tab[i] = 0;
			free_netdev(dev);
		}
	}
err_free:
	free_netdev(napi_dev);
	return err;
}

static int eth_remove_one(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sw *sw = netdev_priv(dev);
	int i;

	destroy_rings(sw);

	for (i = 3; i >= 0; i--) {
		if (switch_port_tab[i]) {
			struct port *port = switch_port_tab[i];
			struct net_device *dev = port->netdev;

			unregister_netdev(dev);
			phy_disconnect(port->phydev);
			switch_port_tab[i] = 0;
			free_netdev(dev);
		}
	}

	release_resource(sw->mem_res);
	free_netdev(napi_dev);
	return 0;
}

static struct platform_driver cns3xxx_eth_driver = {
	.driver.name = DRV_NAME,
	.probe = eth_init_one,
	.remove = eth_remove_one,
};

static int __init eth_init_module(void)
{
	int err;

	if ((err = cns3xxx_mdio_register()))
		return err;
	return platform_driver_register(&cns3xxx_eth_driver);
}

static void __exit eth_cleanup_module(void)
{
	platform_driver_unregister(&cns3xxx_eth_driver);
	cns3xxx_mdio_remove();
}

module_init(eth_init_module);
module_exit(eth_cleanup_module);

MODULE_AUTHOR("Chris Lang");
MODULE_DESCRIPTION("Cavium CNS3xxx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:cns3xxx_eth");