2 +++ b/drivers/net/ethernet/cavium/cns3xxx_eth.c
5 + * Cavium CNS3xxx Gigabit driver for Linux
7 + * Copyright 2011 Gateworks Corporation
8 + * Chris Lang <clang@gateworks.com>
10 + * This program is free software; you can redistribute it and/or modify it
11 + * under the terms of version 2 of the GNU General Public License
12 + * as published by the Free Software Foundation.
16 +#include <linux/delay.h>
17 +#include <linux/module.h>
18 +#include <linux/dma-mapping.h>
19 +#include <linux/dmapool.h>
20 +#include <linux/etherdevice.h>
21 +#include <linux/interrupt.h>
22 +#include <linux/io.h>
23 +#include <linux/kernel.h>
24 +#include <linux/phy.h>
25 +#include <linux/platform_device.h>
26 +#include <linux/skbuff.h>
27 +#include <mach/irqs.h>
28 +#include <mach/platform.h>
30 +#define DRV_NAME "cns3xxx_eth"
34 +#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
36 +#define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS)
37 +#define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS)
38 +#define REGS_SIZE 336
41 +#define NAPI_WEIGHT 64
44 +#define MDIO_CMD_COMPLETE 0x00008000
45 +#define MDIO_WRITE_COMMAND 0x00002000
46 +#define MDIO_READ_COMMAND 0x00004000
47 +#define MDIO_REG_OFFSET 8
48 +#define MDIO_VALUE_OFFSET 16
50 +/* Descriptor Defines */
51 +#define END_OF_RING 0x40000000
52 +#define FIRST_SEGMENT 0x20000000
53 +#define LAST_SEGMENT 0x10000000
54 +#define FORCE_ROUTE 0x04000000
55 +#define IP_CHECKSUM 0x00040000
56 +#define UDP_CHECKSUM 0x00020000
57 +#define TCP_CHECKSUM 0x00010000
59 +/* Port Config Defines */
60 +#define PORT_DISABLE 0x00040000
61 +#define PROMISC_OFFSET 29
63 +/* Global Config Defines */
64 +#define UNKNOWN_VLAN_TO_CPU 0x02000000
65 +#define ACCEPT_CRC_PACKET 0x00200000
66 +#define CRC_STRIPPING 0x00100000
68 +/* VLAN Config Defines */
69 +#define NIC_MODE 0x00008000
70 +#define VLAN_UNAWARE 0x00000001
72 +/* DMA AUTO Poll Defines */
73 +#define TS_POLL_EN 0x00000020
74 +#define TS_SUSPEND 0x00000010
75 +#define FS_POLL_EN 0x00000002
76 +#define FS_SUSPEND 0x00000001
78 +/* DMA Ring Control Defines */
79 +#define QUEUE_THRESHOLD 0x000000f0
80 +#define CLR_FS_STATE 0x80000000
84 + u32 sdp; /* segment data pointer */
88 + u32 sdl:16; /* segment data length */
92 + u32 rsv_1:3; /* reserve */
94 + u32 fp:1; /* force priority */
134 + u8 alignment[16]; /* for 32 byte */
139 + u32 sdp; /* segment data pointer */
143 + u32 sdl:16; /* segment data length */
188 + u8 alignment[16]; /* for 32 byte alignment */
191 +struct switch_regs {
196 + u32 mac_pri_ctrl[5], __res;
199 + u32 prio_etype_udp;
200 + u32 prio_ipdscp[8];
205 + u32 mc_fc_glob_thrs;
218 + u32 fc_input_thrs, __res1[2];
220 + u32 mac_glob_cfg_ext, __res2[2];
222 + u32 dma_auto_poll_cfg;
223 + u32 delay_intr_cfg, __res3;
226 + u32 ts_desc_base_addr0, __res4;
229 + u32 fs_desc_base_addr0, __res5;
232 + u32 ts_desc_base_addr1, __res6;
235 + u32 fs_desc_base_addr1;
239 + struct tx_desc *desc;
240 + dma_addr_t phys_addr;
241 + struct tx_desc *cur_addr;
242 + struct sk_buff *buff_tab[TX_DESCS];
251 + struct rx_desc *desc;
252 + dma_addr_t phys_addr;
253 + struct rx_desc *cur_addr;
254 + struct sk_buff *buff_tab[RX_DESCS];
261 + struct resource *mem_res;
262 + struct switch_regs __iomem *regs;
263 + struct napi_struct napi;
264 + struct cns3xxx_plat_info *plat;
265 + struct _tx_ring *tx_ring;
266 + struct _rx_ring *rx_ring;
271 + struct net_device *netdev;
272 + struct phy_device *phydev;
274 + int id; /* logical port ID */
279 +static spinlock_t mdio_lock;
280 +static spinlock_t tx_lock;
281 +static spinlock_t stat_lock;
282 +static struct switch_regs __iomem *mdio_regs; /* mdio command and status only */
283 +struct mii_bus *mdio_bus;
284 +static int ports_open;
285 +static struct port *switch_port_tab[3];
286 +static struct dma_pool *rx_dma_pool;
287 +static struct dma_pool *tx_dma_pool;
288 +struct net_device *napi_dev;
290 +static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
291 + int write, u16 cmd)
296 + temp = __raw_readl(&mdio_regs->phy_control);
297 + temp |= MDIO_CMD_COMPLETE;
298 + __raw_writel(temp, &mdio_regs->phy_control);
302 + temp = (cmd << MDIO_VALUE_OFFSET);
303 + temp |= MDIO_WRITE_COMMAND;
305 + temp = MDIO_READ_COMMAND;
307 + temp |= ((location & 0x1f) << MDIO_REG_OFFSET);
308 + temp |= (phy_id & 0x1f);
310 + __raw_writel(temp, &mdio_regs->phy_control);
312 + while (((__raw_readl(&mdio_regs->phy_control) & MDIO_CMD_COMPLETE) == 0)
313 + && cycles < 5000) {
318 + if (cycles == 5000) {
319 + printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name,
324 + temp = __raw_readl(&mdio_regs->phy_control);
325 + temp |= MDIO_CMD_COMPLETE;
326 + __raw_writel(temp, &mdio_regs->phy_control);
331 + return ((temp >> MDIO_VALUE_OFFSET) & 0xFFFF);
334 +static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location)
336 + unsigned long flags;
339 + spin_lock_irqsave(&mdio_lock, flags);
340 + ret = cns3xxx_mdio_cmd(bus, phy_id, location, 0, 0);
341 + spin_unlock_irqrestore(&mdio_lock, flags);
345 +static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location,
348 + unsigned long flags;
351 + spin_lock_irqsave(&mdio_lock, flags);
352 + ret = cns3xxx_mdio_cmd(bus, phy_id, location, 1, val);
353 + spin_unlock_irqrestore(&mdio_lock, flags);
357 +static int cns3xxx_mdio_register(void)
361 + if (!(mdio_bus = mdiobus_alloc()))
364 + mdio_regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
366 + spin_lock_init(&mdio_lock);
367 + mdio_bus->name = "CNS3xxx MII Bus";
368 + mdio_bus->read = &cns3xxx_mdio_read;
369 + mdio_bus->write = &cns3xxx_mdio_write;
370 + strcpy(mdio_bus->id, "0");
372 + if ((err = mdiobus_register(mdio_bus)))
373 + mdiobus_free(mdio_bus);
377 +static void cns3xxx_mdio_remove(void)
379 + mdiobus_unregister(mdio_bus);
380 + mdiobus_free(mdio_bus);
383 +static void cns3xxx_adjust_link(struct net_device *dev)
385 + struct port *port = netdev_priv(dev);
386 + struct phy_device *phydev = port->phydev;
388 + if (!phydev->link) {
391 + printk(KERN_INFO "%s: link down\n", dev->name);
396 + if (port->speed == phydev->speed && port->duplex == phydev->duplex)
399 + port->speed = phydev->speed;
400 + port->duplex = phydev->duplex;
402 + printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
403 + dev->name, port->speed, port->duplex ? "full" : "half");
406 +irqreturn_t eth_rx_irq(int irq, void *pdev)
408 + struct net_device *dev = pdev;
409 + struct sw *sw = netdev_priv(dev);
410 + if (likely(napi_schedule_prep(&sw->napi))) {
411 + disable_irq_nosync(IRQ_CNS3XXX_SW_R0RXC);
412 + __napi_schedule(&sw->napi);
414 + return (IRQ_HANDLED);
417 +static void cns3xxx_alloc_rx_buf(struct sw *sw, int received)
419 + struct _rx_ring *rx_ring = sw->rx_ring;
420 + unsigned int i = rx_ring->alloc_index;
421 + struct rx_desc *desc;
422 + struct sk_buff *skb;
425 + rx_ring->alloc_count += received;
427 + for (received = rx_ring->alloc_count; received > 0; received--) {
428 + desc = &(rx_ring)->desc[i];
430 + if ((skb = dev_alloc_skb(mtu))) {
431 + if (SKB_DMA_REALIGN)
432 + skb_reserve(skb, SKB_DMA_REALIGN);
433 + skb_reserve(skb, NET_IP_ALIGN);
434 + desc->sdp = dma_map_single(NULL, skb->data,
435 + mtu, DMA_FROM_DEVICE);
436 + if (dma_mapping_error(NULL, desc->sdp)) {
437 + dev_kfree_skb(skb);
438 + /* Failed to map, better luck next time */
442 + /* Failed to allocate skb, try again next time */
446 + /* put the new buffer on RX-free queue */
447 + rx_ring->buff_tab[i] = skb;
449 + if (++i == RX_DESCS) {
451 + desc->config0 = END_OF_RING | FIRST_SEGMENT |
452 + LAST_SEGMENT | mtu;
454 + desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | mtu;
458 + rx_ring->alloc_count = received;
459 + rx_ring->alloc_index = i;
462 +static void update_tx_stats(struct sw *sw)
464 + struct _tx_ring *tx_ring = sw->tx_ring;
465 + struct tx_desc *desc;
466 + struct tx_desc *next_desc;
467 + struct sk_buff *skb;
472 + spin_lock_bh(&stat_lock);
474 + num_count = tx_ring->num_count;
477 + spin_unlock_bh(&stat_lock);
481 + index = tx_ring->count_index;
482 + desc = &(tx_ring)->desc[index];
483 + for (i = 0; i < num_count; i++) {
484 + skb = tx_ring->buff_tab[index];
486 + tx_ring->buff_tab[index] = 0;
487 + if (unlikely(++index == TX_DESCS)) index = 0;
488 + next_desc = &(tx_ring)->desc[index];
489 + prefetch(next_desc + 4);
491 + skb->dev->stats.tx_packets++;
492 + skb->dev->stats.tx_bytes += skb->len;
493 + dev_kfree_skb_any(skb);
500 + tx_ring->num_count -= i;
501 + tx_ring->count_index = index;
503 + spin_unlock_bh(&stat_lock);
506 +static void clear_tx_desc(struct sw *sw)
508 + struct _tx_ring *tx_ring = sw->tx_ring;
509 + struct tx_desc *desc;
510 + struct tx_desc *next_desc;
513 + int num_used = tx_ring->num_used - tx_ring->num_count;
515 + if (num_used < (TX_DESCS >> 1))
518 + index = tx_ring->free_index;
519 + desc = &(tx_ring)->desc[index];
520 + for (i = 0; i < num_used; i++) {
522 + if (unlikely(++index == TX_DESCS)) index = 0;
523 + next_desc = &(tx_ring)->desc[index];
524 + prefetch(next_desc);
525 + prefetch(next_desc + 4);
526 + if (likely(desc->sdp))
527 + dma_unmap_single(NULL, desc->sdp,
528 + desc->sdl, DMA_TO_DEVICE);
534 + tx_ring->free_index = index;
535 + tx_ring->num_used -= i;
538 +static int eth_poll(struct napi_struct *napi, int budget)
540 + struct sw *sw = container_of(napi, struct sw, napi);
541 + struct net_device *dev;
542 + struct _rx_ring *rx_ring = sw->rx_ring;
544 + unsigned int length;
545 + unsigned int i = rx_ring->cur_index;
546 + struct rx_desc *next_desc;
547 + struct rx_desc *desc = &(rx_ring)->desc[i];
550 + while (desc->cown) {
551 + struct sk_buff *skb;
553 + if (received >= budget)
556 + skb = rx_ring->buff_tab[i];
558 + if (++i == RX_DESCS) i = 0;
559 + next_desc = &(rx_ring)->desc[i];
560 + prefetch(next_desc);
562 + port_id = desc->sp;
564 + dev = switch_port_tab[2]->netdev;
566 + dev = switch_port_tab[port_id]->netdev;
568 + length = desc->sdl;
569 + /* process received frame */
570 + dma_unmap_single(&dev->dev, desc->sdp,
571 + length, DMA_FROM_DEVICE);
573 + skb_put(skb, length);
576 + skb->protocol = eth_type_trans(skb, dev);
578 + dev->stats.rx_packets++;
579 + dev->stats.rx_bytes += length;
581 + switch (desc->prot) {
589 + skb->ip_summed = CHECKSUM_NONE;
591 + skb->ip_summed = CHECKSUM_UNNECESSARY;
594 + skb->ip_summed = CHECKSUM_NONE;
598 + napi_gro_receive(napi, skb);
604 + cns3xxx_alloc_rx_buf(sw, received);
605 + rx_ring->cur_index = i;
607 + if (received != budget) {
608 + napi_complete(napi);
609 + enable_irq(IRQ_CNS3XXX_SW_R0RXC);
615 +static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
617 + struct port *port = netdev_priv(dev);
618 + struct sw *sw = port->sw;
619 + struct _tx_ring *tx_ring = sw->tx_ring;
620 + struct tx_desc *tx_desc;
622 + int len = skb->len;
623 + char pmap = (1 << port->id);
628 + if (unlikely(len > sw->mtu)) {
629 + dev_kfree_skb(skb);
630 + dev->stats.tx_errors++;
631 + return NETDEV_TX_OK;
634 + update_tx_stats(sw);
636 + spin_lock_bh(&tx_lock);
640 + if (unlikely(tx_ring->num_used == TX_DESCS)) {
641 + spin_unlock_bh(&tx_lock);
642 + return NETDEV_TX_BUSY;
645 + index = tx_ring->cur_index;
647 + if (unlikely(++tx_ring->cur_index == TX_DESCS))
648 + tx_ring->cur_index = 0;
650 + tx_ring->num_used++;
651 + tx_ring->num_count++;
653 + spin_unlock_bh(&tx_lock);
655 + tx_desc = &(tx_ring)->desc[index];
657 + tx_desc->sdp = dma_map_single(NULL, skb->data, len,
660 + if (dma_mapping_error(NULL, tx_desc->sdp)) {
661 + dev_kfree_skb(skb);
662 + dev->stats.tx_errors++;
663 + return NETDEV_TX_OK;
666 + tx_desc->pmap = pmap;
667 + tx_ring->buff_tab[index] = skb;
669 + if (index == TX_DESCS - 1) {
670 + tx_desc->config0 = END_OF_RING | FIRST_SEGMENT | LAST_SEGMENT |
671 + FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
672 + TCP_CHECKSUM | len;
674 + tx_desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
675 + FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
676 + TCP_CHECKSUM | len;
679 + return NETDEV_TX_OK;
682 +static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
684 + struct port *port = netdev_priv(dev);
686 + if (!netif_running(dev))
688 + return phy_mii_ioctl(port->phydev, req, cmd);
691 +/* ethtool support */
693 +static void cns3xxx_get_drvinfo(struct net_device *dev,
694 + struct ethtool_drvinfo *info)
696 + strcpy(info->driver, DRV_NAME);
697 + strcpy(info->bus_info, "internal");
700 +static int cns3xxx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
702 + struct port *port = netdev_priv(dev);
703 + return phy_ethtool_gset(port->phydev, cmd);
706 +static int cns3xxx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
708 + struct port *port = netdev_priv(dev);
709 + return phy_ethtool_sset(port->phydev, cmd);
712 +static int cns3xxx_nway_reset(struct net_device *dev)
714 + struct port *port = netdev_priv(dev);
715 + return phy_start_aneg(port->phydev);
718 +static struct ethtool_ops cns3xxx_ethtool_ops = {
719 + .get_drvinfo = cns3xxx_get_drvinfo,
720 + .get_settings = cns3xxx_get_settings,
721 + .set_settings = cns3xxx_set_settings,
722 + .nway_reset = cns3xxx_nway_reset,
723 + .get_link = ethtool_op_get_link,
727 +static int init_rings(struct sw *sw)
730 + struct _rx_ring *rx_ring = sw->rx_ring;
731 + struct _tx_ring *tx_ring = sw->tx_ring;
733 + __raw_writel(0, &sw->regs->fs_dma_ctrl0);
734 + __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
735 + __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
736 + __raw_writel(CLR_FS_STATE | QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
738 + __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
740 + if (!(rx_dma_pool = dma_pool_create(DRV_NAME, NULL,
741 + RX_POOL_ALLOC_SIZE, 32, 0)))
744 + if (!(rx_ring->desc = dma_pool_alloc(rx_dma_pool, GFP_KERNEL,
745 + &rx_ring->phys_addr)))
747 + memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE);
749 + /* Setup RX buffers */
750 + for (i = 0; i < RX_DESCS; i++) {
751 + struct rx_desc *desc = &(rx_ring)->desc[i];
752 + struct sk_buff *skb;
753 + if (!(skb = dev_alloc_skb(sw->mtu)))
755 + if (SKB_DMA_REALIGN)
756 + skb_reserve(skb, SKB_DMA_REALIGN);
757 + skb_reserve(skb, NET_IP_ALIGN);
758 + desc->sdl = sw->mtu;
759 + if (i == (RX_DESCS - 1))
764 + desc->sdp = dma_map_single(NULL, skb->data,
765 + sw->mtu, DMA_FROM_DEVICE);
766 + if (dma_mapping_error(NULL, desc->sdp)) {
769 + rx_ring->buff_tab[i] = skb;
772 + __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_ptr0);
773 + __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_base_addr0);
775 + if (!(tx_dma_pool = dma_pool_create(DRV_NAME, NULL,
776 + TX_POOL_ALLOC_SIZE, 32, 0)))
779 + if (!(tx_ring->desc = dma_pool_alloc(tx_dma_pool, GFP_KERNEL,
780 + &tx_ring->phys_addr)))
782 + memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE);
784 + /* Setup TX buffers */
785 + for (i = 0; i < TX_DESCS; i++) {
786 + struct tx_desc *desc = &(tx_ring)->desc[i];
787 + tx_ring->buff_tab[i] = 0;
789 + if (i == (TX_DESCS - 1))
793 + __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0);
794 + __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_base_addr0);
799 +static void destroy_rings(struct sw *sw)
802 + if (sw->rx_ring->desc) {
803 + for (i = 0; i < RX_DESCS; i++) {
804 + struct _rx_ring *rx_ring = sw->rx_ring;
805 + struct rx_desc *desc = &(rx_ring)->desc[i];
806 + struct sk_buff *skb = sw->rx_ring->buff_tab[i];
808 + dma_unmap_single(NULL,
810 + sw->mtu, DMA_FROM_DEVICE);
811 + dev_kfree_skb(skb);
814 + dma_pool_free(rx_dma_pool, sw->rx_ring->desc, sw->rx_ring->phys_addr);
815 + dma_pool_destroy(rx_dma_pool);
817 + sw->rx_ring->desc = 0;
819 + if (sw->tx_ring->desc) {
820 + for (i = 0; i < TX_DESCS; i++) {
821 + struct _tx_ring *tx_ring = sw->tx_ring;
822 + struct tx_desc *desc = &(tx_ring)->desc[i];
823 + struct sk_buff *skb = sw->tx_ring->buff_tab[i];
825 + dma_unmap_single(NULL, desc->sdp,
826 + skb->len, DMA_TO_DEVICE);
827 + dev_kfree_skb(skb);
830 + dma_pool_free(tx_dma_pool, sw->tx_ring->desc, sw->tx_ring->phys_addr);
831 + dma_pool_destroy(tx_dma_pool);
833 + sw->tx_ring->desc = 0;
837 +static int eth_open(struct net_device *dev)
839 + struct port *port = netdev_priv(dev);
840 + struct sw *sw = port->sw;
843 + port->speed = 0; /* force "link up" message */
844 + phy_start(port->phydev);
846 + netif_start_queue(dev);
849 + request_irq(IRQ_CNS3XXX_SW_R0RXC, eth_rx_irq, IRQF_SHARED, "gig_switch", napi_dev);
850 + napi_enable(&sw->napi);
851 + netif_start_queue(napi_dev);
852 + //enable_irq(IRQ_CNS3XXX_SW_R0RXC);
854 + temp = __raw_readl(&sw->regs->mac_cfg[2]);
855 + temp &= ~(PORT_DISABLE);
856 + __raw_writel(temp, &sw->regs->mac_cfg[2]);
858 + temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
859 + temp &= ~(TS_SUSPEND | FS_SUSPEND);
860 + __raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
862 + __raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg);
864 + temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
865 + temp &= ~(PORT_DISABLE);
866 + __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
869 + netif_carrier_on(dev);
874 +static int eth_close(struct net_device *dev)
876 + struct port *port = netdev_priv(dev);
877 + struct sw *sw = port->sw;
882 + temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
883 + temp |= (PORT_DISABLE);
884 + __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
886 + netif_stop_queue(dev);
888 + phy_stop(port->phydev);
891 + disable_irq(IRQ_CNS3XXX_SW_R0RXC);
892 + free_irq(IRQ_CNS3XXX_SW_R0RXC, napi_dev);
893 + napi_disable(&sw->napi);
894 + netif_stop_queue(napi_dev);
895 + temp = __raw_readl(&sw->regs->mac_cfg[2]);
896 + temp |= (PORT_DISABLE);
897 + __raw_writel(temp, &sw->regs->mac_cfg[2]);
899 + __raw_writel(TS_SUSPEND | FS_SUSPEND,
900 + &sw->regs->dma_auto_poll_cfg);
903 + netif_carrier_off(dev);
907 +static void eth_rx_mode(struct net_device *dev)
909 + struct port *port = netdev_priv(dev);
910 + struct sw *sw = port->sw;
913 + temp = __raw_readl(&sw->regs->mac_glob_cfg);
915 + if (dev->flags & IFF_PROMISC) {
917 + temp |= ((1 << 2) << PROMISC_OFFSET);
919 + temp |= ((1 << port->id) << PROMISC_OFFSET);
922 + temp &= ~((1 << 2) << PROMISC_OFFSET);
924 + temp &= ~((1 << port->id) << PROMISC_OFFSET);
926 + __raw_writel(temp, &sw->regs->mac_glob_cfg);
929 +static int eth_set_mac(struct net_device *netdev, void *p)
931 + struct port *port = netdev_priv(netdev);
932 + struct sw *sw = port->sw;
933 + struct sockaddr *addr = p;
936 + if (!is_valid_ether_addr(addr->sa_data))
937 + return -EADDRNOTAVAIL;
939 + /* Invalidate old ARL Entry */
941 + __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
943 + __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
944 + __raw_writel( ((netdev->dev_addr[0] << 24) | (netdev->dev_addr[1] << 16) |
945 + (netdev->dev_addr[2] << 8) | (netdev->dev_addr[3])),
946 + &sw->regs->arl_ctrl[1]);
948 + __raw_writel( ((netdev->dev_addr[4] << 24) | (netdev->dev_addr[5] << 16) |
950 + &sw->regs->arl_ctrl[2]);
951 + __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
953 + while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
954 + && cycles < 5000) {
960 + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
963 + __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
965 + __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
966 + __raw_writel( ((addr->sa_data[0] << 24) | (addr->sa_data[1] << 16) |
967 + (addr->sa_data[2] << 8) | (addr->sa_data[3])),
968 + &sw->regs->arl_ctrl[1]);
970 + __raw_writel( ((addr->sa_data[4] << 24) | (addr->sa_data[5] << 16) |
971 + (7 << 4) | (1 << 1)), &sw->regs->arl_ctrl[2]);
972 + __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
974 + while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
975 + && cycles < 5000) {
982 +static int cns3xxx_change_mtu(struct net_device *netdev, int new_mtu)
984 + struct port *port = netdev_priv(netdev);
985 + struct sw *sw = port->sw;
988 + struct _rx_ring *rx_ring = sw->rx_ring;
989 + struct rx_desc *desc;
990 + struct sk_buff *skb;
992 + if (new_mtu > MAX_MRU)
995 + netdev->mtu = new_mtu;
997 + new_mtu += 36 + SKB_DMA_REALIGN;
998 + port->mtu = new_mtu;
1001 + for (i = 0; i < 3; i++) {
1002 + if (switch_port_tab[i]) {
1003 + if (switch_port_tab[i]->mtu > new_mtu)
1004 + new_mtu = switch_port_tab[i]->mtu;
1009 + if (new_mtu == sw->mtu)
1012 + disable_irq(IRQ_CNS3XXX_SW_R0RXC);
1014 + sw->mtu = new_mtu;
1017 + __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
1019 + for (i = 0; i < RX_DESCS; i++) {
1020 + desc = &(rx_ring)->desc[i];
1021 + /* Check if we own it, if we do, it will get set correctly
1022 + * when it is re-used */
1023 + if (!desc->cown) {
1024 + skb = rx_ring->buff_tab[i];
1025 + dma_unmap_single(NULL, desc->sdp, desc->sdl,
1027 + dev_kfree_skb(skb);
1029 + if ((skb = dev_alloc_skb(new_mtu))) {
1030 + if (SKB_DMA_REALIGN)
1031 + skb_reserve(skb, SKB_DMA_REALIGN);
1032 + skb_reserve(skb, NET_IP_ALIGN);
1033 + desc->sdp = dma_map_single(NULL, skb->data,
1034 + new_mtu, DMA_FROM_DEVICE);
1035 + if (dma_mapping_error(NULL, desc->sdp)) {
1036 + dev_kfree_skb(skb);
1041 + /* put the new buffer on RX-free queue */
1042 + rx_ring->buff_tab[i] = skb;
1044 + if (i == RX_DESCS - 1)
1045 + desc->config0 = END_OF_RING | FIRST_SEGMENT |
1046 + LAST_SEGMENT | new_mtu;
1048 + desc->config0 = FIRST_SEGMENT |
1049 + LAST_SEGMENT | new_mtu;
1053 + /* Re-ENABLE DMA */
1054 + temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
1055 + temp &= ~(TS_SUSPEND | FS_SUSPEND);
1056 + __raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
1058 + __raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg);
1060 + enable_irq(IRQ_CNS3XXX_SW_R0RXC);
1065 +static const struct net_device_ops cns3xxx_netdev_ops = {
1066 + .ndo_open = eth_open,
1067 + .ndo_stop = eth_close,
1068 + .ndo_start_xmit = eth_xmit,
1069 + .ndo_set_rx_mode = eth_rx_mode,
1070 + .ndo_do_ioctl = eth_ioctl,
1071 + .ndo_change_mtu = cns3xxx_change_mtu,
1072 + .ndo_set_mac_address = eth_set_mac,
1073 + .ndo_validate_addr = eth_validate_addr,
1076 +static int __devinit eth_init_one(struct platform_device *pdev)
1079 + struct port *port;
1081 + struct net_device *dev;
1082 + struct cns3xxx_plat_info *plat = pdev->dev.platform_data;
1084 + char phy_id[MII_BUS_ID_SIZE + 3];
1088 + spin_lock_init(&tx_lock);
1089 + spin_lock_init(&stat_lock);
1091 + if (!(napi_dev = alloc_etherdev(sizeof(struct sw))))
1093 + strcpy(napi_dev->name, "switch%d");
1095 + SET_NETDEV_DEV(napi_dev, &pdev->dev);
1096 + sw = netdev_priv(napi_dev);
1097 + memset(sw, 0, sizeof(struct sw));
1098 + sw->regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
1099 + regs_phys = CNS3XXX_SWITCH_BASE;
1100 + sw->mem_res = request_mem_region(regs_phys, REGS_SIZE, napi_dev->name);
1101 + if (!sw->mem_res) {
1106 + sw->mtu = 1536 + SKB_DMA_REALIGN;
1108 + for (i = 0; i < 4; i++) {
1109 + temp = __raw_readl(&sw->regs->mac_cfg[i]);
1110 + temp |= (PORT_DISABLE) | 0x80000000;
1111 + __raw_writel(temp, &sw->regs->mac_cfg[i]);
1114 + temp = PORT_DISABLE;
1115 + __raw_writel(temp, &sw->regs->mac_cfg[2]);
1117 + temp = __raw_readl(&sw->regs->vlan_cfg);
1118 + temp |= NIC_MODE | VLAN_UNAWARE;
1119 + __raw_writel(temp, &sw->regs->vlan_cfg);
1121 + __raw_writel(UNKNOWN_VLAN_TO_CPU | ACCEPT_CRC_PACKET |
1122 + CRC_STRIPPING, &sw->regs->mac_glob_cfg);
1124 + if (!(sw->rx_ring = kmalloc(sizeof(struct _rx_ring), GFP_KERNEL))) {
1128 + memset(sw->rx_ring, 0, sizeof(struct _rx_ring));
1130 + if (!(sw->tx_ring = kmalloc(sizeof(struct _tx_ring), GFP_KERNEL))) {
1134 + memset(sw->tx_ring, 0, sizeof(struct _tx_ring));
1136 + if ((err = init_rings(sw)) != 0) {
1137 + destroy_rings(sw);
1139 + goto err_free_rings;
1141 + platform_set_drvdata(pdev, napi_dev);
1143 + netif_napi_add(napi_dev, &sw->napi, eth_poll, NAPI_WEIGHT);
1145 + for (i = 0; i < 3; i++) {
1146 + if (!(plat->ports & (1 << i))) {
1150 + if (!(dev = alloc_etherdev(sizeof(struct port)))) {
1154 + //SET_NETDEV_DEV(dev, &pdev->dev);
1155 + port = netdev_priv(dev);
1156 + port->netdev = dev;
1162 + port->mtu = sw->mtu;
1164 + temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
1165 + temp |= (PORT_DISABLE);
1166 + __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
1168 + dev->netdev_ops = &cns3xxx_netdev_ops;
1169 + dev->ethtool_ops = &cns3xxx_ethtool_ops;
1170 + dev->tx_queue_len = 1000;
1171 + dev->features = NETIF_F_HW_CSUM;
1173 + dev->vlan_features = NETIF_F_HW_CSUM;
1175 + switch_port_tab[i] = port;
1176 + memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN);
1178 + snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy[i]);
1179 + port->phydev = phy_connect(dev, phy_id, &cns3xxx_adjust_link, 0,
1180 + PHY_INTERFACE_MODE_RGMII);
1181 + if ((err = IS_ERR(port->phydev))) {
1182 + switch_port_tab[i] = 0;
1187 + port->phydev->irq = PHY_POLL;
1189 + if ((err = register_netdev(dev))) {
1190 + phy_disconnect(port->phydev);
1191 + switch_port_tab[i] = 0;
1196 + printk(KERN_INFO "%s: RGMII PHY %i on cns3xxx Switch\n", dev->name, plat->phy[i]);
1197 + netif_carrier_off(dev);
1205 + for (--i; i >= 0; i--) {
1206 + if (switch_port_tab[i]) {
1207 + port = switch_port_tab[i];
1208 + dev = port->netdev;
1209 + unregister_netdev(dev);
1210 + phy_disconnect(port->phydev);
1211 + switch_port_tab[i] = 0;
1216 + kfree(sw->tx_ring);
1218 + kfree(sw->rx_ring);
1220 + free_netdev(napi_dev);
1224 +static int __devexit eth_remove_one(struct platform_device *pdev)
1226 + struct net_device *dev = platform_get_drvdata(pdev);
1227 + struct sw *sw = netdev_priv(dev);
1229 + destroy_rings(sw);
1231 + for (i = 2; i >= 0; i--) {
1232 + if (switch_port_tab[i]) {
1233 + struct port *port = switch_port_tab[i];
1234 + struct net_device *dev = port->netdev;
1235 + unregister_netdev(dev);
1236 + phy_disconnect(port->phydev);
1237 + switch_port_tab[i] = 0;
1242 + release_resource(sw->mem_res);
1243 + free_netdev(napi_dev);
1247 +static struct platform_driver cns3xxx_eth_driver = {
1248 + .driver.name = DRV_NAME,
1249 + .probe = eth_init_one,
1250 + .remove = eth_remove_one,
1253 +static int __init eth_init_module(void)
1256 + if ((err = cns3xxx_mdio_register()))
1258 + return platform_driver_register(&cns3xxx_eth_driver);
1261 +static void __exit eth_cleanup_module(void)
1263 + platform_driver_unregister(&cns3xxx_eth_driver);
1264 + cns3xxx_mdio_remove();
1267 +module_init(eth_init_module);
1268 +module_exit(eth_cleanup_module);
1270 +MODULE_AUTHOR("Chris Lang");
1271 +MODULE_DESCRIPTION("Cavium CNS3xxx Ethernet driver");
1272 +MODULE_LICENSE("GPL v2");
1273 +MODULE_ALIAS("platform:cns3xxx_eth");
1275 +++ b/arch/arm/mach-cns3xxx/include/mach/platform.h
1278 + * arch/arm/mach-cns3xxx/include/mach/platform.h
1280 + * Copyright 2011 Gateworks Corporation
1281 + * Chris Lang <clang@gateworks.com>
1283 + * This file is free software; you can redistribute it and/or modify
1284 + * it under the terms of the GNU General Public License, Version 2, as
1285 + * published by the Free Software Foundation.
1289 +#ifndef __ASM_ARCH_PLATFORM_H
1290 +#define __ASM_ARCH_PLATFORM_H
1292 +#ifndef __ASSEMBLY__
1294 +/* Information about built-in Ethernet MAC interfaces */
1295 +struct cns3xxx_plat_info {
1296 + u8 ports; /* Bitmap of enabled Ports */
1301 +#endif /* __ASM_ARCH_PLATFORM_H */
1303 --- a/drivers/net/ethernet/Kconfig
1304 +++ b/drivers/net/ethernet/Kconfig
1305 @@ -32,6 +32,7 @@ source "drivers/net/ethernet/calxeda/Kco
1306 source "drivers/net/ethernet/chelsio/Kconfig"
1307 source "drivers/net/ethernet/cirrus/Kconfig"
1308 source "drivers/net/ethernet/cisco/Kconfig"
1309 +source "drivers/net/ethernet/cavium/Kconfig"
1310 source "drivers/net/ethernet/davicom/Kconfig"
1313 --- a/drivers/net/ethernet/Makefile
1314 +++ b/drivers/net/ethernet/Makefile
1315 @@ -15,6 +15,7 @@ obj-$(CONFIG_NET_BFIN) += adi/
1316 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
1317 obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
1318 obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
1319 +obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
1320 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
1321 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
1322 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
1324 +++ b/drivers/net/ethernet/cavium/Kconfig
1326 +config NET_VENDOR_CAVIUM
1327 + bool "Cavium devices"
1329 + depends on ARCH_CNS3XXX
1331 + If you have a network (Ethernet) chipset belonging to this class,
1334 + Note that the answer to this question does not directly affect
1335 + the kernel: saying N will just cause the configurator to skip all
1336 + the questions regarding Cavium chipsets. If you say Y, you will be asked
1337 + for your specific chipset/driver in the following questions.
1339 +if NET_VENDOR_CAVIUM
1342 + tristate "Cavium CNS3xxx Ethernet support"
1343 + depends on ARCH_CNS3XXX
1346 + Say Y here if you want to use built-in Ethernet ports
1347 + on CNS3XXX processor.
1351 +++ b/drivers/net/ethernet/cavium/Makefile
1354 +# Makefile for the Cavium ethernet device drivers.
1357 +obj-$(CONFIG_CNS3XXX_ETH) += cns3xxx_eth.o