#define DRV_NAME "cns3xxx_eth"
-#define RX_DESCS 128
+#define RX_DESCS 256
#define TX_DESCS 128
#define TX_DESC_RESERVE 20
struct switch_regs __iomem *regs;
struct napi_struct napi;
struct cns3xxx_plat_info *plat;
- struct _tx_ring *tx_ring;
- struct _rx_ring *rx_ring;
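+ /* Rings are embedded in the per-switch context rather than
+  * allocated separately at probe time.
+  */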
+ struct _tx_ring tx_ring;
+ struct _rx_ring rx_ring;
struct sk_buff *frag_first;
struct sk_buff *frag_last;
};
dev->name, port->speed, port->duplex ? "full" : "half");
}
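+/* Disable the RX completion IRQ and hand the work to the NAPI poll loop. */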
+static void eth_schedule_poll(struct sw *sw)
+{
+ if (unlikely(!napi_schedule_prep(&sw->napi)))
+ return;
+
+ disable_irq_nosync(IRQ_CNS3XXX_SW_R0RXC);
+ __napi_schedule(&sw->napi);
+}
+
irqreturn_t eth_rx_irq(int irq, void *pdev)
{
struct net_device *dev = pdev;
struct sw *sw = netdev_priv(dev);
- if (likely(napi_schedule_prep(&sw->napi))) {
- disable_irq_nosync(IRQ_CNS3XXX_SW_R0RXC);
- __napi_schedule(&sw->napi);
- }
+ eth_schedule_poll(sw);
return (IRQ_HANDLED);
}
static void cns3xxx_alloc_rx_buf(struct sw *sw, int received)
{
- struct _rx_ring *rx_ring = sw->rx_ring;
+ struct _rx_ring *rx_ring = &sw->rx_ring;
unsigned int i = rx_ring->alloc_index;
struct rx_desc *desc = &(rx_ring)->desc[i];
void *buf;
unsigned int phys;
for (received += rx_ring->alloc_count; received > 0; received--) {
- buf = kzalloc(RX_SEGMENT_ALLOC_SIZE, GFP_ATOMIC);
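+ /* RX DMA overwrites the buffer contents, so zeroing it
+  * (kzalloc) is unnecessary.
+  */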
+ buf = kmalloc(RX_SEGMENT_ALLOC_SIZE, GFP_ATOMIC);
if (!buf)
break;
static void eth_complete_tx(struct sw *sw)
{
- struct _tx_ring *tx_ring = sw->tx_ring;
+ struct _tx_ring *tx_ring = &sw->tx_ring;
struct tx_desc *desc;
int i;
int index;
static int eth_poll(struct napi_struct *napi, int budget)
{
struct sw *sw = container_of(napi, struct sw, napi);
- struct _rx_ring *rx_ring = sw->rx_ring;
+ struct _rx_ring *rx_ring = &sw->rx_ring;
int received = 0;
unsigned int length;
unsigned int i = rx_ring->cur_index;
struct rx_desc *desc = &(rx_ring)->desc[i];
- while (desc->cown) {
+ unsigned int alloc_count = rx_ring->alloc_count;
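+ /* Never drain the whole ring: stop once the buffers consumed in
+  * this pass plus those still awaiting refill reach RX_DESCS - 1.
+  */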
+ while (desc->cown && alloc_count + received < RX_DESCS - 1) {
struct sk_buff *skb;
int reserve = SKB_HEAD_ALIGN;
}
}
- cns3xxx_alloc_rx_buf(sw, received);
-
- rx_ring->cur_index = i;
-
- if (received != budget) {
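+ /* Stay on the poll list until a pass finds no work; only an
+  * idle pass re-enables the RX interrupt.
+  */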
+ if (!received) {
napi_complete(napi);
enable_irq(IRQ_CNS3XXX_SW_R0RXC);
}
- wmb();
- enable_rx_dma(sw);
-
spin_lock_bh(&tx_lock);
eth_complete_tx(sw);
spin_unlock_bh(&tx_lock);
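+ /* Refill the RX ring, publish the new position, and ensure the
+  * descriptor writes are visible before restarting RX DMA.
+  */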
+ cns3xxx_alloc_rx_buf(sw, received);
+
+ rx_ring->cur_index = i;
+
+ wmb();
+ enable_rx_dma(sw);
+
return received;
}
{
struct port *port = netdev_priv(dev);
struct sw *sw = port->sw;
- struct _tx_ring *tx_ring = sw->tx_ring;
+ struct _tx_ring *tx_ring = &sw->tx_ring;
struct sk_buff *skb1;
char pmap = (1 << port->id);
int nr_frags = skb_shinfo(skb)->nr_frags;
skb_walk_frags(skb, skb1)
nr_desc++;
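+ /* TX descriptors are now reclaimed from the poll loop, so kick
+  * NAPI instead of reclaiming inline under tx_lock.
+  */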
+ eth_schedule_poll(sw);
spin_lock_bh(&tx_lock);
-
- eth_complete_tx(sw);
if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) {
spin_unlock_bh(&tx_lock);
return NETDEV_TX_BUSY;
static int init_rings(struct sw *sw)
{
int i;
- struct _rx_ring *rx_ring = sw->rx_ring;
- struct _tx_ring *tx_ring = sw->tx_ring;
+ struct _rx_ring *rx_ring = &sw->rx_ring;
+ struct _tx_ring *tx_ring = &sw->tx_ring;
__raw_writel(0, &sw->regs->fs_dma_ctrl0);
__raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
static void destroy_rings(struct sw *sw)
{
int i;
- if (sw->rx_ring->desc) {
+ if (sw->rx_ring.desc) {
for (i = 0; i < RX_DESCS; i++) {
- struct _rx_ring *rx_ring = sw->rx_ring;
+ struct _rx_ring *rx_ring = &sw->rx_ring;
struct rx_desc *desc = &(rx_ring)->desc[i];
- struct sk_buff *skb = sw->rx_ring->buff_tab[i];
+ struct sk_buff *skb = sw->rx_ring.buff_tab[i];
if (!skb)
continue;
DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
- dma_pool_free(rx_dma_pool, sw->rx_ring->desc, sw->rx_ring->phys_addr);
+ dma_pool_free(rx_dma_pool, sw->rx_ring.desc, sw->rx_ring.phys_addr);
dma_pool_destroy(rx_dma_pool);
rx_dma_pool = 0;
- sw->rx_ring->desc = 0;
+ sw->rx_ring.desc = 0;
}
- if (sw->tx_ring->desc) {
+ if (sw->tx_ring.desc) {
for (i = 0; i < TX_DESCS; i++) {
- struct _tx_ring *tx_ring = sw->tx_ring;
+ struct _tx_ring *tx_ring = &sw->tx_ring;
struct tx_desc *desc = &(tx_ring)->desc[i];
- struct sk_buff *skb = sw->tx_ring->buff_tab[i];
+ struct sk_buff *skb = sw->tx_ring.buff_tab[i];
if (skb) {
dma_unmap_single(NULL, desc->sdp,
skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
}
}
- dma_pool_free(tx_dma_pool, sw->tx_ring->desc, sw->tx_ring->phys_addr);
+ dma_pool_free(tx_dma_pool, sw->tx_ring.desc, sw->tx_ring.phys_addr);
dma_pool_destroy(tx_dma_pool);
tx_dma_pool = 0;
- sw->tx_ring->desc = 0;
+ sw->tx_ring.desc = 0;
}
}
__raw_writel(UNKNOWN_VLAN_TO_CPU |
CRC_STRIPPING, &sw->regs->mac_glob_cfg);
- if (!(sw->rx_ring = kmalloc(sizeof(struct _rx_ring), GFP_KERNEL))) {
- err = -ENOMEM;
- goto err_free;
- }
- memset(sw->rx_ring, 0, sizeof(struct _rx_ring));
-
- if (!(sw->tx_ring = kmalloc(sizeof(struct _tx_ring), GFP_KERNEL))) {
- err = -ENOMEM;
- goto err_free_rx;
- }
- memset(sw->tx_ring, 0, sizeof(struct _tx_ring));
-
if ((err = init_rings(sw)) != 0) {
destroy_rings(sw);
err = -ENOMEM;
- goto err_free_rings;
+ goto err_free;
}
platform_set_drvdata(pdev, napi_dev);
free_netdev(dev);
}
}
-err_free_rings:
- kfree(sw->tx_ring);
-err_free_rx:
- kfree(sw->rx_ring);
err_free:
free_netdev(napi_dev);
return err;