/*
 * drivers/net/ubi32-eth.c
 *   Ubicom32 ethernet TIO interface driver.
 *
 * (C) Copyright 2009, Ubicom, Inc.
 *
 * This file is part of the Ubicom32 Linux Kernel Port.
 *
 * The Ubicom32 Linux Kernel Port is free software: you can redistribute
 * it and/or modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * The Ubicom32 Linux Kernel Port is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the Ubicom32 Linux Kernel Port.  If not,
 * see <http://www.gnu.org/licenses/>.
 *
 * Ubicom32 implementation derived from (with many thanks):
 *   arch/m68knommu
 *   arch/blackfin
 *   arch/parisc
 */
/*
 * ubi32_eth.c
 * Ethernet driver for the IP5K/IP7K.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>

#include <linux/in.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <asm/checksum.h>
#include <asm/ip5000.h>
#include <asm/devtree.h>
#include <asm/system.h>

#define UBICOM32_USE_NAPI       /* define this to use NAPI instead of tasklet */
//#define UBICOM32_USE_POLLING  /* define this to use polling instead of interrupt */
#include "ubi32-eth.h"

/*
 * TODO:
 * mac address from flash
 * multicast filter
 * ethtool support
 * sysfs support
 * skb nr_frags (scatter-gather) support
 * ioctl
 * monitor phy status
 */

extern int ubi32_ocm_skbuf_max, ubi32_ocm_skbuf, ubi32_ddr_skbuf;
static const char *eth_if_name[UBI32_ETH_NUM_OF_DEVICES] =
        {"eth_lan", "eth_wan"};
static struct net_device *ubi32_eth_devices[UBI32_ETH_NUM_OF_DEVICES] =
        {NULL, NULL};
static u8_t mac_addr[UBI32_ETH_NUM_OF_DEVICES][ETH_ALEN] = {
        {0x00, 0x03, 0x64, 'l', 'a', 'n'},
        {0x00, 0x03, 0x64, 'w', 'a', 'n'}};

#if (defined(CONFIG_ZONE_DMA) && defined(CONFIG_UBICOM32_OCM_FOR_SKB))
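/*
 * ubi32_alloc_skb_ocm()
 *      Allocate an RX skb from on-chip memory (OCM) by requesting
 *      DMA-zone memory; callers fall back to ubi32_alloc_skb() when
 *      this fails.
 */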
static inline struct sk_buff *ubi32_alloc_skb_ocm(struct net_device *dev, unsigned int length)
{
        return __dev_alloc_skb(length, GFP_ATOMIC | __GFP_NOWARN | __GFP_NORETRY | GFP_DMA);
}
#endif

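/*
 * ubi32_alloc_skb()
 *      Allocate an RX skb from regular (DDR) memory.
 */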
static inline struct sk_buff *ubi32_alloc_skb(struct net_device *dev, unsigned int length)
{
        return __dev_alloc_skb(length, GFP_ATOMIC | __GFP_NOWARN);
}

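/*
 * ubi32_eth_vp_rxtx_enable()
 *      Enable RX/TX and interrupts in the VP registers, then raise the
 *      VP interrupt so it picks up the new command.
 */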
static void ubi32_eth_vp_rxtx_enable(struct net_device *dev)
{
        struct ubi32_eth_private *priv = netdev_priv(dev);
        priv->regs->command = UBI32_ETH_VP_CMD_RX_ENABLE | UBI32_ETH_VP_CMD_TX_ENABLE;
        priv->regs->int_mask = (UBI32_ETH_VP_INT_RX | UBI32_ETH_VP_INT_TX);
        ubicom32_set_interrupt(priv->vp_int_bit);
}

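/*
 * ubi32_eth_vp_rxtx_stop()
 *      Clear the VP command and interrupt mask, then spin until the VP
 *      reports that both its RX and TX state machines have stopped.
 */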
static void ubi32_eth_vp_rxtx_stop(struct net_device *dev)
{
        struct ubi32_eth_private *priv = netdev_priv(dev);
        priv->regs->command = 0;
        priv->regs->int_mask = 0;
        ubicom32_set_interrupt(priv->vp_int_bit);

        /* Wait for graceful shutdown */
        while (priv->regs->status & (UBI32_ETH_VP_STATUS_RX_STATE | UBI32_ETH_VP_STATUS_TX_STATE));
}

/*
 * ubi32_eth_tx_done()
 *      Reclaim completed TX descriptors and wake the queue if it was
 *      stopped on a full ring.
 */
static int ubi32_eth_tx_done(struct net_device *dev)
{
        struct ubi32_eth_private *priv;
        struct sk_buff *skb;
        volatile void *pdata;
        struct ubi32_eth_dma_desc *desc;
        u32_t   count = 0;

        priv = netdev_priv(dev);

        priv->regs->int_status &= ~UBI32_ETH_VP_INT_TX;
        while (priv->tx_tail != priv->regs->tx_out) {
                pdata = priv->regs->tx_dma_ring[priv->tx_tail];
                BUG_ON(pdata == NULL);
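                /*
                 * Each descriptor lives in its skb's control buffer
                 * (skb->cb, see ubi32_eth_start_xmit()), so the owning skb
                 * can be recovered from the descriptor pointer with
                 * container_of().
                 */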
                skb = container_of((void *)pdata, struct sk_buff, cb);
                desc = (struct ubi32_eth_dma_desc *)pdata;
                if (unlikely(!(desc->status & UBI32_ETH_VP_TX_OK))) {
                        dev->stats.tx_errors++;
                } else {
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += skb->len;
                }
                dev_kfree_skb_any(skb);
                priv->regs->tx_dma_ring[priv->tx_tail] = NULL;
                priv->tx_tail = (priv->tx_tail + 1) & TX_DMA_RING_MASK;
                count++;
        }

        if (unlikely(priv->regs->status & UBI32_ETH_VP_STATUS_TX_Q_FULL)) {
                spin_lock(&priv->lock);
                if (priv->regs->status & UBI32_ETH_VP_STATUS_TX_Q_FULL) {
                        priv->regs->status &= ~UBI32_ETH_VP_STATUS_TX_Q_FULL;
                        netif_wake_queue(dev);
                }
                spin_unlock(&priv->lock);
        }
        return count;
}

/*
 * ubi32_eth_receive()
 *      To avoid locking overhead, this is called only from the tasklet
 *      (when not using NAPI) or from the NAPI poll handler (when using
 *      NAPI).  Returns the number of frames processed.
 */
static int ubi32_eth_receive(struct net_device *dev, int quota)
{
        struct ubi32_eth_private *priv = netdev_priv(dev);
        unsigned short rx_in = priv->regs->rx_in;
        struct sk_buff *skb;
        struct ubi32_eth_dma_desc *desc = NULL;
        volatile void *pdata;

        int extra_reserve_adj;
        int extra_alloc = UBI32_ETH_RESERVE_SPACE + UBI32_ETH_TRASHED_MEMORY;
        int replenish_cnt, count = 0;
        int replenish_max = RX_DMA_MAX_QUEUE_SIZE;
#if (defined(CONFIG_ZONE_DMA) && defined(CONFIG_UBICOM32_OCM_FOR_SKB))
        if (likely(dev == ubi32_eth_devices[0]))
                replenish_max = min(ubi32_ocm_skbuf_max, RX_DMA_MAX_QUEUE_SIZE);
#endif

        if (unlikely(rx_in == priv->regs->rx_out))
                priv->vp_stats.rx_q_full_cnt++;

        priv->regs->int_status &= ~UBI32_ETH_VP_INT_RX;
        while (priv->rx_tail != priv->regs->rx_out) {
                if (unlikely(count == quota)) {
                        /* There are still frames pending to be processed */
                        priv->vp_stats.rx_throttle++;
                        break;
                }

                pdata = priv->regs->rx_dma_ring[priv->rx_tail];
                BUG_ON(pdata == NULL);

                desc = (struct ubi32_eth_dma_desc *)pdata;
                skb = container_of((void *)pdata, struct sk_buff, cb);
                count++;
                priv->regs->rx_dma_ring[priv->rx_tail] = NULL;
                priv->rx_tail = ((priv->rx_tail + 1) & RX_DMA_RING_MASK);

                /*
                 * Check only the RX_OK bit here.
                 * The rest of the status word is used as a timestamp.
                 */
                if (unlikely(!(desc->status & UBI32_ETH_VP_RX_OK))) {
                        dev->stats.rx_errors++;
                        dev_kfree_skb_any(skb);
                        continue;
                }

                skb_put(skb, desc->data_len);
                skb->dev = dev;
                skb->protocol = eth_type_trans(skb, dev);
                skb->ip_summed = CHECKSUM_NONE;
                dev->stats.rx_bytes += skb->len;
                dev->stats.rx_packets++;
#ifndef UBICOM32_USE_NAPI
                netif_rx(skb);
#else
                netif_receive_skb(skb);
#endif
        }

        /* Replenish the VP with fresh RX descriptors. */
        replenish_cnt = replenish_max -
                ((RX_DMA_RING_SIZE + rx_in - priv->rx_tail) & RX_DMA_RING_MASK);
        if (replenish_cnt > 0) {
#if (defined(CONFIG_ZONE_DMA) && defined(CONFIG_UBICOM32_OCM_FOR_SKB))
                /*
                 * Black magic for performance:
                 *   Try to allocate skbs from OCM only for the first Ethernet I/F.
                 *   Also limit the number of RX buffers to 21 due to limited OCM.
                 */
                if (likely(dev == ubi32_eth_devices[0])) {
                        do {
                                skb = ubi32_alloc_skb_ocm(dev, RX_BUF_SIZE + extra_alloc);
                                if (!skb) {
                                        break;
                                }
                                /* set up dma descriptor */
                                ubi32_ocm_skbuf++;
                                desc = (struct ubi32_eth_dma_desc *)skb->cb;
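                                /*
                                 * Adjust the reserve so that the start of
                                 * the payload (skb->data + ETH_HLEN, i.e.
                                 * the IP header) lands on a cache line
                                 * boundary.
                                 */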
                                extra_reserve_adj =
                                        ((u32)skb->data + UBI32_ETH_RESERVE_SPACE + ETH_HLEN) &
                                        (CACHE_LINE_SIZE - 1);
                                skb_reserve(skb, UBI32_ETH_RESERVE_SPACE - extra_reserve_adj);
                                desc->data_pointer = skb->data;
                                desc->buffer_len = RX_BUF_SIZE + UBI32_ETH_TRASHED_MEMORY;
                                desc->data_len = 0;
                                desc->status = 0;
                                priv->regs->rx_dma_ring[rx_in] = desc;
                                rx_in = (rx_in + 1) & RX_DMA_RING_MASK;
                        } while (--replenish_cnt > 0);
                }
#endif

                while (replenish_cnt-- > 0) {
                        skb = ubi32_alloc_skb(dev, RX_BUF_SIZE + extra_alloc);
                        if (!skb) {
                                priv->vp_stats.rx_alloc_err++;
                                break;
                        }
                        /* set up dma descriptor */
                        ubi32_ddr_skbuf++;
                        desc = (struct ubi32_eth_dma_desc *)skb->cb;
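                        /* Same cache-line alignment of the payload as the OCM path above. */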
                        extra_reserve_adj =
                                ((u32)skb->data + UBI32_ETH_RESERVE_SPACE + ETH_HLEN) &
                                (CACHE_LINE_SIZE - 1);
                        skb_reserve(skb, UBI32_ETH_RESERVE_SPACE - extra_reserve_adj);
                        desc->data_pointer = skb->data;
                        desc->buffer_len = RX_BUF_SIZE + UBI32_ETH_TRASHED_MEMORY;
                        desc->data_len = 0;
                        desc->status = 0;
                        priv->regs->rx_dma_ring[rx_in] = desc;
                        rx_in = (rx_in + 1) & RX_DMA_RING_MASK;
                }

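                /*
                 * Make sure the descriptor writes are visible to the VP
                 * before the new rx_in index is published.
                 */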
                wmb();
                priv->regs->rx_in = rx_in;
                ubicom32_set_interrupt(priv->vp_int_bit);
        }

        if (likely(count > 0)) {
                dev->last_rx = jiffies;
        }
        return count;
}

#ifdef UBICOM32_USE_NAPI
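/*
 * ubi32_eth_napi_poll()
 *      NAPI poll handler: reap TX completions, process up to budget RX
 *      frames, then re-enable interrupts once both rings are drained.
 */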
static int ubi32_eth_napi_poll(struct napi_struct *napi, int budget)
{
        struct ubi32_eth_private *priv = container_of(napi, struct ubi32_eth_private, napi);
        struct net_device *dev = priv->dev;
        u32_t count;

        if (priv->tx_tail != priv->regs->tx_out) {
                ubi32_eth_tx_done(dev);
        }

        count = ubi32_eth_receive(dev, budget);

        if (count < budget) {
                napi_complete(napi);
                priv->regs->int_mask |= (UBI32_ETH_VP_INT_RX | UBI32_ETH_VP_INT_TX);
                if ((priv->rx_tail != priv->regs->rx_out) || (priv->tx_tail != priv->regs->tx_out)) {
                        if (napi_reschedule(napi)) {
                                priv->regs->int_mask = 0;
                        }
                }
        }
        return count;
}

#else
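/*
 * ubi32_eth_do_tasklet()
 *      Bottom half used when NAPI is disabled: reap TX completions,
 *      process RX frames, then re-enable interrupts or re-schedule as
 *      needed.
 */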
static void ubi32_eth_do_tasklet(unsigned long arg)
{
        struct net_device *dev = (struct net_device *)arg;
        struct ubi32_eth_private *priv = netdev_priv(dev);

        if (priv->tx_tail != priv->regs->tx_out) {
                ubi32_eth_tx_done(dev);
        }

        /* always call receive to process new RX frames as well as replenish RX buffers */
        ubi32_eth_receive(dev, UBI32_RX_BOUND);

        priv->regs->int_mask |= (UBI32_ETH_VP_INT_RX | UBI32_ETH_VP_INT_TX);
        if ((priv->rx_tail != priv->regs->rx_out) || (priv->tx_tail != priv->regs->tx_out)) {
                priv->regs->int_mask = 0;
                tasklet_schedule(&priv->tsk);
        }
}
#endif

#if defined(UBICOM32_USE_POLLING)
static struct timer_list eth_poll_timer;

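/*
 * ubi32_eth_poll()
 *      Timer-driven fallback for UBICOM32_USE_POLLING: kick every active
 *      interface's NAPI/tasklet every couple of jiffies instead of
 *      relying on interrupts.
 */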
static void ubi32_eth_poll(unsigned long arg)
{
        struct net_device *dev;
        struct ubi32_eth_private *priv;
        int i;

        for (i = 0; i < UBI32_ETH_NUM_OF_DEVICES; i++) {
                dev = ubi32_eth_devices[i];
                if (dev && (dev->flags & IFF_UP)) {
                        priv = netdev_priv(dev);
#ifdef UBICOM32_USE_NAPI
                        napi_schedule(&priv->napi);
#else
                        tasklet_schedule(&priv->tsk);
#endif
                }
        }

        eth_poll_timer.expires = jiffies + 2;
        add_timer(&eth_poll_timer);
}

#else
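/*
 * ubi32_eth_interrupt()
 *      RX/TX interrupt handler: mask the port interrupt and defer the
 *      real work to NAPI or the tasklet.
 */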
static irqreturn_t ubi32_eth_interrupt(int irq, void *dev_id)
{
        struct ubi32_eth_private *priv;

        struct net_device *dev = (struct net_device *)dev_id;
        BUG_ON(irq != dev->irq);

        priv = netdev_priv(dev);
        if (unlikely(!(priv->regs->int_status & priv->regs->int_mask))) {
                return IRQ_NONE;
        }

        /*
         * Disable port interrupt
         */
#ifdef UBICOM32_USE_NAPI
        if (napi_schedule_prep(&priv->napi)) {
                priv->regs->int_mask = 0;
                __napi_schedule(&priv->napi);
        }
#else
        priv->regs->int_mask = 0;
        tasklet_schedule(&priv->tsk);
#endif
        return IRQ_HANDLED;
}
#endif

/*
 * ubi32_eth_open()
 *      Bring the interface up: install the IRQ handler, enable NAPI or
 *      the tasklet, prime the RX ring and start the VP.
 */
static int ubi32_eth_open(struct net_device *dev)
{
        struct ubi32_eth_private *priv = netdev_priv(dev);
        int err;

        printk(KERN_INFO "eth open %s\n", dev->name);
#ifndef UBICOM32_USE_POLLING
        /* request_region() */
        err = request_irq(dev->irq, ubi32_eth_interrupt, IRQF_DISABLED, dev->name, dev);
        if (err) {
                printk(KERN_WARNING "failed to request IRQ: %d\n", err);
                return -ENODEV;
        }
#endif
#ifdef UBICOM32_USE_NAPI
        napi_enable(&priv->napi);
#else
        tasklet_init(&priv->tsk, ubi32_eth_do_tasklet, (unsigned long)dev);
#endif

        /* call receive to supply RX buffers */
        ubi32_eth_receive(dev, RX_DMA_MAX_QUEUE_SIZE);

        /* check phy status and call netif_carrier_on */
        ubi32_eth_vp_rxtx_enable(dev);
        netif_start_queue(dev);
        return 0;
}

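/*
 * ubi32_eth_close()
 *      Stop the queue and the VP, then reclaim every buffer left on the
 *      RX and TX rings and reset the ring indices.
 */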
static int ubi32_eth_close(struct net_device *dev)
{
        struct ubi32_eth_private *priv = netdev_priv(dev);
        volatile void *pdata;
        struct sk_buff *skb;

#ifndef UBICOM32_USE_POLLING
        free_irq(dev->irq, dev);
#endif
        netif_stop_queue(dev); /* can't transmit any more */
#ifdef UBICOM32_USE_NAPI
        napi_disable(&priv->napi);
#else
        tasklet_kill(&priv->tsk);
#endif
        ubi32_eth_vp_rxtx_stop(dev);

        /*
         * RX clean up
         */
        while (priv->rx_tail != priv->regs->rx_in) {
                pdata = priv->regs->rx_dma_ring[priv->rx_tail];
                skb = container_of((void *)pdata, struct sk_buff, cb);
                priv->regs->rx_dma_ring[priv->rx_tail] = NULL;
                dev_kfree_skb_any(skb);
                priv->rx_tail = ((priv->rx_tail + 1) & RX_DMA_RING_MASK);
        }
        priv->regs->rx_in = 0;
        priv->regs->rx_out = priv->regs->rx_in;
        priv->rx_tail = priv->regs->rx_in;

        /*
         * TX clean up
         */
        BUG_ON(priv->regs->tx_out != priv->regs->tx_in);
        ubi32_eth_tx_done(dev);
        BUG_ON(priv->tx_tail != priv->regs->tx_in);
        priv->regs->tx_in = 0;
        priv->regs->tx_out = priv->regs->tx_in;
        priv->tx_tail = priv->regs->tx_in;

        return 0;
}

/*
 * ubi32_eth_set_config()
 */
static int ubi32_eth_set_config(struct net_device *dev, struct ifmap *map)
{
        /* the interface must be down before changing its configuration */
        printk(KERN_INFO "set_config %x\n", dev->flags);
        if (dev->flags & IFF_UP)
                return -EBUSY;

        /* I/O and IRQ can not be changed */
        if (map->base_addr != dev->base_addr) {
                printk(KERN_WARNING "%s: Can't change I/O address\n", dev->name);
                return -EOPNOTSUPP;
        }

#ifndef UBICOM32_USE_POLLING
        if (map->irq != dev->irq) {
                printk(KERN_WARNING "%s: Can't change IRQ\n", dev->name);
                return -EOPNOTSUPP;
        }
#endif

        /* ignore other fields */
        return 0;
}

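/*
 * ubi32_eth_start_xmit()
 *      Queue one frame on the TX DMA ring and kick the VP.  One ring
 *      slot is always left unused so that a full ring can be told apart
 *      from an empty one.
 */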
static int ubi32_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ubi32_eth_private *priv = netdev_priv(dev);
        struct ubi32_eth_dma_desc *desc = NULL;
        unsigned short space, tx_in;

        tx_in = priv->regs->tx_in;

        dev->trans_start = jiffies; /* save the timestamp */
        space = TX_DMA_RING_MASK - ((TX_DMA_RING_SIZE + tx_in - priv->tx_tail) & TX_DMA_RING_MASK);
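        /*
         * Worked example with a hypothetical ring size of 8 (mask 7):
         * tx_in = 2 and tx_tail = 6 give (8 + 2 - 6) & 7 = 4 slots in
         * flight, so space = 7 - 4 = 3.
         */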

        if (unlikely(space == 0)) {
                if (!(priv->regs->status & UBI32_ETH_VP_STATUS_TX_Q_FULL)) {
                        spin_lock(&priv->lock);
                        if (!(priv->regs->status & UBI32_ETH_VP_STATUS_TX_Q_FULL)) {
                                priv->regs->status |= UBI32_ETH_VP_STATUS_TX_Q_FULL;
                                priv->vp_stats.tx_q_full_cnt++;
                                netif_stop_queue(dev);
                        }
                        spin_unlock(&priv->lock);
                }

                /* give both HW and this driver an extra trigger */
                priv->regs->int_mask |= UBI32_ETH_VP_INT_TX;
#ifndef UBICOM32_USE_POLLING
                ubicom32_set_interrupt(dev->irq);
#endif
                ubicom32_set_interrupt(priv->vp_int_bit);

                return NETDEV_TX_BUSY;
        }

        /* still have room */
        desc = (struct ubi32_eth_dma_desc *)skb->cb;
        desc->data_pointer = skb->data;
        desc->data_len = skb->len;
        priv->regs->tx_dma_ring[tx_in] = desc;
        tx_in = ((tx_in + 1) & TX_DMA_RING_MASK);
        wmb();
        priv->regs->tx_in = tx_in;
        /* kick the HRT */
        ubicom32_set_interrupt(priv->vp_int_bit);

        return NETDEV_TX_OK;
}

/*
 * Deal with a transmit timeout.
 */
static void ubi32_eth_tx_timeout(struct net_device *dev)
{
        struct ubi32_eth_private *priv = netdev_priv(dev);
        dev->stats.tx_errors++;
        priv->regs->int_mask |= UBI32_ETH_VP_INT_TX;
#ifndef UBICOM32_USE_POLLING
        ubicom32_set_interrupt(dev->irq);
#endif
        ubicom32_set_interrupt(priv->vp_int_bit);
}

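/*
 * ubi32_eth_ioctl()
 *      Minimal MII ioctl support: BMCR/BMSR values are synthesized from
 *      the VP status word rather than read from a real PHY.
 */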
static int ubi32_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct ubi32_eth_private *priv = netdev_priv(dev);
        struct mii_ioctl_data *data = if_mii(rq);

        printk(KERN_INFO "ioctl %s, %d\n", dev->name, cmd);
        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = 0;
                break;

        case SIOCGMIIREG:
                if ((data->reg_num & 0x1F) == MII_BMCR) {
                        /* Make up MII control register value from what we know */
                        data->val_out = 0x0000
                        | ((priv->regs->status & UBI32_ETH_VP_STATUS_DUPLEX)
                                        ? BMCR_FULLDPLX : 0)
                        | ((priv->regs->status & UBI32_ETH_VP_STATUS_SPEED100)
                                        ? BMCR_SPEED100 : 0)
                        | ((priv->regs->status & UBI32_ETH_VP_STATUS_SPEED1000)
                                        ? BMCR_SPEED1000 : 0);
                } else if ((data->reg_num & 0x1F) == MII_BMSR) {
                        /* Make up MII status register value from what we know */
                        data->val_out =
                        (BMSR_100FULL|BMSR_100HALF|BMSR_10FULL|BMSR_10HALF)
                        | ((priv->regs->status & UBI32_ETH_VP_STATUS_LINK)
                                        ? BMSR_LSTATUS : 0);
                } else {
                        return -EIO;
                }
                break;

        case SIOCSMIIREG:
                return -EOPNOTSUPP;

        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

/*
 * Return statistics to the caller
 */
static struct net_device_stats *ubi32_eth_get_stats(struct net_device *dev)
{
        return &dev->stats;
}

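/*
 * ubi32_eth_change_mtu()
 *      Accept only standard Ethernet MTUs (68 to 1500 bytes).
 */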
static int ubi32_eth_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ubi32_eth_private *priv = netdev_priv(dev);
        unsigned long flags;

        if ((new_mtu < 68) || (new_mtu > 1500))
                return -EINVAL;

        spin_lock_irqsave(&priv->lock, flags);
        dev->mtu = new_mtu;
        spin_unlock_irqrestore(&priv->lock, flags);
        printk(KERN_INFO "set mtu to %d\n", new_mtu);
        return 0;
}

/*
 * ubi32_eth_cleanup()
 *      Unload the module.
 */
void ubi32_eth_cleanup(void)
{
        struct ubi32_eth_private *priv;
        struct net_device *dev;
        int i;

        for (i = 0; i < UBI32_ETH_NUM_OF_DEVICES; i++) {
                dev = ubi32_eth_devices[i];
                if (dev) {
                        priv = netdev_priv(dev);
                        unregister_netdev(dev);
                        kfree(priv->regs->tx_dma_ring);
                        free_netdev(dev);
                        ubi32_eth_devices[i] = NULL;
                }
        }
}

static const struct net_device_ops ubi32_netdev_ops = {
        .ndo_open               = ubi32_eth_open,
        .ndo_stop               = ubi32_eth_close,
        .ndo_start_xmit         = ubi32_eth_start_xmit,
        .ndo_tx_timeout         = ubi32_eth_tx_timeout,
        .ndo_do_ioctl           = ubi32_eth_ioctl,
        .ndo_change_mtu         = ubi32_eth_change_mtu,
        .ndo_set_config         = ubi32_eth_set_config,
        .ndo_get_stats          = ubi32_eth_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
};

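/*
 * ubi32_eth_init_module()
 *      Look up each Ethernet VP node in the device tree, allocate its
 *      DMA descriptor rings and register the corresponding net device.
 */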
int ubi32_eth_init_module(void)
{
        struct ethtionode *eth_node;
        struct net_device *dev;
        struct ubi32_eth_private *priv;
        int i, err;

        /*
         * Device allocation.
         */
        err = 0;
        for (i = 0; i < UBI32_ETH_NUM_OF_DEVICES; i++) {
                /*
                 * See if the eth_vp is in the device tree.
                 */
                eth_node = (struct ethtionode *)devtree_find_node(eth_if_name[i]);
                if (!eth_node) {
                        printk(KERN_INFO "%s does not exist\n", eth_if_name[i]);
                        continue;
                }

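                /*
                 * Try to place the descriptor rings in OCM (via the DMA
                 * zone) first and fall back to regular memory if that
                 * fails.
                 */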
                eth_node->tx_dma_ring = (struct ubi32_eth_dma_desc **)kmalloc(
                                sizeof(struct ubi32_eth_dma_desc *) *
                                (TX_DMA_RING_SIZE + RX_DMA_RING_SIZE),
                                GFP_ATOMIC | __GFP_NOWARN | __GFP_NORETRY | GFP_DMA);

                if (eth_node->tx_dma_ring == NULL) {
                        eth_node->tx_dma_ring = (struct ubi32_eth_dma_desc **)kmalloc(
                                sizeof(struct ubi32_eth_dma_desc *) *
                                (TX_DMA_RING_SIZE + RX_DMA_RING_SIZE), GFP_KERNEL);
                        printk(KERN_INFO "failed to allocate DMA rings from OCM\n");
                }

                if (!eth_node->tx_dma_ring) {
                        err = -ENOMEM;
                        break;
                }
                eth_node->rx_dma_ring = eth_node->tx_dma_ring + TX_DMA_RING_SIZE;
                eth_node->tx_sz = TX_DMA_RING_SIZE - 1;
                eth_node->rx_sz = RX_DMA_RING_SIZE - 1;

                dev = alloc_etherdev(sizeof(struct ubi32_eth_private));
                if (!dev) {
                        kfree(eth_node->tx_dma_ring);
                        err = -ENOMEM;
                        break;
                }
                priv = netdev_priv(dev);
                priv->dev = dev;

                /*
                 * This just fills in a default Ubicom MAC address.
                 */
                memcpy(dev->dev_addr, mac_addr[i], ETH_ALEN);
                memset(dev->broadcast, 0xff, ETH_ALEN);

                priv->regs = eth_node;
                priv->regs->command = 0;
                priv->regs->int_mask = 0;
                priv->regs->int_status = 0;
                priv->regs->tx_out = 0;
                priv->regs->rx_out = 0;
                priv->regs->tx_in = 0;
                priv->regs->rx_in = 0;
                priv->rx_tail = 0;
                priv->tx_tail = 0;

                priv->vp_int_bit = eth_node->dn.sendirq;
                dev->irq = eth_node->dn.recvirq;

                spin_lock_init(&priv->lock);

                dev->netdev_ops = &ubi32_netdev_ops;

                dev->watchdog_timeo = UBI32_ETH_VP_TX_TIMEOUT;
#ifdef UBICOM32_USE_NAPI
                netif_napi_add(dev, &priv->napi, ubi32_eth_napi_poll, UBI32_ETH_NAPI_WEIGHT);
#endif
                err = register_netdev(dev);
                if (err) {
                        printk(KERN_WARNING "Failed to register netdev %s\n", eth_if_name[i]);
                        //release_region();
                        free_netdev(dev);
                        kfree(eth_node->tx_dma_ring);
                        break;
                }

                ubi32_eth_devices[i] = dev;
                printk(KERN_INFO "%s vp_base:0x%p, tio_int:%d irq:%d feature:0x%lx\n",
                        dev->name, priv->regs, eth_node->dn.sendirq, dev->irq, dev->features);
        }

        if (err) {
                ubi32_eth_cleanup();
                return err;
        }

        if (!ubi32_eth_devices[0] && !ubi32_eth_devices[1]) {
                return -ENODEV;
        }

#if defined(UBICOM32_USE_POLLING)
        init_timer(&eth_poll_timer);
        eth_poll_timer.function = ubi32_eth_poll;
        eth_poll_timer.data = (unsigned long)0;
        eth_poll_timer.expires = jiffies + 2;
        add_timer(&eth_poll_timer);
#endif

        return 0;
}

module_init(ubi32_eth_init_module);
module_exit(ubi32_eth_cleanup);

MODULE_AUTHOR("Kan Yan, Greg Ren");
MODULE_LICENSE("GPL");