/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
   munged into HPPA boxen.

   This driver is based upon 82596.c, original credits are below...
   but there were too many hoops which HP wants jumped through to
   keep this code in there in a sane manner.

   3 primary sources of the mess --
   1) hppa needs *lots* of cacheline flushing to keep this kind of
   MMIO running.

   2) The 82596 needs to see all of its pointers as their physical
   address.  Thus virt_to_bus/bus_to_virt are *everywhere*.

   3) The implementation HP is using seems to be significantly pickier
   about when and how the command and RX units are started.  Some
   command ordering was changed.

   Examination of the mach driver leads one to believe that there
   might be a saner way to pull this off...  anyone who feels like a
   full rewrite can be my guest.

   Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)

   02/01/2000  Initial modifications for parisc by Helge Deller (deller@gmx.de)
   03/02/2000  changes for better/correct(?) cache-flushing (deller)
*/

/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans


   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825:  Changed to receive directly into sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.

   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency. This software may only be used and distributed
   according to the terms of the GNU General Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@scyld.com, or C/O
   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403

 */
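
/*
 * Note: lib82596.c is not built on its own; it is #included by a
 * board-specific wrapper (lasi_82596.c or sni_82596.c), which supplies
 * SYSBUS, the SWAP16()/SWAP32() byte-order macros, the DMA_* cache
 * helpers and the mpu_port()/ca() accessors used below.  Where the
 * chip runs in the word-swapped mode described above, a 32-bit swap
 * simply exchanges the two 16-bit halves: 0x12345678 <-> 0x56781234.
 */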

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gfp.h>

/* DEBUG flags
 */

#define DEB_INIT        0x0001
#define DEB_PROBE       0x0002
#define DEB_SERIOUS     0x0004
#define DEB_ERRORS      0x0008
#define DEB_MULTI       0x0010
#define DEB_TDR         0x0020
#define DEB_OPEN        0x0040
#define DEB_RESET       0x0080
#define DEB_ADDCMD      0x0100
#define DEB_STATUS      0x0200
#define DEB_STARTTX     0x0400
#define DEB_RXADDR      0x0800
#define DEB_TXADDR      0x1000
#define DEB_RXFRAME     0x2000
#define DEB_INTS        0x4000
#define DEB_STRUCT      0x8000
#define DEB_ANY         0xffff


#define DEB(x, y)       if (i596_debug & (x)) { y; }
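
/*
 * Usage: the statement in the second argument runs only when the
 * corresponding class bit is set in i596_debug, e.g.
 *
 *	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
 */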


/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET              0x00    /* reset 82596 */
#define PORT_SELFTEST           0x01    /* selftest */
#define PORT_ALTSCP             0x02    /* alternate SCB address */
#define PORT_ALTDUMP            0x03    /* Alternate DUMP address */
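
/*
 * mpu_port() itself is provided by the board wrapper.  As a rough,
 * hypothetical sketch (not the actual implementation), a word-swapped
 * write of command c and address x through a memory-mapped
 * struct i596_reg could look like:
 *
 *	u32 v = (u32)x | c;
 *	writew(v >> 16, &reg->porthi);	most significant word first
 *	writew(v & 0xffff, &reg->portlo);
 */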

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PKT_BUF_SZ      1536
#define MAX_MC_CNT      64

#define ISCP_BUSY       0x0001

#define I596_NULL ((u32)0xffffffff)

#define CMD_EOL         0x8000  /* The last command of the list, stop. */
#define CMD_SUSP        0x4000  /* Suspend after doing cmd. */
#define CMD_INTR        0x2000  /* Interrupt after doing cmd. */

#define CMD_FLEX        0x0008  /* Enable flexible memory model */

enum commands {
        CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
        CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C          0x8000  /* Set when execution completes */
#define STAT_B          0x4000  /* Command being executed */
#define STAT_OK         0x2000  /* Command executed ok */
#define STAT_A          0x1000  /* Command aborted */

#define  CUC_START      0x0100
#define  CUC_RESUME     0x0200
#define  CUC_SUSPEND    0x0300
#define  CUC_ABORT      0x0400
#define  RX_START       0x0010
#define  RX_RESUME      0x0020
#define  RX_SUSPEND     0x0030
#define  RX_ABORT       0x0040

#define TX_TIMEOUT      (HZ/20)
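/* Used as dev->watchdog_timeo in i82596_probe(): HZ/20 jiffies = 50 ms. */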


struct i596_reg {
        unsigned short porthi;
        unsigned short portlo;
        u32            ca;
};

#define EOF             0x8000
#define SIZE_MASK       0x3fff

struct i596_tbd {
        unsigned short size;
        unsigned short pad;
        u32            next;
        u32            data;
        u32 cache_pad[5];               /* Total 32 bytes... */
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596,
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */
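
/*
 * For example, i596_add_cmd() below chains a new command onto the
 * tail with:
 *
 *	lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
 */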

struct i596_cmd {
        struct i596_cmd *v_next;        /* Address from CPUs viewpoint */
        unsigned short status;
        unsigned short command;
        u32            b_next;  /* Address from i596 viewpoint */
};

struct tx_cmd {
        struct i596_cmd cmd;
        u32            tbd;
        unsigned short size;
        unsigned short pad;
        struct sk_buff *skb;            /* So we can free it after tx */
        dma_addr_t dma_addr;
#ifdef __LP64__
        u32 cache_pad[6];               /* Total 64 bytes... */
#else
        u32 cache_pad[1];               /* Total 32 bytes... */
#endif
};

struct tdr_cmd {
        struct i596_cmd cmd;
        unsigned short status;
        unsigned short pad;
};

struct mc_cmd {
        struct i596_cmd cmd;
        short mc_cnt;
        char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
        struct i596_cmd cmd;
        char eth_addr[8];
};

struct cf_cmd {
        struct i596_cmd cmd;
        char i596_config[16];
};

struct i596_rfd {
        unsigned short stat;
        unsigned short cmd;
        u32            b_next;  /* Address from i596 viewpoint */
        u32            rbd;
        unsigned short count;
        unsigned short size;
        struct i596_rfd *v_next;        /* Address from CPUs viewpoint */
        struct i596_rfd *v_prev;
#ifndef __LP64__
        u32 cache_pad[2];               /* Total 32 bytes... */
#endif
};

struct i596_rbd {
        /* hardware data */
        unsigned short count;
        unsigned short zero1;
        u32            b_next;
        u32            b_data;          /* Address from i596 viewpoint */
        unsigned short size;
        unsigned short zero2;
        /* driver data */
        struct sk_buff *skb;
        struct i596_rbd *v_next;
        u32            b_addr;          /* This rbd addr from i596 view */
        unsigned char *v_data;          /* Address from CPUs viewpoint */
                                        /* Total 32 bytes... */
#ifdef __LP64__
        u32 cache_pad[4];
#endif
};

/* These values are chosen so struct i596_dma fits in one page... */

#define TX_RING_SIZE 32
#define RX_RING_SIZE 16

struct i596_scb {
        unsigned short status;
        unsigned short command;
        u32           cmd;
        u32           rfd;
        u32           crc_err;
        u32           align_err;
        u32           resource_err;
        u32           over_err;
        u32           rcvdt_err;
        u32           short_err;
        unsigned short t_on;
        unsigned short t_off;
};

struct i596_iscp {
        u32 stat;
        u32 scb;
};

struct i596_scp {
        u32 sysbus;
        u32 pad;
        u32 iscp;
};

struct i596_dma {
        struct i596_scp scp                     __attribute__((aligned(32)));
        volatile struct i596_iscp iscp          __attribute__((aligned(32)));
        volatile struct i596_scb scb            __attribute__((aligned(32)));
        struct sa_cmd sa_cmd                    __attribute__((aligned(32)));
        struct cf_cmd cf_cmd                    __attribute__((aligned(32)));
        struct tdr_cmd tdr_cmd                  __attribute__((aligned(32)));
        struct mc_cmd mc_cmd                    __attribute__((aligned(32)));
        struct i596_rfd rfds[RX_RING_SIZE]      __attribute__((aligned(32)));
        struct i596_rbd rbds[RX_RING_SIZE]      __attribute__((aligned(32)));
        struct tx_cmd tx_cmds[TX_RING_SIZE]     __attribute__((aligned(32)));
        struct i596_tbd tbds[TX_RING_SIZE]      __attribute__((aligned(32)));
};
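
/*
 * The whole i596_dma block lives in one coherent DMA allocation (see
 * i82596_probe()).  The 32-byte alignment keeps each hardware-visible
 * structure on its own cache line, so the DMA_WBACK/DMA_INV helpers
 * can flush or invalidate one entry without touching its neighbours.
 */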

struct i596_private {
        struct i596_dma *dma;
        u32    stat;
        int last_restart;
        struct i596_rfd *rfd_head;
        struct i596_rbd *rbd_head;
        struct i596_cmd *cmd_tail;
        struct i596_cmd *cmd_head;
        int cmd_backlog;
        u32    last_cmd;
        int next_tx_cmd;
        int options;
        spinlock_t lock;       /* serialize access to chip */
        dma_addr_t dma_addr;
        void __iomem *mpu_port;
        void __iomem *ca;
};

static const char init_setup[] =
{
        0x8E,           /* length, prefetch on */
        0xC8,           /* fifo to 8, monitor off */
        0x80,           /* don't save bad frames */
        0x2E,           /* No source address insertion, 8 byte preamble */
        0x00,           /* priority and backoff defaults */
        0x60,           /* interframe spacing */
        0x00,           /* slot time LSB */
        0xf2,           /* slot time and retries */
        0x00,           /* promiscuous mode */
        0x00,           /* collision detect */
        0x40,           /* minimum frame length */
        0xff,
        0x00,
        0x7f /*  *multi IA */ };
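
/*
 * These 14 bytes are copied into cf_cmd.i596_config and issued as a
 * CmdConfigure in init_i596_mem().  set_multicast_list() later flips
 * byte 8 bit 0x01 (promiscuous mode) and byte 11 bit 0x20 (cleared
 * when IFF_ALLMULTI is set) in place and re-queues the command.
 */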

static int i596_open(struct net_device *dev);
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
static inline void ca(struct net_device *dev);
static void mpu_port(struct net_device *dev, int c, dma_addr_t x);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 100;
static int max_cmd_backlog = TX_RING_SIZE-1;
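
/*
 * i596_add_cmd() forces a full reset when more than max_cmd_backlog
 * commands are queued and the oldest has been outstanding for at
 * least ticks_limit jiffies.
 */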

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev);
#endif


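/*
 * Busy-wait helpers: poll a chip-written field every 10us, calling
 * DMA_INV() each time so the CPU rereads memory rather than a stale
 * cache line.  wait_istat() waits for the 82596 to clear iscp.stat
 * after initialization; wait_cmd() waits for scb.command to be
 * accepted.
 */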
static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
        DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
        while (--delcnt && dma->iscp.stat) {
                udelay(10);
                DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
        }
        if (!delcnt) {
                printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
                     dev->name, str, SWAP16(dma->iscp.stat));
                return -1;
        } else
                return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
        DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
        while (--delcnt && dma->scb.command) {
                udelay(10);
                DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
        }
        if (!delcnt) {
                printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
                       dev->name, str,
                       SWAP16(dma->scb.status),
                       SWAP16(dma->scb.command));
                return -1;
        } else
                return 0;
}


static void i596_display_data(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        struct i596_cmd *cmd;
        struct i596_rfd *rfd;
        struct i596_rbd *rbd;

        printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
               &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
        printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
               &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
        printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
                " .cmd = %08x, .rfd = %08x\n",
               &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
                SWAP32(dma->scb.cmd), SWAP32(dma->scb.rfd));
        printk(KERN_DEBUG "   errors: crc %x, align %x, resource %x,"
               " over %x, rcvdt %x, short %x\n",
               SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
               SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
               SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
        cmd = lp->cmd_head;
        while (cmd != NULL) {
                printk(KERN_DEBUG
                       "cmd at %p, .status = %04x, .command = %04x,"
                       " .b_next = %08x\n",
                       cmd, SWAP16(cmd->status), SWAP16(cmd->command),
                       SWAP32(cmd->b_next));
                cmd = cmd->v_next;
        }
        rfd = lp->rfd_head;
        printk(KERN_DEBUG "rfd_head = %p\n", rfd);
        do {
                printk(KERN_DEBUG
                       "   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
                       " count %04x\n",
                       rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
                       SWAP32(rfd->b_next), SWAP32(rfd->rbd),
                       SWAP16(rfd->count));
                rfd = rfd->v_next;
        } while (rfd != lp->rfd_head);
        rbd = lp->rbd_head;
        printk(KERN_DEBUG "rbd_head = %p\n", rbd);
        do {
                printk(KERN_DEBUG
                       "   %p .count %04x, b_next %08x, b_data %08x,"
                       " size %04x\n",
                        rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
                       SWAP32(rbd->b_data), SWAP16(rbd->size));
                rbd = rbd->v_next;
        } while (rbd != lp->rbd_head);
        DMA_INV(dev, dma, sizeof(struct i596_dma));
}


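/*
 * Translate a CPU pointer within the i596_dma block into the bus
 * address the 82596 must be given, by adding the pointer's offset
 * from the start of the block to the block's DMA handle, e.g.
 *
 *	rfd->b_next = SWAP32(virt_to_dma(lp, rfd + 1));
 */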
#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma)))

static inline int init_rx_bufs(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        int i;
        struct i596_rfd *rfd;
        struct i596_rbd *rbd;

        /* First build the Receive Buffer Descriptor List */

        for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
                dma_addr_t dma_addr;
                struct sk_buff *skb;

                skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
                if (skb == NULL)
                        return -1;
                dma_addr = dma_map_single(dev->dev.parent, skb->data,
                                          PKT_BUF_SZ, DMA_FROM_DEVICE);
                rbd->v_next = rbd+1;
                rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
                rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
                rbd->skb = skb;
                rbd->v_data = skb->data;
                rbd->b_data = SWAP32(dma_addr);
                rbd->size = SWAP16(PKT_BUF_SZ);
        }
        lp->rbd_head = dma->rbds;
        rbd = dma->rbds + rx_ring_size - 1;
        rbd->v_next = dma->rbds;
        rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));

        /* Now build the Receive Frame Descriptor List */

        for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
                rfd->rbd = I596_NULL;
                rfd->v_next = rfd+1;
                rfd->v_prev = rfd-1;
                rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
                rfd->cmd = SWAP16(CMD_FLEX);
        }
        lp->rfd_head = dma->rfds;
        dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
        rfd = dma->rfds;
        rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
        rfd->v_prev = dma->rfds + rx_ring_size - 1;
        rfd = dma->rfds + rx_ring_size - 1;
        rfd->v_next = dma->rfds;
        rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
        rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);

        DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
        return 0;
}
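
/*
 * This sets up the 82596's "flexible memory model" (CMD_FLEX): the
 * frame descriptors carry no buffer space of their own; only the head
 * RFD points into the shared RBD ring, and the chip takes buffers
 * from there as frames arrive.
 */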

static inline void remove_rx_bufs(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_rbd *rbd;
        int i;

        for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
                if (rbd->skb == NULL)
                        break;
                dma_unmap_single(dev->dev.parent,
                                 (dma_addr_t)SWAP32(rbd->b_data),
                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb(rbd->skb);
        }
}


static void rebuild_rx_bufs(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        int i;

        /* Ensure rx frame/buffer descriptors are tidy */

        for (i = 0; i < rx_ring_size; i++) {
                dma->rfds[i].rbd = I596_NULL;
                dma->rfds[i].cmd = SWAP16(CMD_FLEX);
        }
        dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
        lp->rfd_head = dma->rfds;
        dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
        lp->rbd_head = dma->rbds;
        dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));

        DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
}


static int init_i596_mem(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        unsigned long flags;

        mpu_port(dev, PORT_RESET, 0);
        udelay(100);                    /* Wait 100us - seems to help */

        /* change the scp address */

        lp->last_cmd = jiffies;

        dma->scp.sysbus = SYSBUS;
        dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
        dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
        dma->iscp.stat = SWAP32(ISCP_BUSY);
        lp->cmd_backlog = 0;

        lp->cmd_head = NULL;
        dma->scb.cmd = I596_NULL;

        DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

        DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
        DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
        DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

        mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
        ca(dev);
        if (wait_istat(dev, dma, 1000, "initialization timed out"))
                goto failed;
        DEB(DEB_INIT, printk(KERN_DEBUG
                             "%s: i82596 initialization successful\n",
                             dev->name));

        if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
                printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
                goto failed;
        }

        /* Ensure rx frame/buffer descriptors are tidy */
        rebuild_rx_bufs(dev);

        dma->scb.command = 0;
        DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

        DEB(DEB_INIT, printk(KERN_DEBUG
                             "%s: queuing CmdConfigure\n", dev->name));
        memcpy(dma->cf_cmd.i596_config, init_setup, 14);
        dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
        DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
        i596_add_cmd(dev, &dma->cf_cmd.cmd);

        DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
        memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
        dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
        DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
        i596_add_cmd(dev, &dma->sa_cmd.cmd);

        DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
        dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
        DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
        i596_add_cmd(dev, &dma->tdr_cmd.cmd);

        spin_lock_irqsave (&lp->lock, flags);

        if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
                spin_unlock_irqrestore (&lp->lock, flags);
                goto failed_free_irq;
        }
        DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
        dma->scb.command = SWAP16(RX_START);
        dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
        DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

        ca(dev);

        spin_unlock_irqrestore (&lp->lock, flags);
        if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
                goto failed_free_irq;
        DEB(DEB_INIT, printk(KERN_DEBUG
                             "%s: Receive unit started OK\n", dev->name));
        return 0;

failed_free_irq:
        free_irq(dev->irq, dev);
failed:
        printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
        mpu_port(dev, PORT_RESET, 0);
        return -1;
}


static inline int i596_rx(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_rfd *rfd;
        struct i596_rbd *rbd;
        int frames = 0;

        DEB(DEB_RXFRAME, printk(KERN_DEBUG
                                "i596_rx(), rfd_head %p, rbd_head %p\n",
                                lp->rfd_head, lp->rbd_head));


        rfd = lp->rfd_head;             /* Ref next frame to check */

        DMA_INV(dev, rfd, sizeof(struct i596_rfd));
        while (rfd->stat & SWAP16(STAT_C)) {    /* Loop while complete frames */
                if (rfd->rbd == I596_NULL)
                        rbd = NULL;
                else if (rfd->rbd == lp->rbd_head->b_addr) {
                        rbd = lp->rbd_head;
                        DMA_INV(dev, rbd, sizeof(struct i596_rbd));
                } else {
                        printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
                        /* XXX Now what? */
                        rbd = NULL;
                }
                DEB(DEB_RXFRAME, printk(KERN_DEBUG
                                      "  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
                                      rfd, rfd->rbd, rfd->stat));

                if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
                        /* a good frame */
                        int pkt_len = SWAP16(rbd->count) & 0x3fff;
                        struct sk_buff *skb = rbd->skb;
                        int rx_in_place = 0;

                        DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
                        frames++;

                        /* Check if the packet is long enough to just accept
                         * without copying to a properly sized skbuff.
                         */

                        if (pkt_len > rx_copybreak) {
                                struct sk_buff *newskb;
                                dma_addr_t dma_addr;

                                dma_unmap_single(dev->dev.parent,
                                                 (dma_addr_t)SWAP32(rbd->b_data),
                                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                                /* Get fresh skbuff to replace filled one. */
                                newskb = netdev_alloc_skb_ip_align(dev,
                                                                   PKT_BUF_SZ);
                                if (newskb == NULL) {
                                        skb = NULL;     /* drop pkt */
                                        goto memory_squeeze;
                                }

                                /* Pass up the skb already on the Rx ring. */
                                skb_put(skb, pkt_len);
                                rx_in_place = 1;
                                rbd->skb = newskb;
                                dma_addr = dma_map_single(dev->dev.parent,
                                                          newskb->data,
                                                          PKT_BUF_SZ,
                                                          DMA_FROM_DEVICE);
                                rbd->v_data = newskb->data;
                                rbd->b_data = SWAP32(dma_addr);
                                DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
                        } else {
                                skb = netdev_alloc_skb_ip_align(dev, pkt_len);
                        }
memory_squeeze:
                        if (skb == NULL) {
                                /* XXX tulip.c can defer packets here!! */
                                dev->stats.rx_dropped++;
                        } else {
                                if (!rx_in_place) {
                                        /* 16 byte align the data fields */
                                        dma_sync_single_for_cpu(dev->dev.parent,
                                                                (dma_addr_t)SWAP32(rbd->b_data),
                                                                PKT_BUF_SZ, DMA_FROM_DEVICE);
                                        memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
                                        dma_sync_single_for_device(dev->dev.parent,
                                                                   (dma_addr_t)SWAP32(rbd->b_data),
                                                                   PKT_BUF_SZ, DMA_FROM_DEVICE);
                                }
                                skb->len = pkt_len;
                                skb->protocol = eth_type_trans(skb, dev);
                                netif_rx(skb);
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += pkt_len;
                        }
                } else {
                        DEB(DEB_ERRORS, printk(KERN_DEBUG
                                               "%s: Error, rfd.stat = 0x%04x\n",
                                               dev->name, rfd->stat));
                        dev->stats.rx_errors++;
                        if (rfd->stat & SWAP16(0x0100))
                                dev->stats.collisions++;
                        if (rfd->stat & SWAP16(0x8000))
                                dev->stats.rx_length_errors++;
                        if (rfd->stat & SWAP16(0x0001))
                                dev->stats.rx_over_errors++;
                        if (rfd->stat & SWAP16(0x0002))
                                dev->stats.rx_fifo_errors++;
                        if (rfd->stat & SWAP16(0x0004))
                                dev->stats.rx_frame_errors++;
                        if (rfd->stat & SWAP16(0x0008))
                                dev->stats.rx_crc_errors++;
                        if (rfd->stat & SWAP16(0x0010))
                                dev->stats.rx_length_errors++;
                }

                /* Clear the buffer descriptor count and EOF + F flags */

                if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
                        rbd->count = 0;
                        lp->rbd_head = rbd->v_next;
                        DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
                }

                /* Tidy the frame descriptor, marking it as end of list */

                rfd->rbd = I596_NULL;
                rfd->stat = 0;
                rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
                rfd->count = 0;

                /* Update record of next frame descriptor to process */

                lp->dma->scb.rfd = rfd->b_next;
                lp->rfd_head = rfd->v_next;
                DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));

                /* Remove end-of-list from old end descriptor */

                rfd->v_prev->cmd = SWAP16(CMD_FLEX);
                DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
                rfd = lp->rfd_head;
                DMA_INV(dev, rfd, sizeof(struct i596_rfd));
        }

        DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));

        return 0;
}


static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
        struct i596_cmd *ptr;

        while (lp->cmd_head != NULL) {
                ptr = lp->cmd_head;
                lp->cmd_head = ptr->v_next;
                lp->cmd_backlog--;

                switch (SWAP16(ptr->command) & 0x7) {
                case CmdTx:
                        {
                                struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
                                struct sk_buff *skb = tx_cmd->skb;
                                dma_unmap_single(dev->dev.parent,
                                                 tx_cmd->dma_addr,
                                                 skb->len, DMA_TO_DEVICE);

                                dev_kfree_skb(skb);

                                dev->stats.tx_errors++;
                                dev->stats.tx_aborted_errors++;

                                ptr->v_next = NULL;
                                ptr->b_next = I596_NULL;
                                tx_cmd->cmd.command = 0;  /* Mark as free */
                                break;
                        }
                default:
                        ptr->v_next = NULL;
                        ptr->b_next = I596_NULL;
                }
                DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
        }

        wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
        lp->dma->scb.cmd = I596_NULL;
        DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
}


static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
{
        unsigned long flags;

        DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));

        spin_lock_irqsave (&lp->lock, flags);

        wait_cmd(dev, lp->dma, 100, "i596_reset timed out");

        netif_stop_queue(dev);

        /* FIXME: this command might cause an lpmc */
        lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
        DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
        ca(dev);

        /* wait for shutdown */
        wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
        spin_unlock_irqrestore (&lp->lock, flags);

        i596_cleanup_cmd(dev, lp);
        i596_rx(dev);

        netif_start_queue(dev);
        init_i596_mem(dev);
}


static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        unsigned long flags;

        DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
                               lp->cmd_head));

        cmd->status = 0;
        cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
        cmd->v_next = NULL;
        cmd->b_next = I596_NULL;
        DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));

        spin_lock_irqsave (&lp->lock, flags);

        if (lp->cmd_head != NULL) {
                lp->cmd_tail->v_next = cmd;
                lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
                DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
        } else {
                lp->cmd_head = cmd;
                wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
                dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
                dma->scb.command = SWAP16(CUC_START);
                DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
                ca(dev);
        }
        lp->cmd_tail = cmd;
        lp->cmd_backlog++;

        spin_unlock_irqrestore (&lp->lock, flags);

        if (lp->cmd_backlog > max_cmd_backlog) {
                unsigned long tickssofar = jiffies - lp->last_cmd;

                if (tickssofar < ticks_limit)
                        return;

                printk(KERN_ERR
                       "%s: command unit timed out, status resetting.\n",
                       dev->name);
                i596_reset(dev, lp);
        }
}

static int i596_open(struct net_device *dev)
{
        DEB(DEB_OPEN, printk(KERN_DEBUG
                             "%s: i596_open() irq %d.\n", dev->name, dev->irq));

        if (init_rx_bufs(dev)) {
                printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
                return -EAGAIN;
        }
        if (init_i596_mem(dev)) {
                printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
                goto out_remove_rx_bufs;
        }
        netif_start_queue(dev);

        return 0;

out_remove_rx_bufs:
        remove_rx_bufs(dev);
        return -EAGAIN;
}

static void i596_tx_timeout (struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);

        /* Transmitter timeout, serious problems. */
        DEB(DEB_ERRORS, printk(KERN_DEBUG
                               "%s: transmit timed out, status resetting.\n",
                               dev->name));

        dev->stats.tx_errors++;

        /* Try to restart the adaptor */
        if (lp->last_restart == dev->stats.tx_packets) {
                DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
                /* Shutdown and restart */
                i596_reset (dev, lp);
        } else {
                /* Issue a channel attention signal */
                DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
                lp->dma->scb.command = SWAP16(CUC_START | RX_START);
                DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
                ca (dev);
                lp->last_restart = dev->stats.tx_packets;
        }

        netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue (dev);
}


static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct tx_cmd *tx_cmd;
        struct i596_tbd *tbd;
        short length = skb->len;

        DEB(DEB_STARTTX, printk(KERN_DEBUG
                                "%s: i596_start_xmit(%x,%p) called\n",
                                dev->name, skb->len, skb->data));

        if (length < ETH_ZLEN) {
                if (skb_padto(skb, ETH_ZLEN))
                        return NETDEV_TX_OK;
                length = ETH_ZLEN;
        }

        netif_stop_queue(dev);

        tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
        tbd = lp->dma->tbds + lp->next_tx_cmd;

        if (tx_cmd->cmd.command) {
                DEB(DEB_ERRORS, printk(KERN_DEBUG
                                       "%s: xmit ring full, dropping packet.\n",
                                       dev->name));
                dev->stats.tx_dropped++;

                dev_kfree_skb_any(skb);
        } else {
                if (++lp->next_tx_cmd == TX_RING_SIZE)
                        lp->next_tx_cmd = 0;
                tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
                tbd->next = I596_NULL;

                tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
                tx_cmd->skb = skb;

                tx_cmd->pad = 0;
                tx_cmd->size = 0;
                tbd->pad = 0;
                tbd->size = SWAP16(EOF | length);

                tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
                                                  skb->len, DMA_TO_DEVICE);
                tbd->data = SWAP32(tx_cmd->dma_addr);

                DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
                DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
                DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
                i596_add_cmd(dev, &tx_cmd->cmd);

                dev->stats.tx_packets++;
                dev->stats.tx_bytes += length;
        }

        netif_start_queue(dev);

        return NETDEV_TX_OK;
}

static void print_eth(unsigned char *add, char *str)
{
        printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
               add, add + 6, add, add[12], add[13], str);
}

static const struct net_device_ops i596_netdev_ops = {
        .ndo_open               = i596_open,
        .ndo_stop               = i596_close,
        .ndo_start_xmit         = i596_start_xmit,
        .ndo_set_rx_mode        = set_multicast_list,
        .ndo_tx_timeout         = i596_tx_timeout,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = i596_poll_controller,
#endif
};

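/*
 * Called from the board wrapper's probe routine once dev->base_addr,
 * dev->irq and dev->dev_addr have been set up; it allocates the shared
 * descriptor block and registers the net device.
 */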
static int i82596_probe(struct net_device *dev)
{
        int i;
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma;

        /* This lot ensures things have been cache line aligned. */
        BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
        BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
        BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
        BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
#ifndef __LP64__
        BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
#endif

        if (!dev->base_addr || !dev->irq)
                return -ENODEV;

        dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent,
                sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL);
        if (!dma) {
                printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
                return -ENOMEM;
        }

        dev->netdev_ops = &i596_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;

        memset(dma, 0, sizeof(struct i596_dma));
        lp->dma = dma;

        dma->scb.command = 0;
        dma->scb.cmd = I596_NULL;
        dma->scb.rfd = I596_NULL;
        spin_lock_init(&lp->lock);

        DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));

        i = register_netdev(dev);
        if (i) {
                DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
                                    (void *)dma, lp->dma_addr);
                return i;
        }

        DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
                              dev->name, dev->base_addr, dev->dev_addr,
                              dev->irq));
        DEB(DEB_INIT, printk(KERN_INFO
                             "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
                             dev->name, dma, (int)sizeof(struct i596_dma),
                             &dma->scb));

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        i596_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct i596_private *lp;
        struct i596_dma *dma;
        unsigned short status, ack_cmd = 0;

        lp = netdev_priv(dev);
        dma = lp->dma;

        spin_lock (&lp->lock);

        wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
        status = SWAP16(dma->scb.status);

        DEB(DEB_INTS, printk(KERN_DEBUG
                             "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
                        dev->name, dev->irq, status));

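        /*
         * Upper nibble of the SCB status: 0x8000 CX (command done),
         * 0x4000 FR (frame received), 0x2000 CNA (command unit went
         * not-active), 0x1000 RNR (receive unit went not-ready).
         * Writing the same bits back into scb.command acknowledges
         * them.
         */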
        ack_cmd = status & 0xf000;

        if (!ack_cmd) {
                DEB(DEB_ERRORS, printk(KERN_DEBUG
                                       "%s: interrupt with no events\n",
                                       dev->name));
                spin_unlock (&lp->lock);
                return IRQ_NONE;
        }

        if ((status & 0x8000) || (status & 0x2000)) {
                struct i596_cmd *ptr;

                if ((status & 0x8000))
                        DEB(DEB_INTS,
                            printk(KERN_DEBUG
                                   "%s: i596 interrupt completed command.\n",
                                   dev->name));
                if ((status & 0x2000))
                        DEB(DEB_INTS,
                            printk(KERN_DEBUG
                                   "%s: i596 interrupt command unit inactive %x.\n",
                                   dev->name, status & 0x0700));

                while (lp->cmd_head != NULL) {
                        DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
                        if (!(lp->cmd_head->status & SWAP16(STAT_C)))
                                break;

                        ptr = lp->cmd_head;

                        DEB(DEB_STATUS,
                            printk(KERN_DEBUG
                                   "cmd_head->status = %04x, ->command = %04x\n",
                                   SWAP16(lp->cmd_head->status),
                                   SWAP16(lp->cmd_head->command)));
                        lp->cmd_head = ptr->v_next;
                        lp->cmd_backlog--;

                        switch (SWAP16(ptr->command) & 0x7) {
                        case CmdTx:
                            {
                                struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
                                struct sk_buff *skb = tx_cmd->skb;

                                if (ptr->status & SWAP16(STAT_OK)) {
                                        DEB(DEB_TXADDR,
                                            print_eth(skb->data, "tx-done"));
                                } else {
                                        dev->stats.tx_errors++;
                                        if (ptr->status & SWAP16(0x0020))
                                                dev->stats.collisions++;
                                        if (!(ptr->status & SWAP16(0x0040)))
                                                dev->stats.tx_heartbeat_errors++;
                                        if (ptr->status & SWAP16(0x0400))
                                                dev->stats.tx_carrier_errors++;
                                        if (ptr->status & SWAP16(0x0800))
                                                dev->stats.collisions++;
                                        if (ptr->status & SWAP16(0x1000))
                                                dev->stats.tx_aborted_errors++;
                                }
                                dma_unmap_single(dev->dev.parent,
                                                 tx_cmd->dma_addr,
                                                 skb->len, DMA_TO_DEVICE);
                                dev_kfree_skb_irq(skb);

                                tx_cmd->cmd.command = 0; /* Mark free */
                                break;
                            }
                        case CmdTDR:
                            {
                                unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);

                                if (status & 0x8000) {
                                        DEB(DEB_ANY,
                                            printk(KERN_DEBUG "%s: link ok.\n",
                                                   dev->name));
                                } else {
                                        if (status & 0x4000)
                                                printk(KERN_ERR
                                                       "%s: Transceiver problem.\n",
                                                       dev->name);
                                        if (status & 0x2000)
                                                printk(KERN_ERR
                                                       "%s: Termination problem.\n",
                                                       dev->name);
                                        if (status & 0x1000)
                                                printk(KERN_ERR
                                                       "%s: Short circuit.\n",
                                                       dev->name);

                                        DEB(DEB_TDR,
                                            printk(KERN_DEBUG "%s: Time %d.\n",
                                                   dev->name, status & 0x07ff));
                                }
                                break;
                            }
                        case CmdConfigure:
                                /*
                                 * Zap command so set_multicast_list() knows
                                 * it is free
                                 */
                                ptr->command = 0;
                                break;
                        }
                        ptr->v_next = NULL;
                        ptr->b_next = I596_NULL;
                        DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
                        lp->last_cmd = jiffies;
                }

                /* This mess is arranging that only the last of any outstanding
                 * commands has the interrupt bit set.  Should probably really
                 * only add to the cmd queue when the CU is stopped.
                 */
                ptr = lp->cmd_head;
                while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
                        struct i596_cmd *prev = ptr;

                        ptr->command &= SWAP16(0x1fff);
                        ptr = ptr->v_next;
                        DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
                }

                if (lp->cmd_head != NULL)
                        ack_cmd |= CUC_START;
                dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
                DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
        }
        if ((status & 0x1000) || (status & 0x4000)) {
                if ((status & 0x4000))
                        DEB(DEB_INTS,
                            printk(KERN_DEBUG
                                   "%s: i596 interrupt received a frame.\n",
                                   dev->name));
                i596_rx(dev);
                /* Only RX_START if stopped - RGH 07-07-96 */
                if (status & 0x1000) {
                        if (netif_running(dev)) {
                                DEB(DEB_ERRORS,
                                    printk(KERN_DEBUG
                                           "%s: i596 interrupt receive unit inactive, status 0x%x\n",
                                           dev->name, status));
                                ack_cmd |= RX_START;
                                dev->stats.rx_errors++;
                                dev->stats.rx_fifo_errors++;
                                rebuild_rx_bufs(dev);
                        }
                }
        }
        wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
        dma->scb.command = SWAP16(ack_cmd);
        DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));

        /* DANGER: I suspect that some kind of interrupt
         acknowledgement aside from acking the 82596 might be needed
         here...  but it's running acceptably without */

        ca(dev);

        wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
        DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

        spin_unlock (&lp->lock);
        return IRQ_HANDLED;
}

static int i596_close(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        unsigned long flags;

        netif_stop_queue(dev);

        DEB(DEB_INIT,
            printk(KERN_DEBUG
                   "%s: Shutting down ethercard, status was %4.4x.\n",
                   dev->name, SWAP16(lp->dma->scb.status)));

        spin_lock_irqsave(&lp->lock, flags);

        wait_cmd(dev, lp->dma, 100, "close1 timed out");
        lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
        DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));

        ca(dev);

        wait_cmd(dev, lp->dma, 100, "close2 timed out");
        spin_unlock_irqrestore(&lp->lock, flags);
        DEB(DEB_STRUCT, i596_display_data(dev));
        i596_cleanup_cmd(dev, lp);

        free_irq(dev->irq, dev);
        remove_rx_bufs(dev);

        return 0;
}

/*
 *    Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        int config = 0, cnt;

        DEB(DEB_MULTI,
            printk(KERN_DEBUG
                   "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
                   dev->name, netdev_mc_count(dev),
                   dev->flags & IFF_PROMISC ? "ON" : "OFF",
                   dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

        if ((dev->flags & IFF_PROMISC) &&
            !(dma->cf_cmd.i596_config[8] & 0x01)) {
                dma->cf_cmd.i596_config[8] |= 0x01;
                config = 1;
        }
        if (!(dev->flags & IFF_PROMISC) &&
            (dma->cf_cmd.i596_config[8] & 0x01)) {
                dma->cf_cmd.i596_config[8] &= ~0x01;
                config = 1;
        }
        if ((dev->flags & IFF_ALLMULTI) &&
            (dma->cf_cmd.i596_config[11] & 0x20)) {
                dma->cf_cmd.i596_config[11] &= ~0x20;
                config = 1;
        }
        if (!(dev->flags & IFF_ALLMULTI) &&
            !(dma->cf_cmd.i596_config[11] & 0x20)) {
                dma->cf_cmd.i596_config[11] |= 0x20;
                config = 1;
        }
        if (config) {
                if (dma->cf_cmd.cmd.command)
                        printk(KERN_INFO
                               "%s: config change request already queued\n",
                               dev->name);
                else {
                        dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
                        DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
                        i596_add_cmd(dev, &dma->cf_cmd.cmd);
                }
        }

        cnt = netdev_mc_count(dev);
        if (cnt > MAX_MC_CNT) {
                cnt = MAX_MC_CNT;
                printk(KERN_NOTICE "%s: Only %d multicast addresses supported\n",
                        dev->name, cnt);
        }

        if (!netdev_mc_empty(dev)) {
                struct netdev_hw_addr *ha;
                unsigned char *cp;
                struct mc_cmd *cmd;

                cmd = &dma->mc_cmd;
                cmd->cmd.command = SWAP16(CmdMulticastList);
                cmd->mc_cnt = SWAP16(cnt * 6);  /* cnt was capped at MAX_MC_CNT above */
                cp = cmd->mc_addrs;
                netdev_for_each_mc_addr(ha, dev) {
                        if (!cnt--)
                                break;
                        memcpy(cp, ha->addr, ETH_ALEN);
                        if (i596_debug > 1)
                                DEB(DEB_MULTI,
                                    printk(KERN_DEBUG
                                           "%s: Adding address %pM\n",
                                           dev->name, cp));
                        cp += ETH_ALEN;
                }
                DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
                i596_add_cmd(dev, &cmd->cmd);
        }
1408 }