1 /* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
3 Written 1998-2000 by Donald Becker.
5 Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
6 send all bug reports to me, and not to Donald Becker, as this code
7 has been heavily modified from Donald's original version.
9 This software may be used and distributed according to the terms of
10 the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on or derived from this code fall under the GPL and must
12 retain the authorship, copyright and license notice. This file is not
13 a complete program and may only be used when the entire operating
14 system is licensed under the GPL.
16 The information below comes from Donald Becker's original driver:
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
23 Support and updates available at
24 http://www.scyld.com/network/starfire.html
	[link no longer provides useful info -jgarzik]
*/
29 #define DRV_NAME "starfire"
30 #define DRV_VERSION "2.1"
31 #define DRV_RELDATE "July 6, 2008"
33 #include <linux/module.h>
34 #include <linux/kernel.h>
35 #include <linux/pci.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/init.h>
39 #include <linux/delay.h>
40 #include <linux/crc32.h>
41 #include <linux/ethtool.h>
42 #include <linux/mii.h>
43 #include <linux/if_vlan.h>
45 #include <linux/firmware.h>
46 #include <asm/processor.h> /* Processor type for cache alignment. */
47 #include <asm/uaccess.h>
/*
 * The current frame processor firmware fails to checksum a fragment
 * of length 1. If and when this is fixed, the #define below can be removed.
 */
#define HAS_BROKEN_FIRMWARE

/*
 * If using the broken firmware, data must be padded to the next 32-bit boundary.
 */
#ifdef HAS_BROKEN_FIRMWARE
#define PADDING_MASK 3
#endif
/*
 * Define this if using the driver with the zero-copy patch
 */
#define ZEROCOPY	/* referenced below by "#endif ZEROCOPY" -- restored */

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define VLAN_SUPPORT	/* referenced below by "#endif VLAN_SUPPORT" -- restored */
#endif
/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

/* Used for tuning interrupt latency vs. overhead. */
static int intr_latency;
static int small_frames;

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
/* NOTE(review): module_param(mtu, ...) below references an `mtu` variable
   that is not visible in this chunk -- confirm it is declared nearby. */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 512;
/* Whether to do TCP/UDP checksums in hardware */
static int enable_hw_cksum = 1;
#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 *
 * The ia64 doesn't allow for unaligned loads even of integers being
 * misaligned on a 2 byte boundary. Thus always force copying of
 * packets as the starfire doesn't allow for misaligned DMAs ;-(
 *
 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
 * at least, having unaligned frames leads to a rather serious performance
 * penalty.
 */
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;	/* always copy on these machines */
#else
static int rx_copybreak /* = 0 */;
#endif
/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif
/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
   The media type is usually passed in 'options[]'.
   These variables are deprecated, use ethtool instead. -Ion
*/
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {0, };
static int full_duplex[MAX_UNITS] = {0, };
/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
*/
#define RX_RING_SIZE	256
#define TX_RING_SIZE	32
/* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
#define DONE_Q_SIZE	1024
/* All queues must be aligned on a 256-byte boundary */
#define QUEUE_ALIGN	256
/* Pick the hardware Rx queue-size encoding matching RX_RING_SIZE. */
#if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2 * HZ)

/*
 * We need a much better method to determine if dma_addr_t is 64-bit.
 */
#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
/* 64-bit dma_addr_t */
#define ADDR_64BITS	/* This chip uses 64 bit addresses. */
#define netdrv_addr_t	__le64
#define cpu_to_dma(x)	cpu_to_le64(x)
#define dma_to_cpu(x)	le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE	RxDescQAddr64bit
#define TX_DESC_Q_ADDR_SIZE	TxDescQAddr64bit
#define RX_COMPL_Q_ADDR_SIZE	RxComplQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE	TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE	RxDescAddr64bit
#else  /* 32-bit dma_addr_t */
#define netdrv_addr_t	__le32
#define cpu_to_dma(x)	cpu_to_le32(x)
#define dma_to_cpu(x)	le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE	RxDescQAddr32bit
#define TX_DESC_Q_ADDR_SIZE	TxDescQAddr32bit
#define RX_COMPL_Q_ADDR_SIZE	RxComplQAddr32bit
#define TX_COMPL_Q_ADDR_SIZE	TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE	RxDescAddr32bit
#endif /* ADDR_64BITS */
/* Convenience wrappers over the generic skb accessors. */
#define skb_first_frag_len(skb)	skb_headlen(skb)
#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)

/* Firmware names for the Rx/Tx frame processors.
   NOTE(review): these are placeholder ("deblobbed") strings; firmware
   loading below cannot succeed with them -- confirm intended. */
#define FIRMWARE_RX	"/*(DEBLOBBED)*/"
#define FIRMWARE_TX	"/*(DEBLOBBED)*/"

/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/* Module parameters: all load-time only (sysfs perm 0). */
module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(intr_latency, int, 0);
module_param(small_frames, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(enable_hw_cksum, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "MTU (all boards)");
/* NOTE(review): the comment at the `debug` declaration says the range is
   0..7, while this description says 0-6 -- confirm which is correct. */
MODULE_PARM_DESC(debug, "Debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
213 I. Board Compatibility
215 This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
217 II. Board-specific settings
219 III. Driver operation
223 The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
224 ring sizes are set fixed by the hardware, but may optionally be wrapped
225 earlier by the END bit in the descriptor.
226 This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
228 The Tx ring is wrapped with the END bit, since a large hardware Tx queue
229 disables the queue layer priority ordering and we have no mechanism to
230 utilize the hardware two-level priority queue. When modifying the
231 RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
234 IIIb/c. Transmit/Receive Structure
236 See the Adaptec manual for the many possible structures, and options for
237 each structure. There are far too many to document all of them here.
239 For transmit this driver uses type 0/1 transmit descriptors (depending
240 on the 32/64 bitness of the architecture), and relies on automatic
241 minimum-length padding. It does not use the completion queue
242 consumer index, but instead checks for non-zero status entries.
244 For receive this driver uses type 2/3 receive descriptors. The driver
245 allocates full frame size skbuffs for the Rx ring buffers, so all frames
246 should fit in a single descriptor. The driver does not use the completion
247 queue consumer index, but instead checks for non-zero status entries.
249 When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
250 is allocated and the frame is copied to the new skbuff. When the incoming
251 frame is larger, the skbuff is passed directly up the protocol stack.
252 Buffers consumed this way are replaced by newly allocated skbuffs in a later
255 A notable aspect of operation is that unaligned buffers are not permitted by
256 the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines,
e.g. Alphas and IA64. For these architectures, the driver is forced to copy
259 the frame into a new skbuff unconditionally. Copied frames are put into the
260 skbuff at an offset of "+2", thus 16-byte aligning the IP header.
262 IIId. Synchronization
264 The driver runs as two independent, single-threaded flows of control. One
265 is the send-packet routine, which enforces single-threaded use by the
266 dev->tbusy flag. The other thread is the interrupt handler, which is single
267 threaded by the hardware and interrupt handling software.
269 The send packet thread has partial control over the Tx ring and the netif_queue
270 status. If the number of free Tx slots in the ring falls below a certain number
271 (currently hardcoded to 4), it signals the upper layer to stop the queue.
273 The interrupt handler has exclusive control over the Rx ring and records stats
274 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
275 empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
283 The Adaptec Starfire manuals, available only from Adaptec.
284 http://www.scyld.com/expert/100mbps.html
285 http://www.scyld.com/expert/NWay.html
289 - StopOnPerr is broken, don't enable
290 - Hardware ethernet padding exposes random data, perform software padding
291 instead (unverified -- works correctly for all the hardware I have)
297 enum chip_capability_flags {CanHaveMII=1, };
303 static struct pci_device_id starfire_pci_tbl[] = {
304 { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
307 MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
309 /* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
310 static const struct chip_info {
313 } netdrv_tbl[] __devinitdata = {
314 { "Adaptec Starfire 6915", CanHaveMII },
/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device. The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically. */
enum register_offsets {
	PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
	IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
	MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
	GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
	TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
	TxRingHiAddr=0x5009C, /* 64 bit address extension. */
	TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
	/* Restored: TxThreshold is written in netdev_open() below but its
	   enumerator was missing from this chunk. */
	TxThreshold=0x500B0,
	CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
	RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
	CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
	RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
	RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
	TxMode=0x55000, VlanType=0x55064,
	PerfFilterTable=0x56000, HashTable=0x56100,
	TxGfpMem=0x58000, RxGfpMem=0x5a000,
};
/*
 * Bits in the interrupt status/mask registers.
 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
 * enables all the interrupt sources that are or'ed into those status bits.
 */
enum intr_status_bits {
	IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
	IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
	IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
	IntrTxComplQLow=0x200000, IntrPCI=0x100000,
	IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
	IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
	IntrNormalSummary=0x8000, IntrTxDone=0x4000,
	IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
	IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
	IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
	IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
	IntrNoTxCsum=0x20, IntrTxBadID=0x10,
	IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
	IntrTxGfp=0x02, IntrPCIPad=0x01,
	/* not quite bits -- composite masks over the values above */
	IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
	IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
	IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
};	/* restored: closing brace was missing from this chunk */
/* NOTE(review): throughout this section several `enum ... {` opener and
   `};` closer lines are missing from this chunk; the enumerator lists are
   preserved exactly as found. Confirm against a complete copy. */
/* Bits in the RxFilterMode register. */
	AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
	AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
	PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,

/* Bits in the TxMode register */
	MiiSoftReset=0x8000, MIILoopback=0x4000,
	TxFlowEnable=0x0800, RxFlowEnable=0x0400,
	PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,

/* Bits in the TxDescCtrl register. */
	TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
	TxDescSpace128=0x30, TxDescSpace256=0x40,
	TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
	TxDescType3=0x03, TxDescType4=0x04,
	TxNoDMACompletion=0x08,
	TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
	TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
	TxDMABurstSizeShift=8,

/* Bits in the RxDescQCtrl register. */
	RxBufferLenShift=16, RxMinDescrThreshShift=0,
	RxPrefetchMode=0x8000, RxVariableQ=0x2000,
	Rx2048QEntries=0x4000, Rx256QEntries=0,
	RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
	RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
	RxDescSpace4=0x000, RxDescSpace8=0x100,
	RxDescSpace16=0x200, RxDescSpace32=0x300,
	RxDescSpace64=0x400, RxDescSpace128=0x500,

/* Bits in the RxDMACtrl register. */
enum rx_dmactrl_bits {
	RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
	RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
	RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
	RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
	RxChecksumRejectTCPOnly=0x01000000,
	RxCompletionQ2Enable=0x800000,
	RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
	RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
	RxDMAQ2NonIP=0x400000,
	RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
	RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,

/* Bits in the RxCompletionAddr register */
	RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
	RxComplProducerWrEn=0x40,
	RxComplType0=0x00, RxComplType1=0x10,
	RxComplType2=0x20, RxComplType3=0x30,
	RxComplThreshShift=0,

/* Bits in the TxCompletionAddr register */
	TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
	TxComplProducerWrEn=0x40,
	TxComplIntrStatus=0x20,
	CommonQueueMode=0x10,
	TxComplThreshShift=0,

/* Bits in the GenCtrl register */
	RxEnable=0x05, TxEnable=0x0a,
	RxGFPEnable=0x10, TxGFPEnable=0x20,

/* Bits in the IntrTimerCtrl register */
enum intr_ctrl_bits {
	Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
	SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
	IntrLatencyMask=0x1f,
/* The Rx and Tx buffer descriptors. */
/* NOTE(review): struct bodies and #if openers in this section are missing
   lines in this chunk (closing braces, remaining members); the declarations
   are preserved exactly as found. */
struct starfire_rx_desc {
	netdrv_addr_t rxaddr;
	RxDescValid=1, RxDescEndRing=2,

/* Completion queue entry. */
struct short_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
struct basic_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
struct csum_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
	__le16 csum;			/* Partial checksum */
struct full_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
	__le16 csum;			/* partial checksum */

/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
typedef struct full_rx_done_desc rx_done_desc;
#define RxComplType RxComplType3
#else  /* not VLAN_SUPPORT */
typedef struct csum_rx_done_desc rx_done_desc;
#define RxComplType RxComplType2
#endif /* not VLAN_SUPPORT */

	RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,

/* Type 1 Tx descriptor. */
struct starfire_tx_desc_1 {
	__le32 status;			/* Upper bits are status, lower 16 length. */

/* Type 2 Tx descriptor. */
struct starfire_tx_desc_2 {
	__le32 status;			/* Upper bits are status, lower 16 length. */

typedef struct starfire_tx_desc_2 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType2
#else  /* not ADDR_64BITS */
typedef struct starfire_tx_desc_1 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType1
#endif /* not ADDR_64BITS */
#define TX_DESC_SPACING TxDescSpaceUnlim

	TxCRCEn=0x01000000, TxDescIntr=0x08000000,
	TxRingWrap=0x04000000, TxCalTCP=0x02000000,
struct tx_done_desc {
	__le32 status;			/* timestamp, index. */
	__le32 intrstatus;		/* interrupt status */

/* Per-slot bookkeeping for the Rx/Tx rings. */
struct rx_ring_info {
struct tx_ring_info {
	unsigned int used_slots;
/* Per-device driver state, hung off the net_device's private area.
   NOTE(review): members referenced elsewhere in this file (np->base,
   np->lock, np->queue_mem, np->tx_mode, np->intr_timer_ctrl,
   np->tx_threshold) and the closing brace are on lines missing from this
   chunk -- confirm against a complete copy. */
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct starfire_rx_desc *rx_ring;
	starfire_tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;
	/* The addresses of rx/tx-in-place skbuffs. */
	struct rx_ring_info rx_info[RX_RING_SIZE];
	struct tx_ring_info tx_info[TX_RING_SIZE];
	/* Pointers to completion queues (full pages). */
	rx_done_desc *rx_done_q;
	dma_addr_t rx_done_q_dma;
	unsigned int rx_done;
	struct tx_done_desc *tx_done_q;
	dma_addr_t tx_done_q_dma;
	unsigned int tx_done;
	struct napi_struct napi;
	struct net_device *dev;
	struct net_device_stats stats;
	struct pci_dev *pci_dev;
	struct vlan_group *vlgrp;
	dma_addr_t queue_mem_dma;
	size_t queue_mem_size;
	/* Frequently used values: keep some adjacent for cache effect. */
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx, reap_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	/* These values keep track of the transceiver/media in use. */
	int speed100;			/* Set if speed == 100MBit. */
	/* MII transceiver section. */
	struct mii_if_info mii_if;	/* MII lib hooks/info */
	int phy_cnt;			/* MII device addresses. */
	unsigned char phys[PHY_CNT];	/* MII device addresses. */
591 static int mdio_read(struct net_device *dev, int phy_id, int location);
592 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
593 static int netdev_open(struct net_device *dev);
594 static void check_duplex(struct net_device *dev);
595 static void tx_timeout(struct net_device *dev);
596 static void init_ring(struct net_device *dev);
597 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
598 static irqreturn_t intr_handler(int irq, void *dev_instance);
599 static void netdev_error(struct net_device *dev, int intr_status);
600 static int __netdev_rx(struct net_device *dev, int *quota);
601 static int netdev_poll(struct napi_struct *napi, int budget);
602 static void refill_rx_ring(struct net_device *dev);
603 static void netdev_error(struct net_device *dev, int intr_status);
604 static void set_rx_mode(struct net_device *dev);
605 static struct net_device_stats *get_stats(struct net_device *dev);
606 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
607 static int netdev_close(struct net_device *dev);
608 static void netdev_media_change(struct net_device *dev);
609 static const struct ethtool_ops ethtool_ops;
#ifdef VLAN_SUPPORT
/* NOTE(review): the bodies below were missing braces and several
   statements in this chunk; they are reconstructed from the visible
   statements -- confirm against a complete copy. */

/* Cache the VLAN group for the Rx path and refresh the Rx filter,
   whose mode depends on whether a vlgrp is set. Serialized by np->lock. */
static void netdev_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 1)
		printk("%s: Setting vlgrp to %p\n", dev->name, grp);
	np->vlgrp = grp;
	set_rx_mode(dev);
	spin_unlock(&np->lock);
}

/* Add `vid` to the hardware VLAN filter by rewriting the Rx filter. */
static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 1)
		printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
	set_rx_mode(dev);
	spin_unlock(&np->lock);
}

/* Drop `vid` from the group and rewrite the hardware Rx filter. */
static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 1)
		printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
	vlan_group_set_device(np->vlgrp, vid, NULL);
	set_rx_mode(dev);
	spin_unlock(&np->lock);
}
#endif /* VLAN_SUPPORT */
650 static const struct net_device_ops netdev_ops = {
651 .ndo_open = netdev_open,
652 .ndo_stop = netdev_close,
653 .ndo_start_xmit = start_tx,
654 .ndo_tx_timeout = tx_timeout,
655 .ndo_get_stats = get_stats,
656 .ndo_set_multicast_list = &set_rx_mode,
657 .ndo_do_ioctl = netdev_ioctl,
658 .ndo_change_mtu = eth_change_mtu,
659 .ndo_set_mac_address = eth_mac_addr,
660 .ndo_validate_addr = eth_validate_addr,
662 .ndo_vlan_rx_register = netdev_vlan_rx_register,
663 .ndo_vlan_rx_add_vid = netdev_vlan_rx_add_vid,
664 .ndo_vlan_rx_kill_vid = netdev_vlan_rx_kill_vid,
668 static int __devinit starfire_init_one(struct pci_dev *pdev,
669 const struct pci_device_id *ent)
671 struct netdev_private *np;
672 int i, irq, option, chip_idx = ent->driver_data;
673 struct net_device *dev;
674 static int card_idx = -1;
677 int drv_flags, io_size;
680 /* when built into the kernel, we only print version if device is found */
682 static int printed_version;
683 if (!printed_version++)
689 if (pci_enable_device (pdev))
692 ioaddr = pci_resource_start(pdev, 0);
693 io_size = pci_resource_len(pdev, 0);
694 if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
695 printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
699 dev = alloc_etherdev(sizeof(*np));
701 printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
704 SET_NETDEV_DEV(dev, &pdev->dev);
708 if (pci_request_regions (pdev, DRV_NAME)) {
709 printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
710 goto err_out_free_netdev;
713 base = ioremap(ioaddr, io_size);
715 printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
716 card_idx, io_size, ioaddr);
717 goto err_out_free_res;
720 pci_set_master(pdev);
722 /* enable MWI -- it vastly improves Rx performance on sparc64 */
723 pci_try_set_mwi(pdev);
726 /* Starfire can do TCP/UDP checksumming */
728 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
729 #endif /* ZEROCOPY */
732 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
733 #endif /* VLAN_RX_KILL_VID */
735 dev->features |= NETIF_F_HIGHDMA;
736 #endif /* ADDR_64BITS */
738 /* Serial EEPROM reads are hidden by the hardware. */
739 for (i = 0; i < 6; i++)
740 dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);
742 #if ! defined(final_version) /* Dump the EEPROM contents during development. */
744 for (i = 0; i < 0x20; i++)
746 (unsigned int)readb(base + EEPROMCtrl + i),
747 i % 16 != 15 ? " " : "\n");
750 /* Issue soft reset */
751 writel(MiiSoftReset, base + TxMode);
753 writel(0, base + TxMode);
755 /* Reset the chip to erase previous misconfiguration. */
756 writel(1, base + PCIDeviceConfig);
758 while (--boguscnt > 0) {
760 if ((readl(base + PCIDeviceConfig) & 1) == 0)
764 printk("%s: chipset reset never completed!\n", dev->name);
765 /* wait a little longer */
768 dev->base_addr = (unsigned long)base;
771 np = netdev_priv(dev);
774 spin_lock_init(&np->lock);
775 pci_set_drvdata(pdev, dev);
779 np->mii_if.dev = dev;
780 np->mii_if.mdio_read = mdio_read;
781 np->mii_if.mdio_write = mdio_write;
782 np->mii_if.phy_id_mask = 0x1f;
783 np->mii_if.reg_num_mask = 0x1f;
785 drv_flags = netdrv_tbl[chip_idx].drv_flags;
787 option = card_idx < MAX_UNITS ? options[card_idx] : 0;
789 option = dev->mem_start;
791 /* The lower four bits are the media type. */
793 np->mii_if.full_duplex = 1;
795 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
796 np->mii_if.full_duplex = 1;
798 if (np->mii_if.full_duplex)
799 np->mii_if.force_media = 1;
801 np->mii_if.force_media = 0;
804 /* timer resolution is 128 * 0.8us */
805 np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
806 Timer10X | EnableIntrMasking;
808 if (small_frames > 0) {
809 np->intr_timer_ctrl |= SmallFrameBypass;
810 switch (small_frames) {
812 np->intr_timer_ctrl |= SmallFrame64;
815 np->intr_timer_ctrl |= SmallFrame128;
818 np->intr_timer_ctrl |= SmallFrame256;
821 np->intr_timer_ctrl |= SmallFrame512;
822 if (small_frames > 512)
823 printk("Adjusting small_frames down to 512\n");
828 dev->netdev_ops = &netdev_ops;
829 dev->watchdog_timeo = TX_TIMEOUT;
830 SET_ETHTOOL_OPS(dev, ðtool_ops);
832 netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
837 if (register_netdev(dev))
838 goto err_out_cleardev;
840 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
841 dev->name, netdrv_tbl[chip_idx].name, base,
844 if (drv_flags & CanHaveMII) {
845 int phy, phy_idx = 0;
847 for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
848 mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
851 while (--boguscnt > 0)
852 if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
855 printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
858 mii_status = mdio_read(dev, phy, MII_BMSR);
859 if (mii_status != 0) {
860 np->phys[phy_idx++] = phy;
861 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
862 printk(KERN_INFO "%s: MII PHY found at address %d, status "
863 "%#4.4x advertising %#4.4x.\n",
864 dev->name, phy, mii_status, np->mii_if.advertising);
865 /* there can be only one PHY on-board */
869 np->phy_cnt = phy_idx;
871 np->mii_if.phy_id = np->phys[0];
873 memset(&np->mii_if, 0, sizeof(np->mii_if));
876 printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
877 dev->name, enable_hw_cksum ? "enabled" : "disabled");
881 pci_set_drvdata(pdev, NULL);
884 pci_release_regions (pdev);
891 /* Read the MII Management Data I/O (MDIO) interfaces. */
892 static int mdio_read(struct net_device *dev, int phy_id, int location)
894 struct netdev_private *np = netdev_priv(dev);
895 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
896 int result, boguscnt=1000;
897 /* ??? Should we add a busy-wait here? */
899 result = readl(mdio_addr);
900 } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
903 if ((result & 0xffff) == 0xffff)
905 return result & 0xffff;
909 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
911 struct netdev_private *np = netdev_priv(dev);
912 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
913 writel(value, mdio_addr);
914 /* The busy-wait will occur before a read. */
918 static int netdev_open(struct net_device *dev)
920 const struct firmware *fw_rx, *fw_tx;
921 const __be32 *fw_rx_data, *fw_tx_data;
922 struct netdev_private *np = netdev_priv(dev);
923 void __iomem *ioaddr = np->base;
925 size_t tx_size, rx_size;
926 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
928 /* Do we ever need to reset the chip??? */
930 retval = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
934 /* Disable the Rx and Tx, and reset the chip. */
935 writel(0, ioaddr + GenCtrl);
936 writel(1, ioaddr + PCIDeviceConfig);
938 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
939 dev->name, dev->irq);
941 /* Allocate the various queues. */
942 if (!np->queue_mem) {
943 tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
944 rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
945 tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
946 rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
947 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
948 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
949 if (np->queue_mem == NULL) {
950 free_irq(dev->irq, dev);
954 np->tx_done_q = np->queue_mem;
955 np->tx_done_q_dma = np->queue_mem_dma;
956 np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
957 np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
958 np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
959 np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
960 np->rx_ring = (void *) np->tx_ring + tx_ring_size;
961 np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
964 /* Start with no carrier, it gets adjusted later */
965 netif_carrier_off(dev);
967 /* Set the size of the Rx buffers. */
968 writel((np->rx_buf_sz << RxBufferLenShift) |
969 (0 << RxMinDescrThreshShift) |
970 RxPrefetchMode | RxVariableQ |
972 RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
974 ioaddr + RxDescQCtrl);
976 /* Set up the Rx DMA controller. */
977 writel(RxChecksumIgnore |
978 (0 << RxEarlyIntThreshShift) |
979 (6 << RxHighPrioThreshShift) |
980 ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
983 /* Set Tx descriptor */
984 writel((2 << TxHiPriFIFOThreshShift) |
985 (0 << TxPadLenShift) |
986 ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
987 TX_DESC_Q_ADDR_SIZE |
988 TX_DESC_SPACING | TX_DESC_TYPE,
989 ioaddr + TxDescCtrl);
991 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
992 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
993 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
994 writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
995 writel(np->tx_ring_dma, ioaddr + TxRingPtr);
997 writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
998 writel(np->rx_done_q_dma |
1000 (0 << RxComplThreshShift),
1001 ioaddr + RxCompletionAddr);
1004 printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
1006 /* Fill both the Tx SA register and the Rx perfect filter. */
1007 for (i = 0; i < 6; i++)
1008 writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
1009 /* The first entry is special because it bypasses the VLAN filter.
1011 writew(0, ioaddr + PerfFilterTable);
1012 writew(0, ioaddr + PerfFilterTable + 4);
1013 writew(0, ioaddr + PerfFilterTable + 8);
1014 for (i = 1; i < 16; i++) {
1015 __be16 *eaddrs = (__be16 *)dev->dev_addr;
1016 void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
1017 writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
1018 writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
1019 writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
1022 /* Initialize other registers. */
1023 /* Configure the PCI bus bursts and FIFO thresholds. */
1024 np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable; /* modified when link is up. */
1025 writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
1027 writel(np->tx_mode, ioaddr + TxMode);
1028 np->tx_threshold = 4;
1029 writel(np->tx_threshold, ioaddr + TxThreshold);
1031 writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1033 napi_enable(&np->napi);
1035 netif_start_queue(dev);
1038 printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
1041 np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1044 /* Enable GPIO interrupts on link change */
1045 writel(0x0f00ff00, ioaddr + GPIOCtrl);
1047 /* Set the interrupt mask */
1048 writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
1049 IntrTxDMADone | IntrStatsMax | IntrLinkChange |
1050 IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
1051 ioaddr + IntrEnable);
1052 /* Enable PCI interrupts. */
1053 writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
1054 ioaddr + PCIDeviceConfig);
1057 /* Set VLAN type to 802.1q */
1058 writel(ETH_P_8021Q, ioaddr + VlanType);
1059 #endif /* VLAN_SUPPORT */
1061 retval = reject_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
1063 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1067 if (fw_rx->size % 4) {
1068 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1069 fw_rx->size, FIRMWARE_RX);
1073 retval = reject_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
1075 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1079 if (fw_tx->size % 4) {
1080 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1081 fw_tx->size, FIRMWARE_TX);
1085 fw_rx_data = (const __be32 *)&fw_rx->data[0];
1086 fw_tx_data = (const __be32 *)&fw_tx->data[0];
1087 rx_size = fw_rx->size / 4;
1088 tx_size = fw_tx->size / 4;
1090 /* Load Rx/Tx firmware into the frame processors */
1091 for (i = 0; i < rx_size; i++)
1092 writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
1093 for (i = 0; i < tx_size; i++)
1094 writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
1095 if (enable_hw_cksum)
1096 /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
1097 writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
1099 /* Enable the Rx and Tx units only. */
1100 writel(TxEnable|RxEnable, ioaddr + GenCtrl);
1103 printk(KERN_DEBUG "%s: Done netdev_open().\n",
1107 release_firmware(fw_tx);
1109 release_firmware(fw_rx);
/*
 * check_duplex - soft-reset the PHY and re-apply link settings.
 * Re-writes the advertising register, issues a BMCR soft reset, waits
 * (bounded) for the reset bit to self-clear, then either restarts
 * autonegotiation or forces speed/duplex from np->mii_if.
 * NOTE(review): this listing is elided -- braces, the declaration of
 * reg0, and several statements are missing from the visible text.
 */
1117 static void check_duplex(struct net_device *dev)
1119 	struct netdev_private *np = netdev_priv(dev);
/* Bounded spin count so a broken PHY cannot hang the caller forever. */
1121 	int silly_count = 1000;
1123 	mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
1124 	mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
/* Wait for the PHY to clear BMCR_RESET on its own. */
1126 	while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
1129 		printk("%s: MII reset failed!\n", dev->name);
1133 	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1135 	if (!np->mii_if.force_media) {
/* Autonegotiation path: enable and restart ANEG. */
1136 		reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
/* Forced-media path (elided else): clear ANEG and set speed/duplex bits. */
1138 		reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1140 			reg0 |= BMCR_SPEED100;
1141 		if (np->mii_if.full_duplex)
1142 			reg0 |= BMCR_FULLDPLX;
1143 		printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1145 		       np->speed100 ? "100" : "10",
1146 		       np->mii_if.full_duplex ? "full" : "half");
1148 	mdio_write(dev, np->phys[0], MII_BMCR, reg0);
/*
 * tx_timeout - netdev watchdog callback on a stalled transmitter.
 * Logs the interrupt status, counts a tx_error, refreshes trans_start
 * and wakes the queue so transmission can be retried.
 * NOTE(review): listing elided -- the actual reset/restart code hinted
 * at by the comments is missing from the visible text.
 */
1152 static void tx_timeout(struct net_device *dev)
1154 	struct netdev_private *np = netdev_priv(dev);
1155 	void __iomem *ioaddr = np->base;
1158 	printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
1159 	       "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
1161 	/* Perhaps we should reinitialize the hardware here. */
1164 	 * Stop and restart the interface.
1165 	 * Cheat and increase the debug level temporarily.
1173 	/* Trigger an immediate transmit demand. */
/* Refresh the watchdog timestamp so the timeout does not immediately refire. */
1175 	dev->trans_start = jiffies;
1176 	np->stats.tx_errors++;
1177 	netif_wake_queue(dev);
1181 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/*
 * init_ring - reset ring indices, allocate and DMA-map the Rx buffers,
 * clear the completion queues and the Tx bookkeeping array.
 * NOTE(review): listing elided -- braces and the allocation-failure
 * break are missing from the visible text.
 */
1182 static void init_ring(struct net_device *dev)
1184 	struct netdev_private *np = netdev_priv(dev);
/* Reset all producer/consumer indices for both rings. */
1187 	np->cur_rx = np->cur_tx = np->reap_tx = 0;
1188 	np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;
/* Buffer size: default for standard MTU, else MTU plus headroom/slack. */
1190 	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1192 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1193 	for (i = 0; i < RX_RING_SIZE; i++) {
1194 		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1195 		np->rx_info[i].skb = skb;
1198 		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1199 		skb->dev = dev;			/* Mark as being used by this device. */
1200 		/* Grrr, we cannot offset to correctly align the IP header. */
/* Valid bit is OR-ed into the DMA address itself. */
1201 		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
/* Tell the chip how many descriptors are usable. */
1203 	writew(i - 1, np->base + RxDescQIdx);
1204 	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1206 	/* Clear the remainder of the Rx buffer ring. */
1207 	for (  ; i < RX_RING_SIZE; i++) {
1208 		np->rx_ring[i].rxaddr = 0;
1209 		np->rx_info[i].skb = NULL;
1210 		np->rx_info[i].mapping = 0;
1212 	/* Mark the last entry as wrapping the ring. */
1213 	np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
1215 	/* Clear the completion rings. */
1216 	for (i = 0; i < DONE_Q_SIZE; i++) {
1217 		np->rx_done_q[i].status = 0;
1218 		np->tx_done_q[i].status = 0;
1221 	for (i = 0; i < TX_RING_SIZE; i++)
1222 		memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
/*
 * start_tx - ndo_start_xmit handler: map the skb (head + page frags)
 * into Tx descriptors, update the producer index and stop the queue
 * when the ring gets close to full.
 * NOTE(review): listing elided -- braces, several declarations (entry,
 * i, status) and some statements are missing from the visible text.
 */
1228 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1230 	struct netdev_private *np = netdev_priv(dev);
1236 	 * be cautious here, wrapping the queue has weird semantics
1237 	 * and we may not have enough slots even when it seems we do.
/* Worst-case slot estimate: each fragment may need two slots on wrap. */
1239 	if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
1240 		netif_stop_queue(dev);
1241 		return NETDEV_TX_BUSY;
1244 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
/* Broken GFP firmware cannot checksum a 1-byte fragment: pad to 32 bits. */
1245 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1246 		if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
1247 			return NETDEV_TX_OK;
1249 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1251 	entry = np->cur_tx % TX_RING_SIZE;
1252 	for (i = 0; i < skb_num_frags(skb); i++) {
1257 			np->tx_info[entry].skb = skb;
/* Wrap bit must be set when the frags would run past the ring end. */
1259 			if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
1260 				status |= TxRingWrap;
1264 				status |= TxDescIntr;
1267 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
1269 				np->stats.tx_compressed++;
/* First descriptor carries head length plus fragment count (<<16). */
1271 			status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
1273 			np->tx_info[entry].mapping =
1274 				pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
/* Subsequent descriptors map the paged fragments. */
1276 			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
1277 			status |= this_frag->size;
1278 			np->tx_info[entry].mapping =
1279 				pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
1282 		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
1283 		np->tx_ring[entry].status = cpu_to_le32(status);
1285 			printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
1286 			       dev->name, np->cur_tx, np->dirty_tx,
/* On wrap, account for all slots up to the ring end in one step. */
1289 			np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
1290 			np->cur_tx += np->tx_info[entry].used_slots;
1293 			np->tx_info[entry].used_slots = 1;
1294 			np->cur_tx += np->tx_info[entry].used_slots;
1297 		/* scavenge the tx descriptors twice per TX_RING_SIZE */
1298 		if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
1302 	/* Non-x86: explicitly flush descriptor cache lines here. */
1303 	/* Ensure all descriptors are written back before the transmit is
1307 	/* Update the producer index. */
1308 	writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
1310 	/* 4 is arbitrary, but should be ok */
1311 	if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
1312 		netif_stop_queue(dev);
1314 	dev->trans_start = jiffies;
1316 	return NETDEV_TX_OK;
1320 /* The interrupt handler does all of the Rx thread work and cleans up
1321    after the Tx thread. */
/*
 * intr_handler - shared IRQ handler.  Reads-and-acks IntrClear, defers
 * Rx work to NAPI, reaps the Tx completion queue (unmapping and freeing
 * skbs), and dispatches stats/link/error events.  Bounded by
 * max_interrupt_work iterations.
 * NOTE(review): listing elided -- the outer do/while loop header,
 * several declarations (handled, consumer, tx_status) and braces are
 * missing from the visible text.
 */
1322 static irqreturn_t intr_handler(int irq, void *dev_instance)
1324 	struct net_device *dev = dev_instance;
1325 	struct netdev_private *np = netdev_priv(dev);
1326 	void __iomem *ioaddr = np->base;
1327 	int boguscnt = max_interrupt_work;
/* Reading IntrClear both fetches and acknowledges pending sources. */
1333 		u32 intr_status = readl(ioaddr + IntrClear);
1336 			printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
1337 			       dev->name, intr_status);
/* 0: nothing pending; ~0: device gone (e.g. hot-unplug) -- stop looping. */
1339 		if (intr_status == 0 || intr_status == (u32) -1)
1344 		if (intr_status & (IntrRxDone | IntrRxEmpty)) {
/* Hand Rx processing to NAPI and mask further Rx interrupts. */
1347 			if (likely(napi_schedule_prep(&np->napi))) {
1348 				__napi_schedule(&np->napi);
1349 				enable = readl(ioaddr + IntrEnable);
1350 				enable &= ~(IntrRxDone | IntrRxEmpty);
1351 				writel(enable, ioaddr + IntrEnable);
1352 				/* flush PCI posting buffers */
1353 				readl(ioaddr + IntrEnable);
1355 				/* Paranoia check */
1356 				enable = readl(ioaddr + IntrEnable);
1357 				if (enable & (IntrRxDone | IntrRxEmpty)) {
1359 					       "%s: interrupt while in poll!\n",
1361 					enable &= ~(IntrRxDone | IntrRxEmpty);
1362 					writel(enable, ioaddr + IntrEnable);
1367 		/* Scavenge the skbuff list based on the Tx-done queue.
1368 		   There are redundant checks here that may be cleaned up
1369 		   after the driver has proven to be reliable. */
1370 		consumer = readl(ioaddr + TxConsumerIdx);
1372 			printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
1373 			       dev->name, consumer);
1375 		while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
1377 				printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
1378 				       dev->name, np->dirty_tx, np->tx_done, tx_status);
/* Type 0xa: simple Tx-done event -- just count the packet. */
1379 			if ((tx_status & 0xe0000000) == 0xa0000000) {
1380 				np->stats.tx_packets++;
/* Type 0x8: descriptor-done event carrying the ring offset of the skb. */
1381 			} else if ((tx_status & 0xe0000000) == 0x80000000) {
1382 				u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1383 				struct sk_buff *skb = np->tx_info[entry].skb;
1384 				np->tx_info[entry].skb = NULL;
1385 				pci_unmap_single(np->pci_dev,
1386 						 np->tx_info[entry].mapping,
1387 						 skb_first_frag_len(skb),
1389 				np->tx_info[entry].mapping = 0;
1390 				np->dirty_tx += np->tx_info[entry].used_slots;
1391 				entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
/* Unmap each paged fragment that followed the head descriptor. */
1394 				for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1395 					pci_unmap_single(np->pci_dev,
1396 							 np->tx_info[entry].mapping,
1397 							 skb_shinfo(skb)->frags[i].size,
1404 				dev_kfree_skb_irq(skb);
1406 			np->tx_done_q[np->tx_done].status = 0;
1407 			np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
/* Publish our new Tx-completion consumer index to the chip. */
1409 		writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
1411 		if (netif_queue_stopped(dev) &&
1412 		    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
1413 			/* The ring is no longer full, wake the queue. */
1414 			netif_wake_queue(dev);
1417 		/* Stats overflow */
1418 		if (intr_status & IntrStatsMax)
1421 		/* Media change interrupt. */
1422 		if (intr_status & IntrLinkChange)
1423 			netdev_media_change(dev);
1425 		/* Abnormal error summary/uncommon events handlers. */
1426 		if (intr_status & IntrAbnormalSummary)
1427 			netdev_error(dev, intr_status);
/* Give up after max_interrupt_work iterations to avoid IRQ starvation. */
1429 		if (--boguscnt < 0) {
1431 				printk(KERN_WARNING "%s: Too much work at interrupt, "
1433 				       dev->name, intr_status);
1439 		printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
1440 		       dev->name, (int) readl(ioaddr + IntrStatus));
1441 	return IRQ_RETVAL(handled);
1446  * This routine is logically part of the interrupt/poll handler, but separated
1447  * for clarity and better register allocation.
/*
 * __netdev_rx - drain the Rx completion queue up to *quota packets.
 * Small packets (< rx_copybreak) are copied into a fresh skb so the
 * original DMA buffer can be reused; large ones are handed up directly
 * and their ring slot refilled later by refill_rx_ring().
 * NOTE(review): listing elided -- declarations (desc_status, pkt_len,
 * retcode), error-path breaks and several closing braces are missing
 * from the visible text.
 */
1449 static int __netdev_rx(struct net_device *dev, int *quota)
1451 	struct netdev_private *np = netdev_priv(dev);
1455 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1456 	while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
1457 		struct sk_buff *skb;
1460 		rx_done_desc *desc = &np->rx_done_q[np->rx_done];
1463 			printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
1464 		if (!(desc_status & RxOK)) {
1465 			/* There was an error. */
1467 				printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
1468 			np->stats.rx_errors++;
1469 			if (desc_status & RxFIFOErr)
1470 				np->stats.rx_fifo_errors++;
1474 		if (*quota <= 0) {	/* out of rx quota */
/* Low 16 bits of the completion status hold the packet length. */
1480 		pkt_len = desc_status;	/* Implicitly Truncate */
1481 		entry = (desc_status >> 16) & 0x7ff;
1484 			printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
1485 		/* Check if the packet is long enough to accept without copying
1486 		   to a minimally-sized skbuff. */
1487 		if (pkt_len < rx_copybreak
1488 		    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1489 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
1490 			pci_dma_sync_single_for_cpu(np->pci_dev,
1491 						    np->rx_info[entry].mapping,
1492 						    pkt_len, PCI_DMA_FROMDEVICE);
1493 			skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
1494 			pci_dma_sync_single_for_device(np->pci_dev,
1495 						       np->rx_info[entry].mapping,
1496 						       pkt_len, PCI_DMA_FROMDEVICE);
1497 			skb_put(skb, pkt_len);
/* Pass the original buffer up; slot will be refilled later. */
1499 			pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1500 			skb = np->rx_info[entry].skb;
1501 			skb_put(skb, pkt_len);
1502 			np->rx_info[entry].skb = NULL;
1503 			np->rx_info[entry].mapping = 0;
1505 #ifndef final_version			/* Remove after testing. */
1506 		/* You will want this info for the initial debug. */
1508 			printk(KERN_DEBUG "  Rx data %pM %pM %2.2x%2.2x.\n",
1509 			       skb->data, skb->data + 6,
1510 			       skb->data[12], skb->data[13]);
1514 		skb->protocol = eth_type_trans(skb, dev);
1517 			printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
/* status2 bit 0x0100: hardware verified the checksum. */
1519 		if (le16_to_cpu(desc->status2) & 0x0100) {
1520 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1521 			np->stats.rx_compressed++;
1524 		 * This feature doesn't seem to be working, at least
1525 		 * with the two firmware versions I have. If the GFP sees
1526 		 * an IP fragment, it either ignores it completely, or reports
1527 		 * "bad checksum" on it.
1529 		 * Maybe I missed something -- corrections are welcome.
1530 		 * Until then, the printk stays. :-) -Ion
1532 		else if (le16_to_cpu(desc->status2) & 0x0040) {
1533 			skb->ip_summed = CHECKSUM_COMPLETE;
1534 			skb->csum = le16_to_cpu(desc->csum);
1535 			printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
/* status2 bit 0x0200: hardware extracted a VLAN tag for us. */
1538 		if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) {
1539 			u16 vlid = le16_to_cpu(desc->vlanid);
1542 				printk(KERN_DEBUG "  netdev_rx() vlanid = %d\n",
1546 			 * vlan_hwaccel_rx expects a packet with the VLAN tag
1549 			vlan_hwaccel_rx(skb, np->vlgrp, vlid);
1551 #endif /* VLAN_SUPPORT */
1552 		netif_receive_skb(skb);
1553 		np->stats.rx_packets++;
1558 		np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1561 	if (*quota == 0) {	/* out of rx quota */
/* Publish the Rx completion consumer index back to the chip. */
1565 	writew(np->rx_done, np->base + CompletionQConsumerIdx);
1568 	refill_rx_ring(dev);
1570 		printk(KERN_DEBUG "  exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
1571 		       retcode, np->rx_done, desc_status);
/*
 * netdev_poll - NAPI poll callback.  Repeatedly acks Rx interrupt
 * sources and drains the Rx completion queue via __netdev_rx() until
 * either the budget is exhausted or no Rx work remains, then completes
 * NAPI and re-enables the Rx interrupt sources.
 * Returns the number of packets processed (budget - quota).
 * FIX(review): the call below had been corrupted by HTML-entity
 * mangling -- `__netdev_rx(dev, "a))` -- because the `&quot` prefix of
 * `&quota` was decoded to a double quote; restored to `&quota`.
 * NOTE(review): listing elided -- declarations of quota/intr_status,
 * the do/while braces and the quota initialization are missing from
 * the visible text.
 */
1575 static int netdev_poll(struct napi_struct *napi, int budget)
1577 	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
1578 	struct net_device *dev = np->dev;
1580 	void __iomem *ioaddr = np->base;
/* Ack the Rx sources before processing so new events re-latch. */
1584 		writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
1586 		if (__netdev_rx(dev, &quota))
1589 		intr_status = readl(ioaddr + IntrStatus);
1590 	} while (intr_status & (IntrRxDone | IntrRxEmpty));
1592 	napi_complete(napi);
/* Re-enable the Rx interrupt sources masked off in intr_handler(). */
1593 	intr_status = readl(ioaddr + IntrEnable);
1594 	intr_status |= IntrRxDone | IntrRxEmpty;
1595 	writel(intr_status, ioaddr + IntrEnable);
1599 		printk(KERN_DEBUG "  exiting netdev_poll(): %d.\n",
1602 	/* Restart Rx engine if stopped. */
1603 	return budget - quota;
/*
 * refill_rx_ring - replenish Rx ring slots emptied by __netdev_rx().
 * Allocates and DMA-maps a new skb for every dirty slot whose skb was
 * handed up the stack, re-arms the descriptor, and advances the chip's
 * Rx descriptor index.  Allocation failure simply stops early
 * ("better luck next round").
 * NOTE(review): listing elided -- the entry declaration, the NULL
 * check after dev_alloc_skb and closing braces are missing from the
 * visible text.
 */
1606 static void refill_rx_ring(struct net_device *dev)
1608 	struct netdev_private *np = netdev_priv(dev);
1609 	struct sk_buff *skb;
1612 	/* Refill the Rx ring buffers. */
1613 	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1614 		entry = np->dirty_rx % RX_RING_SIZE;
1615 		if (np->rx_info[entry].skb == NULL) {
1616 			skb = dev_alloc_skb(np->rx_buf_sz);
1617 			np->rx_info[entry].skb = skb;
1619 				break;	/* Better luck next round. */
1620 			np->rx_info[entry].mapping =
1621 				pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1622 			skb->dev = dev;	/* Mark as being used by this device. */
/* Re-arm the descriptor: DMA address with the valid bit OR-ed in. */
1623 			np->rx_ring[entry].rxaddr =
1624 				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1626 		if (entry == RX_RING_SIZE - 1)
1627 			np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
/* Publish the last refilled index to the chip. */
1630 		writew(entry, np->base + RxDescQIdx);
/*
 * netdev_media_change - handle a link-change interrupt.
 * Reads BMCR/BMSR (twice, to latch/clear stale status), decides the new
 * speed/duplex either from autonegotiation results (ADVERTISE & LPA) or
 * from the forced BMCR bits, then updates carrier state, the TxMode
 * duplex bit (with the required MiiSoftReset pulse) and the interrupt
 * timer prescaler (Timer10X for 10 Mbit).
 * NOTE(review): listing elided -- np->speed100 assignments, some else
 * branches and closing braces are missing from the visible text.
 */
1634 static void netdev_media_change(struct net_device *dev)
1636 	struct netdev_private *np = netdev_priv(dev);
1637 	void __iomem *ioaddr = np->base;
1638 	u16 reg0, reg1, reg4, reg5;
1640 	u32 new_intr_timer_ctrl;
1642 	/* reset status first */
/* First reads discard latched status; the values below are current. */
1643 	mdio_read(dev, np->phys[0], MII_BMCR);
1644 	mdio_read(dev, np->phys[0], MII_BMSR);
1646 	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1647 	reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1649 	if (reg1 & BMSR_LSTATUS) {
1651 		if (reg0 & BMCR_ANENABLE) {
1652 			/* autonegotiation is enabled */
/* Highest common capability wins: 100FD > 100HD > 10FD > 10HD. */
1653 			reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1654 			reg5 = mdio_read(dev, np->phys[0], MII_LPA);
1655 			if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1657 				np->mii_if.full_duplex = 1;
1658 			} else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1660 				np->mii_if.full_duplex = 0;
1661 			} else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1663 				np->mii_if.full_duplex = 1;
1666 				np->mii_if.full_duplex = 0;
1669 			/* autonegotiation is disabled */
1670 			if (reg0 & BMCR_SPEED100)
1674 			if (reg0 & BMCR_FULLDPLX)
1675 				np->mii_if.full_duplex = 1;
1677 				np->mii_if.full_duplex = 0;
1679 		netif_carrier_on(dev);
1680 		printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1682 		       np->speed100 ? "100" : "10",
1683 		       np->mii_if.full_duplex ? "full" : "half");
1685 		new_tx_mode = np->tx_mode & ~FullDuplex;	/* duplex setting */
1686 		if (np->mii_if.full_duplex)
1687 			new_tx_mode |= FullDuplex;
1688 		if (np->tx_mode != new_tx_mode) {
1689 			np->tx_mode = new_tx_mode;
/* TxMode changes take effect only after a MiiSoftReset pulse. */
1690 			writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
1692 			writel(np->tx_mode, ioaddr + TxMode);
1695 		new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
1697 			new_intr_timer_ctrl |= Timer10X;
1698 		if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
1699 			np->intr_timer_ctrl = new_intr_timer_ctrl;
1700 			writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
/* Link down (elided else): drop the carrier. */
1703 		netif_carrier_off(dev);
1704 		printk(KERN_DEBUG "%s: Link is down\n", dev->name);
/*
 * netdev_error - handle the abnormal-interrupt summary sources.
 * Raises the Tx FIFO threshold on near-underrun, counts Rx GFP and
 * Tx checksum/DMA failures, and logs anything left unexplained.
 * NOTE(review): listing elided -- some else branches and closing
 * braces are missing from the visible text.
 */
1709 static void netdev_error(struct net_device *dev, int intr_status)
1711 	struct netdev_private *np = netdev_priv(dev);
1713 	/* Came close to underrunning the Tx FIFO, increase threshold. */
1714 	if (intr_status & IntrTxDataLow) {
/* Threshold register is in units of 16 bytes; cap at PKT_BUF_SZ/16. */
1715 		if (np->tx_threshold <= PKT_BUF_SZ / 16) {
1716 			writel(++np->tx_threshold, np->base + TxThreshold);
1717 			printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
1718 			       dev->name, np->tx_threshold * 16);
1720 			printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
1722 	if (intr_status & IntrRxGFPDead) {
1723 		np->stats.rx_fifo_errors++;
1724 		np->stats.rx_errors++;
1726 	if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1727 		np->stats.tx_fifo_errors++;
1728 		np->stats.tx_errors++;
/* Anything outside the known masks is unexpected -- log it if debugging. */
1730 	if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1731 		printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
1732 		       dev->name, intr_status);
/*
 * get_stats - ndo_get_stats: refresh np->stats from the chip's
 * hardware statistics block (register offsets 0x57000..0x5707C) and
 * return it.  RxDMAStatus is read-and-cleared to accumulate silently
 * dropped frames.
 * NOTE(review): listing elided -- the opening/closing braces and the
 * return statement are missing from the visible text; the magic
 * offsets are the adapter's stats registers (not named in this view).
 */
1736 static struct net_device_stats *get_stats(struct net_device *dev)
1738 	struct netdev_private *np = netdev_priv(dev);
1739 	void __iomem *ioaddr = np->base;
1741 	/* This adapter architecture needs no SMP locks. */
1742 	np->stats.tx_bytes = readl(ioaddr + 0x57010);
1743 	np->stats.rx_bytes = readl(ioaddr + 0x57044);
1744 	np->stats.tx_packets = readl(ioaddr + 0x57000);
1745 	np->stats.tx_aborted_errors =
1746 		readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1747 	np->stats.tx_window_errors = readl(ioaddr + 0x57018);
1748 	np->stats.collisions =
1749 		readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1751 	/* The chip only need report frame silently dropped. */
/* Accumulate (+=) then clear, since the hardware counter is read-reset. */
1752 	np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1753 	writew(0, ioaddr + RxDMAStatus);
1754 	np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1755 	np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1756 	np->stats.rx_length_errors = readl(ioaddr + 0x57058);
1757 	np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
/*
 * set_rx_mode - ndo_set_multicast_list: program the Rx filter.
 * Chooses between promiscuous, accept-all-multicast, the 16-entry
 * perfect filter (<= 14 multicast addresses; entries 0-1 reserved for
 * the station address) and the 512-bit multicast hash table, and also
 * loads up to 32 VLAN ids into the VLAN perfect filter when VLAN
 * support is compiled in.
 * NOTE(review): listing elided -- declarations (i, vlan_count, eaddrs),
 * #ifdef VLAN_SUPPORT guards, some filter_addr increments and closing
 * braces are missing from the visible text.
 */
1763 static void set_rx_mode(struct net_device *dev)
1765 	struct netdev_private *np = netdev_priv(dev);
1766 	void __iomem *ioaddr = np->base;
1767 	u32 rx_mode = MinVLANPrio;
1768 	struct dev_mc_list *mclist;
1772 		rx_mode |= VlanMode;
/* VLAN perfect filter lives just past the multicast hash table. */
1775 		void __iomem *filter_addr = ioaddr + HashTable + 8;
1776 		for (i = 0; i < VLAN_VID_MASK; i++) {
1777 			if (vlan_group_get_device(np->vlgrp, i)) {
1778 				if (vlan_count >= 32)
1780 				writew(i, filter_addr);
/* Only enable exact VLAN matching if every configured vid fit. */
1785 		if (i == VLAN_VID_MASK) {
1786 			rx_mode |= PerfectFilterVlan;
1787 			while (vlan_count < 32) {
1788 				writew(0, filter_addr);
1794 #endif /* VLAN_SUPPORT */
1796 	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
1797 		rx_mode |= AcceptAll;
1798 	} else if ((dev->mc_count > multicast_filter_limit)
1799 		   || (dev->flags & IFF_ALLMULTI)) {
1800 		/* Too many to match, or accept all multicasts. */
1801 		rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
1802 	} else if (dev->mc_count <= 14) {
1803 		/* Use the 16 element perfect filter, skip first two entries. */
1804 		void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
/* Addresses are written 16-bit-swapped, high word first (chip order). */
1806 		for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
1807 		     i++, mclist = mclist->next) {
1808 			eaddrs = (__be16 *)mclist->dmi_addr;
1809 			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
1810 			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1811 			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
/* Fill the remaining perfect-filter entries with our own address. */
1813 		eaddrs = (__be16 *)dev->dev_addr;
1815 			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1816 			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1817 			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1819 		rx_mode |= AcceptBroadcast|PerfectFilter;
1821 		/* Must use a multicast hash table. */
1822 		void __iomem *filter_addr;
1824 		__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));	/* Multicast hash filter */
1826 		memset(mc_filter, 0, sizeof(mc_filter));
1827 		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1828 		     i++, mclist = mclist->next) {
1829 			/* The chip uses the upper 9 CRC bits
1830 			   as index into the hash table */
1831 			int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
1832 			__le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
1834 			*fptr |= cpu_to_le32(1 << (bit_nr & 31));
1836 		/* Clear the perfect filter list, skip first two entries. */
1837 		filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1838 		eaddrs = (__be16 *)dev->dev_addr;
1839 		for (i = 2; i < 16; i++) {
1840 			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1841 			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1842 			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1844 		for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr+= 16, i++)
1845 			writew(mc_filter[i], filter_addr);
1846 		rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
1848 	writel(rx_mode, ioaddr + RxFilterMode);
/*
 * check_if_running - ethtool_ops.begin hook: refuse ethtool operations
 * while the interface is down.
 * NOTE(review): listing elided -- the braces and the return statements
 * (presumably -EINVAL / 0) are missing from the visible text; confirm
 * against the full source.
 */
1851 static int check_if_running(struct net_device *dev)
1853 	if (!netif_running(dev))
/*
 * get_drvinfo - ethtool: report driver name, version and PCI bus info.
 * NOTE(review): listing elided -- braces are missing from the visible text.
 */
1858 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1860 	struct netdev_private *np = netdev_priv(dev);
1861 	strcpy(info->driver, DRV_NAME);
1862 	strcpy(info->version, DRV_VERSION);
1863 	strcpy(info->bus_info, pci_name(np->pci_dev));
/*
 * get_settings - ethtool: read link settings from the MII state under
 * the driver lock.
 * NOTE(review): listing elided -- braces and the return statement are
 * missing from the visible text.
 */
1866 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1868 	struct netdev_private *np = netdev_priv(dev);
1869 	spin_lock_irq(&np->lock);
1870 	mii_ethtool_gset(&np->mii_if, ecmd);
1871 	spin_unlock_irq(&np->lock);
/*
 * set_settings - ethtool: apply link settings via the MII helper under
 * the driver lock.
 * NOTE(review): listing elided -- braces, the declaration of res, the
 * post-set duplex handling (if any) and the return are missing from
 * the visible text.
 */
1875 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1877 	struct netdev_private *np = netdev_priv(dev);
1879 	spin_lock_irq(&np->lock);
1880 	res = mii_ethtool_sset(&np->mii_if, ecmd);
1881 	spin_unlock_irq(&np->lock);
/*
 * nway_reset - ethtool: restart PHY autonegotiation.
 * NOTE(review): listing elided -- braces are missing from the visible text.
 */
1886 static int nway_reset(struct net_device *dev)
1888 	struct netdev_private *np = netdev_priv(dev);
1889 	return mii_nway_restart(&np->mii_if);
/*
 * get_link - ethtool: report current MII link status (nonzero = up).
 * NOTE(review): listing elided -- braces are missing from the visible text.
 */
1892 static u32 get_link(struct net_device *dev)
1894 	struct netdev_private *np = netdev_priv(dev);
1895 	return mii_link_ok(&np->mii_if);
/*
 * get_msglevel - ethtool message-level getter.
 * NOTE(review): the entire body is elided from this listing; only the
 * signature is visible.
 */
1898 static u32 get_msglevel(struct net_device *dev)
/*
 * set_msglevel - ethtool message-level setter.
 * NOTE(review): the entire body is elided from this listing; only the
 * signature is visible.
 */
1903 static void set_msglevel(struct net_device *dev, u32 val)
/*
 * ethtool_ops - ethtool method table wired up by the probe path.
 * .begin (check_if_running) gates every operation on the interface
 * being up.
 * NOTE(review): listing elided -- the closing "};" is missing from the
 * visible text.
 */
1908 static const struct ethtool_ops ethtool_ops = {
1909 	.begin = check_if_running,
1910 	.get_drvinfo = get_drvinfo,
1911 	.get_settings = get_settings,
1912 	.set_settings = set_settings,
1913 	.nway_reset = nway_reset,
1914 	.get_link = get_link,
1915 	.get_msglevel = get_msglevel,
1916 	.set_msglevel = set_msglevel,
/*
 * netdev_ioctl - ndo_do_ioctl: forward MII ioctls to the generic
 * helper under the driver lock; after a register write to our PHY,
 * re-check the link settings.
 * NOTE(review): listing elided -- the declaration of rc, the -EINVAL
 * return for a down interface, the post-write call (presumably
 * check_duplex) and the return of rc are missing from the visible text.
 */
1919 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1921 	struct netdev_private *np = netdev_priv(dev);
1922 	struct mii_ioctl_data *data = if_mii(rq);
1925 	if (!netif_running(dev))
1928 	spin_lock_irq(&np->lock);
1929 	rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
1930 	spin_unlock_irq(&np->lock);
1932 	if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
/*
 * netdev_close - ndo_stop: quiesce the device and release resources.
 * Stops the queue and NAPI, masks interrupts, halts the Rx/Tx engines,
 * optionally dumps ring state for debugging, frees the IRQ, then
 * unmaps and frees every skb still held by the Rx and Tx rings.
 * NOTE(review): listing elided -- braces, the debug guards around the
 * ring dump and the return statement are missing from the visible text.
 */
1938 static int netdev_close(struct net_device *dev)
1940 	struct netdev_private *np = netdev_priv(dev);
1941 	void __iomem *ioaddr = np->base;
1944 	netif_stop_queue(dev);
1946 	napi_disable(&np->napi);
1949 		printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
1950 		       dev->name, (int) readl(ioaddr + IntrStatus));
1951 		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1952 		       dev->name, np->cur_tx, np->dirty_tx,
1953 		       np->cur_rx, np->dirty_rx);
1956 	/* Disable interrupts by clearing the interrupt mask. */
1957 	writel(0, ioaddr + IntrEnable);
1959 	/* Stop the chip's Tx and Rx processes. */
1960 	writel(0, ioaddr + GenCtrl);
/* Read back to flush the posted write before touching the rings. */
1961 	readl(ioaddr + GenCtrl);
1964 		printk(KERN_DEBUG"  Tx ring at %#llx:\n",
1965 		       (long long) np->tx_ring_dma);
1966 		for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
1967 			printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
1968 			       i, le32_to_cpu(np->tx_ring[i].status),
1969 			       (long long) dma_to_cpu(np->tx_ring[i].addr),
1970 			       le32_to_cpu(np->tx_done_q[i].status));
1971 		printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
1972 		       (long long) np->rx_ring_dma, np->rx_done_q);
1974 			for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
1975 				printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
1976 				       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
1980 	free_irq(dev->irq, dev);
1982 	/* Free all the skbuffs in the Rx queue. */
1983 	for (i = 0; i < RX_RING_SIZE; i++) {
/* Poison the descriptor so the (stopped) chip can never reuse it. */
1984 		np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
1985 		if (np->rx_info[i].skb != NULL) {
1986 			pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1987 			dev_kfree_skb(np->rx_info[i].skb);
1989 		np->rx_info[i].skb = NULL;
1990 		np->rx_info[i].mapping = 0;
1992 	for (i = 0; i < TX_RING_SIZE; i++) {
1993 		struct sk_buff *skb = np->tx_info[i].skb;
1996 		pci_unmap_single(np->pci_dev,
1997 				 np->tx_info[i].mapping,
1998 				 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1999 		np->tx_info[i].mapping = 0;
2001 		np->tx_info[i].skb = NULL;
/*
 * starfire_suspend - PCI power-management suspend hook.
 * Detaches a running interface, saves PCI config space and drops into
 * the requested low-power state.
 * NOTE(review): listing elided -- braces, the netdev_close() call the
 * detach path presumably makes, and the return are missing from the
 * visible text.
 */
2008 static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
2010 	struct net_device *dev = pci_get_drvdata(pdev);
2012 	if (netif_running(dev)) {
2013 		netif_device_detach(dev);
2017 	pci_save_state(pdev);
2018 	pci_set_power_state(pdev, pci_choose_state(pdev,state));
/*
 * starfire_resume - PCI power-management resume hook.
 * Returns the device to D0, restores config space and re-attaches a
 * previously-running interface.
 * NOTE(review): listing elided -- braces, the netdev_open() call the
 * re-attach path presumably makes, and the return are missing from the
 * visible text.
 */
2023 static int starfire_resume(struct pci_dev *pdev)
2025 	struct net_device *dev = pci_get_drvdata(pdev);
2027 	pci_set_power_state(pdev, PCI_D0);
2028 	pci_restore_state(pdev);
2030 	if (netif_running(dev)) {
2032 		netif_device_attach(dev);
2037 #endif /* CONFIG_PM */
/*
 * starfire_remove_one - PCI remove hook: tear down in reverse probe
 * order (unregister netdev, free DMA queue memory, power down, release
 * regions, free the netdev -- which also frees np).
 * NOTE(review): listing elided -- braces, the BUG_ON/iounmap style
 * checks between the visible calls are missing from this text.
 */
2040 static void __devexit starfire_remove_one (struct pci_dev *pdev)
2042 	struct net_device *dev = pci_get_drvdata(pdev);
2043 	struct netdev_private *np = netdev_priv(dev);
2047 	unregister_netdev(dev);
2050 	pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);
2053 	/* XXX: add wakeup code -- requires firmware for MagicPacket */
2054 	pci_set_power_state(pdev, PCI_D3hot);	/* go to sleep in D3 mode */
2055 	pci_disable_device(pdev);
2058 	pci_release_regions(pdev);
2060 	pci_set_drvdata(pdev, NULL);
2061 	free_netdev(dev);			/* Will also free np!! */
/*
 * starfire_driver - PCI driver registration record; suspend/resume are
 * compiled in only under CONFIG_PM.
 * NOTE(review): listing elided -- the .name initializer, the #ifdef
 * CONFIG_PM opening guard and the closing "};" are missing from the
 * visible text.
 */
2065 static struct pci_driver starfire_driver = {
2067 	.probe		= starfire_init_one,
2068 	.remove		= __devexit_p(starfire_remove_one),
2070 	.suspend	= starfire_suspend,
2071 	.resume		= starfire_resume,
2072 #endif /* CONFIG_PM */
2073 	.id_table	= starfire_pci_tbl,
/*
 * starfire_init - module init: sanity-check that dma_addr_t matches
 * the driver's netdrv_addr_t (descriptor address width), then register
 * the PCI driver.
 * NOTE(review): listing elided -- braces, the version printk and the
 * -ENODEV return on the size mismatch are missing from the visible text.
 */
2077 static int __init starfire_init (void)
2079 /* when a module, this is printed whether or not devices are found in probe */
2083 	printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
2086 	/* we can do this test only at run-time... sigh */
2087 	if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) {
2088 		printk("This driver has dma_addr_t issues, please send email to maintainer\n");
2092 	return pci_register_driver(&starfire_driver);
/*
 * starfire_cleanup - module exit: unregister the PCI driver.
 * NOTE(review): listing elided -- braces are missing from the visible text.
 */
2096 static void __exit starfire_cleanup (void)
2098 	pci_unregister_driver (&starfire_driver);
/* Register the module entry/exit points with the kernel. */
2102 module_init(starfire_init);
2103 module_exit(starfire_cleanup);