1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-2001 by Donald Becker.
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
28 [link no longer provides useful info -jgarzik]
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 #define DRV_NAME "via-rhine"
35 #define DRV_VERSION "1.5.1"
36 #define DRV_RELDATE "2010-10-09"
38 #include <linux/types.h>
40 /* A few user-configurable values.
41 These may be modified when a driver module is loaded. */
43 #define RHINE_MSG_DEFAULT \
46 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47 Setting to > 1518 effectively disables this feature. */
48 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 defined(CONFIG_SPARC) || defined(__ia64__) || \
50 defined(__sh__) || defined(__mips__)
51 static int rx_copybreak = 1518;
53 static int rx_copybreak;
56 /* Work-around for broken BIOSes: they are unable to get the chip back out of
57 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
61 * In case you are looking for 'options[]' or 'full_duplex[]', they
62 * are gone. Use ethtool(8) instead.
65 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66 The Rhine has a 64 element 8390-like hash table. */
67 static const int multicast_filter_limit = 32;
70 /* Operational parameters that are set at compile time. */
72 /* Keep the ring sizes a power of two for compile efficiency.
73 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74 Making the Tx ring too large decreases the effectiveness of channel
75 bonding and packet priority.
76 There are no ill effects from too-large receive rings. */
77 #define TX_RING_SIZE 16
78 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
79 #define RX_RING_SIZE 64
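/* A sketch (not driver code) of why powers of two matter here: with
   unsigned ring indices, the modulo used in the hot paths, e.g.

	unsigned int entry = rp->cur_tx % TX_RING_SIZE;

   can be compiled down to the equivalent mask

	unsigned int entry = rp->cur_tx & (TX_RING_SIZE - 1);

   which avoids a division. */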
81 /* Operational parameters that usually are not changed. */
83 /* Time in jiffies before concluding the transmitter is hung. */
84 #define TX_TIMEOUT (2*HZ)
#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */
88 #include <linux/module.h>
89 #include <linux/moduleparam.h>
90 #include <linux/kernel.h>
91 #include <linux/string.h>
92 #include <linux/timer.h>
93 #include <linux/errno.h>
94 #include <linux/ioport.h>
95 #include <linux/interrupt.h>
96 #include <linux/pci.h>
97 #include <linux/dma-mapping.h>
98 #include <linux/netdevice.h>
99 #include <linux/etherdevice.h>
100 #include <linux/skbuff.h>
101 #include <linux/init.h>
102 #include <linux/delay.h>
103 #include <linux/mii.h>
104 #include <linux/ethtool.h>
105 #include <linux/crc32.h>
106 #include <linux/if_vlan.h>
107 #include <linux/bitops.h>
108 #include <linux/workqueue.h>
109 #include <asm/processor.h> /* Processor type for cache alignment. */
112 #include <asm/uaccess.h>
113 #include <linux/dmi.h>
115 /* These identify the driver base version and may not be removed. */
116 static const char version[] =
117 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
119 /* This driver was written to use PCI memory space. Some early versions
120 of the Rhine may only work correctly with I/O space accesses. */
121 #ifdef CONFIG_VIA_RHINE_MMIO
126 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
127 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
128 MODULE_LICENSE("GPL");
130 module_param(debug, int, 0);
131 module_param(rx_copybreak, int, 0);
132 module_param(avoid_D3, bool, 0);
133 MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
134 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
135 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
143 I. Board Compatibility
This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
adapter.
148 II. Board-specific settings
150 Boards with this chip are functional only in a bus-master PCI slot.
152 Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
155 If this driver is compiled to use PCI memory space operations the EEPROM
156 must be configured to enable memory ops.
158 III. Driver operation
162 This driver uses two statically allocated fixed-size descriptor lists
163 formed into rings by a branch from the final descriptor to the beginning of
164 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
166 IIIb/c. Transmit/Receive Structure
168 This driver attempts to use a zero-copy receive and transmit scheme.
170 Alas, all data buffers are required to start on a 32 bit boundary, so
171 the driver must often copy transmit packets into bounce buffers.
173 The driver allocates full frame size skbuffs for the Rx ring buffers at
174 open() time and passes the skb->data field to the chip as receive data
175 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
176 a fresh skbuff is allocated and the frame is copied to the new skbuff.
177 When the incoming frame is larger, the skbuff is passed directly up the
178 protocol stack. Buffers consumed this way are replaced by newly allocated
179 skbuffs in the last phase of rhine_rx().
181 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
182 using a full-sized skbuff for small frames vs. the copying costs of larger
183 frames. New boards are typically used in generously configured machines
184 and the underfilled buffers have negligible impact compared to the benefit of
185 a single allocation size, so the default value of zero results in never
186 copying packets. When copying is done, the cost is usually mitigated by using
187 a combined copy/checksum routine. Copying also preloads the cache, which is
188 most useful with small frames.
190 Since the VIA chips are only able to transfer data to buffers on 32 bit
191 boundaries, the IP header at offset 14 in an ethernet frame isn't
192 longword aligned for further processing. Copying these unaligned buffers
193 has the beneficial effect of 16-byte aligning the IP header.
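
For example, the copy path in rhine_rx() below allocates the small skb
with netdev_alloc_skb_ip_align(), which reserves NET_IP_ALIGN (2 on most
platforms) bytes, so the 14-byte Ethernet header ends -- and the IP
header begins -- 16 bytes into the buffer:

	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
	skb_copy_to_linear_data(skb, rp->rx_skbuff[entry]->data, pkt_len);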
195 IIId. Synchronization
197 The driver runs as two independent, single-threaded flows of control. One
198 is the send-packet routine, which enforces single-threaded use by the
199 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
200 which is single threaded by the hardware and interrupt handling software.
202 The send packet thread has partial control over the Tx ring. It locks the
203 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
204 the ring is not available it stops the transmit queue by
205 calling netif_stop_queue.
207 The interrupt handler has exclusive control over the Rx ring and records stats
208 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
209 empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available, the transmit queue is woken up if it was stopped.
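
Concretely, the wake-up check in rhine_tx() is:

	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);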
216 Preliminary VT86C100A manual from http://www.via.com.tw/
217 http://www.scyld.com/expert/100mbps.html
218 http://www.scyld.com/expert/NWay.html
219 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
220 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
The VT86C100A manual is not a reliable source of information.
226 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
227 in significant performance degradation for bounce buffer copies on transmit
228 and unaligned IP headers on receive.
229 The chip does not pad to minimum transmit length.
234 /* This table drives the PCI probe routines. It's mostly boilerplate in all
235 of the drivers, and will likely be provided by some future kernel.
Note the matching code -- the first table entry matches all 56** cards but
the second matches only the 1234 card.
244 VT8231 = 0x50, /* Integrated MAC */
245 VT8233 = 0x60, /* Integrated MAC */
246 VT8235 = 0x74, /* Integrated MAC */
247 VT8237 = 0x78, /* Integrated MAC */
254 VT6105M = 0x90, /* Management adapter */
258 rqWOL = 0x0001, /* Wake-On-LAN support */
259 rqForceReset = 0x0002,
260 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
261 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
262 rqRhineI = 0x0100, /* See comment below */
265 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
266 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
270 /* Beware of PCI posted writes */
271 #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
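/* Usage sketch: a dummy read after a write flushes posted writes out to
   the chip before execution continues, e.g.

	iowrite16(CmdStop, ioaddr + ChipCmd);
	IOSYNC;
 */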
static const struct pci_device_id rhine_pci_tbl[] = {
274 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
275 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
276 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
277 { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
278 { } /* terminate list */
280 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
283 /* Offsets to the device registers. */
284 enum register_offsets {
285 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
286 ChipCmd1=0x09, TQWake=0x0A,
287 IntrStatus=0x0C, IntrEnable=0x0E,
288 MulticastFilter0=0x10, MulticastFilter1=0x14,
289 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
290 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
291 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
292 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
293 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
294 StickyHW=0x83, IntrStatus2=0x84,
295 CamMask=0x88, CamCon=0x92, CamAddr=0x93,
296 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
297 WOLcrClr1=0xA6, WOLcgClr=0xA7,
298 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
301 /* Bits in ConfigD */
303 BackOptional=0x01, BackModify=0x02,
304 BackCaptureEffect=0x04, BackRandom=0x08
307 /* Bits in the TxConfig (TCR) register */
310 TCR_LB0=0x02, /* loopback[0] */
311 TCR_LB1=0x04, /* loopback[1] */
319 /* Bits in the CamCon (CAMC) register */
327 /* Bits in the PCIBusConfig1 (BCR1) register */
335 BCR1_TXQNOBK=0x40, /* for VT6105 */
336 BCR1_VIDFR=0x80, /* for VT6105 */
337 BCR1_MED0=0x40, /* for VT6102 */
338 BCR1_MED1=0x80, /* for VT6102 */
/* Registers we read back via both MMIO and PIO to verify they match. */
343 static const int mmio_verify_registers[] = {
344 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
349 /* Bits in the interrupt status/mask registers. */
350 enum intr_status_bits {
354 IntrTxError = 0x0008,
355 IntrRxEmpty = 0x0020,
357 IntrStatsMax = 0x0080,
358 IntrRxEarly = 0x0100,
359 IntrTxUnderrun = 0x0210,
360 IntrRxOverflow = 0x0400,
361 IntrRxDropped = 0x0800,
362 IntrRxNoBuf = 0x1000,
363 IntrTxAborted = 0x2000,
364 IntrLinkChange = 0x4000,
365 IntrRxWakeUp = 0x8000,
366 IntrTxDescRace = 0x080000, /* mapped from IntrStatus2 */
367 IntrNormalSummary = IntrRxDone | IntrTxDone,
368 IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError |
372 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
381 /* The Rx and Tx buffer descriptors. */
384 __le32 desc_length; /* Chain flag, Buffer/frame length */
390 __le32 desc_length; /* Chain flag, Tx Config, Frame length */
395 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
396 #define TXDESC 0x00e08000
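/* rhine_start_tx() below ORs in the (possibly padded) frame length:

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 */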
398 enum rx_status_bits {
399 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
402 /* Bits in *_desc.*_status */
403 enum desc_status_bits {
407 /* Bits in *_desc.*_length */
408 enum desc_length_bits {
412 /* Bits in ChipCmd. */
414 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
415 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
416 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
417 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
423 struct u64_stats_sync syncp;
426 struct rhine_private {
427 /* Bit mask for configured VLAN ids */
428 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
430 /* Descriptor rings */
431 struct rx_desc *rx_ring;
432 struct tx_desc *tx_ring;
433 dma_addr_t rx_ring_dma;
434 dma_addr_t tx_ring_dma;
436 /* The addresses of receive-in-place skbuffs. */
437 struct sk_buff *rx_skbuff[RX_RING_SIZE];
438 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
440 /* The saved address of a sent-in-place packet/buffer, for later free(). */
441 struct sk_buff *tx_skbuff[TX_RING_SIZE];
442 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
444 /* Tx bounce buffers (Rhine-I only) */
445 unsigned char *tx_buf[TX_RING_SIZE];
446 unsigned char *tx_bufs;
447 dma_addr_t tx_bufs_dma;
449 struct pci_dev *pdev;
451 struct net_device *dev;
452 struct napi_struct napi;
454 struct mutex task_lock;
456 struct work_struct slow_event_task;
457 struct work_struct reset_task;
461 /* Frequently used values: keep some adjacent for cache effect. */
463 struct rx_desc *rx_head_desc;
464 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
465 unsigned int cur_tx, dirty_tx;
466 unsigned int rx_buf_sz; /* Based on MTU+slack. */
467 struct rhine_stats rx_stats;
468 struct rhine_stats tx_stats;
471 u8 tx_thresh, rx_thresh;
473 struct mii_if_info mii_if;
477 #define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
478 #define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
479 #define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
481 #define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
482 #define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
483 #define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
485 #define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
486 #define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
487 #define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
489 #define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
490 #define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
491 #define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
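/* Example use of these read-modify-write helpers (see
   rhine_kick_tx_threshold()): clear the masked bits of TxConfig, then
   OR in the new Tx threshold, preserving the remaining bits:

	BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 */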
494 static int mdio_read(struct net_device *dev, int phy_id, int location);
495 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
496 static int rhine_open(struct net_device *dev);
497 static void rhine_reset_task(struct work_struct *work);
498 static void rhine_slow_event_task(struct work_struct *work);
499 static void rhine_tx_timeout(struct net_device *dev);
500 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
501 struct net_device *dev);
502 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
503 static void rhine_tx(struct net_device *dev);
504 static int rhine_rx(struct net_device *dev, int limit);
505 static void rhine_set_rx_mode(struct net_device *dev);
506 static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
507 struct rtnl_link_stats64 *stats);
508 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
509 static const struct ethtool_ops netdev_ethtool_ops;
510 static int rhine_close(struct net_device *dev);
511 static int rhine_vlan_rx_add_vid(struct net_device *dev,
512 __be16 proto, u16 vid);
513 static int rhine_vlan_rx_kill_vid(struct net_device *dev,
514 __be16 proto, u16 vid);
515 static void rhine_restart_tx(struct net_device *dev);
517 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
519 void __iomem *ioaddr = rp->base;
522 for (i = 0; i < 1024; i++) {
523 bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
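		/* done once the masked bits read 0 (low) or 1 (!low) */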
525 if (low ^ has_mask_bits)
	netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle count: %04d\n",
		  low ? "low" : "high", reg, mask, i);
535 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
537 rhine_wait_bit(rp, reg, mask, false);
540 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
542 rhine_wait_bit(rp, reg, mask, true);
545 static u32 rhine_get_events(struct rhine_private *rp)
547 void __iomem *ioaddr = rp->base;
550 intr_status = ioread16(ioaddr + IntrStatus);
551 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
552 if (rp->quirks & rqStatusWBRace)
553 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
557 static void rhine_ack_events(struct rhine_private *rp, u32 mask)
559 void __iomem *ioaddr = rp->base;
561 if (rp->quirks & rqStatusWBRace)
562 iowrite8(mask >> 16, ioaddr + IntrStatus2);
563 iowrite16(mask, ioaddr + IntrStatus);
568 * Get power related registers into sane state.
569 * Notify user about past WOL event.
571 static void rhine_power_init(struct net_device *dev)
573 struct rhine_private *rp = netdev_priv(dev);
574 void __iomem *ioaddr = rp->base;
577 if (rp->quirks & rqWOL) {
578 /* Make sure chip is in power state D0 */
579 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
581 /* Disable "force PME-enable" */
582 iowrite8(0x80, ioaddr + WOLcgClr);
584 /* Clear power-event config bits (WOL) */
585 iowrite8(0xFF, ioaddr + WOLcrClr);
586 /* More recent cards can manage two additional patterns */
587 if (rp->quirks & rq6patterns)
588 iowrite8(0x03, ioaddr + WOLcrClr1);
590 /* Save power-event status bits */
591 wolstat = ioread8(ioaddr + PwrcsrSet);
592 if (rp->quirks & rq6patterns)
593 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
595 /* Clear power-event status bits */
596 iowrite8(0xFF, ioaddr + PwrcsrClr);
597 if (rp->quirks & rq6patterns)
598 iowrite8(0x03, ioaddr + PwrcsrClr1);
604 reason = "Magic packet";
607 reason = "Link went up";
610 reason = "Link went down";
613 reason = "Unicast packet";
616 reason = "Multicast/broadcast packet";
621 netdev_info(dev, "Woke system up. Reason: %s\n",
627 static void rhine_chip_reset(struct net_device *dev)
629 struct rhine_private *rp = netdev_priv(dev);
630 void __iomem *ioaddr = rp->base;
633 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
636 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
637 netdev_info(dev, "Reset not complete yet. Trying harder.\n");
640 if (rp->quirks & rqForceReset)
641 iowrite8(0x40, ioaddr + MiscCmd);
643 /* Reset can take somewhat longer (rare) */
644 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
647 cmd1 = ioread8(ioaddr + ChipCmd1);
648 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
649 "failed" : "succeeded");
653 static void enable_mmio(long pioaddr, u32 quirks)
656 if (quirks & rqRhineI) {
657 /* More recent docs say that this bit is reserved ... */
658 n = inb(pioaddr + ConfigA) | 0x20;
659 outb(n, pioaddr + ConfigA);
661 n = inb(pioaddr + ConfigD) | 0x80;
662 outb(n, pioaddr + ConfigD);
668 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
669 * (plus 0x6C for Rhine-I/II)
671 static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
673 struct rhine_private *rp = netdev_priv(dev);
674 void __iomem *ioaddr = rp->base;
677 outb(0x20, pioaddr + MACRegEEcsr);
678 for (i = 0; i < 1024; i++) {
679 if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
683 pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
687 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
688 * MMIO. If reloading EEPROM was done first this could be avoided, but
689 * it is not known if that still works with the "win98-reboot" problem.
691 enable_mmio(pioaddr, rp->quirks);
694 /* Turn off EEPROM-controlled wake-up (magic packet) */
695 if (rp->quirks & rqWOL)
696 iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
700 #ifdef CONFIG_NET_POLL_CONTROLLER
701 static void rhine_poll(struct net_device *dev)
703 struct rhine_private *rp = netdev_priv(dev);
704 const int irq = rp->pdev->irq;
707 rhine_interrupt(irq, dev);
712 static void rhine_kick_tx_threshold(struct rhine_private *rp)
714 if (rp->tx_thresh < 0xe0) {
715 void __iomem *ioaddr = rp->base;
717 rp->tx_thresh += 0x20;
718 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
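		/* i.e. the threshold steps 0x20 -> 0x40 -> ... -> 0xe0 on
		 * successive underruns until it tops out */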
722 static void rhine_tx_err(struct rhine_private *rp, u32 status)
724 struct net_device *dev = rp->dev;
726 if (status & IntrTxAborted) {
727 netif_info(rp, tx_err, dev,
728 "Abort %08x, frame dropped\n", status);
731 if (status & IntrTxUnderrun) {
732 rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev,
			   "Transmitter underrun, Tx threshold now %02x\n",
			   rp->tx_thresh);
737 if (status & IntrTxDescRace)
738 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
740 if ((status & IntrTxError) &&
741 (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
742 rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev,
			   "Unspecified error. Tx threshold now %02x\n",
			   rp->tx_thresh);
747 rhine_restart_tx(dev);
static void rhine_update_rx_crc_and_missed_errors(struct rhine_private *rp)
752 void __iomem *ioaddr = rp->base;
753 struct net_device_stats *stats = &rp->dev->stats;
755 stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
756 stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
759 * Clears the "tally counters" for CRC errors and missed frames(?).
760 * It has been reported that some chips need a write of 0 to clear
761 * these, for others the counters are set to 1 when written to and
762 * instead cleared when read. So we clear them both ways ...
764 iowrite32(0, ioaddr + RxMissed);
765 ioread16(ioaddr + RxCRCErrs);
766 ioread16(ioaddr + RxMissed);
769 #define RHINE_EVENT_NAPI_RX (IntrRxDone | \
777 #define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \
781 #define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
783 #define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \
784 RHINE_EVENT_NAPI_TX | \
786 #define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange)
787 #define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
789 static int rhine_napipoll(struct napi_struct *napi, int budget)
791 struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
792 struct net_device *dev = rp->dev;
793 void __iomem *ioaddr = rp->base;
794 u16 enable_mask = RHINE_EVENT & 0xffff;
798 status = rhine_get_events(rp);
799 rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
801 if (status & RHINE_EVENT_NAPI_RX)
802 work_done += rhine_rx(dev, budget);
804 if (status & RHINE_EVENT_NAPI_TX) {
805 if (status & RHINE_EVENT_NAPI_TX_ERR) {
806 /* Avoid scavenging before Tx engine turned off */
807 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
808 if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
809 netif_warn(rp, tx_err, dev, "Tx still on\n");
814 if (status & RHINE_EVENT_NAPI_TX_ERR)
815 rhine_tx_err(rp, status);
818 if (status & IntrStatsMax) {
819 spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errors(rp);
821 spin_unlock(&rp->lock);
824 if (status & RHINE_EVENT_SLOW) {
825 enable_mask &= ~RHINE_EVENT_SLOW;
826 schedule_work(&rp->slow_event_task);
829 if (work_done < budget) {
831 iowrite16(enable_mask, ioaddr + IntrEnable);
837 static void rhine_hw_init(struct net_device *dev, long pioaddr)
839 struct rhine_private *rp = netdev_priv(dev);
841 /* Reset the chip to erase previous misconfiguration. */
842 rhine_chip_reset(dev);
844 /* Rhine-I needs extra time to recuperate before EEPROM reload */
845 if (rp->quirks & rqRhineI)
848 /* Reload EEPROM controlled bytes cleared by soft reset */
849 rhine_reload_eeprom(pioaddr, dev);
852 static const struct net_device_ops rhine_netdev_ops = {
853 .ndo_open = rhine_open,
854 .ndo_stop = rhine_close,
855 .ndo_start_xmit = rhine_start_tx,
856 .ndo_get_stats64 = rhine_get_stats64,
857 .ndo_set_rx_mode = rhine_set_rx_mode,
858 .ndo_change_mtu = eth_change_mtu,
859 .ndo_validate_addr = eth_validate_addr,
860 .ndo_set_mac_address = eth_mac_addr,
861 .ndo_do_ioctl = netdev_ioctl,
862 .ndo_tx_timeout = rhine_tx_timeout,
863 .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
864 .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
865 #ifdef CONFIG_NET_POLL_CONTROLLER
866 .ndo_poll_controller = rhine_poll,
870 static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
872 struct net_device *dev;
873 struct rhine_private *rp;
878 void __iomem *ioaddr;
887 /* when built into the kernel, we only print version if device is found */
889 pr_info_once("%s\n", version);
896 if (pdev->revision < VTunknown0) {
900 else if (pdev->revision >= VT6102) {
901 quirks = rqWOL | rqForceReset;
902 if (pdev->revision < VT6105) {
904 quirks |= rqStatusWBRace; /* Rhine-II exclusive */
907 phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
908 if (pdev->revision >= VT6105_B0)
909 quirks |= rq6patterns;
910 if (pdev->revision < VT6105M)
913 name = "Rhine III (Management Adapter)";
917 rc = pci_enable_device(pdev);
921 /* this should always be supported */
922 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
925 "32-bit PCI DMA addresses not supported by the card!?\n");
930 if ((pci_resource_len(pdev, 0) < io_size) ||
931 (pci_resource_len(pdev, 1) < io_size)) {
933 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
937 pioaddr = pci_resource_start(pdev, 0);
938 memaddr = pci_resource_start(pdev, 1);
940 pci_set_master(pdev);
942 dev = alloc_etherdev(sizeof(struct rhine_private));
947 SET_NETDEV_DEV(dev, &pdev->dev);
949 rp = netdev_priv(dev);
952 rp->pioaddr = pioaddr;
954 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
956 rc = pci_request_regions(pdev, DRV_NAME);
958 goto err_out_free_netdev;
960 ioaddr = pci_iomap(pdev, bar, io_size);
964 "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
965 pci_name(pdev), io_size, memaddr);
966 goto err_out_free_res;
970 enable_mmio(pioaddr, quirks);
972 /* Check that selected MMIO registers match the PIO ones */
974 while (mmio_verify_registers[i]) {
975 int reg = mmio_verify_registers[i++];
976 unsigned char a = inb(pioaddr+reg);
977 unsigned char b = readb(ioaddr+reg);
981 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
986 #endif /* USE_MMIO */
990 /* Get chip registers into a sane state */
991 rhine_power_init(dev);
992 rhine_hw_init(dev, pioaddr);
994 for (i = 0; i < 6; i++)
995 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
997 if (!is_valid_ether_addr(dev->dev_addr)) {
998 /* Report it and use a random ethernet address instead */
999 netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
1000 eth_hw_addr_random(dev);
1001 netdev_info(dev, "Using random MAC address: %pM\n",
1005 /* For Rhine-I/II, phy_id is loaded from EEPROM */
1007 phy_id = ioread8(ioaddr + 0x6C);
1009 spin_lock_init(&rp->lock);
1010 mutex_init(&rp->task_lock);
1011 INIT_WORK(&rp->reset_task, rhine_reset_task);
1012 INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
1014 rp->mii_if.dev = dev;
1015 rp->mii_if.mdio_read = mdio_read;
1016 rp->mii_if.mdio_write = mdio_write;
1017 rp->mii_if.phy_id_mask = 0x1f;
1018 rp->mii_if.reg_num_mask = 0x1f;
1020 /* The chip-specific entries in the device structure. */
1021 dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
1023 dev->watchdog_timeo = TX_TIMEOUT;
1025 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
1027 if (rp->quirks & rqRhineI)
1028 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
1030 if (pdev->revision >= VT6105M)
1031 dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
1032 NETIF_F_HW_VLAN_CTAG_RX |
1033 NETIF_F_HW_VLAN_CTAG_FILTER;
1035 /* dev->name not defined before register_netdev()! */
1036 rc = register_netdev(dev);
1040 netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
1047 dev->dev_addr, pdev->irq);
1049 pci_set_drvdata(pdev, dev);
1053 int mii_status = mdio_read(dev, phy_id, 1);
1054 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1055 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1056 if (mii_status != 0xffff && mii_status != 0x0000) {
1057 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1059 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1061 mii_status, rp->mii_if.advertising,
1062 mdio_read(dev, phy_id, 5));
1064 /* set IFF_RUNNING */
1065 if (mii_status & BMSR_LSTATUS)
1066 netif_carrier_on(dev);
1068 netif_carrier_off(dev);
1072 rp->mii_if.phy_id = phy_id;
1074 netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1079 pci_iounmap(pdev, ioaddr);
1081 pci_release_regions(pdev);
1082 err_out_free_netdev:
1088 static int alloc_ring(struct net_device* dev)
1090 struct rhine_private *rp = netdev_priv(dev);
1092 dma_addr_t ring_dma;
1094 ring = pci_alloc_consistent(rp->pdev,
1095 RX_RING_SIZE * sizeof(struct rx_desc) +
1096 TX_RING_SIZE * sizeof(struct tx_desc),
1099 netdev_err(dev, "Could not allocate DMA memory\n");
1102 if (rp->quirks & rqRhineI) {
1103 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
1104 PKT_BUF_SZ * TX_RING_SIZE,
1106 if (rp->tx_bufs == NULL) {
1107 pci_free_consistent(rp->pdev,
1108 RX_RING_SIZE * sizeof(struct rx_desc) +
1109 TX_RING_SIZE * sizeof(struct tx_desc),
1116 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1117 rp->rx_ring_dma = ring_dma;
1118 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
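	/*
	 * Both rings share the single coherent allocation (sketch):
	 *
	 *   ring / ring_dma
	 *   |<- RX_RING_SIZE * sizeof(struct rx_desc) ->|<- Tx descriptors ->|
	 *   rx_ring / rx_ring_dma                        tx_ring / tx_ring_dma
	 */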
1123 static void free_ring(struct net_device* dev)
1125 struct rhine_private *rp = netdev_priv(dev);
1127 pci_free_consistent(rp->pdev,
1128 RX_RING_SIZE * sizeof(struct rx_desc) +
1129 TX_RING_SIZE * sizeof(struct tx_desc),
1130 rp->rx_ring, rp->rx_ring_dma);
1134 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
1135 rp->tx_bufs, rp->tx_bufs_dma);
1141 static void alloc_rbufs(struct net_device *dev)
1143 struct rhine_private *rp = netdev_priv(dev);
1147 rp->dirty_rx = rp->cur_rx = 0;
1149 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1150 rp->rx_head_desc = &rp->rx_ring[0];
1151 next = rp->rx_ring_dma;
1153 /* Init the ring entries */
1154 for (i = 0; i < RX_RING_SIZE; i++) {
1155 rp->rx_ring[i].rx_status = 0;
1156 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1157 next += sizeof(struct rx_desc);
1158 rp->rx_ring[i].next_desc = cpu_to_le32(next);
1159 rp->rx_skbuff[i] = NULL;
1161 /* Mark the last entry as wrapping the ring. */
1162 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1164 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1165 for (i = 0; i < RX_RING_SIZE; i++) {
1166 struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1167 rp->rx_skbuff[i] = skb;
1171 rp->rx_skbuff_dma[i] =
1172 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
1173 PCI_DMA_FROMDEVICE);
1175 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1176 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1178 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1181 static void free_rbufs(struct net_device* dev)
1183 struct rhine_private *rp = netdev_priv(dev);
1186 /* Free all the skbuffs in the Rx queue. */
1187 for (i = 0; i < RX_RING_SIZE; i++) {
1188 rp->rx_ring[i].rx_status = 0;
1189 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1190 if (rp->rx_skbuff[i]) {
1191 pci_unmap_single(rp->pdev,
1192 rp->rx_skbuff_dma[i],
1193 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1194 dev_kfree_skb(rp->rx_skbuff[i]);
1196 rp->rx_skbuff[i] = NULL;
1200 static void alloc_tbufs(struct net_device* dev)
1202 struct rhine_private *rp = netdev_priv(dev);
1206 rp->dirty_tx = rp->cur_tx = 0;
1207 next = rp->tx_ring_dma;
1208 for (i = 0; i < TX_RING_SIZE; i++) {
1209 rp->tx_skbuff[i] = NULL;
1210 rp->tx_ring[i].tx_status = 0;
1211 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1212 next += sizeof(struct tx_desc);
1213 rp->tx_ring[i].next_desc = cpu_to_le32(next);
1214 if (rp->quirks & rqRhineI)
1215 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1217 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1221 static void free_tbufs(struct net_device* dev)
1223 struct rhine_private *rp = netdev_priv(dev);
1226 for (i = 0; i < TX_RING_SIZE; i++) {
1227 rp->tx_ring[i].tx_status = 0;
1228 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1229 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1230 if (rp->tx_skbuff[i]) {
1231 if (rp->tx_skbuff_dma[i]) {
1232 pci_unmap_single(rp->pdev,
1233 rp->tx_skbuff_dma[i],
1234 rp->tx_skbuff[i]->len,
1237 dev_kfree_skb(rp->tx_skbuff[i]);
1239 rp->tx_skbuff[i] = NULL;
1240 rp->tx_buf[i] = NULL;
1244 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1246 struct rhine_private *rp = netdev_priv(dev);
1247 void __iomem *ioaddr = rp->base;
1249 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1251 if (rp->mii_if.full_duplex)
1252 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1255 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1258 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1259 rp->mii_if.force_media, netif_carrier_ok(dev));
1262 /* Called after status of force_media possibly changed */
1263 static void rhine_set_carrier(struct mii_if_info *mii)
1265 struct net_device *dev = mii->dev;
1266 struct rhine_private *rp = netdev_priv(dev);
1268 if (mii->force_media) {
1269 /* autoneg is off: Link is always assumed to be up */
1270 if (!netif_carrier_ok(dev))
1271 netif_carrier_on(dev);
	} else /* Let MII library update carrier status */
1273 rhine_check_media(dev, 0);
1275 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1276 mii->force_media, netif_carrier_ok(dev));
1280 * rhine_set_cam - set CAM multicast filters
1281 * @ioaddr: register block of this Rhine
1282 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1283 * @addr: multicast address (6 bytes)
1285 * Load addresses into multicast filters.
1287 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1291 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1294 /* Paranoid -- idx out of range should never happen */
1295 idx &= (MCAM_SIZE - 1);
1297 iowrite8((u8) idx, ioaddr + CamAddr);
1299 for (i = 0; i < 6; i++, addr++)
1300 iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1304 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1307 iowrite8(0, ioaddr + CamCon);
1311 * rhine_set_vlan_cam - set CAM VLAN filters
1312 * @ioaddr: register block of this Rhine
1313 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1314 * @addr: VLAN ID (2 bytes)
1316 * Load addresses into VLAN filters.
1318 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1320 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1323 /* Paranoid -- idx out of range should never happen */
1324 idx &= (VCAM_SIZE - 1);
1326 iowrite8((u8) idx, ioaddr + CamAddr);
1328 iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1332 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1335 iowrite8(0, ioaddr + CamCon);
1339 * rhine_set_cam_mask - set multicast CAM mask
1340 * @ioaddr: register block of this Rhine
1341 * @mask: multicast CAM mask
1343 * Mask sets multicast filters active/inactive.
1345 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1347 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1351 iowrite32(mask, ioaddr + CamMask);
1354 iowrite8(0, ioaddr + CamCon);
1358 * rhine_set_vlan_cam_mask - set VLAN CAM mask
1359 * @ioaddr: register block of this Rhine
1360 * @mask: VLAN CAM mask
1362 * Mask sets VLAN filters active/inactive.
1364 static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1366 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1370 iowrite32(mask, ioaddr + CamMask);
1373 iowrite8(0, ioaddr + CamCon);
1377 * rhine_init_cam_filter - initialize CAM filters
1378 * @dev: network device
 * Initialize (disable) hardware VLAN and multicast support on this card.
1383 static void rhine_init_cam_filter(struct net_device *dev)
1385 struct rhine_private *rp = netdev_priv(dev);
1386 void __iomem *ioaddr = rp->base;
1388 /* Disable all CAMs */
1389 rhine_set_vlan_cam_mask(ioaddr, 0);
1390 rhine_set_cam_mask(ioaddr, 0);
1392 /* disable hardware VLAN support */
1393 BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1394 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1398 * rhine_update_vcam - update VLAN CAM filters
 * @dev: network device
1401 * Update VLAN CAM filters to match configuration change.
1403 static void rhine_update_vcam(struct net_device *dev)
1405 struct rhine_private *rp = netdev_priv(dev);
1406 void __iomem *ioaddr = rp->base;
1408 u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */
1411 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1412 rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1414 if (++i >= VCAM_SIZE)
1417 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1420 static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1422 struct rhine_private *rp = netdev_priv(dev);
1424 spin_lock_bh(&rp->lock);
1425 set_bit(vid, rp->active_vlans);
1426 rhine_update_vcam(dev);
1427 spin_unlock_bh(&rp->lock);
1431 static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1433 struct rhine_private *rp = netdev_priv(dev);
1435 spin_lock_bh(&rp->lock);
1436 clear_bit(vid, rp->active_vlans);
1437 rhine_update_vcam(dev);
1438 spin_unlock_bh(&rp->lock);
1442 static void init_registers(struct net_device *dev)
1444 struct rhine_private *rp = netdev_priv(dev);
1445 void __iomem *ioaddr = rp->base;
1448 for (i = 0; i < 6; i++)
1449 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1451 /* Initialize other registers. */
1452 iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1453 /* Configure initial FIFO thresholds. */
1454 iowrite8(0x20, ioaddr + TxConfig);
1455 rp->tx_thresh = 0x20;
1456 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1458 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1459 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1461 rhine_set_rx_mode(dev);
1463 if (rp->pdev->revision >= VT6105M)
1464 rhine_init_cam_filter(dev);
1466 napi_enable(&rp->napi);
1468 iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1470 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1472 rhine_check_media(dev, 1);
1475 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1476 static void rhine_enable_linkmon(struct rhine_private *rp)
1478 void __iomem *ioaddr = rp->base;
1480 iowrite8(0, ioaddr + MIICmd);
1481 iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1482 iowrite8(0x80, ioaddr + MIICmd);
1484 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1486 iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1489 /* Disable MII link status auto-polling (required for MDIO access) */
1490 static void rhine_disable_linkmon(struct rhine_private *rp)
1492 void __iomem *ioaddr = rp->base;
1494 iowrite8(0, ioaddr + MIICmd);
1496 if (rp->quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	/* MII_BMSR */
1499 /* Can be called from ISR. Evil. */
1502 /* 0x80 must be set immediately before turning it off */
1503 iowrite8(0x80, ioaddr + MIICmd);
1505 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1507 /* Heh. Now clear 0x80 again. */
1508 iowrite8(0, ioaddr + MIICmd);
1511 rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1514 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1516 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1518 struct rhine_private *rp = netdev_priv(dev);
1519 void __iomem *ioaddr = rp->base;
1522 rhine_disable_linkmon(rp);
1524 /* rhine_disable_linkmon already cleared MIICmd */
1525 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1526 iowrite8(regnum, ioaddr + MIIRegAddr);
1527 iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
1528 rhine_wait_bit_low(rp, MIICmd, 0x40);
1529 result = ioread16(ioaddr + MIIData);
1531 rhine_enable_linkmon(rp);
1535 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1537 struct rhine_private *rp = netdev_priv(dev);
1538 void __iomem *ioaddr = rp->base;
1540 rhine_disable_linkmon(rp);
1542 /* rhine_disable_linkmon already cleared MIICmd */
1543 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1544 iowrite8(regnum, ioaddr + MIIRegAddr);
1545 iowrite16(value, ioaddr + MIIData);
1546 iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
1547 rhine_wait_bit_low(rp, MIICmd, 0x20);
1549 rhine_enable_linkmon(rp);
1552 static void rhine_task_disable(struct rhine_private *rp)
1554 mutex_lock(&rp->task_lock);
1555 rp->task_enable = false;
1556 mutex_unlock(&rp->task_lock);
1558 cancel_work_sync(&rp->slow_event_task);
1559 cancel_work_sync(&rp->reset_task);
1562 static void rhine_task_enable(struct rhine_private *rp)
1564 mutex_lock(&rp->task_lock);
1565 rp->task_enable = true;
1566 mutex_unlock(&rp->task_lock);
1569 static int rhine_open(struct net_device *dev)
1571 struct rhine_private *rp = netdev_priv(dev);
1572 void __iomem *ioaddr = rp->base;
1575 rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1580 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
1582 rc = alloc_ring(dev);
1584 free_irq(rp->pdev->irq, dev);
1589 rhine_chip_reset(dev);
1590 rhine_task_enable(rp);
1591 init_registers(dev);
1593 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1594 __func__, ioread16(ioaddr + ChipCmd),
1595 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1597 netif_start_queue(dev);
1602 static void rhine_reset_task(struct work_struct *work)
1604 struct rhine_private *rp = container_of(work, struct rhine_private,
1606 struct net_device *dev = rp->dev;
1608 mutex_lock(&rp->task_lock);
1610 if (!rp->task_enable)
1613 napi_disable(&rp->napi);
1614 netif_tx_disable(dev);
1615 spin_lock_bh(&rp->lock);
1617 /* clear all descriptors */
1623 /* Reinitialize the hardware. */
1624 rhine_chip_reset(dev);
1625 init_registers(dev);
1627 spin_unlock_bh(&rp->lock);
1629 dev->trans_start = jiffies; /* prevent tx timeout */
1630 dev->stats.tx_errors++;
1631 netif_wake_queue(dev);
1634 mutex_unlock(&rp->task_lock);
1637 static void rhine_tx_timeout(struct net_device *dev)
1639 struct rhine_private *rp = netdev_priv(dev);
1640 void __iomem *ioaddr = rp->base;
1642 netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1643 ioread16(ioaddr + IntrStatus),
1644 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1646 schedule_work(&rp->reset_task);
1649 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1650 struct net_device *dev)
1652 struct rhine_private *rp = netdev_priv(dev);
1653 void __iomem *ioaddr = rp->base;
1656 /* Caution: the write order is important here, set the field
1657 with the "ownership" bits last. */
1659 /* Calculate the next Tx descriptor entry. */
1660 entry = rp->cur_tx % TX_RING_SIZE;
1662 if (skb_padto(skb, ETH_ZLEN))
1663 return NETDEV_TX_OK;
1665 rp->tx_skbuff[entry] = skb;
1667 if ((rp->quirks & rqRhineI) &&
1668 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1669 /* Must use alignment buffer. */
1670 if (skb->len > PKT_BUF_SZ) {
1671 /* packet too long, drop it */
1673 rp->tx_skbuff[entry] = NULL;
1674 dev->stats.tx_dropped++;
1675 return NETDEV_TX_OK;
1678 /* Padding is not copied and so must be redone. */
1679 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1680 if (skb->len < ETH_ZLEN)
1681 memset(rp->tx_buf[entry] + skb->len, 0,
1682 ETH_ZLEN - skb->len);
1683 rp->tx_skbuff_dma[entry] = 0;
1684 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1685 (rp->tx_buf[entry] -
1688 rp->tx_skbuff_dma[entry] =
1689 pci_map_single(rp->pdev, skb->data, skb->len,
1691 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1694 rp->tx_ring[entry].desc_length =
1695 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1697 if (unlikely(vlan_tx_tag_present(skb))) {
1698 u16 vid_pcp = vlan_tx_tag_get(skb);
1700 /* drop CFI/DEI bit, register needs VID and PCP */
1701 vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1702 ((vid_pcp & VLAN_PRIO_MASK) >> 1);
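		/* e.g. TCI 0x6005 (PCP 3, DEI 0, VID 5) becomes 0x3005:
		 * the VID stays in bits 11-0, the PCP moves to bits 14-12 */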
1703 rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1704 /* request tagging */
1705 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1708 rp->tx_ring[entry].tx_status = 0;
1712 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1717 /* Non-x86 Todo: explicitly flush cache lines here. */
1719 if (vlan_tx_tag_present(skb))
1720 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1721 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1723 /* Wake the potentially-idle transmit channel */
1724 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1728 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1729 netif_stop_queue(dev);
1731 netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1732 rp->cur_tx - 1, entry);
1734 return NETDEV_TX_OK;
1737 static void rhine_irq_disable(struct rhine_private *rp)
1739 iowrite16(0x0000, rp->base + IntrEnable);
1743 /* The interrupt handler does all of the Rx thread work and cleans up
1744 after the Tx thread. */
1745 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1747 struct net_device *dev = dev_instance;
1748 struct rhine_private *rp = netdev_priv(dev);
1752 status = rhine_get_events(rp);
1754 netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1756 if (status & RHINE_EVENT) {
1759 rhine_irq_disable(rp);
1760 napi_schedule(&rp->napi);
1763 if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1764 netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1768 return IRQ_RETVAL(handled);
/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
1773 static void rhine_tx(struct net_device *dev)
1775 struct rhine_private *rp = netdev_priv(dev);
1776 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1778 /* find and cleanup dirty tx descriptors */
1779 while (rp->dirty_tx != rp->cur_tx) {
1780 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1781 netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1783 if (txstatus & DescOwn)
1785 if (txstatus & 0x8000) {
1786 netif_dbg(rp, tx_done, dev,
1787 "Transmit error, Tx status %08x\n", txstatus);
1788 dev->stats.tx_errors++;
1789 if (txstatus & 0x0400)
1790 dev->stats.tx_carrier_errors++;
1791 if (txstatus & 0x0200)
1792 dev->stats.tx_window_errors++;
1793 if (txstatus & 0x0100)
1794 dev->stats.tx_aborted_errors++;
1795 if (txstatus & 0x0080)
1796 dev->stats.tx_heartbeat_errors++;
1797 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1798 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1799 dev->stats.tx_fifo_errors++;
1800 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1801 break; /* Keep the skb - we try again */
1803 /* Transmitter restarted in 'abnormal' handler. */
1805 if (rp->quirks & rqRhineI)
1806 dev->stats.collisions += (txstatus >> 3) & 0x0F;
1808 dev->stats.collisions += txstatus & 0x0F;
1809 netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1810 (txstatus >> 3) & 0xF, txstatus & 0xF);
1812 u64_stats_update_begin(&rp->tx_stats.syncp);
1813 rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
1814 rp->tx_stats.packets++;
1815 u64_stats_update_end(&rp->tx_stats.syncp);
1817 /* Free the original skb. */
1818 if (rp->tx_skbuff_dma[entry]) {
1819 pci_unmap_single(rp->pdev,
1820 rp->tx_skbuff_dma[entry],
1821 rp->tx_skbuff[entry]->len,
1824 dev_kfree_skb(rp->tx_skbuff[entry]);
1825 rp->tx_skbuff[entry] = NULL;
1826 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1828 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1829 netif_wake_queue(dev);
1833 * rhine_get_vlan_tci - extract TCI from Rx data buffer
1834 * @skb: pointer to sk_buff
1835 * @data_size: used data area of the buffer including CRC
1837 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1838 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1839 * aligned following the CRC.
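 *
 * Worked example: for data_size = 64 the TCI is read from
 * data + ((64 + 3) & ~3) + 2 = data + 66; the two TPID bytes at
 * offsets 64-65 are skipped.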
1841 static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1843 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1844 return be16_to_cpup((__be16 *)trailer);
1847 /* Process up to limit frames from receive ring */
1848 static int rhine_rx(struct net_device *dev, int limit)
1850 struct rhine_private *rp = netdev_priv(dev);
1852 int entry = rp->cur_rx % RX_RING_SIZE;
1854 netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1855 entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1857 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1858 for (count = 0; count < limit; ++count) {
1859 struct rx_desc *desc = rp->rx_head_desc;
1860 u32 desc_status = le32_to_cpu(desc->rx_status);
1861 u32 desc_length = le32_to_cpu(desc->desc_length);
1862 int data_size = desc_status >> 16;
1864 if (desc_status & DescOwn)
1867 netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1870 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1871 if ((desc_status & RxWholePkt) != RxWholePkt) {
1873 "Oversized Ethernet frame spanned multiple buffers, "
1874 "entry %#x length %d status %08x!\n",
1878 "Oversized Ethernet frame %p vs %p\n",
1880 &rp->rx_ring[entry]);
1881 dev->stats.rx_length_errors++;
1882 } else if (desc_status & RxErr) {
			/* There was an error. */
1884 netif_dbg(rp, rx_err, dev,
1885 "%s() Rx error %08x\n", __func__,
1887 dev->stats.rx_errors++;
1888 if (desc_status & 0x0030)
1889 dev->stats.rx_length_errors++;
1890 if (desc_status & 0x0048)
1891 dev->stats.rx_fifo_errors++;
1892 if (desc_status & 0x0004)
1893 dev->stats.rx_frame_errors++;
1894 if (desc_status & 0x0002) {
1895 /* this can also be updated outside the interrupt handler */
1896 spin_lock(&rp->lock);
1897 dev->stats.rx_crc_errors++;
1898 spin_unlock(&rp->lock);
1902 struct sk_buff *skb = NULL;
1903 /* Length should omit the CRC */
1904 int pkt_len = data_size - 4;
1907 /* Check if the packet is long enough to accept without
1908 copying to a minimally-sized skbuff. */
1909 if (pkt_len < rx_copybreak)
1910 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1912 pci_dma_sync_single_for_cpu(rp->pdev,
1913 rp->rx_skbuff_dma[entry],
1915 PCI_DMA_FROMDEVICE);
1917 skb_copy_to_linear_data(skb,
1918 rp->rx_skbuff[entry]->data,
1920 skb_put(skb, pkt_len);
1921 pci_dma_sync_single_for_device(rp->pdev,
1922 rp->rx_skbuff_dma[entry],
1924 PCI_DMA_FROMDEVICE);
1926 skb = rp->rx_skbuff[entry];
1928 netdev_err(dev, "Inconsistent Rx descriptor chain\n");
1931 rp->rx_skbuff[entry] = NULL;
1932 skb_put(skb, pkt_len);
1933 pci_unmap_single(rp->pdev,
1934 rp->rx_skbuff_dma[entry],
1936 PCI_DMA_FROMDEVICE);
1939 if (unlikely(desc_length & DescTag))
1940 vlan_tci = rhine_get_vlan_tci(skb, data_size);
1942 skb->protocol = eth_type_trans(skb, dev);
1944 if (unlikely(desc_length & DescTag))
1945 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
1946 netif_receive_skb(skb);
1948 u64_stats_update_begin(&rp->rx_stats.syncp);
1949 rp->rx_stats.bytes += pkt_len;
1950 rp->rx_stats.packets++;
1951 u64_stats_update_end(&rp->rx_stats.syncp);
1953 entry = (++rp->cur_rx) % RX_RING_SIZE;
1954 rp->rx_head_desc = &rp->rx_ring[entry];
1957 /* Refill the Rx ring buffers. */
1958 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1959 struct sk_buff *skb;
1960 entry = rp->dirty_rx % RX_RING_SIZE;
1961 if (rp->rx_skbuff[entry] == NULL) {
1962 skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1963 rp->rx_skbuff[entry] = skb;
1965 break; /* Better luck next round. */
1966 rp->rx_skbuff_dma[entry] =
1967 pci_map_single(rp->pdev, skb->data,
1969 PCI_DMA_FROMDEVICE);
1970 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1972 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
static void rhine_restart_tx(struct net_device *dev)
{
1979 struct rhine_private *rp = netdev_priv(dev);
1980 void __iomem *ioaddr = rp->base;
1981 int entry = rp->dirty_tx % TX_RING_SIZE;
1985 * If new errors occurred, we need to sort them out before doing Tx.
 * In that case the ISR will be back here real soon anyway.
1988 intr_status = rhine_get_events(rp);
1990 if ((intr_status & IntrTxErrSummary) == 0) {
1992 /* We know better than the chip where it should continue. */
1993 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1994 ioaddr + TxRingPtr);
1996 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1999 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2000 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2001 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2003 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2008 /* This should never happen */
2009 netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2015 static void rhine_slow_event_task(struct work_struct *work)
2017 struct rhine_private *rp =
2018 container_of(work, struct rhine_private, slow_event_task);
2019 struct net_device *dev = rp->dev;
2022 mutex_lock(&rp->task_lock);
2024 if (!rp->task_enable)
2027 intr_status = rhine_get_events(rp);
2028 rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2030 if (intr_status & IntrLinkChange)
2031 rhine_check_media(dev, 0);
2033 if (intr_status & IntrPCIErr)
2034 netif_warn(rp, hw, dev, "PCI error\n");
2036 iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2039 mutex_unlock(&rp->task_lock);
2042 static struct rtnl_link_stats64 *
2043 rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2045 struct rhine_private *rp = netdev_priv(dev);
2048 spin_lock_bh(&rp->lock);
	rhine_update_rx_crc_and_missed_errors(rp);
2050 spin_unlock_bh(&rp->lock);
2052 netdev_stats_to_stats64(stats, &dev->stats);
2055 start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp);
2056 stats->rx_packets = rp->rx_stats.packets;
2057 stats->rx_bytes = rp->rx_stats.bytes;
2058 } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start));
2061 start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp);
2062 stats->tx_packets = rp->tx_stats.packets;
2063 stats->tx_bytes = rp->tx_stats.bytes;
2064 } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start));
2069 static void rhine_set_rx_mode(struct net_device *dev)
2071 struct rhine_private *rp = netdev_priv(dev);
2072 void __iomem *ioaddr = rp->base;
2073 u32 mc_filter[2]; /* Multicast hash filter */
2074 u8 rx_mode = 0x0C; /* Note: 0x02=accept runt, 0x01=accept errs */
2075 struct netdev_hw_addr *ha;
2077 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2079 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2080 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2081 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2082 (dev->flags & IFF_ALLMULTI)) {
2083 /* Too many to match, or accept all multicasts. */
2084 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2085 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2086 } else if (rp->pdev->revision >= VT6105M) {
2088 u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */
2089 netdev_for_each_mc_addr(ha, dev) {
2092 rhine_set_cam(ioaddr, i, ha->addr);
2096 rhine_set_cam_mask(ioaddr, mCAMmask);
2098 memset(mc_filter, 0, sizeof(mc_filter));
2099 netdev_for_each_mc_addr(ha, dev) {
2100 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2102 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
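			/* e.g. bit_nr 37 sets bit 5 (37 & 31) of
			 * mc_filter[1] (37 >> 5), i.e. MulticastFilter1 */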
2104 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2105 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2107 /* enable/disable VLAN receive filtering */
2108 if (rp->pdev->revision >= VT6105M) {
2109 if (dev->flags & IFF_PROMISC)
2110 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2112 BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2114 BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2117 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2119 struct rhine_private *rp = netdev_priv(dev);
2121 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2122 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2123 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
2126 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2128 struct rhine_private *rp = netdev_priv(dev);
2131 mutex_lock(&rp->task_lock);
2132 rc = mii_ethtool_gset(&rp->mii_if, cmd);
2133 mutex_unlock(&rp->task_lock);
2138 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2140 struct rhine_private *rp = netdev_priv(dev);
2143 mutex_lock(&rp->task_lock);
2144 rc = mii_ethtool_sset(&rp->mii_if, cmd);
2145 rhine_set_carrier(&rp->mii_if);
2146 mutex_unlock(&rp->task_lock);
2151 static int netdev_nway_reset(struct net_device *dev)
2153 struct rhine_private *rp = netdev_priv(dev);
2155 return mii_nway_restart(&rp->mii_if);
2158 static u32 netdev_get_link(struct net_device *dev)
2160 struct rhine_private *rp = netdev_priv(dev);
2162 return mii_link_ok(&rp->mii_if);
2165 static u32 netdev_get_msglevel(struct net_device *dev)
2167 struct rhine_private *rp = netdev_priv(dev);
2169 return rp->msg_enable;
2172 static void netdev_set_msglevel(struct net_device *dev, u32 value)
2174 struct rhine_private *rp = netdev_priv(dev);
2176 rp->msg_enable = value;
2179 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2181 struct rhine_private *rp = netdev_priv(dev);
2183 if (!(rp->quirks & rqWOL))
2186 spin_lock_irq(&rp->lock);
2187 wol->supported = WAKE_PHY | WAKE_MAGIC |
2188 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2189 wol->wolopts = rp->wolopts;
2190 spin_unlock_irq(&rp->lock);
2193 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2195 struct rhine_private *rp = netdev_priv(dev);
2196 u32 support = WAKE_PHY | WAKE_MAGIC |
2197 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2199 if (!(rp->quirks & rqWOL))
2202 if (wol->wolopts & ~support)
2205 spin_lock_irq(&rp->lock);
2206 rp->wolopts = wol->wolopts;
2207 spin_unlock_irq(&rp->lock);
2212 static const struct ethtool_ops netdev_ethtool_ops = {
2213 .get_drvinfo = netdev_get_drvinfo,
2214 .get_settings = netdev_get_settings,
2215 .set_settings = netdev_set_settings,
2216 .nway_reset = netdev_nway_reset,
2217 .get_link = netdev_get_link,
2218 .get_msglevel = netdev_get_msglevel,
2219 .set_msglevel = netdev_set_msglevel,
2220 .get_wol = rhine_get_wol,
2221 .set_wol = rhine_set_wol,
2224 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2226 struct rhine_private *rp = netdev_priv(dev);
2229 if (!netif_running(dev))
2232 mutex_lock(&rp->task_lock);
2233 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2234 rhine_set_carrier(&rp->mii_if);
2235 mutex_unlock(&rp->task_lock);
2240 static int rhine_close(struct net_device *dev)
2242 struct rhine_private *rp = netdev_priv(dev);
2243 void __iomem *ioaddr = rp->base;
2245 rhine_task_disable(rp);
2246 napi_disable(&rp->napi);
2247 netif_stop_queue(dev);
2249 netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2250 ioread16(ioaddr + ChipCmd));
2252 /* Switch to loopback mode to avoid hardware races. */
2253 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2255 rhine_irq_disable(rp);
2257 /* Stop the chip's Tx and Rx processes. */
2258 iowrite16(CmdStop, ioaddr + ChipCmd);
2260 free_irq(rp->pdev->irq, dev);
2269 static void rhine_remove_one(struct pci_dev *pdev)
2271 struct net_device *dev = pci_get_drvdata(pdev);
2272 struct rhine_private *rp = netdev_priv(dev);
2274 unregister_netdev(dev);
2276 pci_iounmap(pdev, rp->base);
2277 pci_release_regions(pdev);
2280 pci_disable_device(pdev);
2281 pci_set_drvdata(pdev, NULL);
2284 static void rhine_shutdown (struct pci_dev *pdev)
2286 struct net_device *dev = pci_get_drvdata(pdev);
2287 struct rhine_private *rp = netdev_priv(dev);
2288 void __iomem *ioaddr = rp->base;
2290 if (!(rp->quirks & rqWOL))
2291 return; /* Nothing to do for non-WOL adapters */
2293 rhine_power_init(dev);
2295 /* Make sure we use pattern 0, 1 and not 4, 5 */
2296 if (rp->quirks & rq6patterns)
2297 iowrite8(0x04, ioaddr + WOLcgClr);
2299 spin_lock(&rp->lock);
2301 if (rp->wolopts & WAKE_MAGIC) {
2302 iowrite8(WOLmagic, ioaddr + WOLcrSet);
2304 * Turn EEPROM-controlled wake-up back on -- some hardware may
2305 * not cooperate otherwise.
2307 iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2310 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2311 iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2313 if (rp->wolopts & WAKE_PHY)
2314 iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2316 if (rp->wolopts & WAKE_UCAST)
2317 iowrite8(WOLucast, ioaddr + WOLcrSet);
2320 /* Enable legacy WOL (for old motherboards) */
2321 iowrite8(0x01, ioaddr + PwcfgSet);
2322 iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2325 spin_unlock(&rp->lock);
2327 if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2328 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2330 pci_wake_from_d3(pdev, true);
2331 pci_set_power_state(pdev, PCI_D3hot);
2335 #ifdef CONFIG_PM_SLEEP
2336 static int rhine_suspend(struct device *device)
2338 struct pci_dev *pdev = to_pci_dev(device);
2339 struct net_device *dev = pci_get_drvdata(pdev);
2340 struct rhine_private *rp = netdev_priv(dev);
2342 if (!netif_running(dev))
2345 rhine_task_disable(rp);
2346 rhine_irq_disable(rp);
2347 napi_disable(&rp->napi);
2349 netif_device_detach(dev);
2351 rhine_shutdown(pdev);
2356 static int rhine_resume(struct device *device)
2358 struct pci_dev *pdev = to_pci_dev(device);
2359 struct net_device *dev = pci_get_drvdata(pdev);
2360 struct rhine_private *rp = netdev_priv(dev);
2362 if (!netif_running(dev))
2366 enable_mmio(rp->pioaddr, rp->quirks);
2368 rhine_power_init(dev);
2373 rhine_task_enable(rp);
2374 spin_lock_bh(&rp->lock);
2375 init_registers(dev);
2376 spin_unlock_bh(&rp->lock);
2378 netif_device_attach(dev);
2383 static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2384 #define RHINE_PM_OPS (&rhine_pm_ops)
2388 #define RHINE_PM_OPS NULL
2390 #endif /* !CONFIG_PM_SLEEP */
2392 static struct pci_driver rhine_driver = {
2394 .id_table = rhine_pci_tbl,
2395 .probe = rhine_init_one,
2396 .remove = rhine_remove_one,
2397 .shutdown = rhine_shutdown,
2398 .driver.pm = RHINE_PM_OPS,
2401 static struct dmi_system_id __initdata rhine_dmi_table[] = {
2405 DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2406 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2412 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2413 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2419 static int __init rhine_init(void)
2421 /* when a module, this is printed whether or not devices are found in probe */
2423 pr_info("%s\n", version);
2425 if (dmi_check_system(rhine_dmi_table)) {
2426 /* these BIOSes fail at PXE boot if chip is in D3 */
2428 pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2431 pr_info("avoid_D3 set\n");
2433 return pci_register_driver(&rhine_driver);
2437 static void __exit rhine_cleanup(void)
2439 pci_unregister_driver(&rhine_driver);
2443 module_init(rhine_init);
2444 module_exit(rhine_cleanup);