1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (C) 2015 Microchip Technology
5 #include <linux/version.h>
6 #include <linux/module.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/usb.h>
11 #include <linux/crc32.h>
12 #include <linux/signal.h>
13 #include <linux/slab.h>
14 #include <linux/if_vlan.h>
15 #include <linux/uaccess.h>
16 #include <linux/linkmode.h>
17 #include <linux/list.h>
19 #include <linux/ipv6.h>
20 #include <linux/mdio.h>
21 #include <linux/phy.h>
22 #include <net/ip6_checksum.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
33 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME "lan78xx"
37 #define TX_TIMEOUT_JIFFIES (5 * HZ)
38 #define THROTTLE_JIFFIES (HZ / 8)
39 #define UNLINK_TIMEOUT_MS 3
41 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
43 #define SS_USB_PKT_SIZE (1024)
44 #define HS_USB_PKT_SIZE (512)
45 #define FS_USB_PKT_SIZE (64)
47 #define MAX_RX_FIFO_SIZE (12 * 1024)
48 #define MAX_TX_FIFO_SIZE (12 * 1024)
49 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
50 #define DEFAULT_BULK_IN_DELAY (0x0800)
51 #define MAX_SINGLE_PACKET_SIZE (9000)
52 #define DEFAULT_TX_CSUM_ENABLE (true)
53 #define DEFAULT_RX_CSUM_ENABLE (true)
54 #define DEFAULT_TSO_CSUM_ENABLE (true)
55 #define DEFAULT_VLAN_FILTER_ENABLE (true)
56 #define DEFAULT_VLAN_RX_OFFLOAD (true)
57 #define TX_OVERHEAD (8)
60 #define LAN78XX_USB_VENDOR_ID (0x0424)
61 #define LAN7800_USB_PRODUCT_ID (0x7800)
62 #define LAN7850_USB_PRODUCT_ID (0x7850)
63 #define LAN7801_USB_PRODUCT_ID (0x7801)
64 #define LAN78XX_EEPROM_MAGIC (0x78A5)
65 #define LAN78XX_OTP_MAGIC (0x78F3)
70 #define EEPROM_INDICATOR (0xA5)
71 #define EEPROM_MAC_OFFSET (0x01)
72 #define MAX_EEPROM_SIZE 512
73 #define OTP_INDICATOR_1 (0xF3)
74 #define OTP_INDICATOR_2 (0xF7)
76 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
77 WAKE_MCAST | WAKE_BCAST | \
78 WAKE_ARP | WAKE_MAGIC)
80 /* USB related defines */
81 #define BULK_IN_PIPE 1
82 #define BULK_OUT_PIPE 2
84 /* default autosuspend delay (mSec)*/
85 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
87 /* statistic update interval (mSec) */
88 #define STAT_UPDATE_TIMER (1 * 1000)
90 /* defines interrupts from interrupt EP */
91 #define MAX_INT_EP (32)
92 #define INT_EP_INTEP (31)
93 #define INT_EP_OTP_WR_DONE (28)
94 #define INT_EP_EEE_TX_LPI_START (26)
95 #define INT_EP_EEE_TX_LPI_STOP (25)
96 #define INT_EP_EEE_RX_LPI (24)
97 #define INT_EP_MAC_RESET_TIMEOUT (23)
98 #define INT_EP_RDFO (22)
99 #define INT_EP_TXE (21)
100 #define INT_EP_USB_STATUS (20)
101 #define INT_EP_TX_DIS (19)
102 #define INT_EP_RX_DIS (18)
103 #define INT_EP_PHY (17)
104 #define INT_EP_DP (16)
105 #define INT_EP_MAC_ERR (15)
106 #define INT_EP_TDFU (14)
107 #define INT_EP_TDFO (13)
108 #define INT_EP_UTX (12)
109 #define INT_EP_GPIO_11 (11)
110 #define INT_EP_GPIO_10 (10)
111 #define INT_EP_GPIO_9 (9)
112 #define INT_EP_GPIO_8 (8)
113 #define INT_EP_GPIO_7 (7)
114 #define INT_EP_GPIO_6 (6)
115 #define INT_EP_GPIO_5 (5)
116 #define INT_EP_GPIO_4 (4)
117 #define INT_EP_GPIO_3 (3)
118 #define INT_EP_GPIO_2 (2)
119 #define INT_EP_GPIO_1 (1)
120 #define INT_EP_GPIO_0 (0)
122 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
124 "RX Alignment Errors",
125 "Rx Fragment Errors",
127 "RX Undersize Frame Errors",
128 "RX Oversize Frame Errors",
130 "RX Unicast Byte Count",
131 "RX Broadcast Byte Count",
132 "RX Multicast Byte Count",
134 "RX Broadcast Frames",
135 "RX Multicast Frames",
138 "RX 65 - 127 Byte Frames",
139 "RX 128 - 255 Byte Frames",
140 "RX 256 - 511 Bytes Frames",
141 "RX 512 - 1023 Byte Frames",
142 "RX 1024 - 1518 Byte Frames",
143 "RX Greater 1518 Byte Frames",
144 "EEE RX LPI Transitions",
147 "TX Excess Deferral Errors",
150 "TX Single Collisions",
151 "TX Multiple Collisions",
152 "TX Excessive Collision",
153 "TX Late Collisions",
154 "TX Unicast Byte Count",
155 "TX Broadcast Byte Count",
156 "TX Multicast Byte Count",
158 "TX Broadcast Frames",
159 "TX Multicast Frames",
162 "TX 65 - 127 Byte Frames",
163 "TX 128 - 255 Byte Frames",
164 "TX 256 - 511 Bytes Frames",
165 "TX 512 - 1023 Byte Frames",
166 "TX 1024 - 1518 Byte Frames",
167 "TX Greater 1518 Byte Frames",
168 "EEE TX LPI Transitions",
172 struct lan78xx_statstage {
174 u32 rx_alignment_errors;
175 u32 rx_fragment_errors;
176 u32 rx_jabber_errors;
177 u32 rx_undersize_frame_errors;
178 u32 rx_oversize_frame_errors;
179 u32 rx_dropped_frames;
180 u32 rx_unicast_byte_count;
181 u32 rx_broadcast_byte_count;
182 u32 rx_multicast_byte_count;
183 u32 rx_unicast_frames;
184 u32 rx_broadcast_frames;
185 u32 rx_multicast_frames;
187 u32 rx_64_byte_frames;
188 u32 rx_65_127_byte_frames;
189 u32 rx_128_255_byte_frames;
190 u32 rx_256_511_bytes_frames;
191 u32 rx_512_1023_byte_frames;
192 u32 rx_1024_1518_byte_frames;
193 u32 rx_greater_1518_byte_frames;
194 u32 eee_rx_lpi_transitions;
197 u32 tx_excess_deferral_errors;
198 u32 tx_carrier_errors;
199 u32 tx_bad_byte_count;
200 u32 tx_single_collisions;
201 u32 tx_multiple_collisions;
202 u32 tx_excessive_collision;
203 u32 tx_late_collisions;
204 u32 tx_unicast_byte_count;
205 u32 tx_broadcast_byte_count;
206 u32 tx_multicast_byte_count;
207 u32 tx_unicast_frames;
208 u32 tx_broadcast_frames;
209 u32 tx_multicast_frames;
211 u32 tx_64_byte_frames;
212 u32 tx_65_127_byte_frames;
213 u32 tx_128_255_byte_frames;
214 u32 tx_256_511_bytes_frames;
215 u32 tx_512_1023_byte_frames;
216 u32 tx_1024_1518_byte_frames;
217 u32 tx_greater_1518_byte_frames;
218 u32 eee_tx_lpi_transitions;
222 struct lan78xx_statstage64 {
224 u64 rx_alignment_errors;
225 u64 rx_fragment_errors;
226 u64 rx_jabber_errors;
227 u64 rx_undersize_frame_errors;
228 u64 rx_oversize_frame_errors;
229 u64 rx_dropped_frames;
230 u64 rx_unicast_byte_count;
231 u64 rx_broadcast_byte_count;
232 u64 rx_multicast_byte_count;
233 u64 rx_unicast_frames;
234 u64 rx_broadcast_frames;
235 u64 rx_multicast_frames;
237 u64 rx_64_byte_frames;
238 u64 rx_65_127_byte_frames;
239 u64 rx_128_255_byte_frames;
240 u64 rx_256_511_bytes_frames;
241 u64 rx_512_1023_byte_frames;
242 u64 rx_1024_1518_byte_frames;
243 u64 rx_greater_1518_byte_frames;
244 u64 eee_rx_lpi_transitions;
247 u64 tx_excess_deferral_errors;
248 u64 tx_carrier_errors;
249 u64 tx_bad_byte_count;
250 u64 tx_single_collisions;
251 u64 tx_multiple_collisions;
252 u64 tx_excessive_collision;
253 u64 tx_late_collisions;
254 u64 tx_unicast_byte_count;
255 u64 tx_broadcast_byte_count;
256 u64 tx_multicast_byte_count;
257 u64 tx_unicast_frames;
258 u64 tx_broadcast_frames;
259 u64 tx_multicast_frames;
261 u64 tx_64_byte_frames;
262 u64 tx_65_127_byte_frames;
263 u64 tx_128_255_byte_frames;
264 u64 tx_256_511_bytes_frames;
265 u64 tx_512_1023_byte_frames;
266 u64 tx_1024_1518_byte_frames;
267 u64 tx_greater_1518_byte_frames;
268 u64 eee_tx_lpi_transitions;
272 static u32 lan78xx_regs[] = {
294 #define PHY_REG_SIZE (32 * sizeof(u32))
298 struct lan78xx_priv {
299 struct lan78xx_net *dev;
301 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
302 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
303 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
304 struct mutex dataport_mutex; /* for dataport access */
305 spinlock_t rfe_ctl_lock; /* for rfe register access */
306 struct work_struct set_multicast;
307 struct work_struct set_vlan;
321 struct skb_data { /* skb->cb is one of these */
323 struct lan78xx_net *dev;
324 enum skb_state state;
330 struct usb_ctrlrequest req;
331 struct lan78xx_net *dev;
334 #define EVENT_TX_HALT 0
335 #define EVENT_RX_HALT 1
336 #define EVENT_RX_MEMORY 2
337 #define EVENT_STS_SPLIT 3
338 #define EVENT_LINK_RESET 4
339 #define EVENT_RX_PAUSED 5
340 #define EVENT_DEV_WAKING 6
341 #define EVENT_DEV_ASLEEP 7
342 #define EVENT_DEV_OPEN 8
343 #define EVENT_STAT_UPDATE 9
346 struct mutex access_lock; /* for stats access */
347 struct lan78xx_statstage saved;
348 struct lan78xx_statstage rollover_count;
349 struct lan78xx_statstage rollover_max;
350 struct lan78xx_statstage64 curr_stat;
353 struct irq_domain_data {
354 struct irq_domain *irqdomain;
356 struct irq_chip *irqchip;
357 irq_flow_handler_t irq_handler;
359 struct mutex irq_lock; /* for irq bus access */
363 struct net_device *net;
364 struct usb_device *udev;
365 struct usb_interface *intf;
370 struct sk_buff_head rxq;
371 struct sk_buff_head txq;
372 struct sk_buff_head done;
373 struct sk_buff_head rxq_pause;
374 struct sk_buff_head txq_pend;
376 struct tasklet_struct bh;
377 struct delayed_work wq;
379 struct usb_host_endpoint *ep_blkin;
380 struct usb_host_endpoint *ep_blkout;
381 struct usb_host_endpoint *ep_intr;
385 struct urb *urb_intr;
386 struct usb_anchor deferred;
388 struct mutex phy_mutex; /* for phy access */
389 unsigned pipe_in, pipe_out, pipe_intr;
391 u32 hard_mtu; /* count any extra framing */
392 size_t rx_urb_size; /* size for rx urbs */
396 wait_queue_head_t *wait;
397 unsigned char suspend_count;
400 struct timer_list delay;
401 struct timer_list stat_monitor;
403 unsigned long data[5];
410 struct mii_bus *mdiobus;
411 phy_interface_t interface;
414 u8 fc_request_control;
417 struct statstage stats;
419 struct irq_domain_data domain_data;
422 /* define external phy id */
423 #define PHY_LAN8835 (0x0007C130)
424 #define PHY_KSZ9031RNX (0x00221620)
426 /* use ethtool to change the level for any given device */
/* netif_msg_* bitmap for this driver; -1 selects the driver default.
 * Exposed as a module parameter (see MODULE_PARM_DESC below).
 */
427 static int msg_level = -1;
428 module_param(msg_level, int, 0);
429 MODULE_PARM_DESC(msg_level, "Override default message level");
431 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
433 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
439 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
440 USB_VENDOR_REQUEST_READ_REGISTER,
441 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
442 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
443 if (likely(ret >= 0)) {
447 netdev_warn(dev->net,
448 "Failed to read register index 0x%08x. ret = %d",
457 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
459 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
468 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
469 USB_VENDOR_REQUEST_WRITE_REGISTER,
470 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
471 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
472 if (unlikely(ret < 0)) {
473 netdev_warn(dev->net,
474 "Failed to write register index 0x%08x. ret = %d",
483 static int lan78xx_read_stats(struct lan78xx_net *dev,
484 struct lan78xx_statstage *data)
488 struct lan78xx_statstage *stats;
492 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
496 ret = usb_control_msg(dev->udev,
497 usb_rcvctrlpipe(dev->udev, 0),
498 USB_VENDOR_REQUEST_GET_STATS,
499 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
504 USB_CTRL_SET_TIMEOUT);
505 if (likely(ret >= 0)) {
508 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
509 le32_to_cpus(&src[i]);
513 netdev_warn(dev->net,
514 "Failed to read stat ret = 0x%x", ret);
/* If the freshly-read 32-bit counter is below the previously saved
 * value the hardware counter wrapped; bump the rollover count so
 * lan78xx_update_stats can reconstruct the true 64-bit total.
 * do { } while (0) makes the macro safe as a single statement.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if (struct1->member < dev_stats.saved.member)		\
			dev_stats.rollover_count.member++;		\
	} while (0)
527 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
528 struct lan78xx_statstage *stats)
530 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
531 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
532 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
533 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
534 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
535 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
536 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
537 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
538 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
539 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
540 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
541 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
542 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
543 check_counter_rollover(stats, dev->stats, rx_pause_frames);
544 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
545 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
546 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
547 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
548 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
549 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
550 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
551 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
552 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
553 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
554 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
555 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
556 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
557 check_counter_rollover(stats, dev->stats, tx_single_collisions);
558 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
559 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
560 check_counter_rollover(stats, dev->stats, tx_late_collisions);
561 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
562 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
563 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
564 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
565 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
566 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
567 check_counter_rollover(stats, dev->stats, tx_pause_frames);
568 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
569 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
570 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
571 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
572 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
573 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
574 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
575 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
576 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
578 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
581 static void lan78xx_update_stats(struct lan78xx_net *dev)
583 u32 *p, *count, *max;
586 struct lan78xx_statstage lan78xx_stats;
588 if (usb_autopm_get_interface(dev->intf) < 0)
591 p = (u32 *)&lan78xx_stats;
592 count = (u32 *)&dev->stats.rollover_count;
593 max = (u32 *)&dev->stats.rollover_max;
594 data = (u64 *)&dev->stats.curr_stat;
596 mutex_lock(&dev->stats.access_lock);
598 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
599 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
601 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
602 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
604 mutex_unlock(&dev->stats.access_lock);
606 usb_autopm_put_interface(dev->intf);
609 /* Loop until the read is completed with timeout called with phy_mutex held */
610 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
612 unsigned long start_time = jiffies;
617 ret = lan78xx_read_reg(dev, MII_ACC, &val);
618 if (unlikely(ret < 0))
621 if (!(val & MII_ACC_MII_BUSY_))
623 } while (!time_after(jiffies, start_time + HZ));
628 static inline u32 mii_access(int id, int index, int read)
632 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
633 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
635 ret |= MII_ACC_MII_READ_;
637 ret |= MII_ACC_MII_WRITE_;
638 ret |= MII_ACC_MII_BUSY_;
643 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
645 unsigned long start_time = jiffies;
650 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
651 if (unlikely(ret < 0))
654 if (!(val & E2P_CMD_EPC_BUSY_) ||
655 (val & E2P_CMD_EPC_TIMEOUT_))
657 usleep_range(40, 100);
658 } while (!time_after(jiffies, start_time + HZ));
660 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
661 netdev_warn(dev->net, "EEPROM read operation timeout");
668 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
670 unsigned long start_time = jiffies;
675 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
676 if (unlikely(ret < 0))
679 if (!(val & E2P_CMD_EPC_BUSY_))
682 usleep_range(40, 100);
683 } while (!time_after(jiffies, start_time + HZ));
685 netdev_warn(dev->net, "EEPROM is busy");
689 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
690 u32 length, u8 *data)
697 /* depends on chip, some EEPROM pins are muxed with LED function.
698 * disable & restore LED function to access EEPROM.
700 ret = lan78xx_read_reg(dev, HW_CFG, &val);
702 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
703 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
704 ret = lan78xx_write_reg(dev, HW_CFG, val);
707 retval = lan78xx_eeprom_confirm_not_busy(dev);
711 for (i = 0; i < length; i++) {
712 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
713 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
714 ret = lan78xx_write_reg(dev, E2P_CMD, val);
715 if (unlikely(ret < 0)) {
720 retval = lan78xx_wait_eeprom(dev);
724 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
725 if (unlikely(ret < 0)) {
730 data[i] = val & 0xFF;
736 if (dev->chipid == ID_REV_CHIP_ID_7800_)
737 ret = lan78xx_write_reg(dev, HW_CFG, saved);
742 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
743 u32 length, u8 *data)
748 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
749 if ((ret == 0) && (sig == EEPROM_INDICATOR))
750 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
757 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
758 u32 length, u8 *data)
765 /* depends on chip, some EEPROM pins are muxed with LED function.
766 * disable & restore LED function to access EEPROM.
768 ret = lan78xx_read_reg(dev, HW_CFG, &val);
770 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
771 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
772 ret = lan78xx_write_reg(dev, HW_CFG, val);
775 retval = lan78xx_eeprom_confirm_not_busy(dev);
779 /* Issue write/erase enable command */
780 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
781 ret = lan78xx_write_reg(dev, E2P_CMD, val);
782 if (unlikely(ret < 0)) {
787 retval = lan78xx_wait_eeprom(dev);
791 for (i = 0; i < length; i++) {
792 /* Fill data register */
794 ret = lan78xx_write_reg(dev, E2P_DATA, val);
800 /* Send "write" command */
801 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
802 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
803 ret = lan78xx_write_reg(dev, E2P_CMD, val);
809 retval = lan78xx_wait_eeprom(dev);
818 if (dev->chipid == ID_REV_CHIP_ID_7800_)
819 ret = lan78xx_write_reg(dev, HW_CFG, saved);
824 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
825 u32 length, u8 *data)
830 unsigned long timeout;
832 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
834 if (buf & OTP_PWR_DN_PWRDN_N_) {
835 /* clear it and wait to be cleared */
836 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
838 timeout = jiffies + HZ;
841 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
842 if (time_after(jiffies, timeout)) {
843 netdev_warn(dev->net,
844 "timeout on OTP_PWR_DN");
847 } while (buf & OTP_PWR_DN_PWRDN_N_);
850 for (i = 0; i < length; i++) {
851 ret = lan78xx_write_reg(dev, OTP_ADDR1,
852 ((offset + i) >> 8) & OTP_ADDR1_15_11);
853 ret = lan78xx_write_reg(dev, OTP_ADDR2,
854 ((offset + i) & OTP_ADDR2_10_3));
856 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
857 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
859 timeout = jiffies + HZ;
862 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
863 if (time_after(jiffies, timeout)) {
864 netdev_warn(dev->net,
865 "timeout on OTP_STATUS");
868 } while (buf & OTP_STATUS_BUSY_);
870 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
872 data[i] = (u8)(buf & 0xFF);
878 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
879 u32 length, u8 *data)
884 unsigned long timeout;
886 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
888 if (buf & OTP_PWR_DN_PWRDN_N_) {
889 /* clear it and wait to be cleared */
890 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
892 timeout = jiffies + HZ;
895 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
896 if (time_after(jiffies, timeout)) {
897 netdev_warn(dev->net,
898 "timeout on OTP_PWR_DN completion");
901 } while (buf & OTP_PWR_DN_PWRDN_N_);
904 /* set to BYTE program mode */
905 ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
907 for (i = 0; i < length; i++) {
908 ret = lan78xx_write_reg(dev, OTP_ADDR1,
909 ((offset + i) >> 8) & OTP_ADDR1_15_11);
910 ret = lan78xx_write_reg(dev, OTP_ADDR2,
911 ((offset + i) & OTP_ADDR2_10_3));
912 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
913 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
914 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
916 timeout = jiffies + HZ;
919 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
920 if (time_after(jiffies, timeout)) {
921 netdev_warn(dev->net,
922 "Timeout on OTP_STATUS completion");
925 } while (buf & OTP_STATUS_BUSY_);
931 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
932 u32 length, u8 *data)
937 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
940 if (sig == OTP_INDICATOR_2)
942 else if (sig != OTP_INDICATOR_1)
945 ret = lan78xx_read_raw_otp(dev, offset, length, data);
951 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
955 for (i = 0; i < 100; i++) {
958 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
959 if (unlikely(ret < 0))
962 if (dp_sel & DP_SEL_DPRDY_)
965 usleep_range(40, 100);
968 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
973 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
974 u32 addr, u32 length, u32 *buf)
976 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
980 if (usb_autopm_get_interface(dev->intf) < 0)
983 mutex_lock(&pdata->dataport_mutex);
985 ret = lan78xx_dataport_wait_not_busy(dev);
989 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
991 dp_sel &= ~DP_SEL_RSEL_MASK_;
992 dp_sel |= ram_select;
993 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
995 for (i = 0; i < length; i++) {
996 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
998 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1000 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1002 ret = lan78xx_dataport_wait_not_busy(dev);
1008 mutex_unlock(&pdata->dataport_mutex);
1009 usb_autopm_put_interface(dev->intf);
1014 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1015 int index, u8 addr[ETH_ALEN])
1019 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1021 temp = addr[2] | (temp << 8);
1022 temp = addr[1] | (temp << 8);
1023 temp = addr[0] | (temp << 8);
1024 pdata->pfilter_table[index][1] = temp;
1026 temp = addr[4] | (temp << 8);
1027 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1028 pdata->pfilter_table[index][0] = temp;
1032 /* returns hash bit number for given MAC address */
1033 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1035 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1038 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1040 struct lan78xx_priv *pdata =
1041 container_of(param, struct lan78xx_priv, set_multicast);
1042 struct lan78xx_net *dev = pdata->dev;
1046 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1049 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1050 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1052 for (i = 1; i < NUM_OF_MAF; i++) {
1053 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1054 ret = lan78xx_write_reg(dev, MAF_LO(i),
1055 pdata->pfilter_table[i][1]);
1056 ret = lan78xx_write_reg(dev, MAF_HI(i),
1057 pdata->pfilter_table[i][0]);
1060 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1063 static void lan78xx_set_multicast(struct net_device *netdev)
1065 struct lan78xx_net *dev = netdev_priv(netdev);
1066 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1067 unsigned long flags;
1070 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1072 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1073 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1075 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1076 pdata->mchash_table[i] = 0;
1077 /* pfilter_table[0] has own HW address */
1078 for (i = 1; i < NUM_OF_MAF; i++) {
1079 pdata->pfilter_table[i][0] =
1080 pdata->pfilter_table[i][1] = 0;
1083 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1085 if (dev->net->flags & IFF_PROMISC) {
1086 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1087 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1089 if (dev->net->flags & IFF_ALLMULTI) {
1090 netif_dbg(dev, drv, dev->net,
1091 "receive all multicast enabled");
1092 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1096 if (netdev_mc_count(dev->net)) {
1097 struct netdev_hw_addr *ha;
1100 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1102 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1105 netdev_for_each_mc_addr(ha, netdev) {
1106 /* set first 32 into Perfect Filter */
1108 lan78xx_set_addr_filter(pdata, i, ha->addr);
1110 u32 bitnum = lan78xx_hash(ha->addr);
1112 pdata->mchash_table[bitnum / 32] |=
1113 (1 << (bitnum % 32));
1114 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1120 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1122 /* defer register writes to a sleepable context */
1123 schedule_work(&pdata->set_multicast);
1126 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1127 u16 lcladv, u16 rmtadv)
1129 u32 flow = 0, fct_flow = 0;
1133 if (dev->fc_autoneg)
1134 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1136 cap = dev->fc_request_control;
1138 if (cap & FLOW_CTRL_TX)
1139 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1141 if (cap & FLOW_CTRL_RX)
1142 flow |= FLOW_CR_RX_FCEN_;
1144 if (dev->udev->speed == USB_SPEED_SUPER)
1146 else if (dev->udev->speed == USB_SPEED_HIGH)
1149 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1150 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1151 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1153 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1155 /* threshold value should be set before enabling flow */
1156 ret = lan78xx_write_reg(dev, FLOW, flow);
1161 static int lan78xx_link_reset(struct lan78xx_net *dev)
1163 struct phy_device *phydev = dev->net->phydev;
1164 struct ethtool_link_ksettings ecmd;
1165 int ladv, radv, ret;
1168 /* clear LAN78xx interrupt status */
1169 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1170 if (unlikely(ret < 0))
1173 phy_read_status(phydev);
1175 if (!phydev->link && dev->link_on) {
1176 dev->link_on = false;
1179 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1180 if (unlikely(ret < 0))
1183 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1184 if (unlikely(ret < 0))
1187 del_timer(&dev->stat_monitor);
1188 } else if (phydev->link && !dev->link_on) {
1189 dev->link_on = true;
1191 phy_ethtool_ksettings_get(phydev, &ecmd);
1193 if (dev->udev->speed == USB_SPEED_SUPER) {
1194 if (ecmd.base.speed == 1000) {
1196 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1197 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1198 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1200 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1201 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1202 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1204 /* enable U1 & U2 */
1205 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1206 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1207 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1208 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1212 ladv = phy_read(phydev, MII_ADVERTISE);
1216 radv = phy_read(phydev, MII_LPA);
1220 netif_dbg(dev, link, dev->net,
1221 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1222 ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1224 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1227 if (!timer_pending(&dev->stat_monitor)) {
1229 mod_timer(&dev->stat_monitor,
1230 jiffies + STAT_UPDATE_TIMER);
1233 tasklet_schedule(&dev->bh);
1239 /* some work can't be done in tasklets, so we use keventd
1241 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1242 * but tasklet_schedule() doesn't. hope the failure is rare.
1244 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1246 set_bit(work, &dev->flags);
1247 if (!schedule_delayed_work(&dev->wq, 0))
1248 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1251 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1255 if (urb->actual_length != 4) {
1256 netdev_warn(dev->net,
1257 "unexpected urb length %d", urb->actual_length);
1261 memcpy(&intdata, urb->transfer_buffer, 4);
1262 le32_to_cpus(&intdata);
1264 if (intdata & INT_ENP_PHY_INT) {
1265 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1266 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1268 if (dev->domain_data.phyirq > 0) {
1269 local_irq_disable();
1270 generic_handle_irq(dev->domain_data.phyirq);
1274 netdev_warn(dev->net,
1275 "unexpected interrupt: 0x%08x\n", intdata);
1278 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1280 return MAX_EEPROM_SIZE;
1283 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1284 struct ethtool_eeprom *ee, u8 *data)
1286 struct lan78xx_net *dev = netdev_priv(netdev);
1289 ret = usb_autopm_get_interface(dev->intf);
1293 ee->magic = LAN78XX_EEPROM_MAGIC;
1295 ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1297 usb_autopm_put_interface(dev->intf);
1302 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1303 struct ethtool_eeprom *ee, u8 *data)
1305 struct lan78xx_net *dev = netdev_priv(netdev);
1308 ret = usb_autopm_get_interface(dev->intf);
1312 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1313 * to load data from EEPROM
1315 if (ee->magic == LAN78XX_EEPROM_MAGIC)
1316 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1317 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1318 (ee->offset == 0) &&
1320 (data[0] == OTP_INDICATOR_1))
1321 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1323 usb_autopm_put_interface(dev->intf);
1328 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1331 if (stringset == ETH_SS_STATS)
1332 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1335 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1337 if (sset == ETH_SS_STATS)
1338 return ARRAY_SIZE(lan78xx_gstrings);
1343 static void lan78xx_get_stats(struct net_device *netdev,
1344 struct ethtool_stats *stats, u64 *data)
1346 struct lan78xx_net *dev = netdev_priv(netdev);
1348 lan78xx_update_stats(dev);
1350 mutex_lock(&dev->stats.access_lock);
1351 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1352 mutex_unlock(&dev->stats.access_lock);
/* ethtool .get_wol: report Wake-on-LAN capability and current selection.
 * WoL is only advertised when USB_CFG0 says remote wakeup is enabled.
 */
1355 static void lan78xx_get_wol(struct net_device *netdev,
1356 struct ethtool_wolinfo *wol)
1358 struct lan78xx_net *dev = netdev_priv(netdev);
1361 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1363 if (usb_autopm_get_interface(dev->intf) < 0)
1366 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1367 if (unlikely(ret < 0)) {
/* remote-wakeup capable: advertise everything, report stored selection */
1371 if (buf & USB_CFG_RMT_WKP_) {
1372 wol->supported = WAKE_ALL;
1373 wol->wolopts = pdata->wol;
1380 usb_autopm_put_interface(dev->intf);
/* ethtool .set_wol: store the requested WoL options, arm USB-level wakeup
 * accordingly, and propagate the selection to the PHY.
 */
1383 static int lan78xx_set_wol(struct net_device *netdev,
1384 struct ethtool_wolinfo *wol)
1386 struct lan78xx_net *dev = netdev_priv(netdev);
1387 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1390 ret = usb_autopm_get_interface(dev->intf);
/* reject option bits outside the supported WAKE_ALL set */
1394 if (wol->wolopts & ~WAKE_ALL)
1397 pdata->wol = wol->wolopts;
1399 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1401 phy_ethtool_set_wol(netdev->phydev, wol);
1403 usb_autopm_put_interface(dev->intf);
/* ethtool .get_eee: combine PHY EEE state with the MAC's EEE enable bit.
 * MAC_CR_EEE_EN_ decides whether EEE is considered enabled at all.
 */
1408 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1410 struct lan78xx_net *dev = netdev_priv(net);
1411 struct phy_device *phydev = net->phydev;
1415 ret = usb_autopm_get_interface(dev->intf);
1419 ret = phy_ethtool_get_eee(phydev, edata);
1423 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1424 if (buf & MAC_CR_EEE_EN_) {
1425 edata->eee_enabled = true;
/* EEE is active only if both link partners advertise it */
1426 edata->eee_active = !!(edata->advertised &
1427 edata->lp_advertised);
1428 edata->tx_lpi_enabled = true;
1429 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1430 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1431 edata->tx_lpi_timer = buf;
1433 edata->eee_enabled = false;
1434 edata->eee_active = false;
1435 edata->tx_lpi_enabled = false;
1436 edata->tx_lpi_timer = 0;
1441 usb_autopm_put_interface(dev->intf);
/* ethtool .set_eee: enable/disable EEE in MAC_CR, program the LPI request
 * delay, and forward advertisement changes to the PHY.
 */
1446 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1448 struct lan78xx_net *dev = netdev_priv(net);
1452 ret = usb_autopm_get_interface(dev->intf);
1456 if (edata->eee_enabled) {
1457 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1458 buf |= MAC_CR_EEE_EN_;
1459 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1461 phy_ethtool_set_eee(net->phydev, edata);
1463 buf = (u32)edata->tx_lpi_timer;
1464 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1466 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1467 buf &= ~MAC_CR_EEE_EN_;
1468 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1471 usb_autopm_put_interface(dev->intf);
/* ethtool .get_link: force a fresh PHY status read, then report link state.
 * NOTE(review): the phy_read_status() return value is ignored here.
 */
1476 static u32 lan78xx_get_link(struct net_device *net)
1478 phy_read_status(net->phydev);
1480 return net->phydev->link;
1483 static void lan78xx_get_drvinfo(struct net_device *net,
1484 struct ethtool_drvinfo *info)
1486 struct lan78xx_net *dev = netdev_priv(net);
1488 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1489 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
/* ethtool .get_msglevel / .set_msglevel: netif message-level accessors. */
1492 static u32 lan78xx_get_msglevel(struct net_device *net)
1494 struct lan78xx_net *dev = netdev_priv(net);
1496 return dev->msg_enable;
1499 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1501 struct lan78xx_net *dev = netdev_priv(net);
1503 dev->msg_enable = level;
/* ethtool .get_link_ksettings: delegate to phylib under autopm reference. */
1506 static int lan78xx_get_link_ksettings(struct net_device *net,
1507 struct ethtool_link_ksettings *cmd)
1509 struct lan78xx_net *dev = netdev_priv(net);
1510 struct phy_device *phydev = net->phydev;
1513 ret = usb_autopm_get_interface(dev->intf);
1517 phy_ethtool_ksettings_get(phydev, cmd);
1519 usb_autopm_put_interface(dev->intf);
/* ethtool .set_link_ksettings: apply speed/duplex via phylib; when autoneg
 * is off, bounce the link so the peer renegotiates the forced mode.
 */
1524 static int lan78xx_set_link_ksettings(struct net_device *net,
1525 const struct ethtool_link_ksettings *cmd)
1527 struct lan78xx_net *dev = netdev_priv(net);
1528 struct phy_device *phydev = net->phydev;
1532 ret = usb_autopm_get_interface(dev->intf);
1536 /* change speed & duplex */
1537 ret = phy_ethtool_ksettings_set(phydev, cmd);
1539 if (!cmd->base.autoneg) {
1540 /* force link down */
/* loopback mode drops the link without clearing the forced BMCR bits */
1541 temp = phy_read(phydev, MII_BMCR);
1542 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1544 phy_write(phydev, MII_BMCR, temp);
1547 usb_autopm_put_interface(dev->intf);
/* ethtool .get_pauseparam: report flow-control autoneg state and the
 * driver's requested RX/TX pause directions.
 */
1552 static void lan78xx_get_pause(struct net_device *net,
1553 struct ethtool_pauseparam *pause)
1555 struct lan78xx_net *dev = netdev_priv(net);
1556 struct phy_device *phydev = net->phydev;
1557 struct ethtool_link_ksettings ecmd;
1559 phy_ethtool_ksettings_get(phydev, &ecmd);
1561 pause->autoneg = dev->fc_autoneg;
1563 if (dev->fc_request_control & FLOW_CTRL_TX)
1564 pause->tx_pause = 1;
1566 if (dev->fc_request_control & FLOW_CTRL_RX)
1567 pause->rx_pause = 1;
/* ethtool .set_pauseparam: record requested flow control and, when link
 * autoneg is on, rewrite the Pause/Asym_Pause advertisement bits.
 */
1570 static int lan78xx_set_pause(struct net_device *net,
1571 struct ethtool_pauseparam *pause)
1573 struct lan78xx_net *dev = netdev_priv(net);
1574 struct phy_device *phydev = net->phydev;
1575 struct ethtool_link_ksettings ecmd;
1578 phy_ethtool_ksettings_get(phydev, &ecmd);
/* pause autoneg requires link autoneg to be enabled */
1580 if (pause->autoneg && !ecmd.base.autoneg) {
1585 dev->fc_request_control = 0;
1586 if (pause->rx_pause)
1587 dev->fc_request_control |= FLOW_CTRL_RX;
1589 if (pause->tx_pause)
1590 dev->fc_request_control |= FLOW_CTRL_TX;
1592 if (ecmd.base.autoneg) {
1593 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
/* clear old pause advertisement, then OR in the freshly computed bits */
1596 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1597 ecmd.link_modes.advertising);
1598 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1599 ecmd.link_modes.advertising);
1600 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1601 mii_adv_to_linkmode_adv_t(fc, mii_adv);
1602 linkmode_or(ecmd.link_modes.advertising, fc,
1603 ecmd.link_modes.advertising);
1605 phy_ethtool_ksettings_set(phydev, &ecmd);
1608 dev->fc_autoneg = pause->autoneg;
/* ethtool .get_regs_len: MAC register dump size, plus the 32 PHY registers
 * (PHY_REG_SIZE) when a PHY is attached.
 */
1615 static int lan78xx_get_regs_len(struct net_device *netdev)
1617 if (!netdev->phydev)
1618 return (sizeof(lan78xx_regs));
1620 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
/* ethtool .get_regs: dump MAC registers, then the PHY registers 0..31
 * (skipped when no PHY is attached, matching get_regs_len above).
 */
1624 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1629 struct lan78xx_net *dev = netdev_priv(netdev);
1631 /* Read Device/MAC registers */
1632 for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1633 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1635 if (!netdev->phydev)
1638 /* Read PHY registers */
1639 for (j = 0; j < 32; i++, j++)
1640 data[i] = phy_read(netdev->phydev, j);
/* ethtool operations table wired into the net_device at registration. */
1643 static const struct ethtool_ops lan78xx_ethtool_ops = {
1644 .get_link = lan78xx_get_link,
1645 .nway_reset = phy_ethtool_nway_reset,
1646 .get_drvinfo = lan78xx_get_drvinfo,
1647 .get_msglevel = lan78xx_get_msglevel,
1648 .set_msglevel = lan78xx_set_msglevel,
1649 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1650 .get_eeprom = lan78xx_ethtool_get_eeprom,
1651 .set_eeprom = lan78xx_ethtool_set_eeprom,
1652 .get_ethtool_stats = lan78xx_get_stats,
1653 .get_sset_count = lan78xx_get_sset_count,
1654 .get_strings = lan78xx_get_strings,
1655 .get_wol = lan78xx_get_wol,
1656 .set_wol = lan78xx_set_wol,
1657 .get_eee = lan78xx_get_eee,
1658 .set_eee = lan78xx_set_eee,
1659 .get_pauseparam = lan78xx_get_pause,
1660 .set_pauseparam = lan78xx_set_pause,
1661 .get_link_ksettings = lan78xx_get_link_ksettings,
1662 .set_link_ksettings = lan78xx_set_link_ksettings,
1663 .get_regs_len = lan78xx_get_regs_len,
1664 .get_regs = lan78xx_get_regs,
/* ndo_do_ioctl: forward MII ioctls to phylib; only valid while running. */
1667 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1669 if (!netif_running(netdev))
1672 return phy_mii_ioctl(netdev->phydev, rq, cmd);
/* Determine and program the MAC address. Priority order:
 *   1) address already latched in RX_ADDRL/RX_ADDRH (set by bootloader),
 *   2) platform/Device Tree supplied address,
 *   3) EEPROM or OTP at EEPROM_MAC_OFFSET,
 *   4) a randomly generated address as last resort.
 * The chosen address is written back to the RX address and MAC filter
 * registers and copied into the net_device.
 */
1675 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1677 u32 addr_lo, addr_hi;
1681 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1682 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
/* unpack little-endian register pair into the 6-byte address */
1684 addr[0] = addr_lo & 0xFF;
1685 addr[1] = (addr_lo >> 8) & 0xFF;
1686 addr[2] = (addr_lo >> 16) & 0xFF;
1687 addr[3] = (addr_lo >> 24) & 0xFF;
1688 addr[4] = addr_hi & 0xFF;
1689 addr[5] = (addr_hi >> 8) & 0xFF;
1691 if (!is_valid_ether_addr(addr)) {
1692 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1693 /* valid address present in Device Tree */
1694 netif_dbg(dev, ifup, dev->net,
1695 "MAC address read from Device Tree");
1696 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1697 ETH_ALEN, addr) == 0) ||
1698 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1699 ETH_ALEN, addr) == 0)) &&
1700 is_valid_ether_addr(addr)) {
1701 /* eeprom values are valid so use them */
1702 netif_dbg(dev, ifup, dev->net,
1703 "MAC address read from EEPROM");
1705 /* generate random MAC */
1706 eth_random_addr(addr);
1707 netif_dbg(dev, ifup, dev->net,
1708 "MAC address set to random addr");
/* repack and program the perfect-filter slot 0 as well */
1711 addr_lo = addr[0] | (addr[1] << 8) |
1712 (addr[2] << 16) | (addr[3] << 24);
1713 addr_hi = addr[4] | (addr[5] << 8);
1715 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1716 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1719 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1720 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1722 ether_addr_copy(dev->net->dev_addr, addr);
1725 /* MDIO read and write wrappers for phylib */
/* Read one PHY register through the MII_ACC/MII_DATA indirection,
 * serialized by phy_mutex and holding a USB autopm reference.
 */
1726 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1728 struct lan78xx_net *dev = bus->priv;
1732 ret = usb_autopm_get_interface(dev->intf);
1736 mutex_lock(&dev->phy_mutex);
1738 /* confirm MII not busy */
1739 ret = lan78xx_phy_wait_not_busy(dev);
1743 /* set the address, index & direction (read from PHY) */
1744 addr = mii_access(phy_id, idx, MII_READ);
1745 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1747 ret = lan78xx_phy_wait_not_busy(dev);
1751 ret = lan78xx_read_reg(dev, MII_DATA, &val);
1753 ret = (int)(val & 0xFFFF);
1756 mutex_unlock(&dev->phy_mutex);
1757 usb_autopm_put_interface(dev->intf);
/* Write one PHY register: load MII_DATA first, then trigger via MII_ACC. */
1762 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1765 struct lan78xx_net *dev = bus->priv;
1769 ret = usb_autopm_get_interface(dev->intf);
1773 mutex_lock(&dev->phy_mutex);
1775 /* confirm MII not busy */
1776 ret = lan78xx_phy_wait_not_busy(dev);
1781 ret = lan78xx_write_reg(dev, MII_DATA, val);
1783 /* set the address, index & direction (write to PHY) */
1784 addr = mii_access(phy_id, idx, MII_WRITE);
1785 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1787 ret = lan78xx_phy_wait_not_busy(dev);
1792 mutex_unlock(&dev->phy_mutex);
1793 usb_autopm_put_interface(dev->intf);
/* Allocate and register the MDIO bus. LAN7800/7850 probe only the internal
 * PHY at address 1; LAN7801 scans external PHY addresses 0..7.
 */
1797 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1799 struct device_node *node;
1802 dev->mdiobus = mdiobus_alloc();
1803 if (!dev->mdiobus) {
1804 netdev_err(dev->net, "can't allocate MDIO bus\n");
1808 dev->mdiobus->priv = (void *)dev;
1809 dev->mdiobus->read = lan78xx_mdiobus_read;
1810 dev->mdiobus->write = lan78xx_mdiobus_write;
1811 dev->mdiobus->name = "lan78xx-mdiobus";
/* bus id is derived from the USB topology so it is unique per device */
1813 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1814 dev->udev->bus->busnum, dev->udev->devnum);
1816 switch (dev->chipid) {
1817 case ID_REV_CHIP_ID_7800_:
1818 case ID_REV_CHIP_ID_7850_:
1819 /* set to internal PHY id */
1820 dev->mdiobus->phy_mask = ~(1 << 1);
1822 case ID_REV_CHIP_ID_7801_:
1823 /* scan thru PHYAD[2..0] */
1824 dev->mdiobus->phy_mask = ~(0xFF);
1828 node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1829 ret = of_mdiobus_register(dev->mdiobus, node);
1832 netdev_err(dev->net, "can't register MDIO bus\n");
1836 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
/* error path: free the bus allocated above */
1839 mdiobus_free(dev->mdiobus);
/* Tear down the MDIO bus registered by lan78xx_mdio_init(). */
1843 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1845 mdiobus_unregister(dev->mdiobus);
1846 mdiobus_free(dev->mdiobus);
/* phylib link-change callback. Contains a hardware workaround for forced
 * 100 Mb/s mode: the chip can latch the wrong speed after cable swaps, so
 * the speed is first forced to 10 and then back to 100, with the PHY
 * interrupt masked across the sequence to suppress spurious events.
 */
1849 static void lan78xx_link_status_change(struct net_device *net)
1851 struct phy_device *phydev = net->phydev;
1854 /* At forced 100 F/H mode, chip may fail to set mode correctly
1855 * when cable is switched between long(~50+m) and short one.
1856 * As workaround, set to 10 before setting to 100
1857 * at forced 100 F/H mode.
1859 if (!phydev->autoneg && (phydev->speed == 100)) {
1860 /* disable phy interrupt */
1861 temp = phy_read(phydev, LAN88XX_INT_MASK);
1862 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1863 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1865 temp = phy_read(phydev, MII_BMCR);
1866 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1867 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1868 temp |= BMCR_SPEED100;
1869 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1871 /* clear pending interrupt generated while workaround */
1872 temp = phy_read(phydev, LAN88XX_INT_STS);
1874 /* enable phy interrupt back */
1875 temp = phy_read(phydev, LAN88XX_INT_MASK);
1876 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1877 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
/* irq_domain .map: attach our irqchip and handler to a newly mapped irq. */
1881 static int irq_map(struct irq_domain *d, unsigned int irq,
1882 irq_hw_number_t hwirq)
1884 struct irq_domain_data *data = d->host_data;
1886 irq_set_chip_data(irq, data);
1887 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1888 irq_set_noprobe(irq);
/* irq_domain .unmap: detach chip and data from the irq. */
1893 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1895 irq_set_chip_and_handler(irq, NULL, NULL);
1896 irq_set_chip_data(irq, NULL);
1899 static const struct irq_domain_ops chip_domain_ops = {
/* irqchip mask/unmask only update the cached enable bitmap; the hardware
 * register is written later in irq_bus_sync_unlock (sleepable context),
 * because USB register access cannot happen in atomic context.
 */
1904 static void lan78xx_irq_mask(struct irq_data *irqd)
1906 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1908 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1911 static void lan78xx_irq_unmask(struct irq_data *irqd)
1913 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1915 data->irqenable |= BIT(irqd_to_hwirq(irqd));
1918 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1920 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1922 mutex_lock(&data->irq_lock);
1925 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1927 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1928 struct lan78xx_net *dev =
1929 container_of(data, struct lan78xx_net, domain_data);
1933 /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1934 * are only two callbacks executed in non-atomic contex.
1936 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1937 if (buf != data->irqenable)
1938 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1940 mutex_unlock(&data->irq_lock);
1943 static struct irq_chip lan78xx_irqchip = {
1944 .name = "lan78xx-irqs",
1945 .irq_mask = lan78xx_irq_mask,
1946 .irq_unmask = lan78xx_irq_unmask,
1947 .irq_bus_lock = lan78xx_irq_bus_lock,
1948 .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
/* Create a simple irq domain over the device's interrupt endpoint bits and
 * map the PHY interrupt (INT_EP_PHY) so phylib can request it.
 */
1951 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1953 struct device_node *of_node;
1954 struct irq_domain *irqdomain;
1955 unsigned int irqmap = 0;
1959 of_node = dev->udev->dev.parent->of_node;
1961 mutex_init(&dev->domain_data.irq_lock);
/* seed the cached enable bitmap from the current hardware state */
1963 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1964 dev->domain_data.irqenable = buf;
1966 dev->domain_data.irqchip = &lan78xx_irqchip;
1967 dev->domain_data.irq_handler = handle_simple_irq;
1969 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1970 &chip_domain_ops, &dev->domain_data);
1972 /* create mapping for PHY interrupt */
1973 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
/* error path: mapping failed, drop the domain again */
1975 irq_domain_remove(irqdomain);
1984 dev->domain_data.irqdomain = irqdomain;
1985 dev->domain_data.phyirq = irqmap;
/* Dispose of the PHY irq mapping and the domain created above. */
1990 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1992 if (dev->domain_data.phyirq > 0) {
1993 irq_dispose_mapping(dev->domain_data.phyirq);
1995 if (dev->domain_data.irqdomain)
1996 irq_domain_remove(dev->domain_data.irqdomain);
1998 dev->domain_data.phyirq = 0;
1999 dev->domain_data.irqdomain = NULL;
/* PHY fixup for external LAN8835 (LAN7801 designs): route the shared pin
 * to IRQ_N mode and enable the MAC-side RGMII TX clock delay.
 */
2002 static int lan8835_fixup(struct phy_device *phydev)
2006 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2008 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2009 buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2012 phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2014 /* RGMII MAC TXC Delay Enable */
2015 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2016 MAC_RGMII_ID_TXC_DELAY_EN_);
2018 /* RGMII TX DLL Tune Adjust */
2019 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2021 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
/* PHY fixup for external KSZ9031RNX: program RGMII pad-skew registers so
 * the RX clock delay is provided by the PHY (RGMII_RXID mode).
 */
2026 static int ksz9031rnx_fixup(struct phy_device *phydev)
2028 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2030 /* Micrel9301RNX PHY configuration */
2031 /* RGMII Control Signal Pad Skew */
2032 phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2033 /* RGMII RX Data Pad Skew */
2034 phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2035 /* RGMII RX Clock Pad Skew */
2036 phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2038 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
/* LAN7801-specific PHY discovery. If no PHY answers on the MDIO bus, fall
 * back to a fixed 1000/full link (e.g. PHY-less RGMII-to-switch designs);
 * otherwise register the KSZ9031RNX/LAN8835 fixups for the external PHY.
 * Returns the phy_device to attach, or NULL-ish error per elided paths.
 */
2043 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2047 struct fixed_phy_status fphy_status = {
2049 .speed = SPEED_1000,
2050 .duplex = DUPLEX_FULL,
2052 struct phy_device *phydev;
2054 phydev = phy_find_first(dev->mdiobus);
2056 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2057 phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2058 if (IS_ERR(phydev)) {
2059 netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2062 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2063 dev->interface = PHY_INTERFACE_MODE_RGMII;
/* fixed-link path: MAC provides the TX clock delay and reference clocks */
2064 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2065 MAC_RGMII_ID_TXC_DELAY_EN_);
2066 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2067 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2068 buf |= HW_CFG_CLK125_EN_;
2069 buf |= HW_CFG_REFCLK25_EN_;
2070 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2073 netdev_err(dev->net, "no PHY driver found\n");
2076 dev->interface = PHY_INTERFACE_MODE_RGMII;
2077 /* external PHY fixup for KSZ9031RNX */
2078 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2081 netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2084 /* external PHY fixup for LAN8835 */
2085 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2088 netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2091 /* add more external PHY fixup here if needed */
2093 phydev->is_internal = false;
/* Find and attach the PHY for the current chip, connect it to phylib with
 * our link-change handler, prune unsupported 1000T-Half, program pause
 * advertisement, optionally enable DT-described LEDs, and start autoneg.
 */
2098 static int lan78xx_phy_init(struct lan78xx_net *dev)
2100 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2103 struct phy_device *phydev;
2105 switch (dev->chipid) {
2106 case ID_REV_CHIP_ID_7801_:
2107 phydev = lan7801_phy_init(dev);
2109 netdev_err(dev->net, "lan7801: PHY Init Failed");
2114 case ID_REV_CHIP_ID_7800_:
2115 case ID_REV_CHIP_ID_7850_:
2116 phydev = phy_find_first(dev->mdiobus);
2118 netdev_err(dev->net, "no PHY found\n");
2121 phydev->is_internal = true;
2122 dev->interface = PHY_INTERFACE_MODE_GMII;
2126 netdev_err(dev->net, "Unknown CHIP ID found\n");
2130 /* if phyirq is not set, use polling mode in phylib */
2131 if (dev->domain_data.phyirq > 0)
2132 phydev->irq = dev->domain_data.phyirq;
2135 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2137 /* set to AUTOMDIX */
2138 phydev->mdix = ETH_TP_MDI_AUTO;
2140 ret = phy_connect_direct(dev->net, phydev,
2141 lan78xx_link_status_change,
2144 netdev_err(dev->net, "can't attach PHY to %s\n",
/* attach-failure cleanup for LAN7801: undo fixed PHY and fixups */
2146 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2147 if (phy_is_pseudo_fixed_link(phydev)) {
2148 fixed_phy_unregister(phydev);
2150 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2152 phy_unregister_fixup_for_uid(PHY_LAN8835,
2159 /* MAC doesn't support 1000T Half */
2160 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2162 /* support both flow controls */
2163 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2164 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2165 phydev->advertising);
2166 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2167 phydev->advertising);
2168 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2169 mii_adv_to_linkmode_adv_t(fc, mii_adv);
2170 linkmode_or(phydev->advertising, fc, phydev->advertising);
2172 if (phydev->mdio.dev.of_node) {
/* "microchip,led-modes" length selects how many LED outputs to enable */
2176 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2177 "microchip,led-modes",
2180 /* Ensure the appropriate LEDs are enabled */
2181 lan78xx_read_reg(dev, HW_CFG, &reg);
2182 reg &= ~(HW_CFG_LED0_EN_ |
2186 reg |= (len > 0) * HW_CFG_LED0_EN_ |
2187 (len > 1) * HW_CFG_LED1_EN_ |
2188 (len > 2) * HW_CFG_LED2_EN_ |
2189 (len > 3) * HW_CFG_LED3_EN_;
2190 lan78xx_write_reg(dev, HW_CFG, reg);
2194 genphy_config_aneg(phydev);
2196 dev->fc_autoneg = phydev->autoneg;
/* Program MAC_RX max frame size (adding 4 bytes for the FCS), temporarily
 * disabling the receiver if it was running so the field updates safely.
 */
2201 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2207 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2209 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2212 buf &= ~MAC_RX_RXEN_;
2213 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2216 /* add 4 to size for FCS */
2217 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2218 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2220 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* re-enable the receiver only if it was enabled on entry */
2223 buf |= MAC_RX_RXEN_;
2224 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* Asynchronously unlink every URB queued on @q. The queue lock is dropped
 * around usb_unlink_urb() because completion handlers take the same lock;
 * a reference is held on each URB across that window (see comment below).
 * Returns the number of URBs unlinked.
 */
2230 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2232 struct sk_buff *skb;
2233 unsigned long flags;
2236 spin_lock_irqsave(&q->lock, flags);
2237 while (!skb_queue_empty(q)) {
2238 struct skb_data *entry;
/* find the next entry not already being unlinked */
2242 skb_queue_walk(q, skb) {
2243 entry = (struct skb_data *)skb->cb;
2244 if (entry->state != unlink_start)
2249 entry->state = unlink_start;
2252 /* Get reference count of the URB to avoid it to be
2253 * freed during usb_unlink_urb, which may trigger
2254 * use-after-free problem inside usb_unlink_urb since
2255 * usb_unlink_urb is always racing with .complete
2256 * handler(include defer_bh).
2259 spin_unlock_irqrestore(&q->lock, flags);
2260 /* during some PM-driven resume scenarios,
2261 * these (async) unlinks complete immediately
2263 ret = usb_unlink_urb(urb);
2264 if (ret != -EINPROGRESS && ret != 0)
2265 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2269 spin_lock_irqsave(&q->lock, flags);
2271 spin_unlock_irqrestore(&q->lock, flags);
/* ndo_change_mtu: update the hardware max frame length and, if the RX URB
 * size was tracking hard_mtu, grow it and recycle in-flight RX URBs.
 */
2275 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2277 struct lan78xx_net *dev = netdev_priv(netdev);
2278 int ll_mtu = new_mtu + netdev->hard_header_len;
2279 int old_hard_mtu = dev->hard_mtu;
2280 int old_rx_urb_size = dev->rx_urb_size;
2283 /* no second zero-length packet read wanted after mtu-sized packets */
2284 if ((ll_mtu % dev->maxpacket) == 0)
2287 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2289 netdev->mtu = new_mtu;
2291 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2292 if (dev->rx_urb_size == old_hard_mtu) {
2293 dev->rx_urb_size = dev->hard_mtu;
2294 if (dev->rx_urb_size > old_rx_urb_size) {
/* resubmit RX URBs at the new size via the bottom half */
2295 if (netif_running(dev->net)) {
2296 unlink_urbs(dev, &dev->rxq);
2297 tasklet_schedule(&dev->bh);
/* ndo_set_mac_address: validate and program a new MAC address into the RX
 * address registers and perfect-filter slot 0. Rejected while the
 * interface is running (see elided early-return at the netif_running check).
 */
2305 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2307 struct lan78xx_net *dev = netdev_priv(netdev);
2308 struct sockaddr *addr = p;
2309 u32 addr_lo, addr_hi;
2312 if (netif_running(netdev))
2315 if (!is_valid_ether_addr(addr->sa_data))
2316 return -EADDRNOTAVAIL;
2318 ether_addr_copy(netdev->dev_addr, addr->sa_data);
/* pack the 6 bytes into the little-endian register pair */
2320 addr_lo = netdev->dev_addr[0] |
2321 netdev->dev_addr[1] << 8 |
2322 netdev->dev_addr[2] << 16 |
2323 netdev->dev_addr[3] << 24;
2324 addr_hi = netdev->dev_addr[4] |
2325 netdev->dev_addr[5] << 8;
2327 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2328 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2330 /* Added to support MAC address changes */
2331 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2332 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2337 /* Enable or disable Rx checksum offload engine */
/* ndo_set_features: translate NETIF_F_* flags into RFE_CTL bits (RX
 * checksum offload, VLAN tag stripping, VLAN filtering). The cached
 * rfe_ctl is updated under the spinlock; the register write happens after.
 */
2338 static int lan78xx_set_features(struct net_device *netdev,
2339 netdev_features_t features)
2341 struct lan78xx_net *dev = netdev_priv(netdev);
2342 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2343 unsigned long flags;
2346 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2348 if (features & NETIF_F_RXCSUM) {
2349 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2350 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2352 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2353 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2356 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2357 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2359 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2361 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2362 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2364 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2366 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2368 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* Workqueue body: push the shadow VLAN table to the device's VLAN dataport.
 * Runs in process context because the USB register access can sleep.
 */
2373 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2375 struct lan78xx_priv *pdata =
2376 container_of(param, struct lan78xx_priv, set_vlan);
2377 struct lan78xx_net *dev = pdata->dev;
2379 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2380 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
/* ndo_vlan_rx_add_vid: set the bit for @vid in the shadow table (one bit
 * per VID, 32 VIDs per u32 word) and defer the hardware write.
 */
2383 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2384 __be16 proto, u16 vid)
2386 struct lan78xx_net *dev = netdev_priv(netdev);
2387 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2389 u16 vid_dword_index;
2391 vid_dword_index = (vid >> 5) & 0x7F;
2392 vid_bit_index = vid & 0x1F;
2394 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2396 /* defer register writes to a sleepable context */
2397 schedule_work(&pdata->set_vlan);
/* ndo_vlan_rx_kill_vid: clear the bit for @vid, then defer the write. */
2402 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2403 __be16 proto, u16 vid)
2405 struct lan78xx_net *dev = netdev_priv(netdev);
2406 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2408 u16 vid_dword_index;
2410 vid_dword_index = (vid >> 5) & 0x7F;
2411 vid_bit_index = vid & 0x1F;
2413 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2415 /* defer register writes to a sleepable context */
2416 schedule_work(&pdata->set_vlan);
/* Initialize USB Latency Tolerance Messaging (LTM) registers. If LTM is
 * enabled in USB_CFG1, values are loaded from EEPROM/OTP when a valid
 * 24-byte table is present; otherwise the zeroed defaults are written.
 */
2421 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2425 u32 regs[6] = { 0 };
2427 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2428 if (buf & USB_CFG1_LTM_ENABLE_) {
2430 /* Get values from EEPROM first */
2431 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
/* temp[0] == 24: a full 6-register (24-byte) LTM table is stored */
2432 if (temp[0] == 24) {
2433 ret = lan78xx_read_raw_eeprom(dev,
2440 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2441 if (temp[0] == 24) {
2442 ret = lan78xx_read_raw_otp(dev,
2452 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2453 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2454 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2455 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2456 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2457 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
/* Full device bring-up: lite reset, MAC address setup, USB/burst/FIFO
 * configuration, flow control defaults, PHY reset, and finally enabling
 * the MAC and FIFO TX/RX paths. Called from probe/reset paths.
 */
2460 static int lan78xx_reset(struct lan78xx_net *dev)
2462 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2465 unsigned long timeout;
/* issue a LiteReset and poll (up to 1s) for the bit to self-clear */
2468 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2469 buf |= HW_CFG_LRST_;
2470 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2472 timeout = jiffies + HZ;
2475 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2476 if (time_after(jiffies, timeout)) {
2477 netdev_warn(dev->net,
2478 "timeout on completion of LiteReset");
2481 } while (buf & HW_CFG_LRST_);
2483 lan78xx_init_mac_address(dev);
2485 /* save DEVID for later usage */
2486 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2487 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2488 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2490 /* Respond to the IN token with a NAK */
2491 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2492 buf |= USB_CFG_BIR_;
2493 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2496 lan78xx_init_ltm(dev);
/* pick burst cap and queue lengths per USB link speed */
2498 if (dev->udev->speed == USB_SPEED_SUPER) {
2499 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2500 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2503 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2504 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2505 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2506 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2507 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2509 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2510 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2515 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2516 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2518 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2520 ret = lan78xx_write_reg(dev, HW_CFG, buf);
/* enable burst cap so multiple frames share one bulk-in transfer */
2522 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2523 buf |= USB_CFG_BCE_;
2524 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2526 /* set FIFO sizes */
2527 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2528 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2530 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2531 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2533 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2534 ret = lan78xx_write_reg(dev, FLOW, 0);
2535 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2537 /* Don't need rfe_ctl_lock during initialisation */
2538 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2539 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2540 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2542 /* Enable or disable checksum offload engines */
2543 lan78xx_set_features(dev->net, dev->net->features);
2545 lan78xx_set_multicast(dev->net);
/* reset the PHY and poll (up to 1s) for reset completion + device ready */
2548 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2549 buf |= PMT_CTL_PHY_RST_;
2550 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2552 timeout = jiffies + HZ;
2555 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2556 if (time_after(jiffies, timeout)) {
2557 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2560 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2562 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2563 /* LAN7801 only has RGMII mode */
2564 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2565 buf &= ~MAC_CR_GMII_EN_;
2567 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2568 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2569 if (!ret && sig != EEPROM_INDICATOR) {
2570 /* Implies there is no external eeprom. Set mac speed */
2571 netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2572 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2575 ret = lan78xx_write_reg(dev, MAC_CR, buf);
/* enable MAC TX, then the TX FIFO controller */
2577 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2578 buf |= MAC_TX_TXEN_;
2579 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2581 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2582 buf |= FCT_TX_CTL_EN_;
2583 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2585 ret = lan78xx_set_rx_max_frame_length(dev,
2586 dev->net->mtu + VLAN_ETH_HLEN);
/* enable MAC RX, then the RX FIFO controller */
2588 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2589 buf |= MAC_RX_RXEN_;
2590 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2592 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2593 buf |= FCT_RX_CTL_EN_;
2594 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
/* Initialize the statistics rollover thresholds. Most hardware counters
 * are 20-bit (threshold set in the elided loop body); byte counters and
 * EEE LPI counters are full 32-bit, so their max is overridden here.
 */
2599 static void lan78xx_init_stats(struct lan78xx_net *dev)
2604 /* initialize for stats update
2605 * some counters are 20bits and some are 32bits
2607 p = (u32 *)&dev->stats.rollover_max;
2608 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2611 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2612 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2613 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2614 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2615 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2616 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2617 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2618 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2619 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2620 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2622 set_bit(EVENT_STAT_UPDATE, &dev->flags);
/* ndo_open: take an autopm reference, start the PHY, submit the interrupt
 * URB for link events, init stats, and kick a deferred link reset.
 */
2625 static int lan78xx_open(struct net_device *net)
2627 struct lan78xx_net *dev = netdev_priv(net);
2630 ret = usb_autopm_get_interface(dev->intf);
2634 phy_start(net->phydev);
2636 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2638 /* for Link Check */
2639 if (dev->urb_intr) {
2640 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2642 netif_err(dev, ifup, dev->net,
2643 "intr submit %d\n", ret);
2648 lan78xx_init_stats(dev);
2650 set_bit(EVENT_DEV_OPEN, &dev->flags);
2652 netif_start_queue(net);
/* link state unknown until the deferred reset reports it */
2654 dev->link_on = false;
2656 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2658 usb_autopm_put_interface(dev->intf);
/* Unlink all in-flight RX/TX URBs and wait for their completions to
 * drain.  dev->wait is the handshake with the completion path: while it
 * is non-NULL the completion handlers wake this waiter.
 */
2664 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2666 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2667 DECLARE_WAITQUEUE(wait, current);
2670 /* ensure there are no more active urbs */
2671 add_wait_queue(&unlink_wakeup, &wait);
2672 set_current_state(TASK_UNINTERRUPTIBLE);
2673 dev->wait = &unlink_wakeup;
2674 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2676 /* maybe wait for deletions to finish. */
/* NOTE(review): the '&&' means we stop waiting as soon as ANY one of
 * the three queues empties, not when all three are empty.  This mirrors
 * usbnet_terminate_urbs(), but confirm it is intended and not '||'.
 */
2677 while (!skb_queue_empty(&dev->rxq) &&
2678 !skb_queue_empty(&dev->txq) &&
2679 !skb_queue_empty(&dev->done)) {
2680 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2681 set_current_state(TASK_UNINTERRUPTIBLE);
2682 netif_dbg(dev, ifdown, dev->net,
2683 "waited for %d urb completions\n", temp);
2685 set_current_state(TASK_RUNNING);
/* NOTE(review): the line clearing dev->wait back to NULL appears to be
 * elided from this extract — verify against the full source.
 */
2687 remove_wait_queue(&unlink_wakeup, &wait);
/* ndo_stop: tear down the interface.
 * Stops the stats timer and PHY, drains all URBs, kills the interrupt
 * URB, and quiesces deferred work so workers become no-ops before the
 * final PM reference is dropped.
 */
2690 static int lan78xx_stop(struct net_device *net)
2692 struct lan78xx_net *dev = netdev_priv(net);
2694 if (timer_pending(&dev->stat_monitor))
2695 del_timer_sync(&dev->stat_monitor);
2698 phy_stop(net->phydev);
2700 clear_bit(EVENT_DEV_OPEN, &dev->flags)
2701 netif_stop_queue(net);
2703 netif_info(dev, ifdown, dev->net,
2704 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2705 net->stats.rx_packets, net->stats.tx_packets,
2706 net->stats.rx_errors, net->stats.tx_errors);
2708 lan78xx_terminate_urbs(dev);
2710 usb_kill_urb(dev->urb_intr);
2712 skb_queue_purge(&dev->rxq_pause);
2714 /* deferred work (task, timer, softirq) must also stop.
2715 * can't flush_scheduled_work() until we drop rtnl (later),
2716 * else workers could deadlock; so make workers a NOP.
2719 cancel_delayed_work_sync(&dev->wq);
2720 tasklet_kill(&dev->bh);
2722 usb_autopm_put_interface(dev->intf);
/* Collapse a fragmented skb into a single linear buffer so the TX
 * command words and payload can be DMA'd as one contiguous chunk.
 * Thin, intention-revealing wrapper around skb_linearize().
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
/* Prepend the two 32-bit TX command words (TX_CMD_A / TX_CMD_B) that
 * the LAN78xx expects in front of every frame: length + FCS request,
 * checksum-offload flags, LSO/MSS, and an inserted VLAN tag.
 * Frees the skb and returns NULL on failure.
 * NOTE(review): several lines (the non-GSO 'tx_cmd_b = 0' arm, the
 * skb_push() calls before each memcpy, and the returns) are elided in
 * this extract; code lines are kept byte-identical.
 */
2732 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2733 struct sk_buff *skb, gfp_t flags)
2735 u32 tx_cmd_a, tx_cmd_b;
/* make room for the 8-byte command header in a private, linear head */
2737 if (skb_cow_head(skb, TX_OVERHEAD)) {
2738 dev_kfree_skb_any(skb);
2742 if (lan78xx_linearize(skb) < 0)
2745 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2747 if (skb->ip_summed == CHECKSUM_PARTIAL)
2748 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2751 if (skb_is_gso(skb)) {
/* hardware requires a floor on the advertised MSS */
2752 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2754 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2756 tx_cmd_a |= TX_CMD_A_LSO_;
2759 if (skb_vlan_tag_present(skb)) {
2760 tx_cmd_a |= TX_CMD_A_IVTG_;
2761 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
/* command words go on the wire little-endian, B word pushed first so
 * A ends up in front
 */
2765 cpu_to_le32s(&tx_cmd_b);
2766 memcpy(skb->data, &tx_cmd_b, 4);
2769 cpu_to_le32s(&tx_cmd_a);
2770 memcpy(skb->data, &tx_cmd_a, 4);
/* Move an skb from its current queue (rxq/txq) onto dev->done with its
 * state updated, and schedule the tasklet if done was empty.  Returns
 * the skb's previous state so callers can detect an in-flight unlink.
 * Locking is deliberately hand-over-hand: interrupts stay disabled
 * across both locks — irqsave on the source queue, plain lock/unlock in
 * the middle, irqrestore only after the done-queue lock is dropped.
 */
2775 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2776 struct sk_buff_head *list, enum skb_state state)
2778 unsigned long flags;
2779 enum skb_state old_state;
2780 struct skb_data *entry = (struct skb_data *)skb->cb;
2782 spin_lock_irqsave(&list->lock, flags);
2783 old_state = entry->state;
2784 entry->state = state;
2786 __skb_unlink(skb, list);
2787 spin_unlock(&list->lock);
2788 spin_lock(&dev->done.lock);
2790 __skb_queue_tail(&dev->done, skb);
/* only the first enqueue needs to kick the bottom half */
2791 if (skb_queue_len(&dev->done) == 1)
2792 tasklet_schedule(&dev->bh);
2793 spin_unlock_irqrestore(&dev->done.lock, flags);
/* URB completion handler for bulk-out transfers (interrupt context).
 * Accounts stats, triggers EVENT_TX_HALT recovery on a stalled pipe,
 * releases the async PM reference taken at submit time, and hands the
 * skb to the bottom half via defer_bh().
 * NOTE(review): the case labels of the inner switch are elided in this
 * extract; code lines are kept byte-identical.
 */
2798 static void tx_complete(struct urb *urb)
2800 struct sk_buff *skb = (struct sk_buff *)urb->context;
2801 struct skb_data *entry = (struct skb_data *)skb->cb;
2802 struct lan78xx_net *dev = entry->dev;
2804 if (urb->status == 0) {
2805 dev->net->stats.tx_packets += entry->num_of_packet;
2806 dev->net->stats.tx_bytes += entry->length;
2808 dev->net->stats.tx_errors++;
2810 switch (urb->status) {
2812 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2815 /* software-driven interface shutdown */
2823 netif_stop_queue(dev->net);
2826 netif_dbg(dev, tx_err, dev->net,
2827 "tx err %d\n", entry->urb->status);
/* balance the usb_autopm_get_interface_async() from lan78xx_tx_bh() */
2832 usb_autopm_put_interface_async(dev->intf);
2834 defer_bh(dev, skb, &dev->txq, tx_done);
2837 static void lan78xx_queue_skb(struct sk_buff_head *list,
2838 struct sk_buff *newsk, enum skb_state state)
2840 struct skb_data *entry = (struct skb_data *)newsk->cb;
2842 __skb_queue_tail(list, newsk);
2843 entry->state = state;
/* ndo_start_xmit: prepend TX command words via lan78xx_tx_prep() and
 * park the frame on txq_pend; actual URB submission happens in the
 * tasklet (lan78xx_tx_bh).  Always returns NETDEV_TX_OK — on prep
 * failure the frame is accounted as an error/drop instead of requeued.
 * NOTE(review): the 'if (skb2) { ... } else { ... }' structure around
 * these lines is elided in this extract.
 */
2847 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2849 struct lan78xx_net *dev = netdev_priv(net);
2850 struct sk_buff *skb2 = NULL;
2853 skb_tx_timestamp(skb);
2854 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2858 skb_queue_tail(&dev->txq_pend, skb2);
2860 /* throttle TX patch at slower than SUPER SPEED USB */
2861 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2862 (skb_queue_len(&dev->txq_pend) > 10))
2863 netif_stop_queue(net);
2865 netif_dbg(dev, tx_err, dev->net,
2866 "lan78xx_tx_prep return NULL\n");
2867 dev->net->stats.tx_errors++;
2868 dev->net->stats.tx_dropped++;
2871 tasklet_schedule(&dev->bh);
2873 return NETDEV_TX_OK;
/* Walk every altsetting of @intf looking for one bulk-in, one bulk-out
 * and (optionally) one interrupt-in endpoint, then derive the bulk
 * pipes from the endpoints found.  Fails if no altsetting provides both
 * bulk endpoints.
 * NOTE(review): local declarations (tmp, ep, intr) and several branch
 * lines inside the endpoint classification are elided in this extract.
 */
2877 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2880 struct usb_host_interface *alt = NULL;
2881 struct usb_host_endpoint *in = NULL, *out = NULL;
2882 struct usb_host_endpoint *status = NULL;
2884 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2890 alt = intf->altsetting + tmp;
2892 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2893 struct usb_host_endpoint *e;
2896 e = alt->endpoint + ep;
2897 switch (e->desc.bmAttributes) {
2898 case USB_ENDPOINT_XFER_INT:
/* only interrupt-IN endpoints are of interest (status pipe) */
2899 if (!usb_endpoint_dir_in(&e->desc))
2903 case USB_ENDPOINT_XFER_BULK:
2908 if (usb_endpoint_dir_in(&e->desc)) {
2911 else if (intr && !status)
2921 if (!alt || !in || !out)
2924 dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2925 in->desc.bEndpointAddress &
2926 USB_ENDPOINT_NUMBER_MASK);
2927 dev->pipe_out = usb_sndbulkpipe(dev->udev,
2928 out->desc.bEndpointAddress &
2929 USB_ENDPOINT_NUMBER_MASK);
/* may legitimately remain NULL if no interrupt endpoint was found */
2930 dev->ep_intr = status;
/* One-time driver/device binding: discover endpoints, allocate the
 * per-device private data (stored as an opaque pointer in dev->data[0]),
 * initialize locks/work items, advertise default offload features,
 * set up the PHY interrupt domain, reset the chip and init MDIO.
 * Error paths unwind in reverse order (irq domain, pdata, works).
 * NOTE(review): goto labels and several return/cleanup lines are elided
 * in this extract; code lines are kept byte-identical.
 */
2935 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2937 struct lan78xx_priv *pdata = NULL;
2941 ret = lan78xx_get_endpoints(dev, intf);
2943 netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
/* private state lives behind an unsigned-long slot; cast back below */
2948 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2950 pdata = (struct lan78xx_priv *)(dev->data[0]);
2952 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2958 spin_lock_init(&pdata->rfe_ctl_lock);
2959 mutex_init(&pdata->dataport_mutex);
2961 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2963 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2964 pdata->vlan_table[i] = 0;
2966 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2968 dev->net->features = 0;
2970 if (DEFAULT_TX_CSUM_ENABLE)
2971 dev->net->features |= NETIF_F_HW_CSUM;
2973 if (DEFAULT_RX_CSUM_ENABLE)
2974 dev->net->features |= NETIF_F_RXCSUM;
2976 if (DEFAULT_TSO_CSUM_ENABLE)
2977 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2979 if (DEFAULT_VLAN_RX_OFFLOAD)
2980 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2982 if (DEFAULT_VLAN_FILTER_ENABLE)
2983 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
/* everything enabled by default is also user-togglable */
2985 dev->net->hw_features = dev->net->features;
2987 ret = lan78xx_setup_irq_domain(dev);
2989 netdev_warn(dev->net,
2990 "lan78xx_setup_irq_domain() failed : %d", ret);
/* account for the 8-byte TX command header in MTU math */
2994 dev->net->hard_header_len += TX_OVERHEAD;
2995 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2997 /* Init all registers */
2998 ret = lan78xx_reset(dev);
3000 netdev_warn(dev->net, "Registers INIT FAILED....");
3004 ret = lan78xx_mdio_init(dev);
3006 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3010 dev->net->flags |= IFF_MULTICAST;
3012 pdata->wol = WAKE_MAGIC;
3017 lan78xx_remove_irq_domain(dev);
3020 netdev_warn(dev->net, "Bind routine FAILED");
3021 cancel_work_sync(&pdata->set_multicast);
3022 cancel_work_sync(&pdata->set_vlan);
/* Undo lan78xx_bind(): tear down the irq domain and MDIO bus, flush the
 * deferred multicast/VLAN works, then free the private data.
 * NOTE(review): the 'if (pdata)' guard and kfree(pdata) lines appear to
 * be elided in this extract.
 */
3027 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3029 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3031 lan78xx_remove_irq_domain(dev);
3033 lan78xx_remove_mdio(dev);
3036 cancel_work_sync(&pdata->set_multicast);
3037 cancel_work_sync(&pdata->set_vlan);
3038 netif_dbg(dev, ifdown, dev->net, "free pdata");
/* Apply the hardware RX checksum result carried in rx_cmd_b to the skb,
 * or fall back to software checksumming (CHECKSUM_NONE) when offload is
 * disabled, the hardware flagged the checksum as invalid (ICSM), or a
 * VLAN tag is present but tag stripping is off (see comment below).
 */
3045 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3046 struct sk_buff *skb,
3047 u32 rx_cmd_a, u32 rx_cmd_b)
3049 /* HW Checksum offload appears to be flawed if used when not stripping
3050 * VLAN headers. Drop back to S/W checksums under these conditions.
3052 if (!(dev->net->features & NETIF_F_RXCSUM) ||
3053 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3054 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3055 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3056 skb->ip_summed = CHECKSUM_NONE;
/* hardware supplies a full 16-bit checksum, exposed as COMPLETE */
3058 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3059 skb->ip_summed = CHECKSUM_COMPLETE;
3063 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3064 struct sk_buff *skb,
3065 u32 rx_cmd_a, u32 rx_cmd_b)
3067 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3068 (rx_cmd_a & RX_CMD_A_FVTG_))
3069 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3070 (rx_cmd_b & 0xffff));
/* Hand a fully-parsed frame up the network stack.  If RX is paused the
 * skb is parked on rxq_pause instead.  Clears skb->cb (the skb_data
 * control block) before giving the skb away.
 */
3073 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3077 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3078 skb_queue_tail(&dev->rxq_pause, skb);
3082 dev->net->stats.rx_packets++;
3083 dev->net->stats.rx_bytes += skb->len;
/* eth_type_trans() also pulls the ethernet header off the skb */
3085 skb->protocol = eth_type_trans(skb, dev->net);
3087 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3088 skb->len + sizeof(struct ethhdr), skb->protocol);
3089 memset(skb->cb, 0, sizeof(struct skb_data));
/* PHC/PTP path may consume the skb; if so, nothing more to do here */
3091 if (skb_defer_rx_timestamp(skb))
3094 status = netif_rx(skb);
3095 if (status != NET_RX_SUCCESS)
3096 netif_dbg(dev, rx_err, dev->net,
3097 "netif_rx status %d\n", status);
/* Parse one bulk-in buffer that may contain several frames back to
 * back.  Each frame is preceded by RX_CMD_A (status/length), RX_CMD_B
 * (checksum/VLAN) and RX_CMD_C, and padded to a 4-byte boundary.
 * The last frame reuses the URB's skb directly; earlier frames are
 * cloned out.  Returns 0 if the buffer is malformed (caller counts an
 * rx_error), nonzero on success.
 * NOTE(review): the rx_cmd_c declaration, per-frame length-sanity
 * checks and several brace lines are elided in this extract.
 */
3100 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3102 if (skb->len < dev->net->hard_header_len)
3105 while (skb->len > 0) {
3106 u32 rx_cmd_a, rx_cmd_b, align_count, size;
3108 struct sk_buff *skb2;
3109 unsigned char *packet;
/* pull the three little-endian command words off the front */
3111 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
3112 le32_to_cpus(&rx_cmd_a);
3113 skb_pull(skb, sizeof(rx_cmd_a));
3115 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
3116 le32_to_cpus(&rx_cmd_b);
3117 skb_pull(skb, sizeof(rx_cmd_b));
3119 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
3120 le16_to_cpus(&rx_cmd_c);
3121 skb_pull(skb, sizeof(rx_cmd_c));
3125 /* get the packet length */
3126 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3127 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3129 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3130 netif_dbg(dev, rx_err, dev->net,
3131 "Error rx_cmd_a=0x%08x", rx_cmd_a);
3133 /* last frame in this batch */
3134 if (skb->len == size) {
3135 lan78xx_rx_csum_offload(dev, skb,
3136 rx_cmd_a, rx_cmd_b);
3137 lan78xx_rx_vlan_offload(dev, skb,
3138 rx_cmd_a, rx_cmd_b);
3140 skb_trim(skb, skb->len - 4); /* remove fcs */
3141 skb->truesize = size + sizeof(struct sk_buff);
/* not the last frame: clone and carve this frame out of the buffer */
3146 skb2 = skb_clone(skb, GFP_ATOMIC);
3147 if (unlikely(!skb2)) {
3148 netdev_warn(dev->net, "Error allocating skb");
3153 skb2->data = packet;
3154 skb_set_tail_pointer(skb2, size);
3156 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3157 lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3159 skb_trim(skb2, skb2->len - 4); /* remove fcs */
3160 skb2->truesize = size + sizeof(struct sk_buff);
3162 lan78xx_skb_return(dev, skb2);
3165 skb_pull(skb, size);
3167 /* padding bytes before the next frame starts */
3169 skb_pull(skb, align_count);
/* Bottom-half helper: parse a completed RX buffer with lan78xx_rx();
 * on parse failure count an rx_error and requeue the skb on dev->done
 * (as cleanup work), otherwise hand the frame(s) up the stack.
 * NOTE(review): the branch/return structure between these lines is
 * elided in this extract.
 */
3175 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3177 if (!lan78xx_rx(dev, skb)) {
3178 dev->net->stats.rx_errors++;
3183 lan78xx_skb_return(dev, skb);
3187 netif_dbg(dev, rx_err, dev->net, "drop\n");
3188 dev->net->stats.rx_errors++;
3190 skb_queue_tail(&dev->done, skb);
3193 static void rx_complete(struct urb *urb);
/* Allocate an rx skb of rx_urb_size, attach it to @urb and submit the
 * bulk-in transfer — but only while the device is present, running and
 * not halted/asleep (checked under rxq.lock so state can't race the
 * submit).  On any failure the skb is freed and the error returned so
 * the caller can decide whether to retry or recover.
 * NOTE(review): error-branch case labels and the urb free/return lines
 * are elided in this extract.
 */
3195 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3197 struct sk_buff *skb;
3198 struct skb_data *entry;
3199 unsigned long lockflags;
3200 size_t size = dev->rx_urb_size;
3203 skb = netdev_alloc_skb_ip_align(dev->net, size);
3209 entry = (struct skb_data *)skb->cb;
3214 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3215 skb->data, size, rx_complete, skb);
3217 spin_lock_irqsave(&dev->rxq.lock, lockflags);
3219 if (netif_device_present(dev->net) &&
3220 netif_running(dev->net) &&
3221 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3222 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3223 ret = usb_submit_urb(urb, GFP_ATOMIC);
3226 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
/* a stalled pipe needs the kevent worker to clear the halt */
3229 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3232 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3233 netif_device_detach(dev->net);
3239 netif_dbg(dev, rx_err, dev->net,
3240 "rx submit, %d\n", ret);
3241 tasklet_schedule(&dev->bh);
3244 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3247 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3249 dev_kfree_skb_any(skb);
/* URB completion handler for bulk-in transfers (interrupt context).
 * Classifies urb->status into an skb state (done/cleanup), updates
 * error counters, defers the skb to the bottom half, then immediately
 * resubmits the same URB unless the device stopped or the skb is being
 * unlinked.
 * NOTE(review): case labels, 'state = ...' assignments and brace lines
 * are elided in this extract; code lines are kept byte-identical.
 */
3255 static void rx_complete(struct urb *urb)
3257 struct sk_buff *skb = (struct sk_buff *)urb->context;
3258 struct skb_data *entry = (struct skb_data *)skb->cb;
3259 struct lan78xx_net *dev = entry->dev;
3260 int urb_status = urb->status;
3261 enum skb_state state;
3263 skb_put(skb, urb->actual_length);
3267 switch (urb_status) {
/* success path: frame must at least hold the RX command words */
3269 if (skb->len < dev->net->hard_header_len) {
3271 dev->net->stats.rx_errors++;
3272 dev->net->stats.rx_length_errors++;
3273 netif_dbg(dev, rx_err, dev->net,
3274 "rx length %d\n", skb->len);
3276 usb_mark_last_busy(dev->udev);
3279 dev->net->stats.rx_errors++;
3280 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3282 case -ECONNRESET: /* async unlink */
3283 case -ESHUTDOWN: /* hardware gone */
3284 netif_dbg(dev, ifdown, dev->net,
3285 "rx shutdown, code %d\n", urb_status);
3293 dev->net->stats.rx_errors++;
3299 /* data overrun ... flush fifo? */
3301 dev->net->stats.rx_over_errors++;
3306 dev->net->stats.rx_errors++;
3307 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
/* defer_bh() returns the PREVIOUS state — used to spot unlink races */
3311 state = defer_bh(dev, skb, &dev->rxq, state);
3314 if (netif_running(dev->net) &&
3315 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3316 state != unlink_start) {
3317 rx_submit(dev, urb, GFP_ATOMIC);
3322 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
/* TX bottom half: coalesce pending frames from txq_pend into one
 * URB-sized buffer (GSO frames go alone), pad each frame to a 4-byte
 * boundary, then allocate and submit a bulk-out URB.  Handles USB
 * autosuspend by anchoring the URB on dev->deferred for resume to
 * submit.  Stops the queue on backpressure or a halted pipe.
 * NOTE(review): many structural lines (gotos, labels, brace lines,
 * the pkt_cnt accumulation, urb error checks) are elided in this
 * extract; code lines are kept byte-identical.
 */
3325 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3328 struct urb *urb = NULL;
3329 struct skb_data *entry;
3330 unsigned long flags;
3331 struct sk_buff_head *tqp = &dev->txq_pend;
3332 struct sk_buff *skb, *skb2;
3335 int skb_totallen, pkt_cnt;
/* pass 1 (under tqp lock): size the batch / pull out a lone GSO skb */
3341 spin_lock_irqsave(&tqp->lock, flags);
3342 skb_queue_walk(tqp, skb) {
3343 if (skb_is_gso(skb)) {
3344 if (!skb_queue_is_first(tqp, skb)) {
3345 /* handle previous packets first */
3349 length = skb->len - TX_OVERHEAD;
3350 __skb_unlink(skb, tqp);
3351 spin_unlock_irqrestore(&tqp->lock, flags);
3355 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3357 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3360 spin_unlock_irqrestore(&tqp->lock, flags);
3362 /* copy to a single skb */
3363 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3367 skb_put(skb, skb_totallen);
/* pass 2: dequeue and pack each frame at a 4-byte-aligned offset */
3369 for (count = pos = 0; count < pkt_cnt; count++) {
3370 skb2 = skb_dequeue(tqp);
3372 length += (skb2->len - TX_OVERHEAD);
3373 memcpy(skb->data + pos, skb2->data, skb2->len);
3374 pos += roundup(skb2->len, sizeof(u32));
3375 dev_kfree_skb(skb2);
3380 urb = usb_alloc_urb(0, GFP_ATOMIC);
3384 entry = (struct skb_data *)skb->cb;
3387 entry->length = length;
3388 entry->num_of_packet = count;
3390 spin_lock_irqsave(&dev->txq.lock, flags);
/* async PM ref is released in tx_complete() (or on error below) */
3391 ret = usb_autopm_get_interface_async(dev->intf);
3393 spin_unlock_irqrestore(&dev->txq.lock, flags);
3397 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3398 skb->data, skb->len, tx_complete, skb);
3400 if (length % dev->maxpacket == 0) {
3401 /* send USB_ZERO_PACKET */
3402 urb->transfer_flags |= URB_ZERO_PACKET;
3406 /* if this triggers the device is still a sleep */
3407 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3408 /* transmission will be done in resume */
3409 usb_anchor_urb(urb, &dev->deferred);
3410 /* no use to process more packets */
3411 netif_stop_queue(dev->net);
3413 spin_unlock_irqrestore(&dev->txq.lock, flags);
3414 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3419 ret = usb_submit_urb(urb, GFP_ATOMIC);
3422 netif_trans_update(dev->net);
3423 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3424 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3425 netif_stop_queue(dev->net);
3428 netif_stop_queue(dev->net);
3429 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3430 usb_autopm_put_interface_async(dev->intf);
3433 usb_autopm_put_interface_async(dev->intf);
3434 netif_dbg(dev, tx_err, dev->net,
3435 "tx: submit urb err %d\n", ret);
3439 spin_unlock_irqrestore(&dev->txq.lock, flags);
3442 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3444 dev->net->stats.tx_dropped++;
3446 dev_kfree_skb_any(skb);
3449 netif_dbg(dev, tx_queued, dev->net,
3450 "> tx, len %d, type 0x%x\n", length, skb->protocol);
/* RX bottom half: top up the in-flight RX URB pool toward rx_qlen (at
 * most 10 allocations per invocation), rescheduling the tasklet if the
 * pool is still short; also wakes the TX queue when txq has drained.
 * NOTE(review): local declarations (urb, i) and urb-NULL/free lines are
 * elided in this extract.
 */
3453 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3458 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3459 for (i = 0; i < 10; i++) {
3460 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3462 urb = usb_alloc_urb(0, GFP_ATOMIC);
/* -ENOLINK: device gone/stopped — no point allocating more */
3464 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3468 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3469 tasklet_schedule(&dev->bh);
3471 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3472 netif_wake_queue(dev->net);
/* Main tasklet: drain the done queue (processing completed RX buffers,
 * freeing URBs for cleanup states), then — if the device is up — rearm
 * the stats timer, run the TX bottom half when frames are pending, and
 * top up RX URBs unless throttled or halted.
 * NOTE(review): case labels of the switch and the lan78xx_tx_bh()/
 * lan78xx_rx_bh() call lines are elided in this extract.
 */
3475 static void lan78xx_bh(unsigned long param)
3477 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3478 struct sk_buff *skb;
3479 struct skb_data *entry;
3481 while ((skb = skb_dequeue(&dev->done))) {
3482 entry = (struct skb_data *)(skb->cb);
3483 switch (entry->state) {
3485 entry->state = rx_cleanup;
3486 rx_process(dev, skb);
3489 usb_free_urb(entry->urb);
3493 usb_free_urb(entry->urb);
3497 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3502 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3503 /* reset update timer delta */
3504 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3506 mod_timer(&dev->stat_monitor,
3507 jiffies + STAT_UPDATE_TIMER);
3510 if (!skb_queue_empty(&dev->txq_pend))
3513 if (!timer_pending(&dev->delay) &&
3514 !test_bit(EVENT_RX_HALT, &dev->flags))
/* Deferred-event worker (dev->wq).  Each EVENT_* bit set by interrupt
 * or completion context is serviced here in process context: clearing
 * TX/RX pipe halts, performing link reset, and updating hardware stats.
 * PM references bracket every hardware access.
 * NOTE(review): several 'if (status >= 0)'-style guard lines and brace
 * lines are elided in this extract.
 */
3519 static void lan78xx_delayedwork(struct work_struct *work)
3522 struct lan78xx_net *dev;
3524 dev = container_of(work, struct lan78xx_net, wq.work);
3526 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3527 unlink_urbs(dev, &dev->txq);
3528 status = usb_autopm_get_interface(dev->intf);
3531 status = usb_clear_halt(dev->udev, dev->pipe_out);
3532 usb_autopm_put_interface(dev->intf);
3535 status != -ESHUTDOWN) {
3536 if (netif_msg_tx_err(dev))
3538 netdev_err(dev->net,
3539 "can't clear tx halt, status %d\n",
3542 clear_bit(EVENT_TX_HALT, &dev->flags);
3543 if (status != -ESHUTDOWN)
3544 netif_wake_queue(dev->net);
3547 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3548 unlink_urbs(dev, &dev->rxq);
3549 status = usb_autopm_get_interface(dev->intf);
3552 status = usb_clear_halt(dev->udev, dev->pipe_in);
3553 usb_autopm_put_interface(dev->intf);
3556 status != -ESHUTDOWN) {
3557 if (netif_msg_rx_err(dev))
3559 netdev_err(dev->net,
3560 "can't clear rx halt, status %d\n",
3563 clear_bit(EVENT_RX_HALT, &dev->flags);
/* restart RX refill now that the pipe is usable again */
3564 tasklet_schedule(&dev->bh);
3568 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3571 clear_bit(EVENT_LINK_RESET, &dev->flags);
3572 status = usb_autopm_get_interface(dev->intf);
3575 if (lan78xx_link_reset(dev) < 0) {
3576 usb_autopm_put_interface(dev->intf);
3578 netdev_info(dev->net, "link reset failed (%d)\n",
3581 usb_autopm_put_interface(dev->intf);
3585 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3586 lan78xx_update_stats(dev);
3588 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3590 mod_timer(&dev->stat_monitor,
3591 jiffies + (STAT_UPDATE_TIMER * dev->delta));
/* exponential backoff of the stats poll, capped at 50 intervals */
3593 dev->delta = min((dev->delta * 2), 50);
/* Interrupt-endpoint URB completion: parse the status words via
 * lan78xx_status() on success, swallow shutdown codes, log anything
 * else, then resubmit the URB while the interface is running.
 * NOTE(review): the 'case 0:' label and brace lines are elided in this
 * extract.
 */
3597 static void intr_complete(struct urb *urb)
3599 struct lan78xx_net *dev = urb->context;
3600 int status = urb->status;
3605 lan78xx_status(dev, urb);
3608 /* software-driven interface shutdown */
3609 case -ENOENT: /* urb killed */
3610 case -ESHUTDOWN: /* hardware gone */
3611 netif_dbg(dev, ifdown, dev->net,
3612 "intr shutdown, code %d\n", status);
3615 /* NOTE: not throttling like RX/TX, since this endpoint
3616 * already polls infrequently
3619 netdev_dbg(dev->net, "intr status %d\n", status);
3623 if (!netif_running(dev->net))
/* clear stale status before rearming the poll */
3626 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3627 status = usb_submit_urb(urb, GFP_ATOMIC);
3629 netif_err(dev, timer, dev->net,
3630 "intr resubmit --> %d\n", status);
/* USB disconnect callback: unhook intfdata, unregister PHY fixups,
 * disconnect (and for fixed-link, unregister) the PHY, unregister the
 * netdev, cancel deferred work, scuttle anchored URBs, unbind, and free
 * the interrupt URB.
 * NOTE(review): the NULL-dev early return, net assignment and the final
 * free_netdev()/usb_put_dev() lines are elided in this extract.
 */
3633 static void lan78xx_disconnect(struct usb_interface *intf)
3635 struct lan78xx_net *dev;
3636 struct usb_device *udev;
3637 struct net_device *net;
3638 struct phy_device *phydev;
3640 dev = usb_get_intfdata(intf);
3641 usb_set_intfdata(intf, NULL);
3645 udev = interface_to_usbdev(intf);
3647 phydev = net->phydev;
3649 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3650 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3652 phy_disconnect(net->phydev);
3654 if (phy_is_pseudo_fixed_link(phydev))
3655 fixed_phy_unregister(phydev);
3657 unregister_netdev(net);
3659 cancel_delayed_work_sync(&dev->wq);
3661 usb_scuttle_anchored_urbs(&dev->deferred);
3663 lan78xx_unbind(dev, intf);
3665 usb_kill_urb(dev->urb_intr);
3666 usb_free_urb(dev->urb_intr);
3672 static void lan78xx_tx_timeout(struct net_device *net)
3674 struct lan78xx_net *dev = netdev_priv(net);
3676 unlink_urbs(dev, &dev->txq);
3677 tasklet_schedule(&dev->bh);
/* netdev callback table wired into netdev->netdev_ops in lan78xx_probe() */
3680 static const struct net_device_ops lan78xx_netdev_ops = {
3681 .ndo_open = lan78xx_open,
3682 .ndo_stop = lan78xx_stop,
3683 .ndo_start_xmit = lan78xx_start_xmit,
3684 .ndo_tx_timeout = lan78xx_tx_timeout,
3685 .ndo_change_mtu = lan78xx_change_mtu,
3686 .ndo_set_mac_address = lan78xx_set_mac_addr,
3687 .ndo_validate_addr = eth_validate_addr,
3688 .ndo_do_ioctl = lan78xx_ioctl,
3689 .ndo_set_rx_mode = lan78xx_set_multicast,
3690 .ndo_set_features = lan78xx_set_features,
3691 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3692 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3695 static void lan78xx_stat_monitor(struct timer_list *t)
3697 struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3699 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
/* USB probe: allocate the netdev, initialize queues/locks/work/timers,
 * bind to the hardware, set up bulk and interrupt pipes, init the PHY,
 * register the netdev and configure runtime PM autosuspend.
 * NOTE(review): local declarations (ret, maxp, buf, period), goto
 * labels between the error-path lines at the end, and several checks
 * (alloc failures) are elided in this extract; code lines are kept
 * byte-identical.
 * NOTE(review): endpoints are indexed blindly as cur_altsetting
 * endpoint + 0/1/2 below — a malicious/odd device with fewer endpoints
 * would be over-read; upstream later hardened this.  Confirm against
 * the kernel version this file tracks.
 */
3702 static int lan78xx_probe(struct usb_interface *intf,
3703 const struct usb_device_id *id)
3705 struct lan78xx_net *dev;
3706 struct net_device *netdev;
3707 struct usb_device *udev;
3713 udev = interface_to_usbdev(intf);
3714 udev = usb_get_dev(udev);
3716 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3718 dev_err(&intf->dev, "Error: OOM\n");
3723 /* netdev_printk() needs this */
3724 SET_NETDEV_DEV(netdev, &intf->dev);
3726 dev = netdev_priv(netdev);
3730 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3731 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3733 skb_queue_head_init(&dev->rxq);
3734 skb_queue_head_init(&dev->txq);
3735 skb_queue_head_init(&dev->done);
3736 skb_queue_head_init(&dev->rxq_pause);
3737 skb_queue_head_init(&dev->txq_pend);
3738 mutex_init(&dev->phy_mutex);
3740 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3741 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3742 init_usb_anchor(&dev->deferred);
3744 netdev->netdev_ops = &lan78xx_netdev_ops;
3745 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3746 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3749 timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3751 mutex_init(&dev->stats.access_lock);
3753 ret = lan78xx_bind(dev, intf);
/* bind set hard_mtu; clamp the initial MTU to fit */
3757 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3758 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3760 /* MTU range: 68 - 9000 */
3761 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3763 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3764 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3765 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3767 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3768 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3770 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3771 dev->ep_intr->desc.bEndpointAddress &
3772 USB_ENDPOINT_NUMBER_MASK);
3773 period = dev->ep_intr->desc.bInterval;
3775 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3776 buf = kmalloc(maxp, GFP_KERNEL);
3778 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3779 if (!dev->urb_intr) {
3784 usb_fill_int_urb(dev->urb_intr, dev->udev,
3785 dev->pipe_intr, buf, maxp,
3786 intr_complete, dev, period);
3790 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3792 /* driver requires remote-wakeup capability during autosuspend. */
3793 intf->needs_remote_wakeup = 1;
3795 ret = lan78xx_phy_init(dev);
3799 ret = register_netdev(netdev);
3801 netif_err(dev, probe, netdev, "couldn't register the device\n");
3805 usb_set_intfdata(intf, dev);
3807 ret = device_set_wakeup_enable(&udev->dev, true);
3809 /* Default delay of 2sec has more overhead than advantage.
3810 * Set to 10sec as default.
3812 pm_runtime_set_autosuspend_delay(&udev->dev,
3813 DEFAULT_AUTOSUSPEND_DELAY);
/* error unwind labels (elided) run in reverse order of setup */
3818 phy_disconnect(netdev->phydev);
3820 usb_free_urb(dev->urb_intr);
3822 lan78xx_unbind(dev, intf);
3824 free_netdev(netdev);
/* Bit-serial CRC-16 (polynomial 0x8005) over @len bytes of @buf, used
 * to program the hardware wakeup-frame filters (WUF_CFG CRC field).
 * NOTE(review): the crc/data/msb declarations, the shift-feedback line
 * applying crc16poly, the 'data >>= 1' step and the return are elided
 * in this extract; code lines are kept byte-identical.
 */
3831 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3833 const u16 crc16poly = 0x8005;
3839 for (i = 0; i < len; i++) {
3841 for (bit = 0; bit < 8; bit++) {
/* feedback when the CRC MSB differs from the next input bit (LSB-first) */
3845 if (msb ^ (u16)(data & 1)) {
3847 crc |= (u16)0x0001U;
/* Program the Wake-on-LAN configuration for suspend according to the
 * WAKE_* bits in @wol: stop MAC TX/RX, clear stale wake status, build
 * WUCSR flags plus wakeup-frame filters (IPv4/IPv6 multicast, ARP) with
 * CRC16 matches, pick the PMT suspend mode, then re-enable RX so
 * matching frames can wake the device.
 * NOTE(review): local declarations (buf, ret, temp_pmt_ctl, temp_wucsr,
 * mask_index, crc), mask_index increments between filter setups and the
 * final return are elided in this extract; code lines are kept
 * byte-identical.
 */
3856 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3864 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3865 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3866 const u8 arp_type[2] = { 0x08, 0x06 };
/* quiesce the MAC before reprogramming wake logic */
3868 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3869 buf &= ~MAC_TX_TXEN_;
3870 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3871 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3872 buf &= ~MAC_RX_RXEN_;
3873 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3875 ret = lan78xx_write_reg(dev, WUCSR, 0);
3876 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3877 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3882 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3883 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3884 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3886 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3887 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3890 if (wol & WAKE_PHY) {
3891 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3893 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3894 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3895 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3897 if (wol & WAKE_MAGIC) {
3898 temp_wucsr |= WUCSR_MPEN_;
3900 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3901 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
/* magic packet alone can use the deepest suspend mode */
3902 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3904 if (wol & WAKE_BCAST) {
3905 temp_wucsr |= WUCSR_BCST_EN_;
3907 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3908 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3909 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3911 if (wol & WAKE_MCAST) {
3912 temp_wucsr |= WUCSR_WAKE_EN_;
3914 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3915 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3916 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3918 WUF_CFGX_TYPE_MCAST_ |
3919 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3920 (crc & WUF_CFGX_CRC16_MASK_));
/* mask = 7 -> match the first 3 bytes of the DA (01:00:5E) */
3922 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3923 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3924 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3925 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3928 /* for IPv6 Multicast */
3929 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3930 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3932 WUF_CFGX_TYPE_MCAST_ |
3933 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3934 (crc & WUF_CFGX_CRC16_MASK_));
3936 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3937 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3938 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3939 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3942 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3943 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3944 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3946 if (wol & WAKE_UCAST) {
3947 temp_wucsr |= WUCSR_PFDA_EN_;
3949 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3950 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3951 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3953 if (wol & WAKE_ARP) {
3954 temp_wucsr |= WUCSR_WAKE_EN_;
3956 /* set WUF_CFG & WUF_MASK
3957 * for packettype (offset 12,13) = ARP (0x0806)
3959 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3960 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3962 WUF_CFGX_TYPE_ALL_ |
3963 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3964 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x3000 -> match only the EtherType bytes at offset 12/13 */
3966 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3967 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3968 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3969 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3972 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3973 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3974 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3977 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3979 /* when multiple WOL bits are set */
3980 if (hweight_long((unsigned long)wol) > 1) {
3981 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3982 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3983 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3985 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* clear any wake-up status latched while configuring */
3988 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3989 buf |= PMT_CTL_WUPS_MASK_;
3990 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3992 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3993 buf |= MAC_RX_RXEN_;
3994 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* USB suspend callback.  On the first suspend: refuse autosuspend while
 * TX is in flight, mark the device asleep, stop the MAC, detach the
 * netdev and drain URBs.  Then program either good-frame/link wake (for
 * runtime autosuspend) or the user's WoL settings (system suspend).
 * NOTE(review): local declarations (ret, event, buf), '-EBUSY' return,
 * braces and several guard lines are elided in this extract.
 */
3999 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4001 struct lan78xx_net *dev = usb_get_intfdata(intf);
4002 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
4007 event = message.event;
4009 if (!dev->suspend_count++) {
4010 spin_lock_irq(&dev->txq.lock);
4011 /* don't autosuspend while transmitting */
4012 if ((skb_queue_len(&dev->txq) ||
4013 skb_queue_len(&dev->txq_pend)) &&
4014 PMSG_IS_AUTO(message)) {
4015 spin_unlock_irq(&dev->txq.lock);
4019 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4020 spin_unlock_irq(&dev->txq.lock);
4024 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4025 buf &= ~MAC_TX_TXEN_;
4026 ret = lan78xx_write_reg(dev, MAC_TX, buf);
4027 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4028 buf &= ~MAC_RX_RXEN_;
4029 ret = lan78xx_write_reg(dev, MAC_RX, buf);
4031 /* empty out the rx and queues */
4032 netif_device_detach(dev->net);
4033 lan78xx_terminate_urbs(dev);
4034 usb_kill_urb(dev->urb_intr);
/* reattach so the device is usable again after resume */
4037 netif_device_attach(dev->net);
4040 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4041 del_timer(&dev->stat_monitor);
4043 if (PMSG_IS_AUTO(message)) {
4044 /* auto suspend (selective suspend) */
4045 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4046 buf &= ~MAC_TX_TXEN_;
4047 ret = lan78xx_write_reg(dev, MAC_TX, buf);
4048 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4049 buf &= ~MAC_RX_RXEN_;
4050 ret = lan78xx_write_reg(dev, MAC_RX, buf);
4052 ret = lan78xx_write_reg(dev, WUCSR, 0);
4053 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4054 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4056 /* set goodframe wakeup */
4057 ret = lan78xx_read_reg(dev, WUCSR, &buf);
4059 buf |= WUCSR_RFE_WAKE_EN_;
4060 buf |= WUCSR_STORE_WAKE_;
4062 ret = lan78xx_write_reg(dev, WUCSR, buf);
4064 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4066 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4067 buf |= PMT_CTL_RES_CLR_WKP_STS_;
4069 buf |= PMT_CTL_PHY_WAKE_EN_;
4070 buf |= PMT_CTL_WOL_EN_;
4071 buf &= ~PMT_CTL_SUS_MODE_MASK_;
4072 buf |= PMT_CTL_SUS_MODE_3_;
4074 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4076 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
/* writing WUPS bits back clears latched wake status */
4078 buf |= PMT_CTL_WUPS_MASK_;
4080 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4082 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4083 buf |= MAC_RX_RXEN_;
4084 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* system suspend: apply the user-configured WoL settings instead */
4086 lan78xx_set_suspend(dev, pdata->wol);
/* USB resume callback: rearm the stats timer, resubmit the interrupt
 * URB, flush TX URBs that were anchored on dev->deferred during
 * suspend, clear the asleep flag, restart the queue/tasklet, then clear
 * all wake sources and re-enable the MAC transmitter.
 * NOTE(review): local declarations (ret, res, buf), braces and error
 * branches around the deferred-URB loop are elided in this extract.
 */
4095 static int lan78xx_resume(struct usb_interface *intf)
4097 struct lan78xx_net *dev = usb_get_intfdata(intf);
4098 struct sk_buff *skb;
4103 if (!timer_pending(&dev->stat_monitor)) {
4105 mod_timer(&dev->stat_monitor,
4106 jiffies + STAT_UPDATE_TIMER);
4109 if (!--dev->suspend_count) {
4110 /* resume interrupt URBs */
4111 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
4112 usb_submit_urb(dev->urb_intr, GFP_NOIO);
4114 spin_lock_irq(&dev->txq.lock);
/* submit every TX URB that lan78xx_tx_bh() parked during suspend */
4115 while ((res = usb_get_from_anchor(&dev->deferred))) {
4116 skb = (struct sk_buff *)res->context;
4117 ret = usb_submit_urb(res, GFP_ATOMIC);
4119 dev_kfree_skb_any(skb);
4121 usb_autopm_put_interface_async(dev->intf);
4123 netif_trans_update(dev->net);
4124 lan78xx_queue_skb(&dev->txq, skb, tx_start);
4128 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4129 spin_unlock_irq(&dev->txq.lock);
4131 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
4132 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
4133 netif_start_queue(dev->net);
4134 tasklet_schedule(&dev->bh);
/* disarm all wake sources now that we are awake */
4138 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4139 ret = lan78xx_write_reg(dev, WUCSR, 0);
4140 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4142 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4144 WUCSR2_IPV6_TCPSYN_RCD_ |
4145 WUCSR2_IPV4_TCPSYN_RCD_);
4147 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4148 WUCSR_EEE_RX_WAKE_ |
4150 WUCSR_RFE_WAKE_FR_ |
4155 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4156 buf |= MAC_TX_TXEN_;
4157 ret = lan78xx_write_reg(dev, MAC_TX, buf);
/* USB reset-resume: device state was lost across the resume, so
 * restart the PHY before running the normal resume path.
 * NOTE(review): a line between these (likely a full chip re-init call)
 * appears to be elided in this extract — verify against the full
 * source.
 */
4162 static int lan78xx_reset_resume(struct usb_interface *intf)
4164 struct lan78xx_net *dev = usb_get_intfdata(intf);
4168 phy_start(dev->net->phydev);
4170 return lan78xx_resume(intf);
/* Supported USB IDs (LAN7800/LAN7850/LAN7801), driver registration and
 * module metadata.  NOTE(review): per-entry closing braces and the
 * table terminator entry are elided in this extract.
 */
4173 static const struct usb_device_id products[] = {
4175 /* LAN7800 USB Gigabit Ethernet Device */
4176 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4179 /* LAN7850 USB Gigabit Ethernet Device */
4180 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4183 /* LAN7801 USB Gigabit Ethernet Device */
4184 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4188 MODULE_DEVICE_TABLE(usb, products);
4190 static struct usb_driver lan78xx_driver = {
4191 .name = DRIVER_NAME,
4192 .id_table = products,
4193 .probe = lan78xx_probe,
4194 .disconnect = lan78xx_disconnect,
4195 .suspend = lan78xx_suspend,
4196 .resume = lan78xx_resume,
4197 .reset_resume = lan78xx_reset_resume,
4198 .supports_autosuspend = 1,
/* keep hubs from initiating LPM transitions this hardware mishandles */
4199 .disable_hub_initiated_lpm = 1,
4202 module_usb_driver(lan78xx_driver);
4204 MODULE_AUTHOR(DRIVER_AUTHOR);
4205 MODULE_DESCRIPTION(DRIVER_DESC);
4206 MODULE_LICENSE("GPL");