Linux-libre 5.3.12-gnu
[librecmc/linux-libre.git] / drivers / net / usb / lan78xx.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/version.h>
6 #include <linux/module.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/usb.h>
11 #include <linux/crc32.h>
12 #include <linux/signal.h>
13 #include <linux/slab.h>
14 #include <linux/if_vlan.h>
15 #include <linux/uaccess.h>
16 #include <linux/linkmode.h>
17 #include <linux/list.h>
18 #include <linux/ip.h>
19 #include <linux/ipv6.h>
20 #include <linux/mdio.h>
21 #include <linux/phy.h>
22 #include <net/ip6_checksum.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32
33 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME     "lan78xx"
36
37 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
38 #define THROTTLE_JIFFIES                (HZ / 8)
39 #define UNLINK_TIMEOUT_MS               3
40
41 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
42
43 #define SS_USB_PKT_SIZE                 (1024)
44 #define HS_USB_PKT_SIZE                 (512)
45 #define FS_USB_PKT_SIZE                 (64)
46
47 #define MAX_RX_FIFO_SIZE                (12 * 1024)
48 #define MAX_TX_FIFO_SIZE                (12 * 1024)
49 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
50 #define DEFAULT_BULK_IN_DELAY           (0x0800)
51 #define MAX_SINGLE_PACKET_SIZE          (9000)
52 #define DEFAULT_TX_CSUM_ENABLE          (true)
53 #define DEFAULT_RX_CSUM_ENABLE          (true)
54 #define DEFAULT_TSO_CSUM_ENABLE         (true)
55 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
56 #define DEFAULT_VLAN_RX_OFFLOAD         (true)
57 #define TX_OVERHEAD                     (8)
58 #define RXW_PADDING                     2
59
60 #define LAN78XX_USB_VENDOR_ID           (0x0424)
61 #define LAN7800_USB_PRODUCT_ID          (0x7800)
62 #define LAN7850_USB_PRODUCT_ID          (0x7850)
63 #define LAN7801_USB_PRODUCT_ID          (0x7801)
64 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
65 #define LAN78XX_OTP_MAGIC               (0x78F3)
66
67 #define MII_READ                        1
68 #define MII_WRITE                       0
69
70 #define EEPROM_INDICATOR                (0xA5)
71 #define EEPROM_MAC_OFFSET               (0x01)
72 #define MAX_EEPROM_SIZE                 512
73 #define OTP_INDICATOR_1                 (0xF3)
74 #define OTP_INDICATOR_2                 (0xF7)
75
76 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
77                                          WAKE_MCAST | WAKE_BCAST | \
78                                          WAKE_ARP | WAKE_MAGIC)
79
80 /* USB related defines */
81 #define BULK_IN_PIPE                    1
82 #define BULK_OUT_PIPE                   2
83
84 /* default autosuspend delay (mSec)*/
85 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
86
87 /* statistic update interval (mSec) */
88 #define STAT_UPDATE_TIMER               (1 * 1000)
89
90 /* defines interrupts from interrupt EP */
91 #define MAX_INT_EP                      (32)
92 #define INT_EP_INTEP                    (31)
93 #define INT_EP_OTP_WR_DONE              (28)
94 #define INT_EP_EEE_TX_LPI_START         (26)
95 #define INT_EP_EEE_TX_LPI_STOP          (25)
96 #define INT_EP_EEE_RX_LPI               (24)
97 #define INT_EP_MAC_RESET_TIMEOUT        (23)
98 #define INT_EP_RDFO                     (22)
99 #define INT_EP_TXE                      (21)
100 #define INT_EP_USB_STATUS               (20)
101 #define INT_EP_TX_DIS                   (19)
102 #define INT_EP_RX_DIS                   (18)
103 #define INT_EP_PHY                      (17)
104 #define INT_EP_DP                       (16)
105 #define INT_EP_MAC_ERR                  (15)
106 #define INT_EP_TDFU                     (14)
107 #define INT_EP_TDFO                     (13)
108 #define INT_EP_UTX                      (12)
109 #define INT_EP_GPIO_11                  (11)
110 #define INT_EP_GPIO_10                  (10)
111 #define INT_EP_GPIO_9                   (9)
112 #define INT_EP_GPIO_8                   (8)
113 #define INT_EP_GPIO_7                   (7)
114 #define INT_EP_GPIO_6                   (6)
115 #define INT_EP_GPIO_5                   (5)
116 #define INT_EP_GPIO_4                   (4)
117 #define INT_EP_GPIO_3                   (3)
118 #define INT_EP_GPIO_2                   (2)
119 #define INT_EP_GPIO_1                   (1)
120 #define INT_EP_GPIO_0                   (0)
121
122 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
123         "RX FCS Errors",
124         "RX Alignment Errors",
125         "Rx Fragment Errors",
126         "RX Jabber Errors",
127         "RX Undersize Frame Errors",
128         "RX Oversize Frame Errors",
129         "RX Dropped Frames",
130         "RX Unicast Byte Count",
131         "RX Broadcast Byte Count",
132         "RX Multicast Byte Count",
133         "RX Unicast Frames",
134         "RX Broadcast Frames",
135         "RX Multicast Frames",
136         "RX Pause Frames",
137         "RX 64 Byte Frames",
138         "RX 65 - 127 Byte Frames",
139         "RX 128 - 255 Byte Frames",
140         "RX 256 - 511 Bytes Frames",
141         "RX 512 - 1023 Byte Frames",
142         "RX 1024 - 1518 Byte Frames",
143         "RX Greater 1518 Byte Frames",
144         "EEE RX LPI Transitions",
145         "EEE RX LPI Time",
146         "TX FCS Errors",
147         "TX Excess Deferral Errors",
148         "TX Carrier Errors",
149         "TX Bad Byte Count",
150         "TX Single Collisions",
151         "TX Multiple Collisions",
152         "TX Excessive Collision",
153         "TX Late Collisions",
154         "TX Unicast Byte Count",
155         "TX Broadcast Byte Count",
156         "TX Multicast Byte Count",
157         "TX Unicast Frames",
158         "TX Broadcast Frames",
159         "TX Multicast Frames",
160         "TX Pause Frames",
161         "TX 64 Byte Frames",
162         "TX 65 - 127 Byte Frames",
163         "TX 128 - 255 Byte Frames",
164         "TX 256 - 511 Bytes Frames",
165         "TX 512 - 1023 Byte Frames",
166         "TX 1024 - 1518 Byte Frames",
167         "TX Greater 1518 Byte Frames",
168         "EEE TX LPI Transitions",
169         "EEE TX LPI Time",
170 };
171
172 struct lan78xx_statstage {
173         u32 rx_fcs_errors;
174         u32 rx_alignment_errors;
175         u32 rx_fragment_errors;
176         u32 rx_jabber_errors;
177         u32 rx_undersize_frame_errors;
178         u32 rx_oversize_frame_errors;
179         u32 rx_dropped_frames;
180         u32 rx_unicast_byte_count;
181         u32 rx_broadcast_byte_count;
182         u32 rx_multicast_byte_count;
183         u32 rx_unicast_frames;
184         u32 rx_broadcast_frames;
185         u32 rx_multicast_frames;
186         u32 rx_pause_frames;
187         u32 rx_64_byte_frames;
188         u32 rx_65_127_byte_frames;
189         u32 rx_128_255_byte_frames;
190         u32 rx_256_511_bytes_frames;
191         u32 rx_512_1023_byte_frames;
192         u32 rx_1024_1518_byte_frames;
193         u32 rx_greater_1518_byte_frames;
194         u32 eee_rx_lpi_transitions;
195         u32 eee_rx_lpi_time;
196         u32 tx_fcs_errors;
197         u32 tx_excess_deferral_errors;
198         u32 tx_carrier_errors;
199         u32 tx_bad_byte_count;
200         u32 tx_single_collisions;
201         u32 tx_multiple_collisions;
202         u32 tx_excessive_collision;
203         u32 tx_late_collisions;
204         u32 tx_unicast_byte_count;
205         u32 tx_broadcast_byte_count;
206         u32 tx_multicast_byte_count;
207         u32 tx_unicast_frames;
208         u32 tx_broadcast_frames;
209         u32 tx_multicast_frames;
210         u32 tx_pause_frames;
211         u32 tx_64_byte_frames;
212         u32 tx_65_127_byte_frames;
213         u32 tx_128_255_byte_frames;
214         u32 tx_256_511_bytes_frames;
215         u32 tx_512_1023_byte_frames;
216         u32 tx_1024_1518_byte_frames;
217         u32 tx_greater_1518_byte_frames;
218         u32 eee_tx_lpi_transitions;
219         u32 eee_tx_lpi_time;
220 };
221
222 struct lan78xx_statstage64 {
223         u64 rx_fcs_errors;
224         u64 rx_alignment_errors;
225         u64 rx_fragment_errors;
226         u64 rx_jabber_errors;
227         u64 rx_undersize_frame_errors;
228         u64 rx_oversize_frame_errors;
229         u64 rx_dropped_frames;
230         u64 rx_unicast_byte_count;
231         u64 rx_broadcast_byte_count;
232         u64 rx_multicast_byte_count;
233         u64 rx_unicast_frames;
234         u64 rx_broadcast_frames;
235         u64 rx_multicast_frames;
236         u64 rx_pause_frames;
237         u64 rx_64_byte_frames;
238         u64 rx_65_127_byte_frames;
239         u64 rx_128_255_byte_frames;
240         u64 rx_256_511_bytes_frames;
241         u64 rx_512_1023_byte_frames;
242         u64 rx_1024_1518_byte_frames;
243         u64 rx_greater_1518_byte_frames;
244         u64 eee_rx_lpi_transitions;
245         u64 eee_rx_lpi_time;
246         u64 tx_fcs_errors;
247         u64 tx_excess_deferral_errors;
248         u64 tx_carrier_errors;
249         u64 tx_bad_byte_count;
250         u64 tx_single_collisions;
251         u64 tx_multiple_collisions;
252         u64 tx_excessive_collision;
253         u64 tx_late_collisions;
254         u64 tx_unicast_byte_count;
255         u64 tx_broadcast_byte_count;
256         u64 tx_multicast_byte_count;
257         u64 tx_unicast_frames;
258         u64 tx_broadcast_frames;
259         u64 tx_multicast_frames;
260         u64 tx_pause_frames;
261         u64 tx_64_byte_frames;
262         u64 tx_65_127_byte_frames;
263         u64 tx_128_255_byte_frames;
264         u64 tx_256_511_bytes_frames;
265         u64 tx_512_1023_byte_frames;
266         u64 tx_1024_1518_byte_frames;
267         u64 tx_greater_1518_byte_frames;
268         u64 eee_tx_lpi_transitions;
269         u64 eee_tx_lpi_time;
270 };
271
272 static u32 lan78xx_regs[] = {
273         ID_REV,
274         INT_STS,
275         HW_CFG,
276         PMT_CTL,
277         E2P_CMD,
278         E2P_DATA,
279         USB_STATUS,
280         VLAN_TYPE,
281         MAC_CR,
282         MAC_RX,
283         MAC_TX,
284         FLOW,
285         ERR_STS,
286         MII_ACC,
287         MII_DATA,
288         EEE_TX_LPI_REQ_DLY,
289         EEE_TW_TX_SYS,
290         EEE_TX_LPI_REM_DLY,
291         WUCSR
292 };
293
294 #define PHY_REG_SIZE (32 * sizeof(u32))
295
296 struct lan78xx_net;
297
/* Driver-private filtering/offload state; dataport_mutex guards dataport
 * (hash/VLAN table) uploads, rfe_ctl_lock guards rfe_ctl updates.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;	/* shadow of the RFE control register value */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN]; /* VLAN membership bitmap */
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred multicast-list update */
	struct work_struct set_vlan; /* deferred VLAN-table update */
	u32 wol;	/* presumably WAKE_* flag set — verify against ethtool ops */
};
310
311 enum skb_state {
312         illegal = 0,
313         tx_start,
314         tx_done,
315         rx_start,
316         rx_done,
317         rx_cleanup,
318         unlink_start
319 };
320
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB this skb is attached to */
	struct lan78xx_net *dev;
	enum skb_state state;	/* where the skb is in its tx/rx lifecycle */
	size_t length;		/* payload length being transferred */
	int num_of_packet;	/* packets aggregated in this transfer */
};
328
329 struct usb_context {
330         struct usb_ctrlrequest req;
331         struct lan78xx_net *dev;
332 };
333
334 #define EVENT_TX_HALT                   0
335 #define EVENT_RX_HALT                   1
336 #define EVENT_RX_MEMORY                 2
337 #define EVENT_STS_SPLIT                 3
338 #define EVENT_LINK_RESET                4
339 #define EVENT_RX_PAUSED                 5
340 #define EVENT_DEV_WAKING                6
341 #define EVENT_DEV_ASLEEP                7
342 #define EVENT_DEV_OPEN                  8
343 #define EVENT_STAT_UPDATE               9
344
/* Statistics aggregation state: the last raw 32-bit snapshot read from
 * hardware (saved), per-counter wraparound counts and maxima, and the
 * accumulated 64-bit totals (curr_stat). Serialized by access_lock.
 */
struct statstage {
	struct mutex                    access_lock;    /* for stats access */
	struct lan78xx_statstage        saved;
	struct lan78xx_statstage        rollover_count;
	struct lan78xx_statstage        rollover_max;
	struct lan78xx_statstage64      curr_stat;
};
352
353 struct irq_domain_data {
354         struct irq_domain       *irqdomain;
355         unsigned int            phyirq;
356         struct irq_chip         *irqchip;
357         irq_flow_handler_t      irq_handler;
358         u32                     irqenable;
359         struct mutex            irq_lock;               /* for irq bus access */
360 };
361
362 struct lan78xx_net {
363         struct net_device       *net;
364         struct usb_device       *udev;
365         struct usb_interface    *intf;
366         void                    *driver_priv;
367
368         int                     rx_qlen;
369         int                     tx_qlen;
370         struct sk_buff_head     rxq;
371         struct sk_buff_head     txq;
372         struct sk_buff_head     done;
373         struct sk_buff_head     rxq_pause;
374         struct sk_buff_head     txq_pend;
375
376         struct tasklet_struct   bh;
377         struct delayed_work     wq;
378
379         struct usb_host_endpoint *ep_blkin;
380         struct usb_host_endpoint *ep_blkout;
381         struct usb_host_endpoint *ep_intr;
382
383         int                     msg_enable;
384
385         struct urb              *urb_intr;
386         struct usb_anchor       deferred;
387
388         struct mutex            phy_mutex; /* for phy access */
389         unsigned                pipe_in, pipe_out, pipe_intr;
390
391         u32                     hard_mtu;       /* count any extra framing */
392         size_t                  rx_urb_size;    /* size for rx urbs */
393
394         unsigned long           flags;
395
396         wait_queue_head_t       *wait;
397         unsigned char           suspend_count;
398
399         unsigned                maxpacket;
400         struct timer_list       delay;
401         struct timer_list       stat_monitor;
402
403         unsigned long           data[5];
404
405         int                     link_on;
406         u8                      mdix_ctrl;
407
408         u32                     chipid;
409         u32                     chiprev;
410         struct mii_bus          *mdiobus;
411         phy_interface_t         interface;
412
413         int                     fc_autoneg;
414         u8                      fc_request_control;
415
416         int                     delta;
417         struct statstage        stats;
418
419         struct irq_domain_data  domain_data;
420 };
421
422 /* define external phy id */
423 #define PHY_LAN8835                     (0x0007C130)
424 #define PHY_KSZ9031RNX                  (0x00221620)
425
426 /* use ethtool to change the level for any given device */
427 static int msg_level = -1;
428 module_param(msg_level, int, 0);
429 MODULE_PARM_DESC(msg_level, "Override default message level");
430
431 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
432 {
433         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
434         int ret;
435
436         if (!buf)
437                 return -ENOMEM;
438
439         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
440                               USB_VENDOR_REQUEST_READ_REGISTER,
441                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
442                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
443         if (likely(ret >= 0)) {
444                 le32_to_cpus(buf);
445                 *data = *buf;
446         } else {
447                 netdev_warn(dev->net,
448                             "Failed to read register index 0x%08x. ret = %d",
449                             index, ret);
450         }
451
452         kfree(buf);
453
454         return ret;
455 }
456
457 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
458 {
459         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
460         int ret;
461
462         if (!buf)
463                 return -ENOMEM;
464
465         *buf = data;
466         cpu_to_le32s(buf);
467
468         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
469                               USB_VENDOR_REQUEST_WRITE_REGISTER,
470                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
471                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
472         if (unlikely(ret < 0)) {
473                 netdev_warn(dev->net,
474                             "Failed to write register index 0x%08x. ret = %d",
475                             index, ret);
476         }
477
478         kfree(buf);
479
480         return ret;
481 }
482
483 static int lan78xx_read_stats(struct lan78xx_net *dev,
484                               struct lan78xx_statstage *data)
485 {
486         int ret = 0;
487         int i;
488         struct lan78xx_statstage *stats;
489         u32 *src;
490         u32 *dst;
491
492         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
493         if (!stats)
494                 return -ENOMEM;
495
496         ret = usb_control_msg(dev->udev,
497                               usb_rcvctrlpipe(dev->udev, 0),
498                               USB_VENDOR_REQUEST_GET_STATS,
499                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
500                               0,
501                               0,
502                               (void *)stats,
503                               sizeof(*stats),
504                               USB_CTRL_SET_TIMEOUT);
505         if (likely(ret >= 0)) {
506                 src = (u32 *)stats;
507                 dst = (u32 *)data;
508                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
509                         le32_to_cpus(&src[i]);
510                         dst[i] = src[i];
511                 }
512         } else {
513                 netdev_warn(dev->net,
514                             "Failed to read stat ret = 0x%x", ret);
515         }
516
517         kfree(stats);
518
519         return ret;
520 }
521
/* Increment the rollover count for @member when the freshly read hardware
 * value is below the last saved snapshot, i.e. the 32-bit hardware counter
 * wrapped since the previous poll. NOTE: macro arguments are evaluated
 * more than once — pass only side-effect-free expressions.
 */
#define check_counter_rollover(struct1, dev_stats, member) {    \
	if (struct1->member < dev_stats.saved.member)           \
		dev_stats.rollover_count.member++;              \
	}
526
/* Compare a fresh hardware statistics snapshot against the previously
 * saved one, bump the per-counter rollover counts for any counter that
 * wrapped, then store @stats as the new saved snapshot. Caller holds
 * dev->stats.access_lock (see lan78xx_update_stats()).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* the fresh snapshot becomes the baseline for the next poll */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
580
581 static void lan78xx_update_stats(struct lan78xx_net *dev)
582 {
583         u32 *p, *count, *max;
584         u64 *data;
585         int i;
586         struct lan78xx_statstage lan78xx_stats;
587
588         if (usb_autopm_get_interface(dev->intf) < 0)
589                 return;
590
591         p = (u32 *)&lan78xx_stats;
592         count = (u32 *)&dev->stats.rollover_count;
593         max = (u32 *)&dev->stats.rollover_max;
594         data = (u64 *)&dev->stats.curr_stat;
595
596         mutex_lock(&dev->stats.access_lock);
597
598         if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
599                 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
600
601         for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
602                 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
603
604         mutex_unlock(&dev->stats.access_lock);
605
606         usb_autopm_put_interface(dev->intf);
607 }
608
609 /* Loop until the read is completed with timeout called with phy_mutex held */
610 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
611 {
612         unsigned long start_time = jiffies;
613         u32 val;
614         int ret;
615
616         do {
617                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
618                 if (unlikely(ret < 0))
619                         return -EIO;
620
621                 if (!(val & MII_ACC_MII_BUSY_))
622                         return 0;
623         } while (!time_after(jiffies, start_time + HZ));
624
625         return -EIO;
626 }
627
628 static inline u32 mii_access(int id, int index, int read)
629 {
630         u32 ret;
631
632         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
633         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
634         if (read)
635                 ret |= MII_ACC_MII_READ_;
636         else
637                 ret |= MII_ACC_MII_WRITE_;
638         ret |= MII_ACC_MII_BUSY_;
639
640         return ret;
641 }
642
643 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
644 {
645         unsigned long start_time = jiffies;
646         u32 val;
647         int ret;
648
649         do {
650                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
651                 if (unlikely(ret < 0))
652                         return -EIO;
653
654                 if (!(val & E2P_CMD_EPC_BUSY_) ||
655                     (val & E2P_CMD_EPC_TIMEOUT_))
656                         break;
657                 usleep_range(40, 100);
658         } while (!time_after(jiffies, start_time + HZ));
659
660         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
661                 netdev_warn(dev->net, "EEPROM read operation timeout");
662                 return -EIO;
663         }
664
665         return 0;
666 }
667
668 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
669 {
670         unsigned long start_time = jiffies;
671         u32 val;
672         int ret;
673
674         do {
675                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
676                 if (unlikely(ret < 0))
677                         return -EIO;
678
679                 if (!(val & E2P_CMD_EPC_BUSY_))
680                         return 0;
681
682                 usleep_range(40, 100);
683         } while (!time_after(jiffies, start_time + HZ));
684
685         netdev_warn(dev->net, "EEPROM is busy");
686         return -EIO;
687 }
688
689 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
690                                    u32 length, u8 *data)
691 {
692         u32 val;
693         u32 saved;
694         int i, ret;
695         int retval;
696
697         /* depends on chip, some EEPROM pins are muxed with LED function.
698          * disable & restore LED function to access EEPROM.
699          */
700         ret = lan78xx_read_reg(dev, HW_CFG, &val);
701         saved = val;
702         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
703                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
704                 ret = lan78xx_write_reg(dev, HW_CFG, val);
705         }
706
707         retval = lan78xx_eeprom_confirm_not_busy(dev);
708         if (retval)
709                 return retval;
710
711         for (i = 0; i < length; i++) {
712                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
713                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
714                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
715                 if (unlikely(ret < 0)) {
716                         retval = -EIO;
717                         goto exit;
718                 }
719
720                 retval = lan78xx_wait_eeprom(dev);
721                 if (retval < 0)
722                         goto exit;
723
724                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
725                 if (unlikely(ret < 0)) {
726                         retval = -EIO;
727                         goto exit;
728                 }
729
730                 data[i] = val & 0xFF;
731                 offset++;
732         }
733
734         retval = 0;
735 exit:
736         if (dev->chipid == ID_REV_CHIP_ID_7800_)
737                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
738
739         return retval;
740 }
741
742 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
743                                u32 length, u8 *data)
744 {
745         u8 sig;
746         int ret;
747
748         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
749         if ((ret == 0) && (sig == EEPROM_INDICATOR))
750                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
751         else
752                 ret = -EINVAL;
753
754         return ret;
755 }
756
/* Write @length bytes from @data to the EEPROM starting at @offset.
 *
 * Sequence: optionally park the LED mux (LAN7800), wait for the
 * controller to go idle, issue a write/erase-enable (EWEN) command,
 * then for each byte load E2P_DATA and issue a WRITE command, waiting
 * for completion after every step. The saved HW_CFG value is restored
 * on all exit paths. Returns 0 on success, negative error on failure.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;	/* HW_CFG value restored at exit */
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* each byte write must complete before the next is issued */
		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the original LED/EEPROM pin mux configuration */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
823
/* Read @length bytes from the on-chip OTP memory starting at @offset.
 *
 * Powers the OTP block up if OTP_PWR_DN indicates it is down, then for
 * each byte programs the split address registers (high bits in
 * OTP_ADDR1, low bits in OTP_ADDR2), triggers a READ via
 * OTP_FUNC_CMD/OTP_CMD_GO, polls OTP_STATUS until not busy, and reads
 * the byte from OTP_RD_DATA. Returns 0 on success, -EIO on poll timeout.
 *
 * NOTE(review): the return codes of the intermediate register
 * reads/writes ("ret") are not checked; a failed USB transfer is only
 * caught indirectly by the 1 s status-poll timeouts — confirm intended.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* byte address is split across two registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		/* select READ and kick off the command */
		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
877
/* Program @length bytes of raw OTP memory starting at @offset from @data.
 *
 * Mirrors lan78xx_read_raw_otp(): wake the OTP block if powered down, then
 * select BYTE program mode and, per byte, load the split address and the
 * data byte, issue a program-and-verify (OTP_TST_CMD_PRGVRFY_) followed by
 * OTP_CMD_GO, and poll OTP_STATUS until idle.
 *
 * Returns 0 on success or -EIO on a poll timeout (1 HZ).  OTP bits can
 * only be programmed once; callers are expected to pass validated images.
 * NOTE(review): as in the read path, individual register-access return
 * codes are not checked.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		/* wait for the program/verify command to complete */
		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
930
931 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
932                             u32 length, u8 *data)
933 {
934         u8 sig;
935         int ret;
936
937         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
938
939         if (ret == 0) {
940                 if (sig == OTP_INDICATOR_2)
941                         offset += 0x100;
942                 else if (sig != OTP_INDICATOR_1)
943                         ret = -EINVAL;
944                 if (!ret)
945                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
946         }
947
948         return ret;
949 }
950
951 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
952 {
953         int i, ret;
954
955         for (i = 0; i < 100; i++) {
956                 u32 dp_sel;
957
958                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
959                 if (unlikely(ret < 0))
960                         return -EIO;
961
962                 if (dp_sel & DP_SEL_DPRDY_)
963                         return 0;
964
965                 usleep_range(40, 100);
966         }
967
968         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
969
970         return -EIO;
971 }
972
/* Write @length words from @buf into internal RAM selected by @ram_select
 * (e.g. the VLAN/DA hash table) starting at dataport address @addr.
 *
 * Serialized by pdata->dataport_mutex; each word write is DP_ADDR +
 * DP_DATA + DP_CMD(WRITE) followed by a not-busy poll.
 * NOTE(review): if the autopm reference cannot be taken, the function
 * silently returns 0 (success) without writing anything — confirm callers
 * tolerate this; returning an error here would change caller behavior.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
			return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select the target RAM, preserving the other DP_SEL bits */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1013
1014 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1015                                     int index, u8 addr[ETH_ALEN])
1016 {
1017         u32 temp;
1018
1019         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1020                 temp = addr[3];
1021                 temp = addr[2] | (temp << 8);
1022                 temp = addr[1] | (temp << 8);
1023                 temp = addr[0] | (temp << 8);
1024                 pdata->pfilter_table[index][1] = temp;
1025                 temp = addr[5];
1026                 temp = addr[4] | (temp << 8);
1027                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1028                 pdata->pfilter_table[index][0] = temp;
1029         }
1030 }
1031
1032 /* returns hash bit number for given MAC address */
1033 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1034 {
1035         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1036 }
1037
/* Work handler: push the shadow multicast state built by
 * lan78xx_set_multicast() to the hardware.  Runs in process context so the
 * (sleeping) USB register accesses are legal, unlike the ndo callback.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	/* upload the 512-bit multicast hash table via the dataport */
	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	/* slot 0 is the unicast MAC; program the remaining perfect filters.
	 * MAF_HI is cleared first so the slot is invalid while MAF_LO is
	 * being updated, then rewritten with the (possibly VALID) value.
	 */
	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	/* finally enable the new receive-filter configuration */
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
1062
/* ndo_set_rx_mode callback.  May be called in atomic context, so it only
 * rebuilds the shadow filter state (rfe_ctl, hash table, perfect filters)
 * under rfe_ctl_lock and defers the actual USB register writes to
 * lan78xx_deferred_multicast_write().
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start from a clean slate: no unicast/multicast passthrough,
	 * no perfect filter, no hash filter
	 */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
			pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
			pdata->pfilter_table[i][0] =
			pdata->pfilter_table[i][1] = 0;
	}

	/* broadcast is always accepted */
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		/* first 32 multicast addresses go into perfect-filter slots
		 * 1..32; any overflow falls back to the 512-bit hash filter
		 */
		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
1125
/* Program MAC flow control after a link change.
 *
 * @duplex:  resolved duplex (currently unused here)
 * @lcladv:  local advertisement (MII_ADVERTISE)
 * @rmtadv:  link partner advertisement (MII_LPA)
 *
 * When fc_autoneg is set the pause capabilities are resolved from the
 * advertisements; otherwise the user-requested fc_request_control is used.
 * Always returns 0.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	/* 0xFFFF is the maximum pause time sent in TX pause frames */
	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* FIFO on/off thresholds; values differ per USB link speed.
	 * NOTE(review): 0x817/0x211 look like vendor-tuned constants —
	 * confirm against the LAN78xx datasheet FCT_FLOW description.
	 */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1160
/* Handle a PHY interrupt: re-read link state and reconfigure the MAC.
 *
 * On link loss the MAC is reset and the statistics timer stopped.  On link
 * up the USB3 U1/U2 low-power states are tuned for the negotiated speed,
 * flow control is reprogrammed from the (re)negotiated advertisements, and
 * the statistics timer and RX/TX tasklet are kicked.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		/* link went down */
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		/* link came up */
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		/* restart the periodic statistics update */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return ret;
}
1238
1239 /* some work can't be done in tasklets, so we use keventd
1240  *
1241  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1242  * but tasklet_schedule() doesn't.      hope the failure is rare.
1243  */
1244 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1245 {
1246         set_bit(work, &dev->flags);
1247         if (!schedule_delayed_work(&dev->wq, 0))
1248                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1249 }
1250
1251 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1252 {
1253         u32 intdata;
1254
1255         if (urb->actual_length != 4) {
1256                 netdev_warn(dev->net,
1257                             "unexpected urb length %d", urb->actual_length);
1258                 return;
1259         }
1260
1261         memcpy(&intdata, urb->transfer_buffer, 4);
1262         le32_to_cpus(&intdata);
1263
1264         if (intdata & INT_ENP_PHY_INT) {
1265                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1266                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1267
1268                 if (dev->domain_data.phyirq > 0) {
1269                         local_irq_disable();
1270                         generic_handle_irq(dev->domain_data.phyirq);
1271                         local_irq_enable();
1272                 }
1273         } else
1274                 netdev_warn(dev->net,
1275                             "unexpected interrupt: 0x%08x\n", intdata);
1276 }
1277
/* ethtool .get_eeprom_len: the full EEPROM address space is exposed. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1282
1283 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1284                                       struct ethtool_eeprom *ee, u8 *data)
1285 {
1286         struct lan78xx_net *dev = netdev_priv(netdev);
1287         int ret;
1288
1289         ret = usb_autopm_get_interface(dev->intf);
1290         if (ret)
1291                 return ret;
1292
1293         ee->magic = LAN78XX_EEPROM_MAGIC;
1294
1295         ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1296
1297         usb_autopm_put_interface(dev->intf);
1298
1299         return ret;
1300 }
1301
1302 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1303                                       struct ethtool_eeprom *ee, u8 *data)
1304 {
1305         struct lan78xx_net *dev = netdev_priv(netdev);
1306         int ret;
1307
1308         ret = usb_autopm_get_interface(dev->intf);
1309         if (ret)
1310                 return ret;
1311
1312         /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1313          * to load data from EEPROM
1314          */
1315         if (ee->magic == LAN78XX_EEPROM_MAGIC)
1316                 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1317         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1318                  (ee->offset == 0) &&
1319                  (ee->len == 512) &&
1320                  (data[0] == OTP_INDICATOR_1))
1321                 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1322
1323         usb_autopm_put_interface(dev->intf);
1324
1325         return ret;
1326 }
1327
/* ethtool .get_strings: copy the statistics name table for ETH_SS_STATS. */
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}
1334
1335 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1336 {
1337         if (sset == ETH_SS_STATS)
1338                 return ARRAY_SIZE(lan78xx_gstrings);
1339         else
1340                 return -EOPNOTSUPP;
1341 }
1342
/* ethtool .get_ethtool_stats: refresh hardware counters then copy the
 * current snapshot out under the stats lock.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1354
1355 static void lan78xx_get_wol(struct net_device *netdev,
1356                             struct ethtool_wolinfo *wol)
1357 {
1358         struct lan78xx_net *dev = netdev_priv(netdev);
1359         int ret;
1360         u32 buf;
1361         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1362
1363         if (usb_autopm_get_interface(dev->intf) < 0)
1364                         return;
1365
1366         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1367         if (unlikely(ret < 0)) {
1368                 wol->supported = 0;
1369                 wol->wolopts = 0;
1370         } else {
1371                 if (buf & USB_CFG_RMT_WKP_) {
1372                         wol->supported = WAKE_ALL;
1373                         wol->wolopts = pdata->wol;
1374                 } else {
1375                         wol->supported = 0;
1376                         wol->wolopts = 0;
1377                 }
1378         }
1379
1380         usb_autopm_put_interface(dev->intf);
1381 }
1382
1383 static int lan78xx_set_wol(struct net_device *netdev,
1384                            struct ethtool_wolinfo *wol)
1385 {
1386         struct lan78xx_net *dev = netdev_priv(netdev);
1387         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1388         int ret;
1389
1390         ret = usb_autopm_get_interface(dev->intf);
1391         if (ret < 0)
1392                 return ret;
1393
1394         if (wol->wolopts & ~WAKE_ALL)
1395                 return -EINVAL;
1396
1397         pdata->wol = wol->wolopts;
1398
1399         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1400
1401         phy_ethtool_set_wol(netdev->phydev, wol);
1402
1403         usb_autopm_put_interface(dev->intf);
1404
1405         return ret;
1406 }
1407
/* ethtool .get_eee: combine the PHY's EEE view with the MAC enable bit.
 * EEE is reported active only when both sides advertise it and MAC_CR has
 * EEE enabled; the LPI request delay register doubles as tx_lpi_timer
 * (both are in microseconds).
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* active only if we and the partner advertise a common mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1445
/* ethtool .set_eee: toggle the MAC EEE enable bit and, when enabling,
 * push the advertisement to the PHY and program the LPI request delay.
 * NOTE(review): always returns 0; register write failures are ignored.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		/* tx_lpi_timer is in microseconds, same as the register */
		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1475
/* ethtool .get_link: refresh and return the PHY link state.
 * NOTE(review): the phy_read_status() return code is ignored, so a failed
 * read reports the stale cached link state — confirm this is acceptable.
 */
static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}
1482
/* ethtool .get_drvinfo: report driver name and USB bus path.
 * NOTE(review): strncpy does not guarantee NUL termination when the source
 * fills the buffer; DRIVER_NAME is short so this is safe here, but strscpy
 * would make the intent explicit.
 */
static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
1491
/* ethtool .get_msglevel: return the netif message-enable bitmask. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1498
/* ethtool .set_msglevel: set the netif message-enable bitmask. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1505
1506 static int lan78xx_get_link_ksettings(struct net_device *net,
1507                                       struct ethtool_link_ksettings *cmd)
1508 {
1509         struct lan78xx_net *dev = netdev_priv(net);
1510         struct phy_device *phydev = net->phydev;
1511         int ret;
1512
1513         ret = usb_autopm_get_interface(dev->intf);
1514         if (ret < 0)
1515                 return ret;
1516
1517         phy_ethtool_ksettings_get(phydev, cmd);
1518
1519         usb_autopm_put_interface(dev->intf);
1520
1521         return ret;
1522 }
1523
/* ethtool .set_link_ksettings: apply speed/duplex/autoneg via the PHY.
 * When autoneg is off, the link is bounced by briefly setting BMCR loopback
 * so the partner renegotiates the forced parameters.
 * NOTE(review): presumably the loopback pulse is a deliberate link-drop
 * trick for this PHY — confirm against the LAN78xx/PHY errata.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1551
1552 static void lan78xx_get_pause(struct net_device *net,
1553                               struct ethtool_pauseparam *pause)
1554 {
1555         struct lan78xx_net *dev = netdev_priv(net);
1556         struct phy_device *phydev = net->phydev;
1557         struct ethtool_link_ksettings ecmd;
1558
1559         phy_ethtool_ksettings_get(phydev, &ecmd);
1560
1561         pause->autoneg = dev->fc_autoneg;
1562
1563         if (dev->fc_request_control & FLOW_CTRL_TX)
1564                 pause->tx_pause = 1;
1565
1566         if (dev->fc_request_control & FLOW_CTRL_RX)
1567                 pause->rx_pause = 1;
1568 }
1569
/* ethtool .set_pauseparam: record the requested pause configuration and,
 * when autonegotiation is active, fold the matching Pause/Asym_Pause bits
 * into the PHY advertisement and renegotiate.  Requesting pause autoneg
 * while link autoneg is off is rejected with -EINVAL.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		/* rebuild the pause bits of the advertisement from scratch */
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1614
1615 static int lan78xx_get_regs_len(struct net_device *netdev)
1616 {
1617         if (!netdev->phydev)
1618                 return (sizeof(lan78xx_regs));
1619         else
1620                 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1621 }
1622
/* ethtool .get_regs: dump the MAC registers listed in lan78xx_regs,
 * followed (when a PHY is attached) by PHY registers 0..31.  The layout
 * matches lan78xx_get_regs_len().
 */
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}
1642
/* ethtool operations table registered for every lan78xx netdev. */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link       = lan78xx_get_link,
	.nway_reset     = phy_ethtool_nway_reset,
	.get_drvinfo    = lan78xx_get_drvinfo,
	.get_msglevel   = lan78xx_get_msglevel,
	.set_msglevel   = lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom     = lan78xx_ethtool_get_eeprom,
	.set_eeprom     = lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings    = lan78xx_get_strings,
	.get_wol        = lan78xx_get_wol,
	.set_wol        = lan78xx_set_wol,
	.get_eee        = lan78xx_get_eee,
	.set_eee        = lan78xx_set_eee,
	.get_pauseparam = lan78xx_get_pause,
	.set_pauseparam = lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len   = lan78xx_get_regs_len,
	.get_regs       = lan78xx_get_regs,
};
1666
1667 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1668 {
1669         if (!netif_running(netdev))
1670                 return -EINVAL;
1671
1672         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1673 }
1674
/* Determine and program the device MAC address.
 *
 * Precedence: an address already present in RX_ADDRL/RX_ADDRH (e.g. set
 * by earlier firmware/boot code), then platform/Device Tree data, then
 * EEPROM or OTP contents, and finally a randomly generated address.
 * The chosen address is left in the RX address registers, installed in
 * perfect-filter slot 0 (MAF_LO/MAF_HI(0)) and copied to net->dev_addr.
 *
 * NOTE(review): the return codes of the register accessors are assigned
 * to 'ret' but never checked, so read/write failures are silent here.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* registers hold the address little-endian: ADDRL carries bytes
	 * 0..3, ADDRH bytes 4..5
	 */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		/* repack and write the new address back to the MAC */
		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	/* install the address in unicast perfect-filter slot 0 */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}
1724
1725 /* MDIO read and write wrappers for phylib */
/* mii_bus->read: read one 16-bit PHY register through the MAC's
 * MII_ACC/MII_DATA window.
 *
 * Returns the register value (0..0xFFFF) on success or a negative errno
 * (autopm failure or MII-busy timeout).  Serialized against concurrent
 * MDIO users by dev->phy_mutex; the USB device is resumed via autopm
 * for the duration of the access.
 *
 * NOTE(review): the return value of the final lan78xx_read_reg() is
 * overwritten by the data-extraction assignment, so a failed MII_DATA
 * read is not reported — confirm whether that is intentional.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	/* wait for the read cycle to complete */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1761
1762 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1763                                  u16 regval)
1764 {
1765         struct lan78xx_net *dev = bus->priv;
1766         u32 val, addr;
1767         int ret;
1768
1769         ret = usb_autopm_get_interface(dev->intf);
1770         if (ret < 0)
1771                 return ret;
1772
1773         mutex_lock(&dev->phy_mutex);
1774
1775         /* confirm MII not busy */
1776         ret = lan78xx_phy_wait_not_busy(dev);
1777         if (ret < 0)
1778                 goto done;
1779
1780         val = (u32)regval;
1781         ret = lan78xx_write_reg(dev, MII_DATA, val);
1782
1783         /* set the address, index & direction (write to PHY) */
1784         addr = mii_access(phy_id, idx, MII_WRITE);
1785         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1786
1787         ret = lan78xx_phy_wait_not_busy(dev);
1788         if (ret < 0)
1789                 goto done;
1790
1791 done:
1792         mutex_unlock(&dev->phy_mutex);
1793         usb_autopm_put_interface(dev->intf);
1794         return 0;
1795 }
1796
/* Allocate and register the driver's MDIO bus.
 *
 * The phy_mask restricts probing: the LAN7800/7850 internal PHY sits at
 * address 1, while the LAN7801 scans external addresses 0..7.  A "mdio"
 * Device Tree child node, when present, is handed to
 * of_mdiobus_register() so DT-described PHYs are bound correctly.
 *
 * Returns 0 on success or a negative errno; the bus is freed on the
 * registration error path.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	/* unique bus id derived from the USB topology */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
1842
/* Tear down the MDIO bus created by lan78xx_mdio_init():
 * unregister first, then free the bus structure.
 */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1848
/* phylib link-change callback (registered via phy_connect_direct()).
 *
 * Applies a hardware workaround for forced-100 mode only; all other
 * link changes need no action here.
 *
 * NOTE(review): 'ret' from the phy_write() calls is assigned but never
 * checked, so a failed MDIO access during the workaround is silent.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1880
/* irq_domain .map callback: bind a freshly allocated virq to this
 * driver's irqchip and handler.  Chip data is set before the handler so
 * the handler never sees a NULL chip-data pointer.
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
1892
/* irq_domain .unmap callback: undo irq_map() — detach handler/chip
 * first, then clear the chip data.
 */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1898
/* Minimal domain ops: linear map/unmap only, no xlate (hwirq numbers
 * correspond directly to INT_EP_CTL/INT_STS bit positions).
 */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1903
/* irqchip .irq_mask: clear the interrupt's bit in the cached enable
 * mask; the hardware write is deferred to irq_bus_sync_unlock().
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
1910
/* irqchip .irq_unmask: set the interrupt's bit in the cached enable
 * mask; the hardware write is deferred to irq_bus_sync_unlock().
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
1917
/* irqchip .irq_bus_lock: taken before mask/unmask so the cached enable
 * mask and the deferred register write are updated atomically.
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
1924
/* irqchip .irq_bus_sync_unlock: flush the cached enable mask to the
 * INT_EP_CTL register (USB access must sleep, hence the slow-bus
 * callbacks) and release the lock taken in lan78xx_irq_bus_lock().
 * The write is skipped when the register already matches the cache.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are only two callbacks executed in non-atomic contex.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
1942
/* Slow-bus irqchip for interrupts delivered over the USB interrupt
 * endpoint: mask/unmask only touch a cached mask, and the bus_lock
 * pair performs the actual (sleeping) register update.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1950
/* Create an irq_domain covering the device's interrupt-endpoint sources
 * and map the PHY interrupt (INT_EP_PHY) so phylib can use a real irq
 * instead of polling.
 *
 * Seeds the cached enable mask from the current INT_EP_CTL contents.
 * On failure the domain is destroyed and -EINVAL returned; phyirq is
 * left 0, which lan78xx_phy_init() treats as "poll".
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* start from whatever enables the hardware currently has */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
1989
/* Undo lan78xx_setup_irq_domain(): dispose of the PHY irq mapping,
 * remove the domain, and reset the bookkeeping so a later call is a
 * safe no-op.
 */
static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}
2001
/* phylib fixup for an external LAN8835 PHY on LAN7801 boards: route the
 * shared pin to IRQ_N mode, enable the MAC-side RGMII TXC delay, and
 * record the RGMII-TXID interface mode.
 *
 * Returns 1; phylib only treats negative fixup returns as errors, so
 * this reads as success — presumably intentional, confirm against
 * phy_scan_fixups().
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
2025
/* phylib fixup for an external Micrel KSZ9031RNX PHY on LAN7801 boards:
 * program the RGMII pad-skew MMD registers and record the RGMII-RXID
 * interface mode.  Returns 1 (non-negative == success for fixups).
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2042
/* LAN7801-specific PHY discovery.
 *
 * If no PHY answers on the MDIO bus, register a fixed 1G full-duplex
 * pseudo-PHY and configure the MAC side (RGMII delays, 125/25 MHz
 * reference clocks) directly.  Otherwise register fixups for the two
 * supported external PHYs and let phylib apply them at attach time.
 *
 * Returns the phy_device to attach, or NULL on failure.
 *
 * NOTE(review): if the LAN8835 fixup registration fails, the already
 * registered KSZ9031RNX fixup is not unregistered on this error path —
 * confirm whether that leak matters for this kernel version.
 */
static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
		if (IS_ERR(phydev)) {
			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
			return NULL;
		}
		netdev_dbg(dev->net, "Registered FIXED PHY\n");
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* MAC-side RGMII delay and clock setup for the fixed link */
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
					MAC_RGMII_ID_TXC_DELAY_EN_);
		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		buf |= HW_CFG_CLK125_EN_;
		buf |= HW_CFG_REFCLK25_EN_;
		ret = lan78xx_write_reg(dev, HW_CFG, buf);
	} else {
		if (!phydev->drv) {
			netdev_err(dev->net, "no PHY driver found\n");
			return NULL;
		}
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* external PHY fixup for KSZ9031RNX */
		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
						 ksz9031rnx_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
			return NULL;
		}
		/* external PHY fixup for LAN8835 */
		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
						 lan8835_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
			return NULL;
		}
		/* add more external PHY fixup here if needed */

		phydev->is_internal = false;
	}
	return phydev;
}
2097
/* Find/attach the PHY for this chip and configure link parameters.
 *
 * Per-chip discovery (internal PHY for 7800/7850, lan7801_phy_init()
 * for 7801), then: wire up the interrupt-endpoint PHY irq (or polling),
 * enable auto-MDIX, connect via phy_connect_direct(), strip 1000T-Half
 * (unsupported by the MAC), advertise symmetric pause, apply optional
 * "microchip,led-modes" DT LED enables, and kick off autonegotiation.
 *
 * Returns 0 on success, -EIO on any discovery/attach failure; the 7801
 * attach-failure path also tears down the fixed PHY or fixups that
 * lan7801_phy_init() created.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = 0;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			/* undo whatever lan7801_phy_init() registered */
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		/* one array element per LED described in the DT */
		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			/* enable LED n iff the property has > n entries */
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2200
/* Program the MAC's maximum RX frame length.
 *
 * The receiver must be disabled while MAC_RX_MAX_SIZE is changed, so
 * RXEN is cleared first (if set) and restored afterwards.  @size is the
 * payload length; 4 bytes are added for the FCS.
 *
 * NOTE(review): always returns 0 — the 'ret' values from the register
 * accessors are discarded, so callers cannot see I/O failures.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}
2229
/* Asynchronously unlink every URB queued on @q.
 *
 * Returns the number of URBs successfully handed to usb_unlink_urb().
 * Each entry is marked unlink_start so the rescan after re-acquiring
 * the lock skips entries already being unlinked; the queue lock must be
 * dropped around usb_unlink_urb() because completion handlers (which
 * may run immediately) take the same lock.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the next entry not yet marked for unlinking */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2274
/* ndo_change_mtu: update the MAC's max frame length and the driver's
 * derived sizes for the new MTU.
 *
 * Rejects (-EDOM) an MTU whose link-layer frame is an exact multiple of
 * the USB max packet size, since that would require an extra zero-length
 * packet per frame.  If the RX URB size was tracking hard_mtu and must
 * grow, in-flight RX URBs are unlinked and the bottom half rescheduled
 * so they are resubmitted at the larger size.
 *
 * NOTE(review): the return value of lan78xx_set_rx_max_frame_length()
 * is ignored (it currently always returns 0 anyway).
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
2304
/* ndo_set_mac_address: validate and program a new station address.
 *
 * Refused while the interface is running (-EBUSY) and for invalid
 * addresses (-EADDRNOTAVAIL).  The address is written little-endian to
 * RX_ADDRL/RX_ADDRH and mirrored into perfect-filter slot 0 so unicast
 * filtering matches the new address.
 *
 * NOTE(review): register-write return codes are discarded; this always
 * reports success.
 */
static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;
	int ret;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	/* Added to support MAC address changes */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	return 0;
}
2336
/* ndo_set_features: translate netdev feature flags into RFE_CTL bits
 * (RX checksum offload, VLAN tag stripping, VLAN filtering).
 *
 * The cached rfe_ctl is updated under the spinlock; the register write
 * itself happens after the lock is dropped (USB register access must
 * not run in atomic context).
 */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;

	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
2372
/* Workqueue handler: flush the software VLAN filter table to the
 * device's VLAN dataport.  Scheduled from the (atomic) vid add/kill
 * callbacks because the dataport write sleeps.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
2382
2383 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2384                                    __be16 proto, u16 vid)
2385 {
2386         struct lan78xx_net *dev = netdev_priv(netdev);
2387         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2388         u16 vid_bit_index;
2389         u16 vid_dword_index;
2390
2391         vid_dword_index = (vid >> 5) & 0x7F;
2392         vid_bit_index = vid & 0x1F;
2393
2394         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2395
2396         /* defer register writes to a sleepable context */
2397         schedule_work(&pdata->set_vlan);
2398
2399         return 0;
2400 }
2401
2402 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2403                                     __be16 proto, u16 vid)
2404 {
2405         struct lan78xx_net *dev = netdev_priv(netdev);
2406         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2407         u16 vid_bit_index;
2408         u16 vid_dword_index;
2409
2410         vid_dword_index = (vid >> 5) & 0x7F;
2411         vid_bit_index = vid & 0x1F;
2412
2413         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2414
2415         /* defer register writes to a sleepable context */
2416         schedule_work(&pdata->set_vlan);
2417
2418         return 0;
2419 }
2420
/* Initialize the USB LTM (Latency Tolerance Messaging) registers.
 *
 * If LTM is enabled in USB_CFG1 and the EEPROM/OTP header at offset
 * 0x3F describes a 24-byte LTM block, load the six 32-bit register
 * values from there; otherwise all six registers are written as zero.
 * A failed raw read aborts without touching the registers.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				/* temp[1] is a word offset into the EEPROM */
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2459
2460 static int lan78xx_reset(struct lan78xx_net *dev)
2461 {
2462         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2463         u32 buf;
2464         int ret = 0;
2465         unsigned long timeout;
2466         u8 sig;
2467
2468         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2469         buf |= HW_CFG_LRST_;
2470         ret = lan78xx_write_reg(dev, HW_CFG, buf);
2471
2472         timeout = jiffies + HZ;
2473         do {
2474                 mdelay(1);
2475                 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2476                 if (time_after(jiffies, timeout)) {
2477                         netdev_warn(dev->net,
2478                                     "timeout on completion of LiteReset");
2479                         return -EIO;
2480                 }
2481         } while (buf & HW_CFG_LRST_);
2482
2483         lan78xx_init_mac_address(dev);
2484
2485         /* save DEVID for later usage */
2486         ret = lan78xx_read_reg(dev, ID_REV, &buf);
2487         dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2488         dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2489
2490         /* Respond to the IN token with a NAK */
2491         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2492         buf |= USB_CFG_BIR_;
2493         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2494
2495         /* Init LTM */
2496         lan78xx_init_ltm(dev);
2497
2498         if (dev->udev->speed == USB_SPEED_SUPER) {
2499                 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2500                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2501                 dev->rx_qlen = 4;
2502                 dev->tx_qlen = 4;
2503         } else if (dev->udev->speed == USB_SPEED_HIGH) {
2504                 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2505                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2506                 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2507                 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2508         } else {
2509                 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2510                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2511                 dev->rx_qlen = 4;
2512                 dev->tx_qlen = 4;
2513         }
2514
2515         ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2516         ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2517
2518         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2519         buf |= HW_CFG_MEF_;
2520         ret = lan78xx_write_reg(dev, HW_CFG, buf);
2521
2522         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2523         buf |= USB_CFG_BCE_;
2524         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2525
2526         /* set FIFO sizes */
2527         buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2528         ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2529
2530         buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2531         ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2532
2533         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2534         ret = lan78xx_write_reg(dev, FLOW, 0);
2535         ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2536
2537         /* Don't need rfe_ctl_lock during initialisation */
2538         ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2539         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2540         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2541
2542         /* Enable or disable checksum offload engines */
2543         lan78xx_set_features(dev->net, dev->net->features);
2544
2545         lan78xx_set_multicast(dev->net);
2546
2547         /* reset PHY */
2548         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2549         buf |= PMT_CTL_PHY_RST_;
2550         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2551
2552         timeout = jiffies + HZ;
2553         do {
2554                 mdelay(1);
2555                 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2556                 if (time_after(jiffies, timeout)) {
2557                         netdev_warn(dev->net, "timeout waiting for PHY Reset");
2558                         return -EIO;
2559                 }
2560         } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2561
2562         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2563         /* LAN7801 only has RGMII mode */
2564         if (dev->chipid == ID_REV_CHIP_ID_7801_)
2565                 buf &= ~MAC_CR_GMII_EN_;
2566
2567         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2568                 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2569                 if (!ret && sig != EEPROM_INDICATOR) {
2570                         /* Implies there is no external eeprom. Set mac speed */
2571                         netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2572                         buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2573                 }
2574         }
2575         ret = lan78xx_write_reg(dev, MAC_CR, buf);
2576
2577         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2578         buf |= MAC_TX_TXEN_;
2579         ret = lan78xx_write_reg(dev, MAC_TX, buf);
2580
2581         ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2582         buf |= FCT_TX_CTL_EN_;
2583         ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2584
2585         ret = lan78xx_set_rx_max_frame_length(dev,
2586                                               dev->net->mtu + VLAN_ETH_HLEN);
2587
2588         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2589         buf |= MAC_RX_RXEN_;
2590         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2591
2592         ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2593         buf |= FCT_RX_CTL_EN_;
2594         ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2595
2596         return 0;
2597 }
2598
2599 static void lan78xx_init_stats(struct lan78xx_net *dev)
2600 {
2601         u32 *p;
2602         int i;
2603
2604         /* initialize for stats update
2605          * some counters are 20bits and some are 32bits
2606          */
2607         p = (u32 *)&dev->stats.rollover_max;
2608         for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2609                 p[i] = 0xFFFFF;
2610
2611         dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2612         dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2613         dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2614         dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2615         dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2616         dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2617         dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2618         dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2619         dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2620         dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2621
2622         set_bit(EVENT_STAT_UPDATE, &dev->flags);
2623 }
2624
/* ndo_open: bring the interface up.
 * Takes a runtime-PM reference for the duration of the setup, starts the
 * PHY state machine, submits the interrupt (link-status) URB, and kicks a
 * deferred link reset.  Returns 0 on success or a negative errno.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			/* skip queue start; PM ref is still dropped below */
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* force the link-reset worker to treat the link as down first */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2663
/* Unlink all in-flight RX/TX URBs and wait for their completions to
 * drain through the done list.  dev->wait is published so completion
 * handlers can wake us via the on-stack wait queue.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish.
	 * NOTE(review): with &&, the loop exits as soon as ANY of the three
	 * queues is empty, so it may stop waiting while e.g. txq still holds
	 * URBs.  Looks like it should be || to wait until all are drained —
	 * confirm against usbnet's equivalent before changing.
	 */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2689
/* ndo_stop: bring the interface down.
 * Stops the stats timer and PHY, quiesces the TX queue, drains all URBs,
 * neuters deferred workers via dev->flags = 0, and drops the runtime-PM
 * reference taken in lan78xx_open().  Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* unlink and wait out all in-flight bulk URBs */
	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	/* frames parked while EVENT_RX_PAUSED was set are discarded */
	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2726
/* Collapse a possibly-fragmented skb into one contiguous linear buffer.
 * Returns 0 on success or a negative errno from skb_linearize().
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	int rc = skb_linearize(skb);

	return rc;
}
2731
2732 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2733                                        struct sk_buff *skb, gfp_t flags)
2734 {
2735         u32 tx_cmd_a, tx_cmd_b;
2736
2737         if (skb_cow_head(skb, TX_OVERHEAD)) {
2738                 dev_kfree_skb_any(skb);
2739                 return NULL;
2740         }
2741
2742         if (lan78xx_linearize(skb) < 0)
2743                 return NULL;
2744
2745         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2746
2747         if (skb->ip_summed == CHECKSUM_PARTIAL)
2748                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2749
2750         tx_cmd_b = 0;
2751         if (skb_is_gso(skb)) {
2752                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2753
2754                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2755
2756                 tx_cmd_a |= TX_CMD_A_LSO_;
2757         }
2758
2759         if (skb_vlan_tag_present(skb)) {
2760                 tx_cmd_a |= TX_CMD_A_IVTG_;
2761                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2762         }
2763
2764         skb_push(skb, 4);
2765         cpu_to_le32s(&tx_cmd_b);
2766         memcpy(skb->data, &tx_cmd_b, 4);
2767
2768         skb_push(skb, 4);
2769         cpu_to_le32s(&tx_cmd_a);
2770         memcpy(skb->data, &tx_cmd_a, 4);
2771
2772         return skb;
2773 }
2774
/* Move @skb from @list to the done list and schedule the bottom half to
 * process it.  Returns the skb's previous state so callers can detect a
 * concurrent unlink.
 *
 * Locking: IRQs are disabled once (irqsave on list->lock) and kept
 * disabled across the hand-off to done.lock; the saved flags are restored
 * only when done.lock is released.  Do not reorder these lock calls.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* only the first entry needs to kick the tasklet; it drains all */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2797
/* Bulk-out URB completion handler (interrupt context).
 * Updates TX statistics, reacts to USB-level errors (halt recovery,
 * shutdown, link problems), releases the async PM reference taken at
 * submit time, and hands the skb to the bottom half via defer_bh().
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		/* one URB may carry several aggregated packets */
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* bus-level trouble: stop feeding more packets */
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2836
2837 static void lan78xx_queue_skb(struct sk_buff_head *list,
2838                               struct sk_buff *newsk, enum skb_state state)
2839 {
2840         struct skb_data *entry = (struct skb_data *)newsk->cb;
2841
2842         __skb_queue_tail(list, newsk);
2843         entry->state = state;
2844 }
2845
2846 static netdev_tx_t
2847 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2848 {
2849         struct lan78xx_net *dev = netdev_priv(net);
2850         struct sk_buff *skb2 = NULL;
2851
2852         if (skb) {
2853                 skb_tx_timestamp(skb);
2854                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2855         }
2856
2857         if (skb2) {
2858                 skb_queue_tail(&dev->txq_pend, skb2);
2859
2860                 /* throttle TX patch at slower than SUPER SPEED USB */
2861                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2862                     (skb_queue_len(&dev->txq_pend) > 10))
2863                         netif_stop_queue(net);
2864         } else {
2865                 netif_dbg(dev, tx_err, dev->net,
2866                           "lan78xx_tx_prep return NULL\n");
2867                 dev->net->stats.tx_errors++;
2868                 dev->net->stats.tx_dropped++;
2869         }
2870
2871         tasklet_schedule(&dev->bh);
2872
2873         return NETDEV_TX_OK;
2874 }
2875
/* Scan the interface's altsettings for the bulk-in, bulk-out and
 * (optional) interrupt-in endpoints, then build the bulk pipes.
 * Returns 0 on success, -EINVAL if no altsetting has both bulk endpoints.
 * NOTE(review): dev->ep_intr may legitimately be left NULL here if no
 * interrupt endpoint exists — later users must tolerate that; verify.
 */
static int
lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			/* accept only interrupt-IN and bulk endpoints */
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			/* remember the first match of each kind */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		/* stop at the first altsetting providing both bulk pipes */
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2934
/* Driver bind: discover endpoints, allocate and initialise the private
 * data (stored in dev->data[0]), select default offload features, set up
 * the PHY interrupt domain, reset the chip and bring up MDIO.
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired here are released via the goto cleanup chain.
 */
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = NULL;
	int ret;
	int i;

	ret = lan78xx_get_endpoints(dev, intf);
	if (ret) {
		netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
			    ret);
		return ret;
	}

	/* private data is carried as an unsigned long in dev->data[0] */
	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);

	pdata = (struct lan78xx_priv *)(dev->data[0]);
	if (!pdata) {
		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
		return -ENOMEM;
	}

	pdata->dev = dev;

	spin_lock_init(&pdata->rfe_ctl_lock);
	mutex_init(&pdata->dataport_mutex);

	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);

	/* start with an empty VLAN filter table */
	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
		pdata->vlan_table[i] = 0;

	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);

	/* enable the compile-time default offloads */
	dev->net->features = 0;

	if (DEFAULT_TX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_HW_CSUM;

	if (DEFAULT_RX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_RXCSUM;

	if (DEFAULT_TSO_CSUM_ENABLE)
		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;

	if (DEFAULT_VLAN_RX_OFFLOAD)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (DEFAULT_VLAN_FILTER_ENABLE)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	/* everything enabled by default is also user-toggleable */
	dev->net->hw_features = dev->net->features;

	ret = lan78xx_setup_irq_domain(dev);
	if (ret < 0) {
		netdev_warn(dev->net,
			    "lan78xx_setup_irq_domain() failed : %d", ret);
		goto out1;
	}

	/* account for the 8-byte TX command header on every frame */
	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* Init all registers */
	ret = lan78xx_reset(dev);
	if (ret) {
		netdev_warn(dev->net, "Registers INIT FAILED....");
		goto out2;
	}

	ret = lan78xx_mdio_init(dev);
	if (ret) {
		netdev_warn(dev->net, "MDIO INIT FAILED.....");
		goto out2;
	}

	dev->net->flags |= IFF_MULTICAST;

	pdata->wol = WAKE_MAGIC;

	return ret;

out2:
	lan78xx_remove_irq_domain(dev);

out1:
	netdev_warn(dev->net, "Bind routine FAILED");
	cancel_work_sync(&pdata->set_multicast);
	cancel_work_sync(&pdata->set_vlan);
	kfree(pdata);
	return ret;
}
3026
3027 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3028 {
3029         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3030
3031         lan78xx_remove_irq_domain(dev);
3032
3033         lan78xx_remove_mdio(dev);
3034
3035         if (pdata) {
3036                 cancel_work_sync(&pdata->set_multicast);
3037                 cancel_work_sync(&pdata->set_vlan);
3038                 netif_dbg(dev, ifdown, dev->net, "free pdata");
3039                 kfree(pdata);
3040                 pdata = NULL;
3041                 dev->data[0] = 0;
3042         }
3043 }
3044
3045 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3046                                     struct sk_buff *skb,
3047                                     u32 rx_cmd_a, u32 rx_cmd_b)
3048 {
3049         /* HW Checksum offload appears to be flawed if used when not stripping
3050          * VLAN headers. Drop back to S/W checksums under these conditions.
3051          */
3052         if (!(dev->net->features & NETIF_F_RXCSUM) ||
3053             unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3054             ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3055              !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3056                 skb->ip_summed = CHECKSUM_NONE;
3057         } else {
3058                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3059                 skb->ip_summed = CHECKSUM_COMPLETE;
3060         }
3061 }
3062
3063 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3064                                     struct sk_buff *skb,
3065                                     u32 rx_cmd_a, u32 rx_cmd_b)
3066 {
3067         if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3068             (rx_cmd_a & RX_CMD_A_FVTG_))
3069                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3070                                        (rx_cmd_b & 0xffff));
3071 }
3072
/* Deliver a fully parsed frame to the network stack.
 * If reception is paused, the frame is parked on rxq_pause instead.
 * Stats are bumped before delivery; eth_type_trans() also pulls the
 * Ethernet header, which is why the debug print adds sizeof(ethhdr).
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* clear our skb_data so the stack sees a pristine control block */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* ownership passes to the timestamping core if it defers the skb */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
3099
/* Split one bulk-in buffer into its embedded Ethernet frames.
 * Each frame is preceded by three little-endian command words
 * (rx_cmd_a/b/c) and padded to 4-byte alignment.  Interior frames are
 * delivered via clones; the final frame reuses @skb itself (caller
 * delivers it when skb->len != 0).  Returns 0 on a framing/allocation
 * error, 1 otherwise.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* pull the three RX command words off the front */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* receive-error flag set: log and skip this frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);
				lan78xx_rx_vlan_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				/* NOTE(review): truesize is overwritten with
				 * an estimate rather than the real buffer
				 * size — confirm this accounting is intended.
				 */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* interior frame: clone the skb header and point the
			 * clone at this frame's payload within the buffer
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3174
3175 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3176 {
3177         if (!lan78xx_rx(dev, skb)) {
3178                 dev->net->stats.rx_errors++;
3179                 goto done;
3180         }
3181
3182         if (skb->len) {
3183                 lan78xx_skb_return(dev, skb);
3184                 return;
3185         }
3186
3187         netif_dbg(dev, rx_err, dev->net, "drop\n");
3188         dev->net->stats.rx_errors++;
3189 done:
3190         skb_queue_tail(&dev->done, skb);
3191 }
3192
3193 static void rx_complete(struct urb *urb);
3194
/* Allocate an rx_urb_size skb and submit @urb for a bulk-in transfer.
 * On any failure the skb and urb are freed here.  Returns 0 on success,
 * -ENOMEM, -ENOLINK (device stopped/unreachable) or the submit errno.
 * NOTE(review): @flags is unused — submission always uses GFP_ATOMIC.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock also guards the state checks against a racing stop */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: recover from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* let the bottom half retry refilling later */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		/* submission failed: this function owns both resources */
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3254
/* Bulk-in URB completion handler (interrupt context).
 * Classifies the completion status, hands the skb to the bottom half via
 * defer_bh(), and either resubmits the urb for the next transfer or
 * parks it in entry->urb / frees it.  Setting "urb = NULL" transfers urb
 * ownership back to the skb_data entry for the cleanup path.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt transfer: too short to hold even the RX headers */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		/* keep the RX pipeline full unless we're shutting down */
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3324
/* Bottom-half TX worker: drain dev->txq_pend and submit one bulk-out URB.
 *
 * Non-GSO skbs are coalesced into a single contiguous buffer, each frame
 * padded to a 4-byte boundary, up to MAX_SINGLE_PACKET_SIZE total.  A GSO
 * skb is always sent on its own URB, and only when it sits at the head of
 * the pending queue, so earlier frames are never reordered.
 *
 * On success the skb is queued on dev->txq (state tx_start) and ownership
 * of skb+urb passes to tx_complete(); on any failure both are freed here
 * and net->stats.tx_dropped is bumped.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	/* first pass (under the queue lock): decide how many pending
	 * frames can be coalesced into one transfer
	 */
	spin_lock_irqsave(&tqp->lock, flags);
	skb_queue_walk(tqp, skb) {
		if (skb_is_gso(skb)) {
			if (!skb_queue_is_first(tqp, skb)) {
				/* handle previous packets first */
				break;
			}
			count = 1;
			/* length excludes the TX_OVERHEAD command words
			 * already prepended to the skb
			 */
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* each coalesced frame starts on a 4-byte boundary */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	/* second pass: dequeue the counted frames and pack them in */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	/* bookkeeping for tx_complete(), stored in the skb control buffer */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		/* endpoint stalled: let the kevent worker clear the halt */
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		/* skb is still valid here: this branch only runs when the
		 * submit succeeded and the URB now owns it until completion
		 */
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3452
3453 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3454 {
3455         struct urb *urb;
3456         int i;
3457
3458         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3459                 for (i = 0; i < 10; i++) {
3460                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3461                                 break;
3462                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3463                         if (urb)
3464                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3465                                         return;
3466                 }
3467
3468                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3469                         tasklet_schedule(&dev->bh);
3470         }
3471         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3472                 netif_wake_queue(dev->net);
3473 }
3474
3475 static void lan78xx_bh(unsigned long param)
3476 {
3477         struct lan78xx_net *dev = (struct lan78xx_net *)param;
3478         struct sk_buff *skb;
3479         struct skb_data *entry;
3480
3481         while ((skb = skb_dequeue(&dev->done))) {
3482                 entry = (struct skb_data *)(skb->cb);
3483                 switch (entry->state) {
3484                 case rx_done:
3485                         entry->state = rx_cleanup;
3486                         rx_process(dev, skb);
3487                         continue;
3488                 case tx_done:
3489                         usb_free_urb(entry->urb);
3490                         dev_kfree_skb(skb);
3491                         continue;
3492                 case rx_cleanup:
3493                         usb_free_urb(entry->urb);
3494                         dev_kfree_skb(skb);
3495                         continue;
3496                 default:
3497                         netdev_dbg(dev->net, "skb state %d\n", entry->state);
3498                         return;
3499                 }
3500         }
3501
3502         if (netif_device_present(dev->net) && netif_running(dev->net)) {
3503                 /* reset update timer delta */
3504                 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3505                         dev->delta = 1;
3506                         mod_timer(&dev->stat_monitor,
3507                                   jiffies + STAT_UPDATE_TIMER);
3508                 }
3509
3510                 if (!skb_queue_empty(&dev->txq_pend))
3511                         lan78xx_tx_bh(dev);
3512
3513                 if (!timer_pending(&dev->delay) &&
3514                     !test_bit(EVENT_RX_HALT, &dev->flags))
3515                         lan78xx_rx_bh(dev);
3516         }
3517 }
3518
3519 static void lan78xx_delayedwork(struct work_struct *work)
3520 {
3521         int status;
3522         struct lan78xx_net *dev;
3523
3524         dev = container_of(work, struct lan78xx_net, wq.work);
3525
3526         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3527                 unlink_urbs(dev, &dev->txq);
3528                 status = usb_autopm_get_interface(dev->intf);
3529                 if (status < 0)
3530                         goto fail_pipe;
3531                 status = usb_clear_halt(dev->udev, dev->pipe_out);
3532                 usb_autopm_put_interface(dev->intf);
3533                 if (status < 0 &&
3534                     status != -EPIPE &&
3535                     status != -ESHUTDOWN) {
3536                         if (netif_msg_tx_err(dev))
3537 fail_pipe:
3538                                 netdev_err(dev->net,
3539                                            "can't clear tx halt, status %d\n",
3540                                            status);
3541                 } else {
3542                         clear_bit(EVENT_TX_HALT, &dev->flags);
3543                         if (status != -ESHUTDOWN)
3544                                 netif_wake_queue(dev->net);
3545                 }
3546         }
3547         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3548                 unlink_urbs(dev, &dev->rxq);
3549                 status = usb_autopm_get_interface(dev->intf);
3550                 if (status < 0)
3551                                 goto fail_halt;
3552                 status = usb_clear_halt(dev->udev, dev->pipe_in);
3553                 usb_autopm_put_interface(dev->intf);
3554                 if (status < 0 &&
3555                     status != -EPIPE &&
3556                     status != -ESHUTDOWN) {
3557                         if (netif_msg_rx_err(dev))
3558 fail_halt:
3559                                 netdev_err(dev->net,
3560                                            "can't clear rx halt, status %d\n",
3561                                            status);
3562                 } else {
3563                         clear_bit(EVENT_RX_HALT, &dev->flags);
3564                         tasklet_schedule(&dev->bh);
3565                 }
3566         }
3567
3568         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3569                 int ret = 0;
3570
3571                 clear_bit(EVENT_LINK_RESET, &dev->flags);
3572                 status = usb_autopm_get_interface(dev->intf);
3573                 if (status < 0)
3574                         goto skip_reset;
3575                 if (lan78xx_link_reset(dev) < 0) {
3576                         usb_autopm_put_interface(dev->intf);
3577 skip_reset:
3578                         netdev_info(dev->net, "link reset failed (%d)\n",
3579                                     ret);
3580                 } else {
3581                         usb_autopm_put_interface(dev->intf);
3582                 }
3583         }
3584
3585         if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3586                 lan78xx_update_stats(dev);
3587
3588                 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3589
3590                 mod_timer(&dev->stat_monitor,
3591                           jiffies + (STAT_UPDATE_TIMER * dev->delta));
3592
3593                 dev->delta = min((dev->delta * 2), 50);
3594         }
3595 }
3596
3597 static void intr_complete(struct urb *urb)
3598 {
3599         struct lan78xx_net *dev = urb->context;
3600         int status = urb->status;
3601
3602         switch (status) {
3603         /* success */
3604         case 0:
3605                 lan78xx_status(dev, urb);
3606                 break;
3607
3608         /* software-driven interface shutdown */
3609         case -ENOENT:                   /* urb killed */
3610         case -ESHUTDOWN:                /* hardware gone */
3611                 netif_dbg(dev, ifdown, dev->net,
3612                           "intr shutdown, code %d\n", status);
3613                 return;
3614
3615         /* NOTE:  not throttling like RX/TX, since this endpoint
3616          * already polls infrequently
3617          */
3618         default:
3619                 netdev_dbg(dev->net, "intr status %d\n", status);
3620                 break;
3621         }
3622
3623         if (!netif_running(dev->net))
3624                 return;
3625
3626         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3627         status = usb_submit_urb(urb, GFP_ATOMIC);
3628         if (status != 0)
3629                 netif_err(dev, timer, dev->net,
3630                           "intr resubmit --> %d\n", status);
3631 }
3632
/* USB disconnect handler: tear the device down, roughly reversing probe().
 *
 * NOTE(review): the PHY is disconnected before unregister_netdev(); confirm
 * no TX/link path can still reach the PHY in that window.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;
	phydev = net->phydev;

	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	/* if the "PHY" was a fixed link registered by this driver,
	 * release that registration as well
	 */
	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	unregister_netdev(net);

	/* stop the deferred kevent work and drop any TX URBs that were
	 * anchored on dev->deferred while the device was asleep
	 */
	cancel_delayed_work_sync(&dev->wq);

	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
3671
/* ndo_tx_timeout: the watchdog declared TX stuck; unlink all in-flight
 * TX URBs and kick the bottom-half tasklet to restart I/O.
 */
static void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
3679
/* net_device callbacks; all handlers are defined in this file. */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
3694
/* Statistics timer callback: runs in atomic (timer) context, so it only
 * flags EVENT_STAT_UPDATE; the actual register reads happen later in
 * lan78xx_delayedwork().
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
3701
3702 static int lan78xx_probe(struct usb_interface *intf,
3703                          const struct usb_device_id *id)
3704 {
3705         struct lan78xx_net *dev;
3706         struct net_device *netdev;
3707         struct usb_device *udev;
3708         int ret;
3709         unsigned maxp;
3710         unsigned period;
3711         u8 *buf = NULL;
3712
3713         udev = interface_to_usbdev(intf);
3714         udev = usb_get_dev(udev);
3715
3716         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3717         if (!netdev) {
3718                 dev_err(&intf->dev, "Error: OOM\n");
3719                 ret = -ENOMEM;
3720                 goto out1;
3721         }
3722
3723         /* netdev_printk() needs this */
3724         SET_NETDEV_DEV(netdev, &intf->dev);
3725
3726         dev = netdev_priv(netdev);
3727         dev->udev = udev;
3728         dev->intf = intf;
3729         dev->net = netdev;
3730         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3731                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3732
3733         skb_queue_head_init(&dev->rxq);
3734         skb_queue_head_init(&dev->txq);
3735         skb_queue_head_init(&dev->done);
3736         skb_queue_head_init(&dev->rxq_pause);
3737         skb_queue_head_init(&dev->txq_pend);
3738         mutex_init(&dev->phy_mutex);
3739
3740         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3741         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3742         init_usb_anchor(&dev->deferred);
3743
3744         netdev->netdev_ops = &lan78xx_netdev_ops;
3745         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3746         netdev->ethtool_ops = &lan78xx_ethtool_ops;
3747
3748         dev->delta = 1;
3749         timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3750
3751         mutex_init(&dev->stats.access_lock);
3752
3753         ret = lan78xx_bind(dev, intf);
3754         if (ret < 0)
3755                 goto out2;
3756
3757         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3758                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3759
3760         /* MTU range: 68 - 9000 */
3761         netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3762
3763         dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3764         dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3765         dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3766
3767         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3768         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3769
3770         dev->pipe_intr = usb_rcvintpipe(dev->udev,
3771                                         dev->ep_intr->desc.bEndpointAddress &
3772                                         USB_ENDPOINT_NUMBER_MASK);
3773         period = dev->ep_intr->desc.bInterval;
3774
3775         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3776         buf = kmalloc(maxp, GFP_KERNEL);
3777         if (buf) {
3778                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3779                 if (!dev->urb_intr) {
3780                         ret = -ENOMEM;
3781                         kfree(buf);
3782                         goto out3;
3783                 } else {
3784                         usb_fill_int_urb(dev->urb_intr, dev->udev,
3785                                          dev->pipe_intr, buf, maxp,
3786                                          intr_complete, dev, period);
3787                 }
3788         }
3789
3790         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3791
3792         /* driver requires remote-wakeup capability during autosuspend. */
3793         intf->needs_remote_wakeup = 1;
3794
3795         ret = lan78xx_phy_init(dev);
3796         if (ret < 0)
3797                 goto out4;
3798
3799         ret = register_netdev(netdev);
3800         if (ret != 0) {
3801                 netif_err(dev, probe, netdev, "couldn't register the device\n");
3802                 goto out5;
3803         }
3804
3805         usb_set_intfdata(intf, dev);
3806
3807         ret = device_set_wakeup_enable(&udev->dev, true);
3808
3809          /* Default delay of 2sec has more overhead than advantage.
3810           * Set to 10sec as default.
3811           */
3812         pm_runtime_set_autosuspend_delay(&udev->dev,
3813                                          DEFAULT_AUTOSUSPEND_DELAY);
3814
3815         return 0;
3816
3817 out5:
3818         phy_disconnect(netdev->phydev);
3819 out4:
3820         usb_free_urb(dev->urb_intr);
3821 out3:
3822         lan78xx_unbind(dev, intf);
3823 out2:
3824         free_netdev(netdev);
3825 out1:
3826         usb_put_dev(udev);
3827
3828         return ret;
3829 }
3830
3831 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3832 {
3833         const u16 crc16poly = 0x8005;
3834         int i;
3835         u16 bit, crc, msb;
3836         u8 data;
3837
3838         crc = 0xFFFF;
3839         for (i = 0; i < len; i++) {
3840                 data = *buf++;
3841                 for (bit = 0; bit < 8; bit++) {
3842                         msb = crc >> 15;
3843                         crc <<= 1;
3844
3845                         if (msb ^ (u16)(data & 1)) {
3846                                 crc ^= crc16poly;
3847                                 crc |= (u16)0x0001U;
3848                         }
3849                         data >>= 1;
3850                 }
3851         }
3852
3853         return crc;
3854 }
3855
/* Program the chip's Wake-on-LAN machinery for a (non-auto) suspend.
 *
 * Disables the MAC TX/RX paths, clears all wakeup state, then builds the
 * WUCSR / PMT_CTL / wakeup-frame-filter configuration from the requested
 * @wol bits (WAKE_PHY/MAGIC/BCAST/MCAST/UCAST/ARP) and finally re-enables
 * RX so wakeup frames can be received.  Always returns 0.
 *
 * NOTE(review): every lan78xx_read_reg()/lan78xx_write_reg() status is
 * assigned to `ret` but never checked — a failed register access is
 * silently ignored here; confirm this is acceptable on the suspend path.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* multicast MAC prefixes and the ARP ethertype used to seed the
	 * wakeup-frame CRC filters below
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* stop MAC TX and RX while reconfiguring wakeup state */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear wakeup control and any latched wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* reset all wakeup-frame filters before installing new ones */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7: match the first three bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3: match the first two bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3000: match bytes 12-13 (the ethertype field) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable RX so wakeup frames can reach the filters */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3998
/* USB suspend handler (both system suspend and runtime autosuspend).
 *
 * On the first (outermost) suspend: refuses autosuspend while TX work is
 * pending, marks the device asleep, stops the MAC, and kills all URBs.
 * Then, for autosuspend, arms "any good frame" wakeup (SUS_MODE_3 with
 * frame storing); for system suspend, programs the user's WoL settings
 * via lan78xx_set_suspend().
 *
 * Returns 0 on success or -EBUSY to veto an autosuspend.
 *
 * NOTE(review): `event` is assigned from message.event but never used,
 * and the register-access return codes are ignored — confirm intended.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* no stats polling while asleep */
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* clear wakeup control and latched wake sources */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			/* writing 1s clears the latched wakeup status */
			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* RX stays enabled so wake frames are seen */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
4094
/* USB resume handler: undo lan78xx_suspend().
 *
 * On the final (balancing) resume: restarts the interrupt URB, submits
 * any TX URBs that were deferred while asleep, clears EVENT_DEV_ASLEEP
 * and restarts the queue/tasklet.  Always clears the wakeup machinery
 * and re-enables MAC TX.  Always returns 0.
 *
 * NOTE(review): the usb_submit_urb() result for the interrupt URB and
 * the register-write return codes are ignored — confirm intended.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	/* restart the stats timer that suspend stopped */
	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* flush the TX URBs anchored on dev->deferred while asleep */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				/* drop the autopm reference taken at submit */
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* clear wakeup enables and acknowledge latched wake reasons */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the MAC transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
4161
/* Resume after the USB core reset the device: reinitialize the chip and
 * restart the PHY before running the normal resume path.
 */
static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	phy_start(dev->net->phydev);

	return lan78xx_resume(intf);
}
4172
/* USB IDs handled by this driver (Microchip LAN78xx family). */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{},	/* terminator */
};
MODULE_DEVICE_TABLE(usb, products);
4189
/* USB driver glue: autosuspend is supported (see lan78xx_suspend), and
 * hub-initiated LPM is disabled for this device.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
4201
/* registers lan78xx_driver on module load, deregisters it on unload */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");