/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
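
/* Usage sketch: tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), so every feature
 * test is an atomic bitop on the tg3_flags bitmap rather than an
 * open-coded bitmask AND.
 */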
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		133
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"Jul 29, 2013"
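
/* With TG3_MAJ_NUM 3 and TG3_MIN_NUM 133, DRV_MODULE_VERSION
 * stringifies to "3.133".
 */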
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
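
/* Worked example of the mask trick described above: TG3_TX_RING_SIZE
 * is 512, so NEXT_TX(511) computes (512 & 511) == 0 and the index
 * wraps; the AND with (size - 1) gives the same result as
 * '% TG3_TX_RING_SIZE' because the ring size is a power of two.
 */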
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
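
/* Arithmetic example: a standard ring buffer is DMA-mapped as
 * TG3_RX_STD_MAP_SZ = 1536 + 64 = 1600 bytes, i.e. the DMA payload
 * plus TG3_DMA_BYTE_ENAB bytes of slack beyond it.
 */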
#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
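
/* Example: with the default of TG3_TX_RING_SIZE - 1 = 511 pending
 * descriptors, the stack's TX queue is woken once at least
 * 511 / 4 = 127 descriptors have been reclaimed.
 */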
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2
#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
#define FIRMWARE_TG3		"/*(DEBLOBBED)*/"
#define FIRMWARE_TG357766	"/*(DEBLOBBED)*/"
#define FIRMWARE_TG3TSO		"/*(DEBLOBBED)*/"
#define FIRMWARE_TG3TSO5	"/*(DEBLOBBED)*/"
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
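
/* Usage sketch (values are NETIF_MSG_* bits ORed together):
 *
 *	modprobe tg3 tg3_debug=0x7fff	# very verbose
 *	modprobe tg3 tg3_debug=0	# silence all messages
 *
 * Leaving the parameter at -1 keeps the TG3_DEF_MSG_ENABLE default
 * defined above.
 */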
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_full" },
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
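
/* These strings are what 'ethtool -S <iface>' prints, in this exact
 * order; TG3_NUM_STATS must therefore stay in sync with the number of
 * counters the driver copies out for ethtool.
 */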
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
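
/* These labels name the self tests run by
 * 'ethtool -t <iface> [online|offline]'; the index macros above pick
 * the result slot each test reports into.
 */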
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
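
/* Accessor usage sketch: plain tw32()/tr32() go through the
 * indirection pointers chosen at probe time, while the _f variants
 * flush the posted write by reading the register back, e.g.:
 *
 *	tw32_f(MAC_MODE, tp->mac_mode);		write + read-back flush
 *	tw32_wait_f(GRC_LOCAL_CTRL, val, 100);	flush + 100 usec settle
 */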
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
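
/* The REQ and GRANT blocks are arrays of one 32-bit register per
 * lock, hence the 'regbase + 4 * i' addressing above: lock i lives at
 * byte offset 4 * i from the per-lock base (global on the 5761,
 * per-function elsewhere).
 */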
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);

		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
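
/* Polling budget example: a caller passing timeout_us = 1000 gives
 * the APE up to one millisecond, checked in 10 usec steps, to clear
 * the previously pending event before we give up with -EBUSY.
 */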
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state. Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
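
/* MI_COM frame layout used above (one clause-22 MDIO transaction):
 * the PHY address and register fields are shifted into place, the
 * READ or WRITE command bit is ORed in, and MI_COM_START kicks the
 * MI state machine; completion is polled via MI_COM_BUSY.
 */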
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}
1760 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1762 if (tg3_flag(tp, ENABLE_ASF)) {
1764 case RESET_KIND_INIT:
1765 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1769 case RESET_KIND_SHUTDOWN:
1770 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1774 case RESET_KIND_SUSPEND:
1775 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware. Some Sun onboard
	 * parts are configured like that. So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
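
/* Resolution rule implemented above (802.3 Annex 28B pause
 * negotiation): symmetric PAUSE advertised on both ends enables flow
 * control in both directions; otherwise, when both ends advertise
 * asymmetric pause, the side that also advertises PAUSE honors
 * received pause frames (RX) and its link partner sends them (TX).
 */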
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2325 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2328 struct ethtool_eee *dest = &tp->eee;
2330 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2336 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2339 /* Pull eee_active */
2340 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2341 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2342 dest->eee_active = 1;
2344 dest->eee_active = 0;
2346 /* Pull lp advertised settings */
2347 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2349 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2351 /* Pull advertised and eee_enabled settings */
2352 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2354 dest->eee_enabled = !!val;
2355 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2357 /* Pull tx_lpi_enabled */
2358 val = tr32(TG3_CPMU_EEE_MODE);
2359 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2361 /* Pull lpi timer value */
2362 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2365 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2369 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2374 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2376 tp->link_config.active_duplex == DUPLEX_FULL &&
2377 (tp->link_config.active_speed == SPEED_100 ||
2378 tp->link_config.active_speed == SPEED_1000)) {
2381 if (tp->link_config.active_speed == SPEED_1000)
2382 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2384 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2386 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2388 tg3_eee_pull_config(tp, NULL);
2389 if (tp->eee.eee_active)
2393 if (!tp->setlpicnt) {
2394 if (current_link_up &&
2395 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2396 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2397 tg3_phy_toggle_auxctl_smdsp(tp, false);
2400 val = tr32(TG3_CPMU_EEE_MODE);
2401 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2405 static void tg3_phy_eee_enable(struct tg3 *tp)
2409 if (tp->link_config.active_speed == SPEED_1000 &&
2410 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2411 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2412 tg3_flag(tp, 57765_CLASS)) &&
2413 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2414 val = MII_TG3_DSP_TAP26_ALNOKO |
2415 MII_TG3_DSP_TAP26_RMRXSTO;
2416 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2417 tg3_phy_toggle_auxctl_smdsp(tp, false);
2420 val = tr32(TG3_CPMU_EEE_MODE);
2421 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2424 static int tg3_wait_macro_done(struct tg3 *tp)
2431 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2432 if ((tmp32 & 0x1000) == 0)
2442 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2444 static const u32 test_pat[4][6] = {
2445 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2446 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2447 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2448 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2452 for (chan = 0; chan < 4; chan++) {
2455 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2456 (chan * 0x2000) | 0x0200);
2457 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2459 for (i = 0; i < 6; i++)
2460 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2463 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2464 if (tg3_wait_macro_done(tp)) {
2469 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2470 (chan * 0x2000) | 0x0200);
2471 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2472 if (tg3_wait_macro_done(tp)) {
2477 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2478 if (tg3_wait_macro_done(tp)) {
2483 for (i = 0; i < 6; i += 2) {
2486 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2487 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2488 tg3_wait_macro_done(tp)) {
2494 if (low != test_pat[chan][i] ||
2495 high != test_pat[chan][i+1]) {
2496 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2497 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2498 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2508 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2512 for (chan = 0; chan < 4; chan++) {
2515 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2516 (chan * 0x2000) | 0x0200);
2517 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2518 for (i = 0; i < 6; i++)
2519 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2520 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2521 if (tg3_wait_macro_done(tp))
2528 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2530 u32 reg32, phy9_orig;
2531 int retries, do_phy_reset, err;
2537 err = tg3_bmcr_reset(tp);
2543 /* Disable transmitter and interrupt. */
2544 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2548 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2550 /* Set full-duplex, 1000 Mbps. */
2551 tg3_writephy(tp, MII_BMCR,
2552 BMCR_FULLDPLX | BMCR_SPEED1000);
2554 /* Set to master mode. */
2555 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2558 tg3_writephy(tp, MII_CTRL1000,
2559 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2561 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2565 /* Block the PHY control access. */
2566 tg3_phydsp_write(tp, 0x8005, 0x0800);
2568 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2571 } while (--retries);
2573 err = tg3_phy_reset_chanpat(tp);
2577 tg3_phydsp_write(tp, 0x8005, 0x0000);
2579 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2580 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2582 tg3_phy_toggle_auxctl_smdsp(tp, false);
2584 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2586 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2588 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2595 static void tg3_carrier_off(struct tg3 *tp)
2597 netif_carrier_off(tp->dev);
2598 tp->link_up = false;
2601 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2603 if (tg3_flag(tp, ENABLE_ASF))
2604 netdev_warn(tp->dev,
2605 "Management side-band traffic will be interrupted during phy settings change\n");
2608 /* This resets the tigon3 PHY and reapplies the various PHY
2609 * workarounds before bringing the link back up.
2611 static int tg3_phy_reset(struct tg3 *tp)
2616 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2617 val = tr32(GRC_MISC_CFG);
2618 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2621 err = tg3_readphy(tp, MII_BMSR, &val);
2622 err |= tg3_readphy(tp, MII_BMSR, &val);
2626 if (netif_running(tp->dev) && tp->link_up) {
2627 netif_carrier_off(tp->dev);
2628 tg3_link_report(tp);
2631 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2632 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2633 tg3_asic_rev(tp) == ASIC_REV_5705) {
2634 err = tg3_phy_reset_5703_4_5(tp);
2641 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2642 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2643 cpmuctrl = tr32(TG3_CPMU_CTRL);
2644 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2646 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2649 err = tg3_bmcr_reset(tp);
2653 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2654 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2655 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2657 tw32(TG3_CPMU_CTRL, cpmuctrl);
2660 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2661 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2662 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2663 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2664 CPMU_LSPD_1000MB_MACCLK_12_5) {
2665 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2667 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2671 if (tg3_flag(tp, 5717_PLUS) &&
2672 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2675 tg3_phy_apply_otp(tp);
2677 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2678 tg3_phy_toggle_apd(tp, true);
2680 tg3_phy_toggle_apd(tp, false);
2683 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2684 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2685 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2686 tg3_phydsp_write(tp, 0x000a, 0x0323);
2687 tg3_phy_toggle_auxctl_smdsp(tp, false);
2690 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2691 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2692 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2695 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2696 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2697 tg3_phydsp_write(tp, 0x000a, 0x310b);
2698 tg3_phydsp_write(tp, 0x201f, 0x9506);
2699 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2700 tg3_phy_toggle_auxctl_smdsp(tp, false);
2702 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2703 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2704 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2705 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2706 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2707 tg3_writephy(tp, MII_TG3_TEST1,
2708 MII_TG3_TEST1_TRIM_EN | 0x4);
2710 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2712 tg3_phy_toggle_auxctl_smdsp(tp, false);
2716 /* Set the extended packet length bit (bit 14) on all chips
2717 * that support jumbo frames. */
2718 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2719 /* Cannot do read-modify-write on 5401 */
2720 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2721 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2722 /* Set bit 14 with read-modify-write to preserve other bits */
2723 err = tg3_phy_auxctl_read(tp,
2724 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2726 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2727 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2730 /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2731 * jumbo frame transmission.
2733 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2734 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2735 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2736 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2739 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2740 /* adjust output voltage */
2741 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2744 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2745 tg3_phydsp_write(tp, 0xffb, 0x4000);
2747 tg3_phy_toggle_automdix(tp, true);
2748 tg3_phy_set_wirespeed(tp);
2752 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2753 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2754 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2755 TG3_GPIO_MSG_NEED_VAUX)
2756 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2757 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2758 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2759 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2760 (TG3_GPIO_MSG_DRVR_PRES << 12))
2762 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2763 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2764 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2765 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2766 (TG3_GPIO_MSG_NEED_VAUX << 12))
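/* Layout note (derived from the masks above and the shift arithmetic in
 * tg3_set_function_status() below): each PCI function owns a 4-bit nibble
 * of the GPIO message word -- function 0 uses bits 0-3, function 1 bits
 * 4-7, and so on -- and the DRVR_PRES/NEED_VAUX bits are replicated into
 * every nibble by the two ALL_* masks.
 */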
2768 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2772 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2773 tg3_asic_rev(tp) == ASIC_REV_5719)
2774 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2776 status = tr32(TG3_CPMU_DRV_STATUS);
2778 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2779 status &= ~(TG3_GPIO_MSG_MASK << shift);
2780 status |= (newstat << shift);
2782 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2783 tg3_asic_rev(tp) == ASIC_REV_5719)
2784 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2786 tw32(TG3_CPMU_DRV_STATUS, status);
2788 return status >> TG3_APE_GPIO_MSG_SHIFT;
2791 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2793 if (!tg3_flag(tp, IS_NIC))
2796 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2797 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2798 tg3_asic_rev(tp) == ASIC_REV_5720) {
2799 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2802 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2804 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2805 TG3_GRC_LCLCTL_PWRSW_DELAY);
2807 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2809 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2810 TG3_GRC_LCLCTL_PWRSW_DELAY);
2816 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2820 if (!tg3_flag(tp, IS_NIC) ||
2821 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2822 tg3_asic_rev(tp) == ASIC_REV_5701)
2825 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2827 tw32_wait_f(GRC_LOCAL_CTRL,
2828 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY);
2831 tw32_wait_f(GRC_LOCAL_CTRL,
2833 TG3_GRC_LCLCTL_PWRSW_DELAY);
2835 tw32_wait_f(GRC_LOCAL_CTRL,
2836 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2837 TG3_GRC_LCLCTL_PWRSW_DELAY);
2840 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2842 if (!tg3_flag(tp, IS_NIC))
2845 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2846 tg3_asic_rev(tp) == ASIC_REV_5701) {
2847 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2848 (GRC_LCLCTRL_GPIO_OE0 |
2849 GRC_LCLCTRL_GPIO_OE1 |
2850 GRC_LCLCTRL_GPIO_OE2 |
2851 GRC_LCLCTRL_GPIO_OUTPUT0 |
2852 GRC_LCLCTRL_GPIO_OUTPUT1),
2853 TG3_GRC_LCLCTL_PWRSW_DELAY);
2854 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2855 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2856 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2857 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2858 GRC_LCLCTRL_GPIO_OE1 |
2859 GRC_LCLCTRL_GPIO_OE2 |
2860 GRC_LCLCTRL_GPIO_OUTPUT0 |
2861 GRC_LCLCTRL_GPIO_OUTPUT1 |
2863 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2864 TG3_GRC_LCLCTL_PWRSW_DELAY);
2866 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2867 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2868 TG3_GRC_LCLCTL_PWRSW_DELAY);
2870 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2871 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2872 TG3_GRC_LCLCTL_PWRSW_DELAY);
2875 u32 grc_local_ctrl = 0;
2877 /* Workaround to prevent drawing excess current. */
2878 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2879 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2880 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2882 TG3_GRC_LCLCTL_PWRSW_DELAY);
2885 /* On 5753 and variants, GPIO2 cannot be used. */
2886 no_gpio2 = tp->nic_sram_data_cfg &
2887 NIC_SRAM_DATA_CFG_NO_GPIO2;
2889 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2890 GRC_LCLCTRL_GPIO_OE1 |
2891 GRC_LCLCTRL_GPIO_OE2 |
2892 GRC_LCLCTRL_GPIO_OUTPUT1 |
2893 GRC_LCLCTRL_GPIO_OUTPUT2;
2895 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2896 GRC_LCLCTRL_GPIO_OUTPUT2);
2898 tw32_wait_f(GRC_LOCAL_CTRL,
2899 tp->grc_local_ctrl | grc_local_ctrl,
2900 TG3_GRC_LCLCTL_PWRSW_DELAY);
2902 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2904 tw32_wait_f(GRC_LOCAL_CTRL,
2905 tp->grc_local_ctrl | grc_local_ctrl,
2906 TG3_GRC_LCLCTL_PWRSW_DELAY);
2909 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2910 tw32_wait_f(GRC_LOCAL_CTRL,
2911 tp->grc_local_ctrl | grc_local_ctrl,
2912 TG3_GRC_LCLCTL_PWRSW_DELAY);
2917 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2921 /* Serialize power state transitions */
2922 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2925 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2926 msg = TG3_GPIO_MSG_NEED_VAUX;
2928 msg = tg3_set_function_status(tp, msg);
2930 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2933 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2934 tg3_pwrsrc_switch_to_vaux(tp);
2936 tg3_pwrsrc_die_with_vmain(tp);
2939 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2942 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2944 bool need_vaux = false;
2946 /* The GPIOs do something completely different on 57765. */
2947 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2950 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2951 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2952 tg3_asic_rev(tp) == ASIC_REV_5720) {
2953 tg3_frob_aux_power_5717(tp, include_wol ?
2954 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2958 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2959 struct net_device *dev_peer;
2961 dev_peer = pci_get_drvdata(tp->pdev_peer);
2963 /* remove_one() may have been run on the peer. */
2965 struct tg3 *tp_peer = netdev_priv(dev_peer);
2967 if (tg3_flag(tp_peer, INIT_COMPLETE))
2970 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2971 tg3_flag(tp_peer, ENABLE_ASF))
2976 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2977 tg3_flag(tp, ENABLE_ASF))
2981 tg3_pwrsrc_switch_to_vaux(tp);
2983 tg3_pwrsrc_die_with_vmain(tp);
2986 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2988 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2990 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2991 if (speed != SPEED_10)
2993 } else if (speed == SPEED_10)
2999 static bool tg3_phy_power_bug(struct tg3 *tp)
3001 switch (tg3_asic_rev(tp)) {
3006 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3015 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3024 static bool tg3_phy_led_bug(struct tg3 *tp)
3026 switch (tg3_asic_rev(tp)) {
3029 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3038 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3042 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3045 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3046 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3047 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3048 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3051 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3052 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3053 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3058 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3060 val = tr32(GRC_MISC_CFG);
3061 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3064 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3066 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3069 tg3_writephy(tp, MII_ADVERTISE, 0);
3070 tg3_writephy(tp, MII_BMCR,
3071 BMCR_ANENABLE | BMCR_ANRESTART);
3073 tg3_writephy(tp, MII_TG3_FET_TEST,
3074 phytest | MII_TG3_FET_SHADOW_EN);
3075 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3076 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3078 MII_TG3_FET_SHDW_AUXMODE4,
3081 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3084 } else if (do_low_power) {
3085 if (!tg3_phy_led_bug(tp))
3086 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3087 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3089 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3090 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3091 MII_TG3_AUXCTL_PCTL_VREG_11V;
3092 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3095 /* The PHY should not be powered down on some chips because
3098 if (tg3_phy_power_bug(tp))
3101 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3102 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3103 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3104 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3105 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3106 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3109 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3112 /* tp->lock is held. */
3113 static int tg3_nvram_lock(struct tg3 *tp)
3115 if (tg3_flag(tp, NVRAM)) {
3118 if (tp->nvram_lock_cnt == 0) {
3119 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3120 for (i = 0; i < 8000; i++) {
3121 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3126 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3130 tp->nvram_lock_cnt++;
3135 /* tp->lock is held. */
3136 static void tg3_nvram_unlock(struct tg3 *tp)
3138 if (tg3_flag(tp, NVRAM)) {
3139 if (tp->nvram_lock_cnt > 0)
3140 tp->nvram_lock_cnt--;
3141 if (tp->nvram_lock_cnt == 0)
3142 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3146 /* tp->lock is held. */
3147 static void tg3_enable_nvram_access(struct tg3 *tp)
3149 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3150 u32 nvaccess = tr32(NVRAM_ACCESS);
3152 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3156 /* tp->lock is held. */
3157 static void tg3_disable_nvram_access(struct tg3 *tp)
3159 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3160 u32 nvaccess = tr32(NVRAM_ACCESS);
3162 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3166 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3167 u32 offset, u32 *val)
3172 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3175 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3176 EEPROM_ADDR_DEVID_MASK |
3178 tw32(GRC_EEPROM_ADDR,
3180 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3181 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3182 EEPROM_ADDR_ADDR_MASK) |
3183 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3185 for (i = 0; i < 1000; i++) {
3186 tmp = tr32(GRC_EEPROM_ADDR);
3188 if (tmp & EEPROM_ADDR_COMPLETE)
3192 if (!(tmp & EEPROM_ADDR_COMPLETE))
3195 tmp = tr32(GRC_EEPROM_DATA);
3198 * The data will always be opposite to the native endian
3199 * format. Perform a blind byteswap to compensate.
3206 #define NVRAM_CMD_TIMEOUT 10000
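/* NVRAM_CMD_TIMEOUT is an iteration count bounding the NVRAM_CMD_DONE
 * polling loop in tg3_nvram_exec_cmd() below, not a duration in fixed
 * time units.
 */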
3208 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3212 tw32(NVRAM_CMD, nvram_cmd);
3213 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3215 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3221 if (i == NVRAM_CMD_TIMEOUT)
3227 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3229 if (tg3_flag(tp, NVRAM) &&
3230 tg3_flag(tp, NVRAM_BUFFERED) &&
3231 tg3_flag(tp, FLASH) &&
3232 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3233 (tp->nvram_jedecnum == JEDEC_ATMEL))
3235 addr = ((addr / tp->nvram_pagesize) <<
3236 ATMEL_AT45DB0X1B_PAGE_POS) +
3237 (addr % tp->nvram_pagesize);
3242 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3244 if (tg3_flag(tp, NVRAM) &&
3245 tg3_flag(tp, NVRAM_BUFFERED) &&
3246 tg3_flag(tp, FLASH) &&
3247 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3248 (tp->nvram_jedecnum == JEDEC_ATMEL))
3250 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3251 tp->nvram_pagesize) +
3252 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
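/* Worked example with hypothetical values: these translations matter for
 * the Atmel parts because their page size is not a power of two. Assuming
 * nvram_pagesize == 264 and ATMEL_AT45DB0X1B_PAGE_POS == 9, linear
 * address 1000 falls in page 3 at offset 208, so the physical address is
 * (3 << 9) + 208 = 1744; tg3_nvram_logical_addr() performs the inverse
 * mapping.
 */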
3257 /* NOTE: Data read in from NVRAM is byteswapped according to
3258 * the byteswapping settings for all other register accesses.
3259 * tg3 devices are BE devices, so on a BE machine, the data
3260 * returned will be exactly as it is seen in NVRAM. On a LE
3261 * machine, the 32-bit value will be byteswapped.
3263 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3267 if (!tg3_flag(tp, NVRAM))
3268 return tg3_nvram_read_using_eeprom(tp, offset, val);
3270 offset = tg3_nvram_phys_addr(tp, offset);
3272 if (offset > NVRAM_ADDR_MSK)
3275 ret = tg3_nvram_lock(tp);
3279 tg3_enable_nvram_access(tp);
3281 tw32(NVRAM_ADDR, offset);
3282 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3283 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3286 *val = tr32(NVRAM_RDDATA);
3288 tg3_disable_nvram_access(tp);
3290 tg3_nvram_unlock(tp);
3295 /* Ensures NVRAM data is in bytestream format. */
3296 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3299 int res = tg3_nvram_read(tp, offset, &v);
3301 *val = cpu_to_be32(v);
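/* Minimal usage sketch (illustrative only, compiled out): reading a word
 * in NVRAM byte order regardless of host endianness. The helper name
 * example_read_nvram_bytes is hypothetical and not part of the driver.
 */
#if 0
static int example_read_nvram_bytes(struct tg3 *tp, u32 offset, u8 bytes[4])
{
	__be32 v;
	int res = tg3_nvram_read_be32(tp, offset, &v);

	if (!res)
		memcpy(bytes, &v, sizeof(v)); /* bytes[] follow NVRAM order */
	return res;
}
#endif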
3305 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3306 u32 offset, u32 len, u8 *buf)
3311 for (i = 0; i < len; i += 4) {
3317 memcpy(&data, buf + i, 4);
3320 * The SEEPROM interface expects the data to always be opposite
3321 * to the native endian format. We accomplish this by reversing
3322 * all the operations that would have been performed on the
3323 * data from a call to tg3_nvram_read_be32().
3325 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3327 val = tr32(GRC_EEPROM_ADDR);
3328 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3330 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3332 tw32(GRC_EEPROM_ADDR, val |
3333 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3334 (addr & EEPROM_ADDR_ADDR_MASK) |
3338 for (j = 0; j < 1000; j++) {
3339 val = tr32(GRC_EEPROM_ADDR);
3341 if (val & EEPROM_ADDR_COMPLETE)
3345 if (!(val & EEPROM_ADDR_COMPLETE)) {
3354 /* offset and length are dword aligned */
3355 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3359 u32 pagesize = tp->nvram_pagesize;
3360 u32 pagemask = pagesize - 1;
3364 tmp = kmalloc(pagesize, GFP_KERNEL);
3370 u32 phy_addr, page_off, size;
3372 phy_addr = offset & ~pagemask;
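/* Note: this mask arithmetic assumes pagesize is a power of two;
 * offset & ~pagemask rounds down to the start of the enclosing flash
 * page, and offset & pagemask (page_off below) is the byte offset
 * within that page.
 */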
3374 for (j = 0; j < pagesize; j += 4) {
3375 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3376 (__be32 *) (tmp + j));
3383 page_off = offset & pagemask;
3390 memcpy(tmp + page_off, buf, size);
3392 offset = offset + (pagesize - page_off);
3394 tg3_enable_nvram_access(tp);
3397 * Before we can erase the flash page, we need
3398 * to issue a special "write enable" command.
3400 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3402 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3405 /* Erase the target page */
3406 tw32(NVRAM_ADDR, phy_addr);
3408 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3409 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3411 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3414 /* Issue another write enable to start the write. */
3415 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3417 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3420 for (j = 0; j < pagesize; j += 4) {
3423 data = *((__be32 *) (tmp + j));
3425 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3427 tw32(NVRAM_ADDR, phy_addr + j);
3429 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3433 nvram_cmd |= NVRAM_CMD_FIRST;
3434 else if (j == (pagesize - 4))
3435 nvram_cmd |= NVRAM_CMD_LAST;
3437 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3445 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3446 tg3_nvram_exec_cmd(tp, nvram_cmd);
3453 /* offset and length are dword aligned */
3454 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3459 for (i = 0; i < len; i += 4, offset += 4) {
3460 u32 page_off, phy_addr, nvram_cmd;
3463 memcpy(&data, buf + i, 4);
3464 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3466 page_off = offset % tp->nvram_pagesize;
3468 phy_addr = tg3_nvram_phys_addr(tp, offset);
3470 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3472 if (page_off == 0 || i == 0)
3473 nvram_cmd |= NVRAM_CMD_FIRST;
3474 if (page_off == (tp->nvram_pagesize - 4))
3475 nvram_cmd |= NVRAM_CMD_LAST;
3478 nvram_cmd |= NVRAM_CMD_LAST;
3480 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3481 !tg3_flag(tp, FLASH) ||
3482 !tg3_flag(tp, 57765_PLUS))
3483 tw32(NVRAM_ADDR, phy_addr);
3485 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3486 !tg3_flag(tp, 5755_PLUS) &&
3487 (tp->nvram_jedecnum == JEDEC_ST) &&
3488 (nvram_cmd & NVRAM_CMD_FIRST)) {
3491 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3492 ret = tg3_nvram_exec_cmd(tp, cmd);
3496 if (!tg3_flag(tp, FLASH)) {
3497 /* We always do complete word writes to the EEPROM. */
3498 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3501 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3508 /* offset and length are dword aligned */
3509 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3513 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3514 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3515 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3519 if (!tg3_flag(tp, NVRAM)) {
3520 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3524 ret = tg3_nvram_lock(tp);
3528 tg3_enable_nvram_access(tp);
3529 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3530 tw32(NVRAM_WRITE1, 0x406);
3532 grc_mode = tr32(GRC_MODE);
3533 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3535 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3536 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3539 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3543 grc_mode = tr32(GRC_MODE);
3544 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3546 tg3_disable_nvram_access(tp);
3547 tg3_nvram_unlock(tp);
3550 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3551 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3558 #define RX_CPU_SCRATCH_BASE 0x30000
3559 #define RX_CPU_SCRATCH_SIZE 0x04000
3560 #define TX_CPU_SCRATCH_BASE 0x34000
3561 #define TX_CPU_SCRATCH_SIZE 0x04000
3563 /* tp->lock is held. */
3564 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3567 const int iters = 10000;
3569 for (i = 0; i < iters; i++) {
3570 tw32(cpu_base + CPU_STATE, 0xffffffff);
3571 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3572 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3574 if (pci_channel_offline(tp->pdev))
3578 return (i == iters) ? -EBUSY : 0;
3581 /* tp->lock is held. */
3582 static int tg3_rxcpu_pause(struct tg3 *tp)
3584 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3586 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3587 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3593 /* tp->lock is held. */
3594 static int tg3_txcpu_pause(struct tg3 *tp)
3596 return tg3_pause_cpu(tp, TX_CPU_BASE);
3599 /* tp->lock is held. */
3600 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3602 tw32(cpu_base + CPU_STATE, 0xffffffff);
3603 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3606 /* tp->lock is held. */
3607 static void tg3_rxcpu_resume(struct tg3 *tp)
3609 tg3_resume_cpu(tp, RX_CPU_BASE);
3612 /* tp->lock is held. */
3613 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3617 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3619 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3620 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3622 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3625 if (cpu_base == RX_CPU_BASE) {
3626 rc = tg3_rxcpu_pause(tp);
3629 * There is only an Rx CPU for the 5750 derivative in the
3632 if (tg3_flag(tp, IS_SSB_CORE))
3635 rc = tg3_txcpu_pause(tp);
3639 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3640 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3644 /* Clear the firmware's NVRAM arbitration. */
3645 if (tg3_flag(tp, NVRAM))
3646 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3650 static int tg3_fw_data_len(struct tg3 *tp,
3651 const struct tg3_firmware_hdr *fw_hdr)
3655 /* Non-fragmented firmware has one firmware header followed by a
3656 * contiguous chunk of data to be written. The length field in that
3657 * header is not the length of data to be written but the complete
3658 * length of the bss. The data length is determined from
3659 * tp->fw->size minus the headers.
3661 * Fragmented firmware has a main header followed by multiple
3662 * fragments. Each fragment is identical to non-fragmented firmware,
3663 * with a firmware header followed by a contiguous chunk of data. In
3664 * the main header, the length field is unused and set to 0xffffffff.
3665 * In each fragment header the length is the entire size of that
3666 * fragment, i.e. fragment data + header length. The data length is
3667 * therefore the length field minus TG3_FW_HDR_LEN; see the layout sketch after this function.
3669 if (tp->fw_len == 0xffffffff)
3670 fw_len = be32_to_cpu(fw_hdr->len);
3672 fw_len = tp->fw->size;
3674 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
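/* Illustrative layout sketch referenced above (sizes hypothetical):
 *
 *   non-fragmented:  [hdr][data .............................]
 *                    data words = (tp->fw->size - TG3_FW_HDR_LEN) / 4
 *
 *   fragmented:      [main hdr, len = 0xffffffff]
 *                    [frag hdr, len = L1][L1 - TG3_FW_HDR_LEN data bytes]
 *                    [frag hdr, len = L2][L2 - TG3_FW_HDR_LEN data bytes]
 *                    ...
 */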
3677 /* tp->lock is held. */
3678 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3679 u32 cpu_scratch_base, int cpu_scratch_size,
3680 const struct tg3_firmware_hdr *fw_hdr)
3683 void (*write_op)(struct tg3 *, u32, u32);
3684 int total_len = tp->fw->size;
3686 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3688 "%s: Trying to load TX cpu firmware which is 5705\n",
3693 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3694 write_op = tg3_write_mem;
3696 write_op = tg3_write_indirect_reg32;
3698 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3699 /* It is possible that bootcode is still loading at this point.
3700 * Get the NVRAM lock before halting the CPU.
3702 int lock_err = tg3_nvram_lock(tp);
3703 err = tg3_halt_cpu(tp, cpu_base);
3705 tg3_nvram_unlock(tp);
3709 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3710 write_op(tp, cpu_scratch_base + i, 0);
3711 tw32(cpu_base + CPU_STATE, 0xffffffff);
3712 tw32(cpu_base + CPU_MODE,
3713 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3715 /* Subtract additional main header for fragmented firmware and
3716 * advance to the first fragment
3718 total_len -= TG3_FW_HDR_LEN;
3723 u32 *fw_data = (u32 *)(fw_hdr + 1);
3724 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3725 write_op(tp, cpu_scratch_base +
3726 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3728 be32_to_cpu(fw_data[i]));
3730 total_len -= be32_to_cpu(fw_hdr->len);
3732 /* Advance to next fragment */
3733 fw_hdr = (struct tg3_firmware_hdr *)
3734 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3735 } while (total_len > 0);
3743 /* tp->lock is held. */
3744 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3747 const int iters = 5;
3749 tw32(cpu_base + CPU_STATE, 0xffffffff);
3750 tw32_f(cpu_base + CPU_PC, pc);
3752 for (i = 0; i < iters; i++) {
3753 if (tr32(cpu_base + CPU_PC) == pc)
3755 tw32(cpu_base + CPU_STATE, 0xffffffff);
3756 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3757 tw32_f(cpu_base + CPU_PC, pc);
3761 return (i == iters) ? -EBUSY : 0;
3764 /* tp->lock is held. */
3765 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3767 const struct tg3_firmware_hdr *fw_hdr;
3770 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3772 /* The firmware blob starts with version numbers, followed by
3773 * start address and length. We are setting the complete length;
3774 * length = end_address_of_bss - start_address_of_text.
3775 * The remainder is the blob to be loaded contiguously
3776 * from the start address. */
3778 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3779 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3784 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3785 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3790 /* Now start up only the RX CPU. */
3791 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3792 be32_to_cpu(fw_hdr->base_addr));
3794 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x "
3795 "should be %08x\n", __func__,
3796 tr32(RX_CPU_BASE + CPU_PC),
3797 be32_to_cpu(fw_hdr->base_addr));
3801 tg3_rxcpu_resume(tp);
3806 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3808 const int iters = 1000;
3812 /* Wait for boot code to complete initialization and enter service
3813 * loop. It is then safe to download service patches
3815 for (i = 0; i < iters; i++) {
3816 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3823 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3827 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3829 netdev_warn(tp->dev,
3830 "Other patches exist. Not downloading EEE patch\n");
3837 /* tp->lock is held. */
3838 static void tg3_load_57766_firmware(struct tg3 *tp)
3840 struct tg3_firmware_hdr *fw_hdr;
3842 if (!tg3_flag(tp, NO_NVRAM))
3845 if (tg3_validate_rxcpu_state(tp))
3851 /* This firmware blob has a different format than older firmware
3852 * releases as given below. The main difference is we have fragmented
3853 * data to be written to non-contiguous locations.
3855 * In the beginning we have a firmware header identical to other
3856 * firmware which consists of version, base addr and length. The length
3857 * here is unused and set to 0xffffffff.
3859 * This is followed by a series of firmware fragments which are
3860 * individually identical to previous firmware. i.e. they have the
3861 * firmware header and followed by data for that fragment. The version
3862 * field of the individual fragment header is unused.
3865 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3866 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3869 if (tg3_rxcpu_pause(tp))
3872 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3873 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3875 tg3_rxcpu_resume(tp);
3878 /* tp->lock is held. */
3879 static int tg3_load_tso_firmware(struct tg3 *tp)
3881 const struct tg3_firmware_hdr *fw_hdr;
3882 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3885 if (!tg3_flag(tp, FW_TSO))
3888 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3890 /* The firmware blob starts with version numbers, followed by
3891 * start address and length. We are setting the complete length;
3892 * length = end_address_of_bss - start_address_of_text.
3893 * The remainder is the blob to be loaded contiguously
3894 * from the start address. */
3896 cpu_scratch_size = tp->fw_len;
3898 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3899 cpu_base = RX_CPU_BASE;
3900 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3902 cpu_base = TX_CPU_BASE;
3903 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3904 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3907 err = tg3_load_firmware_cpu(tp, cpu_base,
3908 cpu_scratch_base, cpu_scratch_size,
3913 /* Now start up the CPU. */
3914 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3915 be32_to_cpu(fw_hdr->base_addr));
3918 "%s fails to set CPU PC, is %08x should be %08x\n",
3919 __func__, tr32(cpu_base + CPU_PC),
3920 be32_to_cpu(fw_hdr->base_addr));
3924 tg3_resume_cpu(tp, cpu_base);
3929 /* tp->lock is held. */
3930 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3932 u32 addr_high, addr_low;
3935 addr_high = ((tp->dev->dev_addr[0] << 8) |
3936 tp->dev->dev_addr[1]);
3937 addr_low = ((tp->dev->dev_addr[2] << 24) |
3938 (tp->dev->dev_addr[3] << 16) |
3939 (tp->dev->dev_addr[4] << 8) |
3940 (tp->dev->dev_addr[5] << 0));
3941 for (i = 0; i < 4; i++) {
3942 if (i == 1 && skip_mac_1)
3944 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3945 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3948 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3949 tg3_asic_rev(tp) == ASIC_REV_5704) {
3950 for (i = 0; i < 12; i++) {
3951 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3952 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3956 addr_high = (tp->dev->dev_addr[0] +
3957 tp->dev->dev_addr[1] +
3958 tp->dev->dev_addr[2] +
3959 tp->dev->dev_addr[3] +
3960 tp->dev->dev_addr[4] +
3961 tp->dev->dev_addr[5]) &
3962 TX_BACKOFF_SEED_MASK;
3963 tw32(MAC_TX_BACKOFF_SEED, addr_high);
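/* Worked example with a hypothetical MAC address 00:10:18:aa:bb:cc:
 * addr_high = 0x0010, addr_low = 0x18aabbcc, and the backoff seed is the
 * byte sum (0x259 here) masked with TX_BACKOFF_SEED_MASK.
 */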
3966 static void tg3_enable_register_access(struct tg3 *tp)
3969 * Make sure register accesses (indirect or otherwise) will function
3972 pci_write_config_dword(tp->pdev,
3973 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3976 static int tg3_power_up(struct tg3 *tp)
3980 tg3_enable_register_access(tp);
3982 err = pci_set_power_state(tp->pdev, PCI_D0);
3984 /* Switch out of Vaux if it is a NIC */
3985 tg3_pwrsrc_switch_to_vmain(tp);
3987 netdev_err(tp->dev, "Transition to D0 failed\n");
3993 static int tg3_setup_phy(struct tg3 *, bool);
3995 static int tg3_power_down_prepare(struct tg3 *tp)
3998 bool device_should_wake, do_low_power;
4000 tg3_enable_register_access(tp);
4002 /* Restore the CLKREQ setting. */
4003 if (tg3_flag(tp, CLKREQ_BUG))
4004 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4005 PCI_EXP_LNKCTL_CLKREQ_EN);
4007 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4008 tw32(TG3PCI_MISC_HOST_CTRL,
4009 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4011 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4012 tg3_flag(tp, WOL_ENABLE);
4014 if (tg3_flag(tp, USE_PHYLIB)) {
4015 do_low_power = false;
4016 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4017 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4018 struct phy_device *phydev;
4019 u32 phyid, advertising;
4021 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
4023 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4025 tp->link_config.speed = phydev->speed;
4026 tp->link_config.duplex = phydev->duplex;
4027 tp->link_config.autoneg = phydev->autoneg;
4028 tp->link_config.advertising = phydev->advertising;
4030 advertising = ADVERTISED_TP |
4032 ADVERTISED_Autoneg |
4033 ADVERTISED_10baseT_Half;
4035 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4036 if (tg3_flag(tp, WOL_SPEED_100MB))
4038 ADVERTISED_100baseT_Half |
4039 ADVERTISED_100baseT_Full |
4040 ADVERTISED_10baseT_Full;
4042 advertising |= ADVERTISED_10baseT_Full;
4045 phydev->advertising = advertising;
4047 phy_start_aneg(phydev);
4049 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4050 if (phyid != PHY_ID_BCMAC131) {
4051 phyid &= PHY_BCM_OUI_MASK;
4052 if (phyid == PHY_BCM_OUI_1 ||
4053 phyid == PHY_BCM_OUI_2 ||
4054 phyid == PHY_BCM_OUI_3)
4055 do_low_power = true;
4059 do_low_power = true;
4061 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4062 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4064 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4065 tg3_setup_phy(tp, false);
4068 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4071 val = tr32(GRC_VCPU_EXT_CTRL);
4072 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4073 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4077 for (i = 0; i < 200; i++) {
4078 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4079 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4084 if (tg3_flag(tp, WOL_CAP))
4085 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4086 WOL_DRV_STATE_SHUTDOWN |
4090 if (device_should_wake) {
4093 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4095 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4096 tg3_phy_auxctl_write(tp,
4097 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4098 MII_TG3_AUXCTL_PCTL_WOL_EN |
4099 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4100 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4104 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4105 mac_mode = MAC_MODE_PORT_MODE_GMII;
4106 else if (tp->phy_flags &
4107 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4108 if (tp->link_config.active_speed == SPEED_1000)
4109 mac_mode = MAC_MODE_PORT_MODE_GMII;
4111 mac_mode = MAC_MODE_PORT_MODE_MII;
4113 mac_mode = MAC_MODE_PORT_MODE_MII;
4115 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4116 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4117 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4118 SPEED_100 : SPEED_10;
4119 if (tg3_5700_link_polarity(tp, speed))
4120 mac_mode |= MAC_MODE_LINK_POLARITY;
4122 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4125 mac_mode = MAC_MODE_PORT_MODE_TBI;
4128 if (!tg3_flag(tp, 5750_PLUS))
4129 tw32(MAC_LED_CTRL, tp->led_ctrl);
4131 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4132 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4133 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4134 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4136 if (tg3_flag(tp, ENABLE_APE))
4137 mac_mode |= MAC_MODE_APE_TX_EN |
4138 MAC_MODE_APE_RX_EN |
4139 MAC_MODE_TDE_ENABLE;
4141 tw32_f(MAC_MODE, mac_mode);
4144 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4148 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4149 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4150 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4153 base_val = tp->pci_clock_ctrl;
4154 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4155 CLOCK_CTRL_TXCLK_DISABLE);
4157 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4158 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4159 } else if (tg3_flag(tp, 5780_CLASS) ||
4160 tg3_flag(tp, CPMU_PRESENT) ||
4161 tg3_asic_rev(tp) == ASIC_REV_5906) {
4163 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4164 u32 newbits1, newbits2;
4166 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4167 tg3_asic_rev(tp) == ASIC_REV_5701) {
4168 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4169 CLOCK_CTRL_TXCLK_DISABLE |
4171 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4172 } else if (tg3_flag(tp, 5705_PLUS)) {
4173 newbits1 = CLOCK_CTRL_625_CORE;
4174 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4176 newbits1 = CLOCK_CTRL_ALTCLK;
4177 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4180 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4183 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4186 if (!tg3_flag(tp, 5705_PLUS)) {
4189 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4190 tg3_asic_rev(tp) == ASIC_REV_5701) {
4191 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4192 CLOCK_CTRL_TXCLK_DISABLE |
4193 CLOCK_CTRL_44MHZ_CORE);
4195 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4198 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4199 tp->pci_clock_ctrl | newbits3, 40);
4203 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4204 tg3_power_down_phy(tp, do_low_power);
4206 tg3_frob_aux_power(tp, true);
4208 /* Workaround for unstable PLL clock */
4209 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4210 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4211 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4212 u32 val = tr32(0x7d00);
4214 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4216 if (!tg3_flag(tp, ENABLE_ASF)) {
4219 err = tg3_nvram_lock(tp);
4220 tg3_halt_cpu(tp, RX_CPU_BASE);
4222 tg3_nvram_unlock(tp);
4226 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4228 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4233 static void tg3_power_down(struct tg3 *tp)
4235 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4236 pci_set_power_state(tp->pdev, PCI_D3hot);
4239 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4241 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4242 case MII_TG3_AUX_STAT_10HALF:
4244 *duplex = DUPLEX_HALF;
4247 case MII_TG3_AUX_STAT_10FULL:
4249 *duplex = DUPLEX_FULL;
4252 case MII_TG3_AUX_STAT_100HALF:
4254 *duplex = DUPLEX_HALF;
4257 case MII_TG3_AUX_STAT_100FULL:
4259 *duplex = DUPLEX_FULL;
4262 case MII_TG3_AUX_STAT_1000HALF:
4263 *speed = SPEED_1000;
4264 *duplex = DUPLEX_HALF;
4267 case MII_TG3_AUX_STAT_1000FULL:
4268 *speed = SPEED_1000;
4269 *duplex = DUPLEX_FULL;
4273 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4274 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4276 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4280 *speed = SPEED_UNKNOWN;
4281 *duplex = DUPLEX_UNKNOWN;
4286 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4291 new_adv = ADVERTISE_CSMA;
4292 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4293 new_adv |= mii_advertise_flowctrl(flowctrl);
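/* For example (illustrative values): advertise = ADVERTISED_100baseT_Full |
 * ADVERTISED_10baseT_Full maps to ADVERTISE_100FULL | ADVERTISE_10FULL,
 * and flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX adds ADVERTISE_PAUSE_CAP.
 */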
4295 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4299 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4300 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4302 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4303 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4304 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4306 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4311 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4314 tw32(TG3_CPMU_EEE_MODE,
4315 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4317 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4322 /* Advertise 100BASE-TX EEE ability */
4323 if (advertise & ADVERTISED_100baseT_Full)
4324 val |= MDIO_AN_EEE_ADV_100TX;
4325 /* Advertise 1000BASE-T EEE ability */
4326 if (advertise & ADVERTISED_1000baseT_Full)
4327 val |= MDIO_AN_EEE_ADV_1000T;
4329 if (!tp->eee.eee_enabled) {
4331 tp->eee.advertised = 0;
4333 tp->eee.advertised = advertise &
4334 (ADVERTISED_100baseT_Full |
4335 ADVERTISED_1000baseT_Full);
4338 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4342 switch (tg3_asic_rev(tp)) {
4344 case ASIC_REV_57765:
4345 case ASIC_REV_57766:
4347 /* If we advertised any EEE abilities above... */
4349 val = MII_TG3_DSP_TAP26_ALNOKO |
4350 MII_TG3_DSP_TAP26_RMRXSTO |
4351 MII_TG3_DSP_TAP26_OPCSINPT;
4352 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4356 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4357 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4358 MII_TG3_DSP_CH34TP2_HIBW01);
4361 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4370 static void tg3_phy_copper_begin(struct tg3 *tp)
4372 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4373 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4376 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4377 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4378 adv = ADVERTISED_10baseT_Half |
4379 ADVERTISED_10baseT_Full;
4380 if (tg3_flag(tp, WOL_SPEED_100MB))
4381 adv |= ADVERTISED_100baseT_Half |
4382 ADVERTISED_100baseT_Full;
4383 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4384 adv |= ADVERTISED_1000baseT_Half |
4385 ADVERTISED_1000baseT_Full;
4387 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4389 adv = tp->link_config.advertising;
4390 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4391 adv &= ~(ADVERTISED_1000baseT_Half |
4392 ADVERTISED_1000baseT_Full);
4394 fc = tp->link_config.flowctrl;
4397 tg3_phy_autoneg_cfg(tp, adv, fc);
4399 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4400 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4401 /* Normally during power down we want to autonegotiate
4402 * the lowest possible speed for WOL. However, to avoid
4403 * link flap, we leave it untouched.
4408 tg3_writephy(tp, MII_BMCR,
4409 BMCR_ANENABLE | BMCR_ANRESTART);
4412 u32 bmcr, orig_bmcr;
4414 tp->link_config.active_speed = tp->link_config.speed;
4415 tp->link_config.active_duplex = tp->link_config.duplex;
4417 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4418 /* With autoneg disabled, 5715 only links up when the
4419 * advertisement register has the configured speed
4422 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4426 switch (tp->link_config.speed) {
4432 bmcr |= BMCR_SPEED100;
4436 bmcr |= BMCR_SPEED1000;
4440 if (tp->link_config.duplex == DUPLEX_FULL)
4441 bmcr |= BMCR_FULLDPLX;
4443 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4444 (bmcr != orig_bmcr)) {
4445 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4446 for (i = 0; i < 1500; i++) {
4450 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4451 tg3_readphy(tp, MII_BMSR, &tmp))
4453 if (!(tmp & BMSR_LSTATUS)) {
4458 tg3_writephy(tp, MII_BMCR, bmcr);
4464 static int tg3_phy_pull_config(struct tg3 *tp)
4469 err = tg3_readphy(tp, MII_BMCR, &val);
4473 if (!(val & BMCR_ANENABLE)) {
4474 tp->link_config.autoneg = AUTONEG_DISABLE;
4475 tp->link_config.advertising = 0;
4476 tg3_flag_clear(tp, PAUSE_AUTONEG);
4480 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4482 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4485 tp->link_config.speed = SPEED_10;
4488 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4491 tp->link_config.speed = SPEED_100;
4493 case BMCR_SPEED1000:
4494 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4495 tp->link_config.speed = SPEED_1000;
4503 if (val & BMCR_FULLDPLX)
4504 tp->link_config.duplex = DUPLEX_FULL;
4506 tp->link_config.duplex = DUPLEX_HALF;
4508 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4514 tp->link_config.autoneg = AUTONEG_ENABLE;
4515 tp->link_config.advertising = ADVERTISED_Autoneg;
4516 tg3_flag_set(tp, PAUSE_AUTONEG);
4518 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4521 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4525 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4526 tp->link_config.advertising |= adv | ADVERTISED_TP;
4528 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4530 tp->link_config.advertising |= ADVERTISED_FIBRE;
4533 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4536 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4537 err = tg3_readphy(tp, MII_CTRL1000, &val);
4541 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4543 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4547 adv = tg3_decode_flowctrl_1000X(val);
4548 tp->link_config.flowctrl = adv;
4550 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4551 adv = mii_adv_to_ethtool_adv_x(val);
4554 tp->link_config.advertising |= adv;
4561 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4565 /* Turn off tap power management and set the
4566 * extended packet length bit. */
4567 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4569 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4570 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4571 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4572 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4573 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4580 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4582 struct ethtool_eee eee;
4584 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4587 tg3_eee_pull_config(tp, &eee);
4589 if (tp->eee.eee_enabled) {
4590 if (tp->eee.advertised != eee.advertised ||
4591 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4592 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4595 /* EEE is disabled but we're advertising */
4603 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4605 u32 advmsk, tgtadv, advertising;
4607 advertising = tp->link_config.advertising;
4608 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4610 advmsk = ADVERTISE_ALL;
4611 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4612 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4613 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4616 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4619 if ((*lcladv & advmsk) != tgtadv)
4622 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4625 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4627 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4631 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4632 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4633 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4634 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4635 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4637 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4640 if (tg3_ctrl != tgtadv)
4647 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4651 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4654 if (tg3_readphy(tp, MII_STAT1000, &val))
4657 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4660 if (tg3_readphy(tp, MII_LPA, rmtadv))
4663 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4664 tp->link_config.rmt_adv = lpeth;
4669 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4671 if (curr_link_up != tp->link_up) {
4673 netif_carrier_on(tp->dev);
4675 netif_carrier_off(tp->dev);
4676 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4677 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4680 tg3_link_report(tp);
4687 static void tg3_clear_mac_status(struct tg3 *tp)
4692 MAC_STATUS_SYNC_CHANGED |
4693 MAC_STATUS_CFG_CHANGED |
4694 MAC_STATUS_MI_COMPLETION |
4695 MAC_STATUS_LNKSTATE_CHANGED);
4699 static void tg3_setup_eee(struct tg3 *tp)
4703 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4704 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4705 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4706 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4708 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4710 tw32_f(TG3_CPMU_EEE_CTRL,
4711 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4713 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4714 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4715 TG3_CPMU_EEEMD_LPI_IN_RX |
4716 TG3_CPMU_EEEMD_EEE_ENABLE;
4718 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4719 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4721 if (tg3_flag(tp, ENABLE_APE))
4722 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4724 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4726 tw32_f(TG3_CPMU_EEE_DBTMR1,
4727 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4728 (tp->eee.tx_lpi_timer & 0xffff));
4730 tw32_f(TG3_CPMU_EEE_DBTMR2,
4731 TG3_CPMU_DBTMR2_APE_TX_2047US |
4732 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4735 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4737 bool current_link_up;
4739 u32 lcl_adv, rmt_adv;
4744 tg3_clear_mac_status(tp);
4746 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4748 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4752 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4754 /* Some third-party PHYs need to be reset on link going
4757 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4758 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4759 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4761 tg3_readphy(tp, MII_BMSR, &bmsr);
4762 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4763 !(bmsr & BMSR_LSTATUS))
4769 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4770 tg3_readphy(tp, MII_BMSR, &bmsr);
4771 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4772 !tg3_flag(tp, INIT_COMPLETE))
4775 if (!(bmsr & BMSR_LSTATUS)) {
4776 err = tg3_init_5401phy_dsp(tp);
4780 tg3_readphy(tp, MII_BMSR, &bmsr);
4781 for (i = 0; i < 1000; i++) {
4783 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4784 (bmsr & BMSR_LSTATUS)) {
4790 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4791 TG3_PHY_REV_BCM5401_B0 &&
4792 !(bmsr & BMSR_LSTATUS) &&
4793 tp->link_config.active_speed == SPEED_1000) {
4794 err = tg3_phy_reset(tp);
4796 err = tg3_init_5401phy_dsp(tp);
4801 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4802 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4803 /* 5701 {A0,B0} CRC bug workaround */
4804 tg3_writephy(tp, 0x15, 0x0a75);
4805 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4806 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4807 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4810 /* Clear pending interrupts... */
4811 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4812 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4814 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4815 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4816 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4817 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4819 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4820 tg3_asic_rev(tp) == ASIC_REV_5701) {
4821 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4822 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4823 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4825 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4828 current_link_up = false;
4829 current_speed = SPEED_UNKNOWN;
4830 current_duplex = DUPLEX_UNKNOWN;
4831 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4832 tp->link_config.rmt_adv = 0;
4834 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4835 err = tg3_phy_auxctl_read(tp,
4836 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4838 if (!err && !(val & (1 << 10))) {
4839 tg3_phy_auxctl_write(tp,
4840 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4847 for (i = 0; i < 100; i++) {
4848 tg3_readphy(tp, MII_BMSR, &bmsr);
4849 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4850 (bmsr & BMSR_LSTATUS))
4855 if (bmsr & BMSR_LSTATUS) {
4858 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4859 for (i = 0; i < 2000; i++) {
4861 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4866 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4871 for (i = 0; i < 200; i++) {
4872 tg3_readphy(tp, MII_BMCR, &bmcr);
4873 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4875 if (bmcr && bmcr != 0x7fff)
4883 tp->link_config.active_speed = current_speed;
4884 tp->link_config.active_duplex = current_duplex;
4886 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4887 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4889 if ((bmcr & BMCR_ANENABLE) &&
4891 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4892 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4893 current_link_up = true;
4895 /* EEE settings changes take effect only after a phy
4896 * reset. If we have skipped a reset due to Link Flap
4897 * Avoidance being enabled, do it now.
4899 if (!eee_config_ok &&
4900 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4906 if (!(bmcr & BMCR_ANENABLE) &&
4907 tp->link_config.speed == current_speed &&
4908 tp->link_config.duplex == current_duplex) {
4909 current_link_up = true;
4913 if (current_link_up &&
4914 tp->link_config.active_duplex == DUPLEX_FULL) {
4917 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4918 reg = MII_TG3_FET_GEN_STAT;
4919 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4921 reg = MII_TG3_EXT_STAT;
4922 bit = MII_TG3_EXT_STAT_MDIX;
4925 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4926 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4928 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4933 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4934 tg3_phy_copper_begin(tp);
4936 if (tg3_flag(tp, ROBOSWITCH)) {
4937 current_link_up = true;
4938 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4939 current_speed = SPEED_1000;
4940 current_duplex = DUPLEX_FULL;
4941 tp->link_config.active_speed = current_speed;
4942 tp->link_config.active_duplex = current_duplex;
4945 tg3_readphy(tp, MII_BMSR, &bmsr);
4946 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4947 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4948 current_link_up = true;
4951 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4952 if (current_link_up) {
4953 if (tp->link_config.active_speed == SPEED_100 ||
4954 tp->link_config.active_speed == SPEED_10)
4955 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4957 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4958 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4959 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4961 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4963 /* In order for the 5750 core in BCM4785 chip to work properly
4964 * in RGMII mode, the Led Control Register must be set up.
4966 if (tg3_flag(tp, RGMII_MODE)) {
4967 u32 led_ctrl = tr32(MAC_LED_CTRL);
4968 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4970 if (tp->link_config.active_speed == SPEED_10)
4971 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4972 else if (tp->link_config.active_speed == SPEED_100)
4973 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4974 LED_CTRL_100MBPS_ON);
4975 else if (tp->link_config.active_speed == SPEED_1000)
4976 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4977 LED_CTRL_1000MBPS_ON);
4979 tw32(MAC_LED_CTRL, led_ctrl);
4983 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4984 if (tp->link_config.active_duplex == DUPLEX_HALF)
4985 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4987 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4988 if (current_link_up &&
4989 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4990 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4992 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4995 /* ??? Without this setting Netgear GA302T PHY does not
4996 * ??? send/receive packets...
4998 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4999 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5000 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5001 tw32_f(MAC_MI_MODE, tp->mi_mode);
5005 tw32_f(MAC_MODE, tp->mac_mode);
5008 tg3_phy_eee_adjust(tp, current_link_up);
5010 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5011 /* Polled via timer. */
5012 tw32_f(MAC_EVENT, 0);
5014 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5018 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5020 tp->link_config.active_speed == SPEED_1000 &&
5021 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5024 (MAC_STATUS_SYNC_CHANGED |
5025 MAC_STATUS_CFG_CHANGED));
5028 NIC_SRAM_FIRMWARE_MBOX,
5029 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5032 /* Prevent send BD corruption. */
5033 if (tg3_flag(tp, CLKREQ_BUG)) {
5034 if (tp->link_config.active_speed == SPEED_100 ||
5035 tp->link_config.active_speed == SPEED_10)
5036 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5037 PCI_EXP_LNKCTL_CLKREQ_EN);
5039 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5040 PCI_EXP_LNKCTL_CLKREQ_EN);
5043 tg3_test_and_report_link_chg(tp, current_link_up);
5048 struct tg3_fiber_aneginfo {
5049 int state;
5050 #define ANEG_STATE_UNKNOWN 0
5051 #define ANEG_STATE_AN_ENABLE 1
5052 #define ANEG_STATE_RESTART_INIT 2
5053 #define ANEG_STATE_RESTART 3
5054 #define ANEG_STATE_DISABLE_LINK_OK 4
5055 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5056 #define ANEG_STATE_ABILITY_DETECT 6
5057 #define ANEG_STATE_ACK_DETECT_INIT 7
5058 #define ANEG_STATE_ACK_DETECT 8
5059 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5060 #define ANEG_STATE_COMPLETE_ACK 10
5061 #define ANEG_STATE_IDLE_DETECT_INIT 11
5062 #define ANEG_STATE_IDLE_DETECT 12
5063 #define ANEG_STATE_LINK_OK 13
5064 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5065 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5067 u32 flags;
5068 #define MR_AN_ENABLE 0x00000001
5069 #define MR_RESTART_AN 0x00000002
5070 #define MR_AN_COMPLETE 0x00000004
5071 #define MR_PAGE_RX 0x00000008
5072 #define MR_NP_LOADED 0x00000010
5073 #define MR_TOGGLE_TX 0x00000020
5074 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5075 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5076 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5077 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5078 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5079 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5080 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5081 #define MR_TOGGLE_RX 0x00002000
5082 #define MR_NP_RX 0x00004000
5084 #define MR_LINK_OK 0x80000000
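/* The MR_* bits above mirror the mr_* management variables of the
 * IEEE 802.3 Clause 37 (1000BASE-X) auto-negotiation state machine,
 * e.g. mr_an_enable, mr_an_complete and mr_page_rx.
 */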
5086 unsigned long link_time, cur_time;
5088 u32 ability_match_cfg;
5089 int ability_match_count;
5091 char ability_match, idle_match, ack_match;
5093 u32 txconfig, rxconfig;
5094 #define ANEG_CFG_NP 0x00000080
5095 #define ANEG_CFG_ACK 0x00000040
5096 #define ANEG_CFG_RF2 0x00000020
5097 #define ANEG_CFG_RF1 0x00000010
5098 #define ANEG_CFG_PS2 0x00000001
5099 #define ANEG_CFG_PS1 0x00008000
5100 #define ANEG_CFG_HD 0x00004000
5101 #define ANEG_CFG_FD 0x00002000
5102 #define ANEG_CFG_INVAL 0x00001f06
5104 };
5105 #define ANEG_OK 0
5106 #define ANEG_DONE 1
5107 #define ANEG_TIMER_ENAB 2
5108 #define ANEG_FAILED -1
5110 #define ANEG_STATE_SETTLE_TIME 10000
5112 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5113 struct tg3_fiber_aneginfo *ap)
5116 unsigned long delta;
5120 if (ap->state == ANEG_STATE_UNKNOWN) {
5124 ap->ability_match_cfg = 0;
5125 ap->ability_match_count = 0;
5126 ap->ability_match = 0;
5132 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5133 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5135 if (rx_cfg_reg != ap->ability_match_cfg) {
5136 ap->ability_match_cfg = rx_cfg_reg;
5137 ap->ability_match = 0;
5138 ap->ability_match_count = 0;
5140 if (++ap->ability_match_count > 1) {
5141 ap->ability_match = 1;
5142 ap->ability_match_cfg = rx_cfg_reg;
5145 if (rx_cfg_reg & ANEG_CFG_ACK)
5153 ap->ability_match_cfg = 0;
5154 ap->ability_match_count = 0;
5155 ap->ability_match = 0;
5161 ap->rxconfig = rx_cfg_reg;
5164 switch (ap->state) {
5165 case ANEG_STATE_UNKNOWN:
5166 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5167 ap->state = ANEG_STATE_AN_ENABLE;
5170 case ANEG_STATE_AN_ENABLE:
5171 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5172 if (ap->flags & MR_AN_ENABLE) {
5175 ap->ability_match_cfg = 0;
5176 ap->ability_match_count = 0;
5177 ap->ability_match = 0;
5181 ap->state = ANEG_STATE_RESTART_INIT;
5183 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5187 case ANEG_STATE_RESTART_INIT:
5188 ap->link_time = ap->cur_time;
5189 ap->flags &= ~(MR_NP_LOADED);
5191 tw32(MAC_TX_AUTO_NEG, 0);
5192 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5193 tw32_f(MAC_MODE, tp->mac_mode);
5196 ret = ANEG_TIMER_ENAB;
5197 ap->state = ANEG_STATE_RESTART;
5200 case ANEG_STATE_RESTART:
5201 delta = ap->cur_time - ap->link_time;
5202 if (delta > ANEG_STATE_SETTLE_TIME)
5203 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5205 ret = ANEG_TIMER_ENAB;
5208 case ANEG_STATE_DISABLE_LINK_OK:
5212 case ANEG_STATE_ABILITY_DETECT_INIT:
5213 ap->flags &= ~(MR_TOGGLE_TX);
5214 ap->txconfig = ANEG_CFG_FD;
5215 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5216 if (flowctrl & ADVERTISE_1000XPAUSE)
5217 ap->txconfig |= ANEG_CFG_PS1;
5218 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5219 ap->txconfig |= ANEG_CFG_PS2;
5220 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5221 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5222 tw32_f(MAC_MODE, tp->mac_mode);
5225 ap->state = ANEG_STATE_ABILITY_DETECT;
5228 case ANEG_STATE_ABILITY_DETECT:
5229 if (ap->ability_match != 0 && ap->rxconfig != 0)
5230 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5233 case ANEG_STATE_ACK_DETECT_INIT:
5234 ap->txconfig |= ANEG_CFG_ACK;
5235 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5236 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5237 tw32_f(MAC_MODE, tp->mac_mode);
5240 ap->state = ANEG_STATE_ACK_DETECT;
5243 case ANEG_STATE_ACK_DETECT:
5244 if (ap->ack_match != 0) {
5245 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5246 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5247 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5249 ap->state = ANEG_STATE_AN_ENABLE;
5251 } else if (ap->ability_match != 0 &&
5252 ap->rxconfig == 0) {
5253 ap->state = ANEG_STATE_AN_ENABLE;
5257 case ANEG_STATE_COMPLETE_ACK_INIT:
5258 if (ap->rxconfig & ANEG_CFG_INVAL) {
5262 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5263 MR_LP_ADV_HALF_DUPLEX |
5264 MR_LP_ADV_SYM_PAUSE |
5265 MR_LP_ADV_ASYM_PAUSE |
5266 MR_LP_ADV_REMOTE_FAULT1 |
5267 MR_LP_ADV_REMOTE_FAULT2 |
5268 MR_LP_ADV_NEXT_PAGE |
5271 if (ap->rxconfig & ANEG_CFG_FD)
5272 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5273 if (ap->rxconfig & ANEG_CFG_HD)
5274 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5275 if (ap->rxconfig & ANEG_CFG_PS1)
5276 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5277 if (ap->rxconfig & ANEG_CFG_PS2)
5278 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5279 if (ap->rxconfig & ANEG_CFG_RF1)
5280 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5281 if (ap->rxconfig & ANEG_CFG_RF2)
5282 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5283 if (ap->rxconfig & ANEG_CFG_NP)
5284 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5286 ap->link_time = ap->cur_time;
5288 ap->flags ^= (MR_TOGGLE_TX);
5289 if (ap->rxconfig & 0x0008)
5290 ap->flags |= MR_TOGGLE_RX;
5291 if (ap->rxconfig & ANEG_CFG_NP)
5292 ap->flags |= MR_NP_RX;
5293 ap->flags |= MR_PAGE_RX;
5295 ap->state = ANEG_STATE_COMPLETE_ACK;
5296 ret = ANEG_TIMER_ENAB;
5299 case ANEG_STATE_COMPLETE_ACK:
5300 if (ap->ability_match != 0 &&
5301 ap->rxconfig == 0) {
5302 ap->state = ANEG_STATE_AN_ENABLE;
5305 delta = ap->cur_time - ap->link_time;
5306 if (delta > ANEG_STATE_SETTLE_TIME) {
5307 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5308 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5310 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5311 !(ap->flags & MR_NP_RX)) {
5312 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5320 case ANEG_STATE_IDLE_DETECT_INIT:
5321 ap->link_time = ap->cur_time;
5322 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5323 tw32_f(MAC_MODE, tp->mac_mode);
5326 ap->state = ANEG_STATE_IDLE_DETECT;
5327 ret = ANEG_TIMER_ENAB;
5330 case ANEG_STATE_IDLE_DETECT:
5331 if (ap->ability_match != 0 &&
5332 ap->rxconfig == 0) {
5333 ap->state = ANEG_STATE_AN_ENABLE;
5336 delta = ap->cur_time - ap->link_time;
5337 if (delta > ANEG_STATE_SETTLE_TIME) {
5338 /* XXX another gem from the Broadcom driver :( */
5339 ap->state = ANEG_STATE_LINK_OK;
5343 case ANEG_STATE_LINK_OK:
5344 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5348 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5349 /* ??? unimplemented */
5352 case ANEG_STATE_NEXT_PAGE_WAIT:
5353 /* ??? unimplemented */
5364 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5367 struct tg3_fiber_aneginfo aninfo;
5368 int status = ANEG_FAILED;
5372 tw32_f(MAC_TX_AUTO_NEG, 0);
5374 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5375 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5378 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5381 memset(&aninfo, 0, sizeof(aninfo));
5382 aninfo.flags |= MR_AN_ENABLE;
5383 aninfo.state = ANEG_STATE_UNKNOWN;
5384 aninfo.cur_time = 0;
5386 while (++tick < 195000) {
5387 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5388 if (status == ANEG_DONE || status == ANEG_FAILED)
5394 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5395 tw32_f(MAC_MODE, tp->mac_mode);
5398 *txflags = aninfo.txconfig;
5399 *rxflags = aninfo.flags;
5401 if (status == ANEG_DONE &&
5402 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5403 MR_LP_ADV_FULL_DUPLEX)))
5409 static void tg3_init_bcm8002(struct tg3 *tp)
5411 u32 mac_status = tr32(MAC_STATUS);
5414 /* Reset when initializing for the first time or when we have a link. */
5415 if (tg3_flag(tp, INIT_COMPLETE) &&
5416 !(mac_status & MAC_STATUS_PCS_SYNCED))
5419 /* Set PLL lock range. */
5420 tg3_writephy(tp, 0x16, 0x8007);
5423 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5425 /* Wait for reset to complete. */
5426 /* XXX schedule_timeout() ... */
5427 for (i = 0; i < 500; i++)
5430 /* Config mode; select PMA/Ch 1 regs. */
5431 tg3_writephy(tp, 0x10, 0x8411);
5433 /* Enable auto-lock and comdet, select txclk for tx. */
5434 tg3_writephy(tp, 0x11, 0x0a10);
5436 tg3_writephy(tp, 0x18, 0x00a0);
5437 tg3_writephy(tp, 0x16, 0x41ff);
5439 /* Assert and deassert POR. */
5440 tg3_writephy(tp, 0x13, 0x0400);
5442 tg3_writephy(tp, 0x13, 0x0000);
5444 tg3_writephy(tp, 0x11, 0x0a50);
5446 tg3_writephy(tp, 0x11, 0x0a10);
5448 /* Wait for signal to stabilize */
5449 /* XXX schedule_timeout() ... */
5450 for (i = 0; i < 15000; i++)
5453 /* Deselect the channel register so we can read the PHYID
5456 tg3_writephy(tp, 0x10, 0x8011);
5459 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5462 bool current_link_up;
5463 u32 sg_dig_ctrl, sg_dig_status;
5464 u32 serdes_cfg, expected_sg_dig_ctrl;
5465 int workaround, port_a;
5468 expected_sg_dig_ctrl = 0;
5471 current_link_up = false;
5473 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5474 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5476 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5479 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5480 /* preserve bits 20-23 for voltage regulator */
5481 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5484 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5486 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5487 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5489 u32 val = serdes_cfg;
5495 tw32_f(MAC_SERDES_CFG, val);
5498 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5500 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5501 tg3_setup_flow_control(tp, 0, 0);
5502 current_link_up = true;
5507 /* Want auto-negotiation. */
5508 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5510 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5511 if (flowctrl & ADVERTISE_1000XPAUSE)
5512 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5513 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5514 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5516 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5517 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5518 tp->serdes_counter &&
5519 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5520 MAC_STATUS_RCVD_CFG)) ==
5521 MAC_STATUS_PCS_SYNCED)) {
5522 tp->serdes_counter--;
5523 current_link_up = true;
5528 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5529 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5531 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5533 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5534 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5535 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5536 MAC_STATUS_SIGNAL_DET)) {
5537 sg_dig_status = tr32(SG_DIG_STATUS);
5538 mac_status = tr32(MAC_STATUS);
5540 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5541 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5542 u32 local_adv = 0, remote_adv = 0;
5544 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5545 local_adv |= ADVERTISE_1000XPAUSE;
5546 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5547 local_adv |= ADVERTISE_1000XPSE_ASYM;
5549 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5550 remote_adv |= LPA_1000XPAUSE;
5551 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5552 remote_adv |= LPA_1000XPAUSE_ASYM;
5554 tp->link_config.rmt_adv =
5555 mii_adv_to_ethtool_adv_x(remote_adv);
5557 tg3_setup_flow_control(tp, local_adv, remote_adv);
5558 current_link_up = true;
5559 tp->serdes_counter = 0;
5560 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5561 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5562 if (tp->serdes_counter)
5563 tp->serdes_counter--;
5566 u32 val = serdes_cfg;
5573 tw32_f(MAC_SERDES_CFG, val);
5576 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5579 /* Link parallel detection - link is up */
5580 /* only if we have PCS_SYNC and not */
5581 /* receiving config code words */
5582 mac_status = tr32(MAC_STATUS);
5583 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5584 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5585 tg3_setup_flow_control(tp, 0, 0);
5586 current_link_up = true;
5588 TG3_PHYFLG_PARALLEL_DETECT;
5589 tp->serdes_counter =
5590 SERDES_PARALLEL_DET_TIMEOUT;
5592 goto restart_autoneg;
5596 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5597 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5601 return current_link_up;
5604 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5606 bool current_link_up = false;
5608 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5611 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5612 u32 txflags, rxflags;
5615 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5616 u32 local_adv = 0, remote_adv = 0;
5618 if (txflags & ANEG_CFG_PS1)
5619 local_adv |= ADVERTISE_1000XPAUSE;
5620 if (txflags & ANEG_CFG_PS2)
5621 local_adv |= ADVERTISE_1000XPSE_ASYM;
5623 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5624 remote_adv |= LPA_1000XPAUSE;
5625 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5626 remote_adv |= LPA_1000XPAUSE_ASYM;
5628 tp->link_config.rmt_adv =
5629 mii_adv_to_ethtool_adv_x(remote_adv);
5631 tg3_setup_flow_control(tp, local_adv, remote_adv);
5633 current_link_up = true;
5635 for (i = 0; i < 30; i++) {
5638 (MAC_STATUS_SYNC_CHANGED |
5639 MAC_STATUS_CFG_CHANGED));
5641 if ((tr32(MAC_STATUS) &
5642 (MAC_STATUS_SYNC_CHANGED |
5643 MAC_STATUS_CFG_CHANGED)) == 0)
5647 mac_status = tr32(MAC_STATUS);
5648 if (!current_link_up &&
5649 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5650 !(mac_status & MAC_STATUS_RCVD_CFG))
5651 current_link_up = true;
5653 tg3_setup_flow_control(tp, 0, 0);
5655 /* Forcing 1000FD link up. */
5656 current_link_up = true;
5658 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5661 tw32_f(MAC_MODE, tp->mac_mode);
5666 return current_link_up;
5669 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5672 u16 orig_active_speed;
5673 u8 orig_active_duplex;
5675 bool current_link_up;
5678 orig_pause_cfg = tp->link_config.active_flowctrl;
5679 orig_active_speed = tp->link_config.active_speed;
5680 orig_active_duplex = tp->link_config.active_duplex;
5682 if (!tg3_flag(tp, HW_AUTONEG) &&
5684 tg3_flag(tp, INIT_COMPLETE)) {
5685 mac_status = tr32(MAC_STATUS);
5686 mac_status &= (MAC_STATUS_PCS_SYNCED |
5687 MAC_STATUS_SIGNAL_DET |
5688 MAC_STATUS_CFG_CHANGED |
5689 MAC_STATUS_RCVD_CFG);
5690 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5691 MAC_STATUS_SIGNAL_DET)) {
5692 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5693 MAC_STATUS_CFG_CHANGED));
5698 tw32_f(MAC_TX_AUTO_NEG, 0);
5700 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5701 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5702 tw32_f(MAC_MODE, tp->mac_mode);
5705 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5706 tg3_init_bcm8002(tp);
5708 /* Enable link change event even when serdes polling. */
5709 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5712 current_link_up = false;
5713 tp->link_config.rmt_adv = 0;
5714 mac_status = tr32(MAC_STATUS);
5716 if (tg3_flag(tp, HW_AUTONEG))
5717 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5719 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5721 tp->napi[0].hw_status->status =
5722 (SD_STATUS_UPDATED |
5723 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5725 for (i = 0; i < 100; i++) {
5726 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5727 MAC_STATUS_CFG_CHANGED));
5729 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5730 MAC_STATUS_CFG_CHANGED |
5731 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5735 mac_status = tr32(MAC_STATUS);
5736 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5737 current_link_up = false;
5738 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5739 tp->serdes_counter == 0) {
5740 tw32_f(MAC_MODE, (tp->mac_mode |
5741 MAC_MODE_SEND_CONFIGS));
5743 tw32_f(MAC_MODE, tp->mac_mode);
5747 if (current_link_up) {
5748 tp->link_config.active_speed = SPEED_1000;
5749 tp->link_config.active_duplex = DUPLEX_FULL;
5750 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5751 LED_CTRL_LNKLED_OVERRIDE |
5752 LED_CTRL_1000MBPS_ON));
5754 tp->link_config.active_speed = SPEED_UNKNOWN;
5755 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5756 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5757 LED_CTRL_LNKLED_OVERRIDE |
5758 LED_CTRL_TRAFFIC_OVERRIDE));
5761 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5762 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5763 if (orig_pause_cfg != now_pause_cfg ||
5764 orig_active_speed != tp->link_config.active_speed ||
5765 orig_active_duplex != tp->link_config.active_duplex)
5766 tg3_link_report(tp);
5772 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5776 u16 current_speed = SPEED_UNKNOWN;
5777 u8 current_duplex = DUPLEX_UNKNOWN;
5778 bool current_link_up = false;
5779 u32 local_adv, remote_adv, sgsr;
5781 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5782 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5783 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5784 (sgsr & SERDES_TG3_SGMII_MODE)) {
5789 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5791 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5792 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5794 current_link_up = true;
5795 if (sgsr & SERDES_TG3_SPEED_1000) {
5796 current_speed = SPEED_1000;
5797 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5798 } else if (sgsr & SERDES_TG3_SPEED_100) {
5799 current_speed = SPEED_100;
5800 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5802 current_speed = SPEED_10;
5803 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5806 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5807 current_duplex = DUPLEX_FULL;
5809 current_duplex = DUPLEX_HALF;
5812 tw32_f(MAC_MODE, tp->mac_mode);
5815 tg3_clear_mac_status(tp);
5817 goto fiber_setup_done;
5820 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5821 tw32_f(MAC_MODE, tp->mac_mode);
5824 tg3_clear_mac_status(tp);
5829 tp->link_config.rmt_adv = 0;
5831 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5832 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5833 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5834 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5835 bmsr |= BMSR_LSTATUS;
5837 bmsr &= ~BMSR_LSTATUS;
5840 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5842 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5843 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5844 /* do nothing, just check for link up at the end */
5845 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5848 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5849 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5850 ADVERTISE_1000XPAUSE |
5851 ADVERTISE_1000XPSE_ASYM |
5854 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5855 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5857 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5858 tg3_writephy(tp, MII_ADVERTISE, newadv);
5859 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5860 tg3_writephy(tp, MII_BMCR, bmcr);
5862 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5863 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5864 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5871 bmcr &= ~BMCR_SPEED1000;
5872 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5874 if (tp->link_config.duplex == DUPLEX_FULL)
5875 new_bmcr |= BMCR_FULLDPLX;
5877 if (new_bmcr != bmcr) {
5878 /* BMCR_SPEED1000 is a reserved bit that needs
5879 * to be set on write.
5881 new_bmcr |= BMCR_SPEED1000;
5883 /* Force a linkdown */
5887 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5888 adv &= ~(ADVERTISE_1000XFULL |
5889 ADVERTISE_1000XHALF |
5891 tg3_writephy(tp, MII_ADVERTISE, adv);
5892 tg3_writephy(tp, MII_BMCR, bmcr |
5896 tg3_carrier_off(tp);
5898 tg3_writephy(tp, MII_BMCR, new_bmcr);
5900 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5901 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5902 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5903 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5904 bmsr |= BMSR_LSTATUS;
5906 bmsr &= ~BMSR_LSTATUS;
5908 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5912 if (bmsr & BMSR_LSTATUS) {
5913 current_speed = SPEED_1000;
5914 current_link_up = true;
5915 if (bmcr & BMCR_FULLDPLX)
5916 current_duplex = DUPLEX_FULL;
5918 current_duplex = DUPLEX_HALF;
5923 if (bmcr & BMCR_ANENABLE) {
5926 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5927 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5928 common = local_adv & remote_adv;
5929 if (common & (ADVERTISE_1000XHALF |
5930 ADVERTISE_1000XFULL)) {
5931 if (common & ADVERTISE_1000XFULL)
5932 current_duplex = DUPLEX_FULL;
5934 current_duplex = DUPLEX_HALF;
5936 tp->link_config.rmt_adv =
5937 mii_adv_to_ethtool_adv_x(remote_adv);
5938 } else if (!tg3_flag(tp, 5780_CLASS)) {
5939 /* Link is up via parallel detect */
5941 current_link_up = false;
5947 if (current_link_up && current_duplex == DUPLEX_FULL)
5948 tg3_setup_flow_control(tp, local_adv, remote_adv);
5950 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5951 if (tp->link_config.active_duplex == DUPLEX_HALF)
5952 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5954 tw32_f(MAC_MODE, tp->mac_mode);
5957 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5959 tp->link_config.active_speed = current_speed;
5960 tp->link_config.active_duplex = current_duplex;
5962 tg3_test_and_report_link_chg(tp, current_link_up);
5966 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5968 if (tp->serdes_counter) {
5969 /* Give autoneg time to complete. */
5970 tp->serdes_counter--;
5975 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5978 tg3_readphy(tp, MII_BMCR, &bmcr);
5979 if (bmcr & BMCR_ANENABLE) {
5982 /* Select shadow register 0x1f */
5983 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5984 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5986 /* Select expansion interrupt status register */
5987 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5988 MII_TG3_DSP_EXP1_INT_STAT);
5989 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5990 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5992 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5993 /* We have signal detect and not receiving
5994 * config code words, link is up by parallel
5998 bmcr &= ~BMCR_ANENABLE;
5999 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6000 tg3_writephy(tp, MII_BMCR, bmcr);
6001 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6004 } else if (tp->link_up &&
6005 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6006 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6009 /* Select expansion interrupt status register */
6010 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6011 MII_TG3_DSP_EXP1_INT_STAT);
6012 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6016 /* Config code words received, turn on autoneg. */
6017 tg3_readphy(tp, MII_BMCR, &bmcr);
6018 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6020 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6026 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6031 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6032 err = tg3_setup_fiber_phy(tp, force_reset);
6033 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6034 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6036 err = tg3_setup_copper_phy(tp, force_reset);
6038 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6041 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6042 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6044 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6049 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6050 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6051 tw32(GRC_MISC_CFG, val);
6054 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6055 (6 << TX_LENGTHS_IPG_SHIFT);
6056 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6057 tg3_asic_rev(tp) == ASIC_REV_5762)
6058 val |= tr32(MAC_TX_LENGTHS) &
6059 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6060 TX_LENGTHS_CNT_DWN_VAL_MSK);
6062 if (tp->link_config.active_speed == SPEED_1000 &&
6063 tp->link_config.active_duplex == DUPLEX_HALF)
6064 tw32(MAC_TX_LENGTHS, val |
6065 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6067 tw32(MAC_TX_LENGTHS, val |
6068 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6070 if (!tg3_flag(tp, 5705_PLUS)) {
6072 tw32(HOSTCC_STAT_COAL_TICKS,
6073 tp->coal.stats_block_coalesce_usecs);
6075 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6079 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6080 val = tr32(PCIE_PWR_MGMT_THRESH);
6082 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6085 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6086 tw32(PCIE_PWR_MGMT_THRESH, val);
6092 /* tp->lock must be held */
6093 static u64 tg3_refclk_read(struct tg3 *tp)
6094 {
6095 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6096 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6097 }
6099 /* tp->lock must be held */
6100 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6101 {
6102 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6104 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6105 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6106 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6107 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6108 }
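/* Usage sketch (hypothetical caller; tp->lock held as required):
 *
 *	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
 *
 * The STOP/RESUME bracket parks the counter while the two 32-bit
 * halves are written, so a concurrent tg3_refclk_read() never sees
 * a torn 64-bit value.
 */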
6110 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6111 static inline void tg3_full_unlock(struct tg3 *tp);
6112 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6114 struct tg3 *tp = netdev_priv(dev);
6116 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6117 SOF_TIMESTAMPING_RX_SOFTWARE |
6118 SOF_TIMESTAMPING_SOFTWARE;
6120 if (tg3_flag(tp, PTP_CAPABLE)) {
6121 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6122 SOF_TIMESTAMPING_RX_HARDWARE |
6123 SOF_TIMESTAMPING_RAW_HARDWARE;
6127 info->phc_index = ptp_clock_index(tp->ptp_clock);
6129 info->phc_index = -1;
6131 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6133 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6134 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6135 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6136 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6140 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6141 {
6142 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6143 bool neg_adj = false;
6144 u32 correction = 0;
6146 if (ppb < 0) {
6147 neg_adj = true;
6148 ppb = -ppb;
6149 }
6151 /* Frequency adjustment is performed using hardware with a 24 bit
6152 * accumulator and a programmable correction value. On each clk, the
6153 * correction value gets added to the accumulator and when it
6154 * overflows, the time counter is incremented/decremented.
6156 * So conversion from ppb to correction value is
6157 * ppb * (1 << 24) / 1000000000
6159 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6160 TG3_EAV_REF_CLK_CORRECT_MASK;
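/* Worked example (illustrative): ppb = 1000 (+1 ppm) yields
 * correction = 1000 * 2^24 / 10^9 = 16 after truncation, i.e. the
 * accumulator gains an extra 16/2^24 ~= 0.95e-6 per clock, close to
 * the requested one part per million.
 */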
6162 tg3_full_lock(tp, 0);
6164 if (correction)
6165 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6166 TG3_EAV_REF_CLK_CORRECT_EN |
6167 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6168 else
6169 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6171 tg3_full_unlock(tp);
6173 return 0;
6174 }
6176 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6177 {
6178 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6180 tg3_full_lock(tp, 0);
6181 tp->ptp_adjust += delta;
6182 tg3_full_unlock(tp);
6184 return 0;
6185 }
6187 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6188 {
6189 u64 ns;
6190 u32 remainder;
6191 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6193 tg3_full_lock(tp, 0);
6194 ns = tg3_refclk_read(tp);
6195 ns += tp->ptp_adjust;
6196 tg3_full_unlock(tp);
6198 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6199 ts->tv_nsec = remainder;
6201 return 0;
6202 }
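/* Example of the split in tg3_ptp_gettime() (illustrative):
 * ns = 1500000123 gives tv_sec = 1 and tv_nsec = 500000123;
 * div_u64_rem() returns the quotient and stores the remainder.
 */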
6204 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6205 const struct timespec *ts)
6206 {
6207 u64 ns;
6208 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6210 ns = timespec_to_ns(ts);
6212 tg3_full_lock(tp, 0);
6213 tg3_refclk_write(tp, ns);
6214 tp->ptp_adjust = 0;
6215 tg3_full_unlock(tp);
6217 return 0;
6218 }
6220 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6221 struct ptp_clock_request *rq, int on)
6223 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228 case PTP_CLK_REQ_PEROUT:
6229 if (rq->perout.index != 0)
6232 tg3_full_lock(tp, 0);
6233 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6234 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6239 nsec = rq->perout.start.sec * 1000000000ULL +
6240 rq->perout.start.nsec;
6242 if (rq->perout.period.sec || rq->perout.period.nsec) {
6243 netdev_warn(tp->dev,
6244 "Device supports only a one-shot timesync output, period must be 0\n");
6249 if (nsec & (1ULL << 63)) {
6250 netdev_warn(tp->dev,
6251 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6256 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6257 tw32(TG3_EAV_WATCHDOG0_MSB,
6258 TG3_EAV_WATCHDOG0_EN |
6259 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6261 tw32(TG3_EAV_REF_CLCK_CTL,
6262 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6264 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6265 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6269 tg3_full_unlock(tp);
6279 static const struct ptp_clock_info tg3_ptp_caps = {
6280 .owner = THIS_MODULE,
6281 .name = "tg3 clock",
6282 .max_adj = 250000000,
6287 .adjfreq = tg3_ptp_adjfreq,
6288 .adjtime = tg3_ptp_adjtime,
6289 .gettime = tg3_ptp_gettime,
6290 .settime = tg3_ptp_settime,
6291 .enable = tg3_ptp_enable,
6294 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6295 struct skb_shared_hwtstamps *timestamp)
6296 {
6297 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6298 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6299 tp->ptp_adjust);
6300 }
6302 /* tp->lock must be held */
6303 static void tg3_ptp_init(struct tg3 *tp)
6305 if (!tg3_flag(tp, PTP_CAPABLE))
6308 /* Initialize the hardware clock to the system time. */
6309 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6311 tp->ptp_info = tg3_ptp_caps;
6314 /* tp->lock must be held */
6315 static void tg3_ptp_resume(struct tg3 *tp)
6317 if (!tg3_flag(tp, PTP_CAPABLE))
6320 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6324 static void tg3_ptp_fini(struct tg3 *tp)
6326 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6329 ptp_clock_unregister(tp->ptp_clock);
6330 tp->ptp_clock = NULL;
6334 static inline int tg3_irq_sync(struct tg3 *tp)
6335 {
6336 return tp->irq_sync;
6337 }
6339 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6340 {
6341 int i;
6343 dst = (u32 *)((u8 *)dst + off);
6344 for (i = 0; i < len; i += sizeof(u32))
6345 *dst++ = tr32(off + i);
6346 }
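/* Example (illustrative): tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0)
 * reads 0x4f0 bytes of registers starting at MAC_MODE and stores
 * them beginning at regs[MAC_MODE / 4], so every dumped word sits
 * at the same offset in the buffer as in register space.
 */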
6348 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6350 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6351 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6352 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6353 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6354 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6355 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6356 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6357 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6358 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6359 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6360 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6361 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6362 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6363 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6364 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6365 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6366 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6367 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6368 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6370 if (tg3_flag(tp, SUPPORT_MSIX))
6371 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6373 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6374 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6375 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6376 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6377 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6378 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6379 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6380 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6382 if (!tg3_flag(tp, 5705_PLUS)) {
6383 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6384 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6385 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6388 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6389 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6390 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6391 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6392 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6394 if (tg3_flag(tp, NVRAM))
6395 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6398 static void tg3_dump_state(struct tg3 *tp)
6403 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6407 if (tg3_flag(tp, PCI_EXPRESS)) {
6408 /* Read up to but not including private PCI registers */
6409 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6410 regs[i / sizeof(u32)] = tr32(i);
6412 tg3_dump_legacy_regs(tp, regs);
6414 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6415 if (!regs[i + 0] && !regs[i + 1] &&
6416 !regs[i + 2] && !regs[i + 3])
6419 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6421 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6426 for (i = 0; i < tp->irq_cnt; i++) {
6427 struct tg3_napi *tnapi = &tp->napi[i];
6429 /* SW status block */
6431 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6433 tnapi->hw_status->status,
6434 tnapi->hw_status->status_tag,
6435 tnapi->hw_status->rx_jumbo_consumer,
6436 tnapi->hw_status->rx_consumer,
6437 tnapi->hw_status->rx_mini_consumer,
6438 tnapi->hw_status->idx[0].rx_producer,
6439 tnapi->hw_status->idx[0].tx_consumer);
6442 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6444 tnapi->last_tag, tnapi->last_irq_tag,
6445 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6447 tnapi->prodring.rx_std_prod_idx,
6448 tnapi->prodring.rx_std_cons_idx,
6449 tnapi->prodring.rx_jmb_prod_idx,
6450 tnapi->prodring.rx_jmb_cons_idx);
6454 /* This is called whenever we suspect that the system chipset is re-
6455 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6456 * is bogus tx completions. We try to recover by setting the
6457 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6458 * in the workqueue.
6459 */
6460 static void tg3_tx_recover(struct tg3 *tp)
6462 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6463 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6465 netdev_warn(tp->dev,
6466 "The system may be re-ordering memory-mapped I/O "
6467 "cycles to the network device, attempting to recover. "
6468 "Please report the problem to the driver maintainer "
6469 "and include system chipset information.\n");
6471 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6474 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6475 {
6476 /* Tell compiler to fetch tx indices from memory. */
6477 barrier();
6478 return tnapi->tx_pending -
6479 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6480 }
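/* Worked example (illustrative): with TG3_TX_RING_SIZE = 512,
 * tx_prod = 5 and tx_cons = 510, the in-flight count is
 * (5 - 510) & 511 = 7; with tx_pending = 511 that leaves 504 free
 * descriptors. The mask is valid because the ring size is a power
 * of two.
 */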
6482 /* Tigon3 never reports partial packet sends. So we do not
6483 * need special logic to handle SKBs that have not had all
6484 * of their frags sent yet, like SunGEM does.
6485 */
6486 static void tg3_tx(struct tg3_napi *tnapi)
6488 struct tg3 *tp = tnapi->tp;
6489 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6490 u32 sw_idx = tnapi->tx_cons;
6491 struct netdev_queue *txq;
6492 int index = tnapi - tp->napi;
6493 unsigned int pkts_compl = 0, bytes_compl = 0;
6495 if (tg3_flag(tp, ENABLE_TSS))
6498 txq = netdev_get_tx_queue(tp->dev, index);
6500 while (sw_idx != hw_idx) {
6501 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6502 struct sk_buff *skb = ri->skb;
6505 if (unlikely(skb == NULL)) {
6506 tg3_tx_recover(tp);
6507 return;
6508 }
6510 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6511 struct skb_shared_hwtstamps timestamp;
6512 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6513 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6515 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6517 skb_tstamp_tx(skb, &timestamp);
6520 pci_unmap_single(tp->pdev,
6521 dma_unmap_addr(ri, mapping),
6527 while (ri->fragmented) {
6528 ri->fragmented = false;
6529 sw_idx = NEXT_TX(sw_idx);
6530 ri = &tnapi->tx_buffers[sw_idx];
6533 sw_idx = NEXT_TX(sw_idx);
6535 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6536 ri = &tnapi->tx_buffers[sw_idx];
6537 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6540 pci_unmap_page(tp->pdev,
6541 dma_unmap_addr(ri, mapping),
6542 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6545 while (ri->fragmented) {
6546 ri->fragmented = false;
6547 sw_idx = NEXT_TX(sw_idx);
6548 ri = &tnapi->tx_buffers[sw_idx];
6551 sw_idx = NEXT_TX(sw_idx);
6552 }
6554 pkts_compl++;
6555 bytes_compl += skb->len;
6557 dev_kfree_skb_any(skb);
6559 if (unlikely(tx_bug)) {
6560 tg3_tx_recover(tp);
6561 return;
6562 }
6563 }
6565 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6567 tnapi->tx_cons = sw_idx;
6569 /* Need to make the tx_cons update visible to tg3_start_xmit()
6570 * before checking for netif_queue_stopped(). Without the
6571 * memory barrier, there is a small possibility that tg3_start_xmit()
6572 * will miss it and cause the queue to be stopped forever.
6573 */
6575 smp_mb();
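/* Illustrative interleaving of the race closed by the barrier:
 * without it, tg3_start_xmit() could stop the queue using a stale
 * tx_cons just after this CPU's update, while the wake-up test
 * below had already run, leaving the queue stopped with nobody
 * left to wake it.
 */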
6576 if (unlikely(netif_tx_queue_stopped(txq) &&
6577 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6578 __netif_tx_lock(txq, smp_processor_id());
6579 if (netif_tx_queue_stopped(txq) &&
6580 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6581 netif_tx_wake_queue(txq);
6582 __netif_tx_unlock(txq);
6586 static void tg3_frag_free(bool is_frag, void *data)
6587 {
6588 if (is_frag)
6589 put_page(virt_to_head_page(data));
6590 else
6591 kfree(data);
6592 }
6594 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6596 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6597 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6602 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6603 map_sz, PCI_DMA_FROMDEVICE);
6604 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6609 /* Returns size of skb allocated or < 0 on error.
6611 * We only need to fill in the address because the other members
6612 * of the RX descriptor are invariant, see tg3_init_rings.
6614 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6615 * posting buffers we only dirty the first cache line of the RX
6616 * descriptor (containing the address). Whereas for the RX status
6617 * buffers the cpu only reads the last cacheline of the RX descriptor
6618 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6619 */
6620 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6621 u32 opaque_key, u32 dest_idx_unmasked,
6622 unsigned int *frag_size)
6624 struct tg3_rx_buffer_desc *desc;
6625 struct ring_info *map;
6628 int skb_size, data_size, dest_idx;
6630 switch (opaque_key) {
6631 case RXD_OPAQUE_RING_STD:
6632 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6633 desc = &tpr->rx_std[dest_idx];
6634 map = &tpr->rx_std_buffers[dest_idx];
6635 data_size = tp->rx_pkt_map_sz;
6638 case RXD_OPAQUE_RING_JUMBO:
6639 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6640 desc = &tpr->rx_jmb[dest_idx].std;
6641 map = &tpr->rx_jmb_buffers[dest_idx];
6642 data_size = TG3_RX_JMB_MAP_SZ;
6649 /* Do not overwrite any of the map or rp information
6650 * until we are sure we can commit to a new buffer.
6652 * Callers depend upon this behavior and assume that
6653 * we leave everything unchanged if we fail.
6655 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6656 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6657 if (skb_size <= PAGE_SIZE) {
6658 data = netdev_alloc_frag(skb_size);
6659 *frag_size = skb_size;
6660 } else {
6661 data = kmalloc(skb_size, GFP_ATOMIC);
6662 *frag_size = 0;
6663 }
6664 if (unlikely(!data))
6665 return -ENOMEM;
6667 mapping = pci_map_single(tp->pdev,
6668 data + TG3_RX_OFFSET(tp),
6670 PCI_DMA_FROMDEVICE);
6671 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6672 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6673 return -EIO;
6674 }
6676 map->data = data;
6677 dma_unmap_addr_set(map, mapping, mapping);
6679 desc->addr_hi = ((u64)mapping >> 32);
6680 desc->addr_lo = ((u64)mapping & 0xffffffff);
6682 return data_size;
6683 }
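/* Illustration of the address split above: a DMA mapping of
 * 0x0000000123456780 is posted as addr_hi = 0x00000001 and
 * addr_lo = 0x23456780; the NIC reassembles the 64-bit bus address
 * from the two 32-bit descriptor words.
 */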
6685 /* We only need to move over in the address because the other
6686 * members of the RX descriptor are invariant. See notes above
6687 * tg3_alloc_rx_data for full details.
6689 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6690 struct tg3_rx_prodring_set *dpr,
6691 u32 opaque_key, int src_idx,
6692 u32 dest_idx_unmasked)
6694 struct tg3 *tp = tnapi->tp;
6695 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6696 struct ring_info *src_map, *dest_map;
6697 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6700 switch (opaque_key) {
6701 case RXD_OPAQUE_RING_STD:
6702 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6703 dest_desc = &dpr->rx_std[dest_idx];
6704 dest_map = &dpr->rx_std_buffers[dest_idx];
6705 src_desc = &spr->rx_std[src_idx];
6706 src_map = &spr->rx_std_buffers[src_idx];
6709 case RXD_OPAQUE_RING_JUMBO:
6710 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6711 dest_desc = &dpr->rx_jmb[dest_idx].std;
6712 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6713 src_desc = &spr->rx_jmb[src_idx].std;
6714 src_map = &spr->rx_jmb_buffers[src_idx];
6721 dest_map->data = src_map->data;
6722 dma_unmap_addr_set(dest_map, mapping,
6723 dma_unmap_addr(src_map, mapping));
6724 dest_desc->addr_hi = src_desc->addr_hi;
6725 dest_desc->addr_lo = src_desc->addr_lo;
6727 /* Ensure that the update to the skb happens after the physical
6728 * addresses have been transferred to the new BD location.
6732 src_map->data = NULL;
6735 /* The RX ring scheme is composed of multiple rings which post fresh
6736 * buffers to the chip, and one special ring the chip uses to report
6737 * status back to the host.
6739 * The special ring reports the status of received packets to the
6740 * host. The chip does not write into the original descriptor the
6741 * RX buffer was obtained from. The chip simply takes the original
6742 * descriptor as provided by the host, updates the status and length
6743 * field, then writes this into the next status ring entry.
6745 * Each ring the host uses to post buffers to the chip is described
6746 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6747 * it is first placed into the on-chip ram. When the packet's length
6748 * is known, it walks down the TG3_BDINFO entries to select the ring.
6749 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6750 * which is within the range of the new packet's length is chosen.
6752 * The "separate ring for rx status" scheme may sound queer, but it makes
6753 * sense from a cache coherency perspective. If only the host writes
6754 * to the buffer post rings, and only the chip writes to the rx status
6755 * rings, then cache lines never move beyond shared-modified state.
6756 * If both the host and chip were to write into the same ring, cache line
6757 * eviction could occur since both entities want it in an exclusive state.
6758 */
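/* Worked example of the TG3_BDINFO walk described above, using
 * illustrative MAXLEN values: with a standard ring MAXLEN of 1536
 * and a jumbo ring MAXLEN of 9018, a 300-byte packet selects the
 * standard ring while a 4000-byte packet falls through to the
 * jumbo ring.
 */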
6759 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6761 struct tg3 *tp = tnapi->tp;
6762 u32 work_mask, rx_std_posted = 0;
6763 u32 std_prod_idx, jmb_prod_idx;
6764 u32 sw_idx = tnapi->rx_rcb_ptr;
6767 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6769 hw_idx = *(tnapi->rx_rcb_prod_idx);
6771 * We need to order the read of hw_idx and the read of
6772 * the opaque cookie.
6777 std_prod_idx = tpr->rx_std_prod_idx;
6778 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6779 while (sw_idx != hw_idx && budget > 0) {
6780 struct ring_info *ri;
6781 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6783 struct sk_buff *skb;
6784 dma_addr_t dma_addr;
6785 u32 opaque_key, desc_idx, *post_ptr;
6789 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6790 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6791 if (opaque_key == RXD_OPAQUE_RING_STD) {
6792 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6793 dma_addr = dma_unmap_addr(ri, mapping);
6795 post_ptr = &std_prod_idx;
6797 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6798 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6799 dma_addr = dma_unmap_addr(ri, mapping);
6801 post_ptr = &jmb_prod_idx;
6803 goto next_pkt_nopost;
6805 work_mask |= opaque_key;
6807 if (desc->err_vlan & RXD_ERR_MASK) {
6809 tg3_recycle_rx(tnapi, tpr, opaque_key,
6810 desc_idx, *post_ptr);
6812 /* Other statistics kept track of by card. */
6817 prefetch(data + TG3_RX_OFFSET(tp));
6818 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6821 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6822 RXD_FLAG_PTPSTAT_PTPV1 ||
6823 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6824 RXD_FLAG_PTPSTAT_PTPV2) {
6825 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6826 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6829 if (len > TG3_RX_COPY_THRESH(tp)) {
6831 unsigned int frag_size;
6833 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6834 *post_ptr, &frag_size);
6838 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6839 PCI_DMA_FROMDEVICE);
6841 /* Ensure that the update to the data happens
6842 * after the usage of the old DMA mapping.
6848 skb = build_skb(data, frag_size);
6850 tg3_frag_free(frag_size != 0, data);
6851 goto drop_it_no_recycle;
6853 skb_reserve(skb, TG3_RX_OFFSET(tp));
6855 tg3_recycle_rx(tnapi, tpr, opaque_key,
6856 desc_idx, *post_ptr);
6858 skb = netdev_alloc_skb(tp->dev,
6859 len + TG3_RAW_IP_ALIGN);
6861 goto drop_it_no_recycle;
6863 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6864 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6866 data + TG3_RX_OFFSET(tp),
6868 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6873 tg3_hwclock_to_timestamp(tp, tstamp,
6874 skb_hwtstamps(skb));
6876 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6877 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6878 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6879 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6880 skb->ip_summed = CHECKSUM_UNNECESSARY;
6882 skb_checksum_none_assert(skb);
6884 skb->protocol = eth_type_trans(skb, tp->dev);
6886 if (len > (tp->dev->mtu + ETH_HLEN) &&
6887 skb->protocol != htons(ETH_P_8021Q)) {
6889 goto drop_it_no_recycle;
6892 if (desc->type_flags & RXD_FLAG_VLAN &&
6893 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6894 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6895 desc->err_vlan & RXD_VLAN_MASK);
6897 napi_gro_receive(&tnapi->napi, skb);
6905 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6906 tpr->rx_std_prod_idx = std_prod_idx &
6907 tp->rx_std_ring_mask;
6908 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6909 tpr->rx_std_prod_idx);
6910 work_mask &= ~RXD_OPAQUE_RING_STD;
6915 sw_idx &= tp->rx_ret_ring_mask;
6917 /* Refresh hw_idx to see if there is new work */
6918 if (sw_idx == hw_idx) {
6919 hw_idx = *(tnapi->rx_rcb_prod_idx);
6920 rmb();
6921 }
6922 }
6924 /* ACK the status ring. */
6925 tnapi->rx_rcb_ptr = sw_idx;
6926 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6928 /* Refill RX ring(s). */
6929 if (!tg3_flag(tp, ENABLE_RSS)) {
6930 /* Sync BD data before updating mailbox */
6933 if (work_mask & RXD_OPAQUE_RING_STD) {
6934 tpr->rx_std_prod_idx = std_prod_idx &
6935 tp->rx_std_ring_mask;
6936 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6937 tpr->rx_std_prod_idx);
6939 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6940 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6941 tp->rx_jmb_ring_mask;
6942 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6943 tpr->rx_jmb_prod_idx);
6946 } else if (work_mask) {
6947 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6948 * updated before the producer indices can be updated.
6952 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6953 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6955 if (tnapi != &tp->napi[1]) {
6956 tp->rx_refill = true;
6957 napi_schedule(&tp->napi[1].napi);
6964 static void tg3_poll_link(struct tg3 *tp)
6966 /* handle link change and other phy events */
6967 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6968 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6970 if (sblk->status & SD_STATUS_LINK_CHG) {
6971 sblk->status = SD_STATUS_UPDATED |
6972 (sblk->status & ~SD_STATUS_LINK_CHG);
6973 spin_lock(&tp->lock);
6974 if (tg3_flag(tp, USE_PHYLIB)) {
6976 (MAC_STATUS_SYNC_CHANGED |
6977 MAC_STATUS_CFG_CHANGED |
6978 MAC_STATUS_MI_COMPLETION |
6979 MAC_STATUS_LNKSTATE_CHANGED));
6982 tg3_setup_phy(tp, false);
6983 spin_unlock(&tp->lock);
6988 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6989 struct tg3_rx_prodring_set *dpr,
6990 struct tg3_rx_prodring_set *spr)
6992 u32 si, di, cpycnt, src_prod_idx;
6996 src_prod_idx = spr->rx_std_prod_idx;
6998 /* Make sure updates to the rx_std_buffers[] entries and the
6999 * standard producer index are seen in the correct order.
7003 if (spr->rx_std_cons_idx == src_prod_idx)
7006 if (spr->rx_std_cons_idx < src_prod_idx)
7007 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7009 cpycnt = tp->rx_std_ring_mask + 1 -
7010 spr->rx_std_cons_idx;
7012 cpycnt = min(cpycnt,
7013 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7015 si = spr->rx_std_cons_idx;
7016 di = dpr->rx_std_prod_idx;
7018 for (i = di; i < di + cpycnt; i++) {
7019 if (dpr->rx_std_buffers[i].data) {
7029 /* Ensure that updates to the rx_std_buffers ring and the
7030 * shadowed hardware producer ring from tg3_recycle_skb() are
7031 * ordered correctly WRT the skb check above.
7035 memcpy(&dpr->rx_std_buffers[di],
7036 &spr->rx_std_buffers[si],
7037 cpycnt * sizeof(struct ring_info));
7039 for (i = 0; i < cpycnt; i++, di++, si++) {
7040 struct tg3_rx_buffer_desc *sbd, *dbd;
7041 sbd = &spr->rx_std[si];
7042 dbd = &dpr->rx_std[di];
7043 dbd->addr_hi = sbd->addr_hi;
7044 dbd->addr_lo = sbd->addr_lo;
7047 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7048 tp->rx_std_ring_mask;
7049 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7050 tp->rx_std_ring_mask;
7054 src_prod_idx = spr->rx_jmb_prod_idx;
7056 /* Make sure updates to the rx_jmb_buffers[] entries and
7057 * the jumbo producer index are seen in the correct order.
7061 if (spr->rx_jmb_cons_idx == src_prod_idx)
7064 if (spr->rx_jmb_cons_idx < src_prod_idx)
7065 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7067 cpycnt = tp->rx_jmb_ring_mask + 1 -
7068 spr->rx_jmb_cons_idx;
7070 cpycnt = min(cpycnt,
7071 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7073 si = spr->rx_jmb_cons_idx;
7074 di = dpr->rx_jmb_prod_idx;
7076 for (i = di; i < di + cpycnt; i++) {
7077 if (dpr->rx_jmb_buffers[i].data) {
7087 /* Ensure that updates to the rx_jmb_buffers ring and the
7088 * shadowed hardware producer ring from tg3_recycle_skb() are
7089 * ordered correctly WRT the skb check above.
7093 memcpy(&dpr->rx_jmb_buffers[di],
7094 &spr->rx_jmb_buffers[si],
7095 cpycnt * sizeof(struct ring_info));
7097 for (i = 0; i < cpycnt; i++, di++, si++) {
7098 struct tg3_rx_buffer_desc *sbd, *dbd;
7099 sbd = &spr->rx_jmb[si].std;
7100 dbd = &dpr->rx_jmb[di].std;
7101 dbd->addr_hi = sbd->addr_hi;
7102 dbd->addr_lo = sbd->addr_lo;
7105 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7106 tp->rx_jmb_ring_mask;
7107 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7108 tp->rx_jmb_ring_mask;
7114 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7116 struct tg3 *tp = tnapi->tp;
7118 /* run TX completion thread */
7119 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7121 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7125 if (!tnapi->rx_rcb_prod_idx)
7128 /* run RX thread, within the bounds set by NAPI.
7129 * All RX "locking" is done by ensuring outside
7130 * code synchronizes with tg3->napi.poll()
7132 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7133 work_done += tg3_rx(tnapi, budget - work_done);
7135 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7136 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7138 u32 std_prod_idx = dpr->rx_std_prod_idx;
7139 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7141 tp->rx_refill = false;
7142 for (i = 1; i <= tp->rxq_cnt; i++)
7143 err |= tg3_rx_prodring_xfer(tp, dpr,
7144 &tp->napi[i].prodring);
7148 if (std_prod_idx != dpr->rx_std_prod_idx)
7149 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7150 dpr->rx_std_prod_idx);
7152 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7153 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7154 dpr->rx_jmb_prod_idx);
7159 tw32_f(HOSTCC_MODE, tp->coal_now);
7165 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7166 {
7167 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7168 schedule_work(&tp->reset_task);
7169 }
7171 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7173 cancel_work_sync(&tp->reset_task);
7174 tg3_flag_clear(tp, RESET_TASK_PENDING);
7175 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
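/* NAPI poll callback for MSI-X vectors (tagged status mode): loop
 * until the budget is spent or no work remains, then complete NAPI
 * and re-arm the vector by writing last_tag to its interrupt
 * mailbox. A vector 1 rx refill that races with completion is
 * resolved by kicking host coalescing so the interrupt refires.
 */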
7178 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7180 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7181 struct tg3 *tp = tnapi->tp;
7183 struct tg3_hw_status *sblk = tnapi->hw_status;
7186 work_done = tg3_poll_work(tnapi, work_done, budget);
7188 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7191 if (unlikely(work_done >= budget))
7194 /* tnapi->last_tag is written to the interrupt mailbox below
7195 * to tell the hw how much work has been processed,
7196 * so we must read it before checking for more work.
7198 tnapi->last_tag = sblk->status_tag;
7199 tnapi->last_irq_tag = tnapi->last_tag;
7202 /* check for RX/TX work to do */
7203 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7204 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7206 /* This test here is not race free, but will reduce
7207 * the number of interrupts by looping again.
7209 if (tnapi == &tp->napi[1] && tp->rx_refill)
7212 napi_complete(napi);
7213 /* Reenable interrupts. */
7214 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7216 /* This test here is synchronized by napi_schedule()
7217 * and napi_complete() to close the race condition.
7219 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7220 tw32(HOSTCC_MODE, tp->coalesce_mode |
7221 HOSTCC_MODE_ENABLE |
7232 /* work_done is guaranteed to be less than budget. */
7233 napi_complete(napi);
7234 tg3_reset_task_schedule(tp);
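/* Inspect the flow attention, MSI status and DMA status registers
 * for real errors; if one is found it is logged and a chip reset is
 * scheduled. The ERROR_PROCESSED flag keeps this from running more
 * than once per reset.
 */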
7238 static void tg3_process_error(struct tg3 *tp)
7241 bool real_error = false;
7243 if (tg3_flag(tp, ERROR_PROCESSED))
7246 /* Check Flow Attention register */
7247 val = tr32(HOSTCC_FLOW_ATTN);
7248 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7249 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7253 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7254 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7258 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7259 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7268 tg3_flag_set(tp, ERROR_PROCESSED);
7269 tg3_reset_task_schedule(tp);
7272 static int tg3_poll(struct napi_struct *napi, int budget)
7274 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7275 struct tg3 *tp = tnapi->tp;
7277 struct tg3_hw_status *sblk = tnapi->hw_status;
7280 if (sblk->status & SD_STATUS_ERROR)
7281 tg3_process_error(tp);
7285 work_done = tg3_poll_work(tnapi, work_done, budget);
7287 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7290 if (unlikely(work_done >= budget))
7293 if (tg3_flag(tp, TAGGED_STATUS)) {
7294 /* tnapi->last_tag is used in tg3_int_reenable() below
7295 * to tell the hw how much work has been processed,
7296 * so we must read it before checking for more work.
7298 tnapi->last_tag = sblk->status_tag;
7299 tnapi->last_irq_tag = tnapi->last_tag;
7302 sblk->status &= ~SD_STATUS_UPDATED;
7304 if (likely(!tg3_has_work(tnapi))) {
7305 napi_complete(napi);
7306 tg3_int_reenable(tnapi);
7314 /* work_done is guaranteed to be less than budget. */
7315 napi_complete(napi);
7316 tg3_reset_task_schedule(tp);
7320 static void tg3_napi_disable(struct tg3 *tp)
7324 for (i = tp->irq_cnt - 1; i >= 0; i--)
7325 napi_disable(&tp->napi[i].napi);
7328 static void tg3_napi_enable(struct tg3 *tp)
7332 for (i = 0; i < tp->irq_cnt; i++)
7333 napi_enable(&tp->napi[i].napi);
7336 static void tg3_napi_init(struct tg3 *tp)
7340 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7341 for (i = 1; i < tp->irq_cnt; i++)
7342 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7345 static void tg3_napi_fini(struct tg3 *tp)
7349 for (i = 0; i < tp->irq_cnt; i++)
7350 netif_napi_del(&tp->napi[i].napi);
7353 static inline void tg3_netif_stop(struct tg3 *tp)
7355 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7356 tg3_napi_disable(tp);
7357 netif_carrier_off(tp->dev);
7358 netif_tx_disable(tp->dev);
7361 /* tp->lock must be held */
7362 static inline void tg3_netif_start(struct tg3 *tp)
7366 /* NOTE: unconditional netif_tx_wake_all_queues is only
7367 * appropriate so long as all callers are assured to
7368 * have free tx slots (such as after tg3_init_hw)
7370 netif_tx_wake_all_queues(tp->dev);
7373 netif_carrier_on(tp->dev);
7375 tg3_napi_enable(tp);
7376 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7377 tg3_enable_ints(tp);
7380 static void tg3_irq_quiesce(struct tg3 *tp)
7384 BUG_ON(tp->irq_sync);
7389 for (i = 0; i < tp->irq_cnt; i++)
7390 synchronize_irq(tp->napi[i].irq_vec);
7393 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7394 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7395 * with as well. Most of the time, this is not necessary except when
7396 * shutting down the device.
7398 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7400 spin_lock_bh(&tp->lock);
7402 tg3_irq_quiesce(tp);
7405 static inline void tg3_full_unlock(struct tg3 *tp)
7407 spin_unlock_bh(&tp->lock);
7410 /* One-shot MSI handler - Chip automatically disables interrupt
7411 * after sending MSI so driver doesn't have to do it.
7413 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7415 struct tg3_napi *tnapi = dev_id;
7416 struct tg3 *tp = tnapi->tp;
7418 prefetch(tnapi->hw_status);
7420 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7422 if (likely(!tg3_irq_sync(tp)))
7423 napi_schedule(&tnapi->napi);
7428 /* MSI ISR - No need to check for interrupt sharing and no need to
7429 * flush status block and interrupt mailbox. PCI ordering rules
7430 * guarantee that MSI will arrive after the status block.
7432 static irqreturn_t tg3_msi(int irq, void *dev_id)
7434 struct tg3_napi *tnapi = dev_id;
7435 struct tg3 *tp = tnapi->tp;
7437 prefetch(tnapi->hw_status);
7439 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7441 * Writing any value to intr-mbox-0 clears PCI INTA# and
7442 * chip-internal interrupt pending events.
7443 * Writing non-zero to intr-mbox-0 additionally tells the
7444 * NIC to stop sending us irqs, engaging "in-intr-handler"
7447 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7448 if (likely(!tg3_irq_sync(tp)))
7449 napi_schedule(&tnapi->napi);
7451 return IRQ_RETVAL(1);
7454 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7456 struct tg3_napi *tnapi = dev_id;
7457 struct tg3 *tp = tnapi->tp;
7458 struct tg3_hw_status *sblk = tnapi->hw_status;
7459 unsigned int handled = 1;
7461 /* In INTx mode, the interrupt can arrive at the CPU before the
7462 * status block posted just prior to it is visible in host memory.
7463 * Reading the PCI State register will confirm whether the
7464 * interrupt is ours and will flush the status block.
7466 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7467 if (tg3_flag(tp, CHIP_RESETTING) ||
7468 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7475 * Writing any value to intr-mbox-0 clears PCI INTA# and
7476 * chip-internal interrupt pending events.
7477 * Writing non-zero to intr-mbox-0 additionally tells the
7478 * NIC to stop sending us irqs, engaging "in-intr-handler"
7481 * Flush the mailbox to de-assert the IRQ immediately to prevent
7482 * spurious interrupts. The flush impacts performance but
7483 * excessive spurious interrupts can be worse in some cases.
7485 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7486 if (tg3_irq_sync(tp))
7488 sblk->status &= ~SD_STATUS_UPDATED;
7489 if (likely(tg3_has_work(tnapi))) {
7490 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7491 napi_schedule(&tnapi->napi);
7493 /* No work, shared interrupt perhaps? re-enable
7494 * interrupts, and flush that PCI write
7496 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7500 return IRQ_RETVAL(handled);
7503 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7505 struct tg3_napi *tnapi = dev_id;
7506 struct tg3 *tp = tnapi->tp;
7507 struct tg3_hw_status *sblk = tnapi->hw_status;
7508 unsigned int handled = 1;
7510 /* In INTx mode, the interrupt can arrive at the CPU before the
7511 * status block posted just prior to it is visible in host memory.
7512 * Reading the PCI State register will confirm whether the
7513 * interrupt is ours and will flush the status block.
7515 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7516 if (tg3_flag(tp, CHIP_RESETTING) ||
7517 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7524 * Writing any value to intr-mbox-0 clears PCI INTA# and
7525 * chip-internal interrupt pending events.
7526 * Writing non-zero to intr-mbox-0 additionally tells the
7527 * NIC to stop sending us irqs, engaging "in-intr-handler"
7530 * Flush the mailbox to de-assert the IRQ immediately to prevent
7531 * spurious interrupts. The flush impacts performance but
7532 * excessive spurious interrupts can be worse in some cases.
7534 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7537 * In a shared interrupt configuration, sometimes other devices'
7538 * interrupts will scream. We record the current status tag here
7539 * so that the above check can report that the screaming interrupts
7540 * are unhandled. Eventually they will be silenced.
7542 tnapi->last_irq_tag = sblk->status_tag;
7544 if (tg3_irq_sync(tp))
7547 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7549 napi_schedule(&tnapi->napi);
7552 return IRQ_RETVAL(handled);
7555 /* ISR for interrupt test */
7556 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7558 struct tg3_napi *tnapi = dev_id;
7559 struct tg3 *tp = tnapi->tp;
7560 struct tg3_hw_status *sblk = tnapi->hw_status;
7562 if ((sblk->status & SD_STATUS_UPDATED) ||
7563 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7564 tg3_disable_ints(tp);
7565 return IRQ_RETVAL(1);
7567 return IRQ_RETVAL(0);
7570 #ifdef CONFIG_NET_POLL_CONTROLLER
7571 static void tg3_poll_controller(struct net_device *dev)
7574 struct tg3 *tp = netdev_priv(dev);
7576 if (tg3_irq_sync(tp))
7579 for (i = 0; i < tp->irq_cnt; i++)
7580 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7584 static void tg3_tx_timeout(struct net_device *dev)
7586 struct tg3 *tp = netdev_priv(dev);
7588 if (netif_msg_tx_err(tp)) {
7589 netdev_err(dev, "transmit timed out, resetting\n");
7593 tg3_reset_task_schedule(tp);
7596 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7597 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7599 u32 base = (u32) mapping & 0xffffffff;
7601 return base + len + 8 < base;
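/* Example: base = 0xfffffff0 and len = 0x20 gives
 * base + len + 8 = 0x100000018, which truncates to 0x18 in 32 bits;
 * 0x18 < base, so the buffer straddles a 4GB boundary and the
 * hardware workaround path must split it.
 */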
7604 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7605 * of any 4GB boundaries: 4G, 8G, etc
7607 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7610 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7611 u32 base = (u32) mapping & 0xffffffff;
7613 return ((base + len + (mss & 0x3fff)) < base);
7618 /* Test for DMA addresses > 40-bit */
7619 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7622 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7623 if (tg3_flag(tp, 40BIT_DMA_BUG))
7624 return ((u64) mapping + len) > DMA_BIT_MASK(40);
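/* Fill in one transmit buffer descriptor: the 64-bit DMA address is
 * split into hi/lo words, the length and flags share len_flags, and
 * the mss and vlan tag share vlan_tag.
 */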
7631 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7632 dma_addr_t mapping, u32 len, u32 flags,
7635 txbd->addr_hi = ((u64) mapping >> 32);
7636 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7637 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7638 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7641 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7642 dma_addr_t map, u32 len, u32 flags,
7645 struct tg3 *tp = tnapi->tp;
7648 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7651 if (tg3_4g_overflow_test(map, len))
7654 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7657 if (tg3_40bit_overflow_test(tp, map, len))
7660 if (tp->dma_limit) {
7661 u32 prvidx = *entry;
7662 u32 tmp_flag = flags & ~TXD_FLAG_END;
7663 while (len > tp->dma_limit && *budget) {
7664 u32 frag_len = tp->dma_limit;
7665 len -= tp->dma_limit;
7667 /* Avoid the 8-byte DMA problem: emit a half-size chunk when the remainder would be <= 8 bytes, keeping the final fragment above 8 bytes. */
7669 len += tp->dma_limit / 2;
7670 frag_len = tp->dma_limit / 2;
7673 tnapi->tx_buffers[*entry].fragmented = true;
7675 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7676 frag_len, tmp_flag, mss, vlan);
7679 *entry = NEXT_TX(*entry);
7686 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7687 len, flags, mss, vlan);
7689 *entry = NEXT_TX(*entry);
7692 tnapi->tx_buffers[prvidx].fragmented = false;
7696 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7697 len, flags, mss, vlan);
7698 *entry = NEXT_TX(*entry);
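/* Undo the DMA mappings for one transmitted skb: unmap the linear
 * head, then walk NEXT_TX() through every page fragment, including
 * the extra descriptors that tg3_tx_frag_set() inserted when it had
 * to split a buffer for one of the DMA workarounds.
 */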
7704 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7707 struct sk_buff *skb;
7708 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7713 pci_unmap_single(tnapi->tp->pdev,
7714 dma_unmap_addr(txb, mapping),
7718 while (txb->fragmented) {
7719 txb->fragmented = false;
7720 entry = NEXT_TX(entry);
7721 txb = &tnapi->tx_buffers[entry];
7724 for (i = 0; i <= last; i++) {
7725 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7727 entry = NEXT_TX(entry);
7728 txb = &tnapi->tx_buffers[entry];
7730 pci_unmap_page(tnapi->tp->pdev,
7731 dma_unmap_addr(txb, mapping),
7732 skb_frag_size(frag), PCI_DMA_TODEVICE);
7734 while (txb->fragmented) {
7735 txb->fragmented = false;
7736 entry = NEXT_TX(entry);
7737 txb = &tnapi->tx_buffers[entry];
7742 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7743 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7744 struct sk_buff **pskb,
7745 u32 *entry, u32 *budget,
7746 u32 base_flags, u32 mss, u32 vlan)
7748 struct tg3 *tp = tnapi->tp;
7749 struct sk_buff *new_skb, *skb = *pskb;
7750 dma_addr_t new_addr = 0;
7753 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7754 new_skb = skb_copy(skb, GFP_ATOMIC);
7756 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7758 new_skb = skb_copy_expand(skb,
7759 skb_headroom(skb) + more_headroom,
7760 skb_tailroom(skb), GFP_ATOMIC);
7766 /* New SKB is guaranteed to be linear. */
7767 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7769 /* Make sure the mapping succeeded */
7770 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7771 dev_kfree_skb(new_skb);
7774 u32 save_entry = *entry;
7776 base_flags |= TXD_FLAG_END;
7778 tnapi->tx_buffers[*entry].skb = new_skb;
7779 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7782 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7783 new_skb->len, base_flags,
7785 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7786 dev_kfree_skb(new_skb);
7797 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7799 /* Use GSO to work around a rare TSO bug that may be triggered when the
7800 * TSO header is greater than 80 bytes.
7802 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7804 struct sk_buff *segs, *nskb;
7805 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7807 /* Estimate the number of fragments in the worst case */
7808 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7809 netif_stop_queue(tp->dev);
7811 /* netif_tx_stop_queue() must be done before checking
7812 * tx index in tg3_tx_avail() below, because in
7813 * tg3_tx(), we update tx index before checking for
7814 * netif_tx_queue_stopped().
7817 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7818 return NETDEV_TX_BUSY;
7820 netif_wake_queue(tp->dev);
7823 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7825 goto tg3_tso_bug_end;
7831 tg3_start_xmit(nskb, tp->dev);
7837 return NETDEV_TX_OK;
7840 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7841 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7843 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7845 struct tg3 *tp = netdev_priv(dev);
7846 u32 len, entry, base_flags, mss, vlan = 0;
7848 int i = -1, would_hit_hwbug;
7850 struct tg3_napi *tnapi;
7851 struct netdev_queue *txq;
7854 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7855 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7856 if (tg3_flag(tp, ENABLE_TSS))
7859 budget = tg3_tx_avail(tnapi);
7861 /* We are running in BH disabled context with netif_tx_lock
7862 * and TX reclaim runs via tp->napi.poll inside of a software
7863 * interrupt. Furthermore, IRQ processing runs lockless so we have
7864 * no IRQ context deadlocks to worry about either. Rejoice!
7866 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7867 if (!netif_tx_queue_stopped(txq)) {
7868 netif_tx_stop_queue(txq);
7870 /* This is a hard error, log it. */
7872 "BUG! Tx Ring full when queue awake!\n");
7874 return NETDEV_TX_BUSY;
7877 entry = tnapi->tx_prod;
7879 if (skb->ip_summed == CHECKSUM_PARTIAL)
7880 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7882 mss = skb_shinfo(skb)->gso_size;
7885 u32 tcp_opt_len, hdr_len;
7887 if (skb_header_cloned(skb) &&
7888 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7892 tcp_opt_len = tcp_optlen(skb);
7894 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7896 if (!skb_is_gso_v6(skb)) {
7898 iph->tot_len = htons(mss + hdr_len);
7901 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7902 tg3_flag(tp, TSO_BUG))
7903 return tg3_tso_bug(tp, skb);
7905 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7906 TXD_FLAG_CPU_POST_DMA);
7908 if (tg3_flag(tp, HW_TSO_1) ||
7909 tg3_flag(tp, HW_TSO_2) ||
7910 tg3_flag(tp, HW_TSO_3)) {
7911 tcp_hdr(skb)->check = 0;
7912 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7914 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7919 if (tg3_flag(tp, HW_TSO_3)) {
7920 mss |= (hdr_len & 0xc) << 12;
7922 base_flags |= 0x00000010;
7923 base_flags |= (hdr_len & 0x3e0) << 5;
7924 } else if (tg3_flag(tp, HW_TSO_2))
7925 mss |= hdr_len << 9;
7926 else if (tg3_flag(tp, HW_TSO_1) ||
7927 tg3_asic_rev(tp) == ASIC_REV_5705) {
7928 if (tcp_opt_len || iph->ihl > 5) {
7931 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7932 mss |= (tsflags << 11);
7935 if (tcp_opt_len || iph->ihl > 5) {
7938 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7939 base_flags |= tsflags << 12;
7944 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7945 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7946 base_flags |= TXD_FLAG_JMB_PKT;
7948 if (vlan_tx_tag_present(skb)) {
7949 base_flags |= TXD_FLAG_VLAN;
7950 vlan = vlan_tx_tag_get(skb);
7953 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7954 tg3_flag(tp, TX_TSTAMP_EN)) {
7955 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7956 base_flags |= TXD_FLAG_HWTSTAMP;
7959 len = skb_headlen(skb);
7961 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7962 if (pci_dma_mapping_error(tp->pdev, mapping))
7966 tnapi->tx_buffers[entry].skb = skb;
7967 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7969 would_hit_hwbug = 0;
7971 if (tg3_flag(tp, 5701_DMA_BUG))
7972 would_hit_hwbug = 1;
7974 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7975 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7977 would_hit_hwbug = 1;
7978 } else if (skb_shinfo(skb)->nr_frags > 0) {
7981 if (!tg3_flag(tp, HW_TSO_1) &&
7982 !tg3_flag(tp, HW_TSO_2) &&
7983 !tg3_flag(tp, HW_TSO_3))
7986 /* Now loop through additional data
7987 * fragments, and queue them.
7989 last = skb_shinfo(skb)->nr_frags - 1;
7990 for (i = 0; i <= last; i++) {
7991 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7993 len = skb_frag_size(frag);
7994 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7995 len, DMA_TO_DEVICE);
7997 tnapi->tx_buffers[entry].skb = NULL;
7998 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8000 if (dma_mapping_error(&tp->pdev->dev, mapping))
8004 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8006 ((i == last) ? TXD_FLAG_END : 0),
8008 would_hit_hwbug = 1;
8014 if (would_hit_hwbug) {
8015 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8017 /* If the workaround fails due to memory/mapping
8018 * failure, silently drop this packet.
8020 entry = tnapi->tx_prod;
8021 budget = tg3_tx_avail(tnapi);
8022 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8023 base_flags, mss, vlan))
8027 skb_tx_timestamp(skb);
8028 netdev_tx_sent_queue(txq, skb->len);
8030 /* Sync BD data before updating mailbox */
8033 /* Packets are ready, update Tx producer idx locally and on card. */
8034 tw32_tx_mbox(tnapi->prodmbox, entry);
8036 tnapi->tx_prod = entry;
8037 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8038 netif_tx_stop_queue(txq);
8040 /* netif_tx_stop_queue() must be done before checking
8041 * tx index in tg3_tx_avail() below, because in
8042 * tg3_tx(), we update tx index before checking for
8043 * netif_tx_queue_stopped().
8046 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8047 netif_tx_wake_queue(txq);
8051 return NETDEV_TX_OK;
8054 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8055 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8060 return NETDEV_TX_OK;
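/* Switch the MAC into (or out of) internal loopback, forcing the
 * port mode to MII or GMII to match the PHY type; toggled from
 * tg3_set_loopback() below.
 */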
8063 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8066 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8067 MAC_MODE_PORT_MODE_MASK);
8069 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8071 if (!tg3_flag(tp, 5705_PLUS))
8072 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8074 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8075 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8077 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8079 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8081 if (tg3_flag(tp, 5705_PLUS) ||
8082 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8083 tg3_asic_rev(tp) == ASIC_REV_5700)
8084 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8087 tw32(MAC_MODE, tp->mac_mode);
8091 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8093 u32 val, bmcr, mac_mode, ptest = 0;
8095 tg3_phy_toggle_apd(tp, false);
8096 tg3_phy_toggle_automdix(tp, false);
8098 if (extlpbk && tg3_phy_set_extloopbk(tp))
8101 bmcr = BMCR_FULLDPLX;
8106 bmcr |= BMCR_SPEED100;
8110 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8112 bmcr |= BMCR_SPEED100;
8115 bmcr |= BMCR_SPEED1000;
8120 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8121 tg3_readphy(tp, MII_CTRL1000, &val);
8122 val |= CTL1000_AS_MASTER |
8123 CTL1000_ENABLE_MASTER;
8124 tg3_writephy(tp, MII_CTRL1000, val);
8126 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8127 MII_TG3_FET_PTEST_TRIM_2;
8128 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8131 bmcr |= BMCR_LOOPBACK;
8133 tg3_writephy(tp, MII_BMCR, bmcr);
8135 /* The write needs to be flushed for the FETs */
8136 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8137 tg3_readphy(tp, MII_BMCR, &bmcr);
8141 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8142 tg3_asic_rev(tp) == ASIC_REV_5785) {
8143 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8144 MII_TG3_FET_PTEST_FRC_TX_LINK |
8145 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8147 /* The write needs to be flushed for the AC131 */
8148 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8151 /* Reset to prevent losing 1st rx packet intermittently */
8152 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8153 tg3_flag(tp, 5780_CLASS)) {
8154 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8156 tw32_f(MAC_RX_MODE, tp->rx_mode);
8159 mac_mode = tp->mac_mode &
8160 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8161 if (speed == SPEED_1000)
8162 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8164 mac_mode |= MAC_MODE_PORT_MODE_MII;
8166 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8167 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8169 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8170 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8171 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8172 mac_mode |= MAC_MODE_LINK_POLARITY;
8174 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8175 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8178 tw32(MAC_MODE, mac_mode);
8184 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8186 struct tg3 *tp = netdev_priv(dev);
8188 if (features & NETIF_F_LOOPBACK) {
8189 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8192 spin_lock_bh(&tp->lock);
8193 tg3_mac_loopback(tp, true);
8194 netif_carrier_on(tp->dev);
8195 spin_unlock_bh(&tp->lock);
8196 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8198 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8201 spin_lock_bh(&tp->lock);
8202 tg3_mac_loopback(tp, false);
8203 /* Force link status check */
8204 tg3_setup_phy(tp, true);
8205 spin_unlock_bh(&tp->lock);
8206 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8210 static netdev_features_t tg3_fix_features(struct net_device *dev,
8211 netdev_features_t features)
8213 struct tg3 *tp = netdev_priv(dev);
8215 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8216 features &= ~NETIF_F_ALL_TSO;
8221 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8223 netdev_features_t changed = dev->features ^ features;
8225 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8226 tg3_set_loopback(dev, features);
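/* Free every rx data buffer still posted to a producer ring set.
 * Per-vector rings only walk the span between the consumer and
 * producer indices; the vector 0 ring is swept across its full
 * extent.
 */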
8231 static void tg3_rx_prodring_free(struct tg3 *tp,
8232 struct tg3_rx_prodring_set *tpr)
8236 if (tpr != &tp->napi[0].prodring) {
8237 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8238 i = (i + 1) & tp->rx_std_ring_mask)
8239 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8242 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8243 for (i = tpr->rx_jmb_cons_idx;
8244 i != tpr->rx_jmb_prod_idx;
8245 i = (i + 1) & tp->rx_jmb_ring_mask) {
8246 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8254 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8255 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8258 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8259 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8260 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8265 /* Initialize rx rings for packet processing.
8267 * The chip has been shut down and the driver detached from
8268 * the networking stack, so no interrupts or new tx packets will
8269 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
8272 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8273 struct tg3_rx_prodring_set *tpr)
8275 u32 i, rx_pkt_dma_sz;
8277 tpr->rx_std_cons_idx = 0;
8278 tpr->rx_std_prod_idx = 0;
8279 tpr->rx_jmb_cons_idx = 0;
8280 tpr->rx_jmb_prod_idx = 0;
8282 if (tpr != &tp->napi[0].prodring) {
8283 memset(&tpr->rx_std_buffers[0], 0,
8284 TG3_RX_STD_BUFF_RING_SIZE(tp));
8285 if (tpr->rx_jmb_buffers)
8286 memset(&tpr->rx_jmb_buffers[0], 0,
8287 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8291 /* Zero out all descriptors. */
8292 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8294 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8295 if (tg3_flag(tp, 5780_CLASS) &&
8296 tp->dev->mtu > ETH_DATA_LEN)
8297 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8298 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8300 /* Initialize invariants of the rings; we only set this
8301 * stuff once. This works because the card does not
8302 * write into the rx buffer posting rings.
8304 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8305 struct tg3_rx_buffer_desc *rxd;
8307 rxd = &tpr->rx_std[i];
8308 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8309 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8310 rxd->opaque = (RXD_OPAQUE_RING_STD |
8311 (i << RXD_OPAQUE_INDEX_SHIFT));
8314 /* Now allocate fresh SKBs for each rx ring. */
8315 for (i = 0; i < tp->rx_pending; i++) {
8316 unsigned int frag_size;
8318 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8320 netdev_warn(tp->dev,
8321 "Using a smaller RX standard ring. Only "
8322 "%d out of %d buffers were allocated "
8323 "successfully\n", i, tp->rx_pending);
8331 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8334 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8336 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8339 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8340 struct tg3_rx_buffer_desc *rxd;
8342 rxd = &tpr->rx_jmb[i].std;
8343 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8344 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8346 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8347 (i << RXD_OPAQUE_INDEX_SHIFT));
8350 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8351 unsigned int frag_size;
8353 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8355 netdev_warn(tp->dev,
8356 "Using a smaller RX jumbo ring. Only %d "
8357 "out of %d buffers were allocated "
8358 "successfully\n", i, tp->rx_jumbo_pending);
8361 tp->rx_jumbo_pending = i;
8370 tg3_rx_prodring_free(tp, tpr);
8374 static void tg3_rx_prodring_fini(struct tg3 *tp,
8375 struct tg3_rx_prodring_set *tpr)
8377 kfree(tpr->rx_std_buffers);
8378 tpr->rx_std_buffers = NULL;
8379 kfree(tpr->rx_jmb_buffers);
8380 tpr->rx_jmb_buffers = NULL;
8382 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8383 tpr->rx_std, tpr->rx_std_mapping);
8387 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8388 tpr->rx_jmb, tpr->rx_jmb_mapping);
8393 static int tg3_rx_prodring_init(struct tg3 *tp,
8394 struct tg3_rx_prodring_set *tpr)
8396 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8398 if (!tpr->rx_std_buffers)
8401 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8402 TG3_RX_STD_RING_BYTES(tp),
8403 &tpr->rx_std_mapping,
8408 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8409 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8411 if (!tpr->rx_jmb_buffers)
8414 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8415 TG3_RX_JMB_RING_BYTES(tp),
8416 &tpr->rx_jmb_mapping,
8425 tg3_rx_prodring_fini(tp, tpr);
8429 /* Free up pending packets in all rx/tx rings.
8431 * The chip has been shut down and the driver detached from
8432 * the networking stack, so no interrupts or new tx packets will
8433 * end up in the driver. tp->{tx,}lock is not held and we are not
8434 * in an interrupt context and thus may sleep.
8436 static void tg3_free_rings(struct tg3 *tp)
8440 for (j = 0; j < tp->irq_cnt; j++) {
8441 struct tg3_napi *tnapi = &tp->napi[j];
8443 tg3_rx_prodring_free(tp, &tnapi->prodring);
8445 if (!tnapi->tx_buffers)
8448 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8449 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8454 tg3_tx_skb_unmap(tnapi, i,
8455 skb_shinfo(skb)->nr_frags - 1);
8457 dev_kfree_skb_any(skb);
8459 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8463 /* Initialize tx/rx rings for packet processing.
8465 * The chip has been shut down and the driver detached from
8466 * the networking stack, so no interrupts or new tx packets will
8467 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
8470 static int tg3_init_rings(struct tg3 *tp)
8474 /* Free up all the SKBs. */
8477 for (i = 0; i < tp->irq_cnt; i++) {
8478 struct tg3_napi *tnapi = &tp->napi[i];
8480 tnapi->last_tag = 0;
8481 tnapi->last_irq_tag = 0;
8482 tnapi->hw_status->status = 0;
8483 tnapi->hw_status->status_tag = 0;
8484 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8489 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8491 tnapi->rx_rcb_ptr = 0;
8493 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8495 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8504 static void tg3_mem_tx_release(struct tg3 *tp)
8508 for (i = 0; i < tp->irq_max; i++) {
8509 struct tg3_napi *tnapi = &tp->napi[i];
8511 if (tnapi->tx_ring) {
8512 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8513 tnapi->tx_ring, tnapi->tx_desc_mapping);
8514 tnapi->tx_ring = NULL;
8517 kfree(tnapi->tx_buffers);
8518 tnapi->tx_buffers = NULL;
8522 static int tg3_mem_tx_acquire(struct tg3 *tp)
8525 struct tg3_napi *tnapi = &tp->napi[0];
8527 /* If multivector TSS is enabled, vector 0 does not handle
8528 * tx interrupts. Don't allocate any resources for it.
8530 if (tg3_flag(tp, ENABLE_TSS))
8533 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8534 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8535 TG3_TX_RING_SIZE, GFP_KERNEL);
8536 if (!tnapi->tx_buffers)
8539 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8541 &tnapi->tx_desc_mapping,
8543 if (!tnapi->tx_ring)
8550 tg3_mem_tx_release(tp);
8554 static void tg3_mem_rx_release(struct tg3 *tp)
8558 for (i = 0; i < tp->irq_max; i++) {
8559 struct tg3_napi *tnapi = &tp->napi[i];
8561 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8566 dma_free_coherent(&tp->pdev->dev,
8567 TG3_RX_RCB_RING_BYTES(tp),
8569 tnapi->rx_rcb_mapping);
8570 tnapi->rx_rcb = NULL;
8574 static int tg3_mem_rx_acquire(struct tg3 *tp)
8576 unsigned int i, limit;
8578 limit = tp->rxq_cnt;
8580 /* If RSS is enabled, we need a (dummy) producer ring
8581 * set on vector zero. This is the true hw prodring.
8583 if (tg3_flag(tp, ENABLE_RSS))
8586 for (i = 0; i < limit; i++) {
8587 struct tg3_napi *tnapi = &tp->napi[i];
8589 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8592 /* If multivector RSS is enabled, vector 0
8593 * does not handle rx or tx interrupts.
8594 * Don't allocate any resources for it.
8596 if (!i && tg3_flag(tp, ENABLE_RSS))
8599 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8600 TG3_RX_RCB_RING_BYTES(tp),
8601 &tnapi->rx_rcb_mapping,
8610 tg3_mem_rx_release(tp);
8615 * Must not be invoked with interrupt sources disabled and
8616 * the hardware shut down.
8618 static void tg3_free_consistent(struct tg3 *tp)
8622 for (i = 0; i < tp->irq_cnt; i++) {
8623 struct tg3_napi *tnapi = &tp->napi[i];
8625 if (tnapi->hw_status) {
8626 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8628 tnapi->status_mapping);
8629 tnapi->hw_status = NULL;
8633 tg3_mem_rx_release(tp);
8634 tg3_mem_tx_release(tp);
8637 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8638 tp->hw_stats, tp->stats_mapping);
8639 tp->hw_stats = NULL;
8644 * Must not be invoked with interrupt sources disabled and
8645 * the hardware shut down. Can sleep.
8647 static int tg3_alloc_consistent(struct tg3 *tp)
8651 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8652 sizeof(struct tg3_hw_stats),
8653 &tp->stats_mapping, GFP_KERNEL);
8657 for (i = 0; i < tp->irq_cnt; i++) {
8658 struct tg3_napi *tnapi = &tp->napi[i];
8659 struct tg3_hw_status *sblk;
8661 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8663 &tnapi->status_mapping,
8665 if (!tnapi->hw_status)
8668 sblk = tnapi->hw_status;
8670 if (tg3_flag(tp, ENABLE_RSS)) {
8671 u16 *prodptr = NULL;
8674 * When RSS is enabled, the status block format changes
8675 * slightly. The "rx_jumbo_consumer", "reserved",
8676 * and "rx_mini_consumer" members get mapped to the
8677 * other three rx return ring producer indexes.
8681 prodptr = &sblk->idx[0].rx_producer;
8684 prodptr = &sblk->rx_jumbo_consumer;
8687 prodptr = &sblk->reserved;
8690 prodptr = &sblk->rx_mini_consumer;
8693 tnapi->rx_rcb_prod_idx = prodptr;
8695 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8699 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8705 tg3_free_consistent(tp);
8709 #define MAX_WAIT_CNT 1000
8711 /* To stop a block, clear the enable bit and poll till it
8712 * clears. tp->lock is held.
8714 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8719 if (tg3_flag(tp, 5705_PLUS)) {
8726 /* We can't enable/disable these bits of the
8727 * 5705/5750, just say success.
8740 for (i = 0; i < MAX_WAIT_CNT; i++) {
8741 if (pci_channel_offline(tp->pdev)) {
8742 dev_err(&tp->pdev->dev,
8743 "tg3_stop_block device offline, "
8744 "ofs=%lx enable_bit=%x\n",
8751 if ((val & enable_bit) == 0)
8755 if (i == MAX_WAIT_CNT && !silent) {
8756 dev_err(&tp->pdev->dev,
8757 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8765 /* tp->lock is held. */
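/* Quiesce the hardware: disable interrupts, stop the receive blocks
 * first, then the send and DMA blocks, wait for MAC TX to drain,
 * stop host coalescing, the buffer manager and the memory arbiter,
 * and finally clear every status block.
 */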
8766 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8770 tg3_disable_ints(tp);
8772 if (pci_channel_offline(tp->pdev)) {
8773 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8774 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8779 tp->rx_mode &= ~RX_MODE_ENABLE;
8780 tw32_f(MAC_RX_MODE, tp->rx_mode);
8783 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8784 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8785 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8786 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8787 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8788 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8790 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8791 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8792 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8793 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8794 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8795 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8796 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8798 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8799 tw32_f(MAC_MODE, tp->mac_mode);
8802 tp->tx_mode &= ~TX_MODE_ENABLE;
8803 tw32_f(MAC_TX_MODE, tp->tx_mode);
8805 for (i = 0; i < MAX_WAIT_CNT; i++) {
8807 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8810 if (i >= MAX_WAIT_CNT) {
8811 dev_err(&tp->pdev->dev,
8812 "%s timed out, TX_MODE_ENABLE will not clear "
8813 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8817 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8818 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8819 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8821 tw32(FTQ_RESET, 0xffffffff);
8822 tw32(FTQ_RESET, 0x00000000);
8824 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8825 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8828 for (i = 0; i < tp->irq_cnt; i++) {
8829 struct tg3_napi *tnapi = &tp->napi[i];
8830 if (tnapi->hw_status)
8831 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8837 /* Save PCI command register before chip reset */
8838 static void tg3_save_pci_state(struct tg3 *tp)
8840 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8843 /* Restore PCI state after chip reset */
8844 static void tg3_restore_pci_state(struct tg3 *tp)
8848 /* Re-enable indirect register accesses. */
8849 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8850 tp->misc_host_ctrl);
8852 /* Set MAX PCI retry to zero. */
8853 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8854 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8855 tg3_flag(tp, PCIX_MODE))
8856 val |= PCISTATE_RETRY_SAME_DMA;
8857 /* Allow reads and writes to the APE register and memory space. */
8858 if (tg3_flag(tp, ENABLE_APE))
8859 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8860 PCISTATE_ALLOW_APE_SHMEM_WR |
8861 PCISTATE_ALLOW_APE_PSPACE_WR;
8862 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8864 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8866 if (!tg3_flag(tp, PCI_EXPRESS)) {
8867 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8868 tp->pci_cacheline_sz);
8869 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8873 /* Make sure PCI-X relaxed ordering bit is clear. */
8874 if (tg3_flag(tp, PCIX_MODE)) {
8877 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8879 pcix_cmd &= ~PCI_X_CMD_ERO;
8880 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8884 if (tg3_flag(tp, 5780_CLASS)) {
8886 /* Chip reset on 5780 will reset MSI enable bit,
8887 * so need to restore it.
8889 if (tg3_flag(tp, USING_MSI)) {
8892 pci_read_config_word(tp->pdev,
8893 tp->msi_cap + PCI_MSI_FLAGS,
8895 pci_write_config_word(tp->pdev,
8896 tp->msi_cap + PCI_MSI_FLAGS,
8897 ctrl | PCI_MSI_FLAGS_ENABLE);
8898 val = tr32(MSGINT_MODE);
8899 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8904 /* tp->lock is held. */
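/* Reset the chip via the GRC core-clock reset bit. PCI config state
 * is saved and restored around the reset, the irq handlers are
 * fenced off with CHIP_RESETTING while the PCI memory enable bit may
 * be clear, and the ASF configuration is re-probed from NVRAM
 * afterwards.
 */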
8905 static int tg3_chip_reset(struct tg3 *tp)
8908 void (*write_op)(struct tg3 *, u32, u32);
8913 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8915 /* No matching tg3_nvram_unlock() after this because
8916 * chip reset below will undo the nvram lock.
8918 tp->nvram_lock_cnt = 0;
8920 /* GRC_MISC_CFG core clock reset will clear the memory
8921 * enable bit in PCI register 4 and the MSI enable bit
8922 * on some chips, so we save relevant registers here.
8924 tg3_save_pci_state(tp);
8926 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8927 tg3_flag(tp, 5755_PLUS))
8928 tw32(GRC_FASTBOOT_PC, 0);
8931 * We must avoid the readl() that normally takes place.
8932 * It locks machines, causes machine checks, and other
8933 * fun things. So, temporarily disable the 5701
8934 * hardware workaround, while we do the reset.
8936 write_op = tp->write32;
8937 if (write_op == tg3_write_flush_reg32)
8938 tp->write32 = tg3_write32;
8940 /* Prevent the irq handler from reading or writing PCI registers
8941 * during chip reset when the memory enable bit in the PCI command
8942 * register may be cleared. The chip does not generate interrupt
8943 * at this time, but the irq handler may still be called due to irq
8944 * sharing or irqpoll.
8946 tg3_flag_set(tp, CHIP_RESETTING);
8947 for (i = 0; i < tp->irq_cnt; i++) {
8948 struct tg3_napi *tnapi = &tp->napi[i];
8949 if (tnapi->hw_status) {
8950 tnapi->hw_status->status = 0;
8951 tnapi->hw_status->status_tag = 0;
8953 tnapi->last_tag = 0;
8954 tnapi->last_irq_tag = 0;
8958 for (i = 0; i < tp->irq_cnt; i++)
8959 synchronize_irq(tp->napi[i].irq_vec);
8961 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8962 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8963 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8967 val = GRC_MISC_CFG_CORECLK_RESET;
8969 if (tg3_flag(tp, PCI_EXPRESS)) {
8970 /* Force PCIe 1.0a mode */
8971 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8972 !tg3_flag(tp, 57765_PLUS) &&
8973 tr32(TG3_PCIE_PHY_TSTCTL) ==
8974 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8975 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8977 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8978 tw32(GRC_MISC_CFG, (1 << 29));
8983 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8984 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8985 tw32(GRC_VCPU_EXT_CTRL,
8986 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8989 /* Manage gphy power for all CPMU-absent PCIe devices. */
8990 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8991 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8993 tw32(GRC_MISC_CFG, val);
8995 /* restore 5701 hardware bug workaround write method */
8996 tp->write32 = write_op;
8998 /* Unfortunately, we have to delay before the PCI read back.
8999 * Some 575X chips will not even respond to a PCI cfg access
9000 * when the reset command is given to the chip.
9002 * How do these hardware designers expect things to work
9003 * properly if the PCI write is posted for a long period
9004 * of time? It is always necessary to have some method by
9005 * which a register read back can occur to push out the write
9006 * that performs the reset.
9008 * For most tg3 variants the trick below has worked.
9013 /* Flush PCI posted writes. The normal MMIO registers
9014 * are inaccessible at this time so this is the only
9015 * way to do this reliably (actually, this is no longer
9016 * the case, see above). I tried to use indirect
9017 * register read/write but this upset some 5701 variants.
9019 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9023 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9026 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9030 /* Wait for link training to complete. */
9031 for (j = 0; j < 5000; j++)
9034 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9035 pci_write_config_dword(tp->pdev, 0xc4,
9036 cfg_val | (1 << 15));
9039 /* Clear the "no snoop" and "relaxed ordering" bits. */
9040 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9042 * Older PCIe devices only support the 128 byte
9043 * MPS setting. Enforce the restriction.
9045 if (!tg3_flag(tp, CPMU_PRESENT))
9046 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9047 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9049 /* Clear error status */
9050 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9051 PCI_EXP_DEVSTA_CED |
9052 PCI_EXP_DEVSTA_NFED |
9053 PCI_EXP_DEVSTA_FED |
9054 PCI_EXP_DEVSTA_URD);
9057 tg3_restore_pci_state(tp);
9059 tg3_flag_clear(tp, CHIP_RESETTING);
9060 tg3_flag_clear(tp, ERROR_PROCESSED);
9063 if (tg3_flag(tp, 5780_CLASS))
9064 val = tr32(MEMARB_MODE);
9065 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9067 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9069 tw32(0x5000, 0x400);
9072 if (tg3_flag(tp, IS_SSB_CORE)) {
9074 * BCM4785: In order to avoid repercussions from using
9075 * potentially defective internal ROM, stop the Rx RISC CPU,
9076 * which is not required for normal operation.
9079 tg3_halt_cpu(tp, RX_CPU_BASE);
9082 err = tg3_poll_fw(tp);
9086 tw32(GRC_MODE, tp->grc_mode);
9088 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9091 tw32(0xc4, val | (1 << 15));
9094 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9095 tg3_asic_rev(tp) == ASIC_REV_5705) {
9096 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9097 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9098 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9099 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9102 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9103 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9105 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9106 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9111 tw32_f(MAC_MODE, val);
9114 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9118 if (tg3_flag(tp, PCI_EXPRESS) &&
9119 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9120 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9121 !tg3_flag(tp, 57765_PLUS)) {
9124 tw32(0x7c00, val | (1 << 25));
9127 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9128 val = tr32(TG3_CPMU_CLCK_ORIDE);
9129 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9132 /* Reprobe ASF enable state. */
9133 tg3_flag_clear(tp, ENABLE_ASF);
9134 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9135 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9137 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9138 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9139 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9142 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9143 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9144 tg3_flag_set(tp, ENABLE_ASF);
9145 tp->last_event_jiffies = jiffies;
9146 if (tg3_flag(tp, 5750_PLUS))
9147 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9149 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9150 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9151 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9152 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9153 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9160 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9161 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9163 /* tp->lock is held. */
9164 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9170 tg3_write_sig_pre_reset(tp, kind);
9172 tg3_abort_hw(tp, silent);
9173 err = tg3_chip_reset(tp);
9175 __tg3_set_mac_addr(tp, false);
9177 tg3_write_sig_legacy(tp, kind);
9178 tg3_write_sig_post_reset(tp, kind);
9181 /* Save the stats across chip resets... */
9182 tg3_get_nstats(tp, &tp->net_stats_prev);
9183 tg3_get_estats(tp, &tp->estats_prev);
9185 /* And make sure the next sample is new data */
9186 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9195 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9197 struct tg3 *tp = netdev_priv(dev);
9198 struct sockaddr *addr = p;
9200 bool skip_mac_1 = false;
9202 if (!is_valid_ether_addr(addr->sa_data))
9203 return -EADDRNOTAVAIL;
9205 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9207 if (!netif_running(dev))
9210 if (tg3_flag(tp, ENABLE_ASF)) {
9211 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9213 addr0_high = tr32(MAC_ADDR_0_HIGH);
9214 addr0_low = tr32(MAC_ADDR_0_LOW);
9215 addr1_high = tr32(MAC_ADDR_1_HIGH);
9216 addr1_low = tr32(MAC_ADDR_1_LOW);
9218 /* Skip MAC addr 1 if ASF is using it. */
9219 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9220 !(addr1_high == 0 && addr1_low == 0))
9223 spin_lock_bh(&tp->lock);
9224 __tg3_set_mac_addr(tp, skip_mac_1);
9225 spin_unlock_bh(&tp->lock);
9230 /* tp->lock is held. */
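/* Program one ring control block (BDINFO) in NIC SRAM: the 64-bit
 * host address of the ring, its maxlen/flags word and, on pre-5705
 * chips, the NIC-side ring address.
 */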
9231 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9232 dma_addr_t mapping, u32 maxlen_flags,
9236 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9237 ((u64) mapping >> 32));
9239 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9240 ((u64) mapping & 0xffffffff));
9242 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9245 if (!tg3_flag(tp, 5705_PLUS))
9247 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9252 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9256 if (!tg3_flag(tp, ENABLE_TSS)) {
9257 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9258 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9259 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9261 tw32(HOSTCC_TXCOL_TICKS, 0);
9262 tw32(HOSTCC_TXMAX_FRAMES, 0);
9263 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
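/* Vectors beyond the first have their own coalescing registers,
 * laid out at 0x18-byte strides after the *_VEC1 offsets.
 */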
9265 for (; i < tp->txq_cnt; i++) {
9268 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9269 tw32(reg, ec->tx_coalesce_usecs);
9270 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9271 tw32(reg, ec->tx_max_coalesced_frames);
9272 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9273 tw32(reg, ec->tx_max_coalesced_frames_irq);
9277 for (; i < tp->irq_max - 1; i++) {
9278 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9279 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9280 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9284 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9287 u32 limit = tp->rxq_cnt;
9289 if (!tg3_flag(tp, ENABLE_RSS)) {
9290 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9291 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9292 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9295 tw32(HOSTCC_RXCOL_TICKS, 0);
9296 tw32(HOSTCC_RXMAX_FRAMES, 0);
9297 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9300 for (; i < limit; i++) {
9303 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9304 tw32(reg, ec->rx_coalesce_usecs);
9305 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9306 tw32(reg, ec->rx_max_coalesced_frames);
9307 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9308 tw32(reg, ec->rx_max_coalesced_frames_irq);
9311 for (; i < tp->irq_max - 1; i++) {
9312 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9313 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9314 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9318 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9320 tg3_coal_tx_init(tp, ec);
9321 tg3_coal_rx_init(tp, ec);
9323 if (!tg3_flag(tp, 5705_PLUS)) {
9324 u32 val = ec->stats_block_coalesce_usecs;
9326 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9327 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9332 tw32(HOSTCC_STAT_COAL_TICKS, val);
9336 /* tp->lock is held. */
9337 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9341 /* Disable all transmit rings but the first. */
9342 if (!tg3_flag(tp, 5705_PLUS))
9343 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9344 else if (tg3_flag(tp, 5717_PLUS))
9345 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9346 else if (tg3_flag(tp, 57765_CLASS) ||
9347 tg3_asic_rev(tp) == ASIC_REV_5762)
9348 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9350 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9352 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9353 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9354 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9355 BDINFO_FLAGS_DISABLED);
9358 /* tp->lock is held. */
9359 static void tg3_tx_rcbs_init(struct tg3 *tp)
9362 u32 txrcb = NIC_SRAM_SEND_RCB;
9364 if (tg3_flag(tp, ENABLE_TSS))
9367 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9368 struct tg3_napi *tnapi = &tp->napi[i];
9370 if (!tnapi->tx_ring)
9373 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9374 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9375 NIC_SRAM_TX_BUFFER_DESC);
9379 /* tp->lock is held. */
9380 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9384 /* Disable all receive return rings but the first. */
9385 if (tg3_flag(tp, 5717_PLUS))
9386 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9387 else if (!tg3_flag(tp, 5705_PLUS))
9388 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9389 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9390 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9391 tg3_flag(tp, 57765_CLASS))
9392 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9394 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9396 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9397 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9398 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9399 BDINFO_FLAGS_DISABLED);
9402 /* tp->lock is held. */
9403 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9406 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9408 if (tg3_flag(tp, ENABLE_RSS))
9411 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9412 struct tg3_napi *tnapi = &tp->napi[i];
9417 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9418 (tp->rx_ret_ring_mask + 1) <<
9419 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9423 /* tp->lock is held. */
9424 static void tg3_rings_reset(struct tg3 *tp)
9428 struct tg3_napi *tnapi = &tp->napi[0];
9430 tg3_tx_rcbs_disable(tp);
9432 tg3_rx_ret_rcbs_disable(tp);
9434 /* Disable interrupts */
9435 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9436 tp->napi[0].chk_msi_cnt = 0;
9437 tp->napi[0].last_rx_cons = 0;
9438 tp->napi[0].last_tx_cons = 0;
9440 /* Zero mailbox registers. */
9441 if (tg3_flag(tp, SUPPORT_MSIX)) {
9442 for (i = 1; i < tp->irq_max; i++) {
9443 tp->napi[i].tx_prod = 0;
9444 tp->napi[i].tx_cons = 0;
9445 if (tg3_flag(tp, ENABLE_TSS))
9446 tw32_mailbox(tp->napi[i].prodmbox, 0);
9447 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9448 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9449 tp->napi[i].chk_msi_cnt = 0;
9450 tp->napi[i].last_rx_cons = 0;
9451 tp->napi[i].last_tx_cons = 0;
9453 if (!tg3_flag(tp, ENABLE_TSS))
9454 tw32_mailbox(tp->napi[0].prodmbox, 0);
9456 tp->napi[0].tx_prod = 0;
9457 tp->napi[0].tx_cons = 0;
9458 tw32_mailbox(tp->napi[0].prodmbox, 0);
9459 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9462 /* Make sure the NIC-based send BD rings are disabled. */
9463 if (!tg3_flag(tp, 5705_PLUS)) {
9464 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9465 for (i = 0; i < 16; i++)
9466 tw32_tx_mbox(mbox + i * 8, 0);
9469 /* Clear status block in ram. */
9470 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9472 /* Set status block DMA address */
9473 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9474 ((u64) tnapi->status_mapping >> 32));
9475 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9476 ((u64) tnapi->status_mapping & 0xffffffff));
9478 stblk = HOSTCC_STATBLCK_RING1;
9480 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9481 u64 mapping = (u64)tnapi->status_mapping;
9482 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9483 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9486 /* Clear status block in ram. */
9487 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9490 tg3_tx_rcbs_init(tp);
9491 tg3_rx_ret_rcbs_init(tp);
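/* Program the std/jumbo rx BD replenish thresholds: the NIC-side
 * limit is derived from the per-chip BD cache size, the host-side
 * limit from one eighth of the configured ring depth, and the
 * smaller of the two is written to the RCVBDI threshold registers.
 * 57765+ parts also get their BD cache low-water marks set here.
 */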
9494 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9496 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9498 if (!tg3_flag(tp, 5750_PLUS) ||
9499 tg3_flag(tp, 5780_CLASS) ||
9500 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9501 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9502 tg3_flag(tp, 57765_PLUS))
9503 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9504 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9505 tg3_asic_rev(tp) == ASIC_REV_5787)
9506 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9508 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9510 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9511 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9513 val = min(nic_rep_thresh, host_rep_thresh);
9514 tw32(RCVBDI_STD_THRESH, val);
9516 if (tg3_flag(tp, 57765_PLUS))
9517 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9519 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9522 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9524 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9526 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9527 tw32(RCVBDI_JUMBO_THRESH, val);
9529 if (tg3_flag(tp, 57765_PLUS))
9530 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
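/* Bit-serial CRC-32 over buf; __tg3_set_rx_mode() hashes each
 * multicast address with this to select one of the 128 bits in the
 * MAC_HASH_REG_0..3 filter.
 */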
9533 static inline u32 calc_crc(unsigned char *buf, int len)
9541 for (j = 0; j < len; j++) {
9544 for (k = 0; k < 8; k++) {
9557 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9559 /* accept or reject all multicast frames */
9560 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9561 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9562 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9563 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9566 static void __tg3_set_rx_mode(struct net_device *dev)
9568 struct tg3 *tp = netdev_priv(dev);
9571 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9572 RX_MODE_KEEP_VLAN_TAG);
9574 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9575 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG flag clear. */
9578 if (!tg3_flag(tp, ENABLE_ASF))
9579 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9582 if (dev->flags & IFF_PROMISC) {
9583 /* Promiscuous mode. */
9584 rx_mode |= RX_MODE_PROMISC;
9585 } else if (dev->flags & IFF_ALLMULTI) {
9586 /* Accept all multicast. */
9587 tg3_set_multi(tp, 1);
9588 } else if (netdev_mc_empty(dev)) {
9589 /* Reject all multicast. */
9590 tg3_set_multi(tp, 0);
9591 } else {
9592 /* Accept one or more multicast(s). */
9593 struct netdev_hw_addr *ha;
9594 u32 mc_filter[4] = { 0, };
9595 u32 regidx;
9596 u32 bit;
9597 u32 crc;
9599 netdev_for_each_mc_addr(ha, dev) {
9600 crc = calc_crc(ha->addr, ETH_ALEN);
9601 bit = ~crc & 0x7f;
9602 regidx = (bit & 0x60) >> 5;
9603 bit &= 0x1f;
9604 mc_filter[regidx] |= (1 << bit);
9607 tw32(MAC_HASH_REG_0, mc_filter[0]);
9608 tw32(MAC_HASH_REG_1, mc_filter[1]);
9609 tw32(MAC_HASH_REG_2, mc_filter[2]);
9610 tw32(MAC_HASH_REG_3, mc_filter[3]);
9613 if (rx_mode != tp->rx_mode) {
9614 tp->rx_mode = rx_mode;
9615 tw32_f(MAC_RX_MODE, rx_mode);
9616 udelay(10);
9617 }
9618 }
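/* Annotation: the multicast filter is a 128-bit hash spread across the
 * four 32-bit MAC_HASH_REG_* registers. The hash index for an address is
 * the low 7 bits of the inverted CRC-32 of the address: bits 6:5 select
 * the register, bits 4:0 the bit within it. Worked example: if
 * ~crc & 0x7f == 0x6a, then regidx = (0x6a & 0x60) >> 5 = 3 and
 * bit = 0x6a & 0x1f = 10, so the address sets MAC_HASH_REG_3 bit 10.
 */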
9620 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9624 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9625 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
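/* Annotation: ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt, so
 * the default indirection table spreads flows round-robin; with qcnt == 4
 * the table reads 0, 1, 2, 3, 0, 1, 2, 3, ... across all
 * TG3_RSS_INDIR_TBL_SIZE entries.
 */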
9628 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9632 if (!tg3_flag(tp, SUPPORT_MSIX))
9633 return;
9635 if (tp->rxq_cnt == 1) {
9636 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9637 return;
9638 }
9640 /* Validate table against current IRQ count */
9641 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9642 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9646 if (i != TG3_RSS_INDIR_TBL_SIZE)
9647 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9650 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9651 {
9652 int i = 0;
9653 u32 reg = MAC_RSS_INDIR_TBL_0;
9655 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9656 u32 val = tp->rss_ind_tbl[i];
9657 i++;
9658 for (; i % 8; i++) {
9659 val <<= 4;
9660 val |= tp->rss_ind_tbl[i];
9661 }
9662 tw32(reg, val);
9663 reg += 4;
9664 }
9665 }
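/* Annotation: each hardware indirection register packs eight 4-bit queue
 * numbers, so the 128-entry table occupies 128 / 8 = 16 consecutive
 * 32-bit registers starting at MAC_RSS_INDIR_TBL_0.
 */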
9667 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9669 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9670 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9672 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9675 /* tp->lock is held. */
9676 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9678 u32 val, rdmac_mode;
9679 int i, err, limit;
9680 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9682 tg3_disable_ints(tp);
9686 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9688 if (tg3_flag(tp, INIT_COMPLETE))
9689 tg3_abort_hw(tp, 1);
9691 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9692 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9693 tg3_phy_pull_config(tp);
9694 tg3_eee_pull_config(tp, NULL);
9695 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9698 /* Enable MAC control of LPI */
9699 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9700 tg3_setup_eee(tp);
9702 if (reset_phy)
9703 tg3_phy_reset(tp);
9705 err = tg3_chip_reset(tp);
9706 if (err)
9707 return err;
9709 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9711 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9712 val = tr32(TG3_CPMU_CTRL);
9713 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9714 tw32(TG3_CPMU_CTRL, val);
9716 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9717 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9718 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9719 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9721 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9722 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9723 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9724 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9726 val = tr32(TG3_CPMU_HST_ACC);
9727 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9728 val |= CPMU_HST_ACC_MACCLK_6_25;
9729 tw32(TG3_CPMU_HST_ACC, val);
9732 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9733 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9734 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9735 PCIE_PWR_MGMT_L1_THRESH_4MS;
9736 tw32(PCIE_PWR_MGMT_THRESH, val);
9738 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9739 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9741 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9743 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9744 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9747 if (tg3_flag(tp, L1PLLPD_EN)) {
9748 u32 grc_mode = tr32(GRC_MODE);
9750 /* Access the lower 1K of PL PCIE block registers. */
9751 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9752 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9754 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9755 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9756 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9758 tw32(GRC_MODE, grc_mode);
9761 if (tg3_flag(tp, 57765_CLASS)) {
9762 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9763 u32 grc_mode = tr32(GRC_MODE);
9765 /* Access the lower 1K of PL PCIE block registers. */
9766 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9767 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9769 val = tr32(TG3_PCIE_TLDLPL_PORT +
9770 TG3_PCIE_PL_LO_PHYCTL5);
9771 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9772 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9774 tw32(GRC_MODE, grc_mode);
9777 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9780 /* Fix transmit hangs */
9781 val = tr32(TG3_CPMU_PADRNG_CTL);
9782 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9783 tw32(TG3_CPMU_PADRNG_CTL, val);
9785 grc_mode = tr32(GRC_MODE);
9787 /* Access the lower 1K of DL PCIE block registers. */
9788 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9789 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9791 val = tr32(TG3_PCIE_TLDLPL_PORT +
9792 TG3_PCIE_DL_LO_FTSMAX);
9793 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9794 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9795 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9797 tw32(GRC_MODE, grc_mode);
9800 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9801 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9802 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9803 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9806 /* This works around an issue with Athlon chipsets on
9807 * B3 tigon3 silicon. This bit has no effect on any
9808 * other revision. But do not set this on PCI Express
9809 * chips and don't even touch the clocks if the CPMU is present.
9811 if (!tg3_flag(tp, CPMU_PRESENT)) {
9812 if (!tg3_flag(tp, PCI_EXPRESS))
9813 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9814 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9817 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9818 tg3_flag(tp, PCIX_MODE)) {
9819 val = tr32(TG3PCI_PCISTATE);
9820 val |= PCISTATE_RETRY_SAME_DMA;
9821 tw32(TG3PCI_PCISTATE, val);
9824 if (tg3_flag(tp, ENABLE_APE)) {
9825 /* Allow reads and writes to the
9826 * APE register and memory space.
9828 val = tr32(TG3PCI_PCISTATE);
9829 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9830 PCISTATE_ALLOW_APE_SHMEM_WR |
9831 PCISTATE_ALLOW_APE_PSPACE_WR;
9832 tw32(TG3PCI_PCISTATE, val);
9835 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9836 /* Enable some hw fixes. */
9837 val = tr32(TG3PCI_MSI_DATA);
9838 val |= (1 << 26) | (1 << 28) | (1 << 29);
9839 tw32(TG3PCI_MSI_DATA, val);
9842 /* Descriptor ring init may make accesses to the
9843 * NIC SRAM area to setup the TX descriptors, so we
9844 * can only do this after the hardware has been
9845 * successfully reset.
9847 err = tg3_init_rings(tp);
9848 if (err)
9849 return err;
9851 if (tg3_flag(tp, 57765_PLUS)) {
9852 val = tr32(TG3PCI_DMA_RW_CTRL) &
9853 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9854 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9855 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9856 if (!tg3_flag(tp, 57765_CLASS) &&
9857 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9858 tg3_asic_rev(tp) != ASIC_REV_5762)
9859 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9860 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9861 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9862 tg3_asic_rev(tp) != ASIC_REV_5761) {
9863 /* This value is determined during the probe time DMA
9864 * engine test, tg3_test_dma.
9866 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9869 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9870 GRC_MODE_4X_NIC_SEND_RINGS |
9871 GRC_MODE_NO_TX_PHDR_CSUM |
9872 GRC_MODE_NO_RX_PHDR_CSUM);
9873 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9875 /* Pseudo-header checksum is done by hardware logic and not
9876 * the offload processors, so make the chip do the pseudo-
9877 * header checksums on receive. For transmit it is more
9878 * convenient to do the pseudo-header checksum in software
9879 * as Linux does that on transmit for us in all cases.
9881 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9883 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9884 if (tp->rxptpctl)
9885 tw32(TG3_RX_PTP_CTL,
9886 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9888 if (tg3_flag(tp, PTP_CAPABLE))
9889 val |= GRC_MODE_TIME_SYNC_ENABLE;
9891 tw32(GRC_MODE, tp->grc_mode | val);
9893 /* Setup the timer prescaler register. Clock is always 66 MHz. */
9894 val = tr32(GRC_MISC_CFG);
9895 val &= ~0xff;
9896 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9897 tw32(GRC_MISC_CFG, val);
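/* Annotation: with a 66 MHz core clock, a prescaler value of 65 divides
 * by 65 + 1 = 66, giving a 1 MHz time base, i.e. 1 usec timer ticks
 * (assuming the divisor is N + 1, as the value 65 for a 66 MHz clock
 * suggests).
 */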
9899 /* Initialize MBUF/DESC pool. */
9900 if (tg3_flag(tp, 5750_PLUS)) {
9901 /* Do nothing. */
9902 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9903 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9904 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9905 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9907 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9908 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9909 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9910 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9911 u32 fw_len;
9913 fw_len = tp->fw_len;
9914 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9915 tw32(BUFMGR_MB_POOL_ADDR,
9916 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9917 tw32(BUFMGR_MB_POOL_SIZE,
9918 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
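/* Annotation: "(fw_len + (0x80 - 1)) & ~(0x80 - 1)" rounds the firmware
 * image size up to the next 128-byte boundary before carving it out of
 * the MBUF pool; e.g. a 0x1234-byte image reserves 0x1280 bytes.
 */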
9921 if (tp->dev->mtu <= ETH_DATA_LEN) {
9922 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9923 tp->bufmgr_config.mbuf_read_dma_low_water);
9924 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9925 tp->bufmgr_config.mbuf_mac_rx_low_water);
9926 tw32(BUFMGR_MB_HIGH_WATER,
9927 tp->bufmgr_config.mbuf_high_water);
9928 } else {
9929 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9930 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9931 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9932 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9933 tw32(BUFMGR_MB_HIGH_WATER,
9934 tp->bufmgr_config.mbuf_high_water_jumbo);
9936 tw32(BUFMGR_DMA_LOW_WATER,
9937 tp->bufmgr_config.dma_low_water);
9938 tw32(BUFMGR_DMA_HIGH_WATER,
9939 tp->bufmgr_config.dma_high_water);
9941 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9942 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9943 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9944 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9945 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9946 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9947 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9948 tw32(BUFMGR_MODE, val);
9949 for (i = 0; i < 2000; i++) {
9950 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9951 break;
9952 udelay(10);
9953 }
9954 if (i >= 2000) {
9955 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9956 return -ENODEV;
9957 }
9959 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9960 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9962 tg3_setup_rxbd_thresholds(tp);
9964 /* Initialize TG3_BDINFO's at:
9965 * RCVDBDI_STD_BD: standard eth size rx ring
9966 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9967 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9970 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9971 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9972 * ring attribute flags
9973 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9975 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9976 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9978 * The size of each ring is fixed in the firmware, but the location is
9979 * configurable.
9980 */
9981 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9982 ((u64) tpr->rx_std_mapping >> 32));
9983 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9984 ((u64) tpr->rx_std_mapping & 0xffffffff));
9985 if (!tg3_flag(tp, 5717_PLUS))
9986 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9987 NIC_SRAM_RX_BUFFER_DESC);
9989 /* Disable the mini ring */
9990 if (!tg3_flag(tp, 5705_PLUS))
9991 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9992 BDINFO_FLAGS_DISABLED);
9994 /* Program the jumbo buffer descriptor ring control
9995 * blocks on those devices that have them.
9997 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9998 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10000 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10001 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10002 ((u64) tpr->rx_jmb_mapping >> 32));
10003 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10004 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10005 val = TG3_RX_JMB_RING_SIZE(tp) <<
10006 BDINFO_FLAGS_MAXLEN_SHIFT;
10007 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10008 val | BDINFO_FLAGS_USE_EXT_RECV);
10009 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10010 tg3_flag(tp, 57765_CLASS) ||
10011 tg3_asic_rev(tp) == ASIC_REV_5762)
10012 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10013 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10015 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10016 BDINFO_FLAGS_DISABLED);
10019 if (tg3_flag(tp, 57765_PLUS)) {
10020 val = TG3_RX_STD_RING_SIZE(tp);
10021 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10022 val |= (TG3_RX_STD_DMA_SZ << 2);
10023 } else if (tg3_flag(tp, 5705_PLUS))
10024 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10025 else
10026 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10028 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10030 tpr->rx_std_prod_idx = tp->rx_pending;
10031 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10033 tpr->rx_jmb_prod_idx =
10034 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10035 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10037 tg3_rings_reset(tp);
10039 /* Initialize MAC address and backoff seed. */
10040 __tg3_set_mac_addr(tp, false);
10042 /* MTU + ethernet header + FCS + optional VLAN tag */
10043 tw32(MAC_RX_MTU_SIZE,
10044 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10046 /* The slot time is changed by tg3_setup_phy if we
10047 * run at gigabit with half duplex.
10049 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10050 (6 << TX_LENGTHS_IPG_SHIFT) |
10051 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10053 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10054 tg3_asic_rev(tp) == ASIC_REV_5762)
10055 val |= tr32(MAC_TX_LENGTHS) &
10056 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10057 TX_LENGTHS_CNT_DWN_VAL_MSK);
10059 tw32(MAC_TX_LENGTHS, val);
10061 /* Receive rules. */
10062 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10063 tw32(RCVLPC_CONFIG, 0x0181);
10065 /* Calculate RDMAC_MODE setting early, we need it to determine
10066 * the RCVLPC_STATE_ENABLE mask.
10068 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10069 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10070 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10071 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10072 RDMAC_MODE_LNGREAD_ENAB);
10074 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10075 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10077 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10078 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10079 tg3_asic_rev(tp) == ASIC_REV_57780)
10080 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10081 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10082 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10084 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10085 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10086 if (tg3_flag(tp, TSO_CAPABLE) &&
10087 tg3_asic_rev(tp) == ASIC_REV_5705) {
10088 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10089 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10090 !tg3_flag(tp, IS_5788)) {
10091 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10095 if (tg3_flag(tp, PCI_EXPRESS))
10096 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10098 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10100 if (tp->dev->mtu <= ETH_DATA_LEN) {
10101 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10102 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10106 if (tg3_flag(tp, HW_TSO_1) ||
10107 tg3_flag(tp, HW_TSO_2) ||
10108 tg3_flag(tp, HW_TSO_3))
10109 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10111 if (tg3_flag(tp, 57765_PLUS) ||
10112 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10113 tg3_asic_rev(tp) == ASIC_REV_57780)
10114 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10116 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10117 tg3_asic_rev(tp) == ASIC_REV_5762)
10118 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10120 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10121 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10122 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10123 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10124 tg3_flag(tp, 57765_PLUS)) {
10127 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10128 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10130 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10132 val = tr32(tgtreg);
10133 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10134 tg3_asic_rev(tp) == ASIC_REV_5762) {
10135 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10136 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10137 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10138 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10139 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10140 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10142 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10145 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10146 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10147 tg3_asic_rev(tp) == ASIC_REV_5762) {
10150 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10151 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10153 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10155 val = tr32(tgtreg);
10156 tw32(tgtreg, val |
10157 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10158 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10161 /* Receive/send statistics. */
10162 if (tg3_flag(tp, 5750_PLUS)) {
10163 val = tr32(RCVLPC_STATS_ENABLE);
10164 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10165 tw32(RCVLPC_STATS_ENABLE, val);
10166 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10167 tg3_flag(tp, TSO_CAPABLE)) {
10168 val = tr32(RCVLPC_STATS_ENABLE);
10169 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10170 tw32(RCVLPC_STATS_ENABLE, val);
10172 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10174 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10175 tw32(SNDDATAI_STATSENAB, 0xffffff);
10176 tw32(SNDDATAI_STATSCTRL,
10177 (SNDDATAI_SCTRL_ENABLE |
10178 SNDDATAI_SCTRL_FASTUPD));
10180 /* Setup host coalescing engine. */
10181 tw32(HOSTCC_MODE, 0);
10182 for (i = 0; i < 2000; i++) {
10183 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10184 break;
10185 udelay(10);
10186 }
10188 __tg3_set_coalesce(tp, &tp->coal);
10190 if (!tg3_flag(tp, 5705_PLUS)) {
10191 /* Status/statistics block address. See tg3_timer,
10192 * the tg3_periodic_fetch_stats call there, and
10193 * tg3_get_stats to see how this works for 5705/5750 chips.
10195 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10196 ((u64) tp->stats_mapping >> 32));
10197 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10198 ((u64) tp->stats_mapping & 0xffffffff));
10199 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10201 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10203 /* Clear statistics and status block memory areas */
10204 for (i = NIC_SRAM_STATS_BLK;
10205 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10206 i += sizeof(u32)) {
10207 tg3_write_mem(tp, i, 0);
10212 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10214 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10215 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10216 if (!tg3_flag(tp, 5705_PLUS))
10217 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10219 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10220 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10221 /* reset to prevent losing 1st rx packet intermittently */
10222 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10223 udelay(10);
10224 tw32_f(MAC_RX_MODE, tp->rx_mode);
10225 }
10226 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10227 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10228 MAC_MODE_FHDE_ENABLE;
10229 if (tg3_flag(tp, ENABLE_APE))
10230 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10231 if (!tg3_flag(tp, 5705_PLUS) &&
10232 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10233 tg3_asic_rev(tp) != ASIC_REV_5700)
10234 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10235 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10236 udelay(40);
10238 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10239 * If TG3_FLAG_IS_NIC is zero, we should read the
10240 * register to preserve the GPIO settings for LOMs. The GPIOs,
10241 * whether used as inputs or outputs, are set by boot code after
10242 * reset.
10243 */
10244 if (!tg3_flag(tp, IS_NIC)) {
10245 u32 gpio_mask;
10247 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10248 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10249 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10251 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10252 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10253 GRC_LCLCTRL_GPIO_OUTPUT3;
10255 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10256 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10258 tp->grc_local_ctrl &= ~gpio_mask;
10259 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10261 /* GPIO1 must be driven high for eeprom write protect */
10262 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10263 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10264 GRC_LCLCTRL_GPIO_OUTPUT1);
10266 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10267 udelay(100);
10268 }
10269 if (tg3_flag(tp, USING_MSIX)) {
10270 val = tr32(MSGINT_MODE);
10271 val |= MSGINT_MODE_ENABLE;
10272 if (tp->irq_cnt > 1)
10273 val |= MSGINT_MODE_MULTIVEC_EN;
10274 if (!tg3_flag(tp, 1SHOT_MSI))
10275 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10276 tw32(MSGINT_MODE, val);
10279 if (!tg3_flag(tp, 5705_PLUS)) {
10280 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10281 udelay(40);
10282 }
10284 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10285 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10286 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10287 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10288 WDMAC_MODE_LNGREAD_ENAB);
10290 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10291 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10292 if (tg3_flag(tp, TSO_CAPABLE) &&
10293 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10294 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10295 /* nothing */
10296 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10297 !tg3_flag(tp, IS_5788)) {
10298 val |= WDMAC_MODE_RX_ACCEL;
10302 /* Enable host coalescing bug fix */
10303 if (tg3_flag(tp, 5755_PLUS))
10304 val |= WDMAC_MODE_STATUS_TAG_FIX;
10306 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10307 val |= WDMAC_MODE_BURST_ALL_DATA;
10309 tw32_f(WDMAC_MODE, val);
10310 udelay(40);
10312 if (tg3_flag(tp, PCIX_MODE)) {
10313 u16 pcix_cmd;
10315 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10316 &pcix_cmd);
10317 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10318 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10319 pcix_cmd |= PCI_X_CMD_READ_2K;
10320 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10321 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10322 pcix_cmd |= PCI_X_CMD_READ_2K;
10324 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10325 pcix_cmd);
10326 }
10328 tw32_f(RDMAC_MODE, rdmac_mode);
10329 udelay(40);
10331 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10332 tg3_asic_rev(tp) == ASIC_REV_5720) {
10333 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10334 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10335 break;
10336 }
10337 if (i < TG3_NUM_RDMA_CHANNELS) {
10338 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10339 val |= tg3_lso_rd_dma_workaround_bit(tp);
10340 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10341 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10345 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10346 if (!tg3_flag(tp, 5705_PLUS))
10347 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10349 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10350 tw32(SNDDATAC_MODE,
10351 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10353 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10355 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10356 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10357 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10358 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10359 val |= RCVDBDI_MODE_LRG_RING_SZ;
10360 tw32(RCVDBDI_MODE, val);
10361 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10362 if (tg3_flag(tp, HW_TSO_1) ||
10363 tg3_flag(tp, HW_TSO_2) ||
10364 tg3_flag(tp, HW_TSO_3))
10365 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10366 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10367 if (tg3_flag(tp, ENABLE_TSS))
10368 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10369 tw32(SNDBDI_MODE, val);
10370 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10372 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10373 err = tg3_load_5701_a0_firmware_fix(tp);
10374 if (err)
10375 return err;
10376 }
10378 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10379 /* Ignore any errors for the firmware download. If download
10380 * fails, the device will operate with EEE disabled
10382 tg3_load_57766_firmware(tp);
10385 if (tg3_flag(tp, TSO_CAPABLE)) {
10386 err = tg3_load_tso_firmware(tp);
10387 if (err)
10388 return err;
10389 }
10391 tp->tx_mode = TX_MODE_ENABLE;
10393 if (tg3_flag(tp, 5755_PLUS) ||
10394 tg3_asic_rev(tp) == ASIC_REV_5906)
10395 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10397 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10398 tg3_asic_rev(tp) == ASIC_REV_5762) {
10399 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10400 tp->tx_mode &= ~val;
10401 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10404 tw32_f(MAC_TX_MODE, tp->tx_mode);
10405 udelay(100);
10407 if (tg3_flag(tp, ENABLE_RSS)) {
10408 tg3_rss_write_indir_tbl(tp);
10410 /* Setup the "secret" hash key. */
10411 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10412 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10413 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10414 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10415 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10416 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10417 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10418 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10419 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10420 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
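/* Annotation: ten 32-bit registers give a 40-byte RSS hash key, the
 * conventional key length for Toeplitz-style RSS hashing; the constants
 * here are presumably the key recommended by the hardware vendor.
 */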
10423 tp->rx_mode = RX_MODE_ENABLE;
10424 if (tg3_flag(tp, 5755_PLUS))
10425 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10427 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10428 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10430 if (tg3_flag(tp, ENABLE_RSS))
10431 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10432 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10433 RX_MODE_RSS_IPV6_HASH_EN |
10434 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10435 RX_MODE_RSS_IPV4_HASH_EN |
10436 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10438 tw32_f(MAC_RX_MODE, tp->rx_mode);
10439 udelay(10);
10441 tw32(MAC_LED_CTRL, tp->led_ctrl);
10443 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10444 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10445 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10446 udelay(10);
10447 }
10448 tw32_f(MAC_RX_MODE, tp->rx_mode);
10449 udelay(10);
10451 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10452 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10453 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10454 /* Set drive transmission level to 1.2V */
10455 /* only if the signal pre-emphasis bit is not set */
10456 val = tr32(MAC_SERDES_CFG);
10457 val &= 0xfffff000;
10458 val |= 0x880;
10459 tw32(MAC_SERDES_CFG, val);
10461 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10462 tw32(MAC_SERDES_CFG, 0x616000);
10465 /* Prevent chip from dropping frames when flow control
10466 * is enabled.
10467 */
10468 if (tg3_flag(tp, 57765_CLASS))
10469 val = 1;
10470 else
10471 val = 2;
10472 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10474 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10475 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10476 /* Use hardware link auto-negotiation */
10477 tg3_flag_set(tp, HW_AUTONEG);
10480 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10481 tg3_asic_rev(tp) == ASIC_REV_5714) {
10482 u32 tmp;
10484 tmp = tr32(SERDES_RX_CTRL);
10485 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10486 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10487 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10488 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10491 if (!tg3_flag(tp, USE_PHYLIB)) {
10492 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10493 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10495 err = tg3_setup_phy(tp, false);
10496 if (err)
10497 return err;
10498 }
10499 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10500 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10501 u32 tmp;
10503 /* Clear CRC stats. */
10504 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10505 tg3_writephy(tp, MII_TG3_TEST1,
10506 tmp | MII_TG3_TEST1_CRC_EN);
10507 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10512 __tg3_set_rx_mode(tp->dev);
10514 /* Initialize receive rules. */
10515 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10516 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10517 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10518 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10520 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10521 limit = 8;
10522 else
10523 limit = 16;
10524 if (tg3_flag(tp, ENABLE_ASF))
10525 limit -= 4;
10526 switch (limit) {
10527 case 16:
10528 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10529 case 15:
10530 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10531 case 14:
10532 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10533 case 13:
10534 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10535 case 12:
10536 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10537 case 11:
10538 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10539 case 10:
10540 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10541 case 9:
10542 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10543 case 8:
10544 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10545 case 7:
10546 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10547 case 6:
10548 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10549 case 5:
10550 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10551 case 4:
10552 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10553 case 3:
10554 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10555 case 2:
10556 case 1:
10558 default:
10559 break;
10560 }
10562 if (tg3_flag(tp, ENABLE_APE))
10563 /* Write our heartbeat update interval to APE. */
10564 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10565 APE_HOST_HEARTBEAT_INT_DISABLE);
10567 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10569 return 0;
10570 }
10572 /* Called at device open time to get the chip ready for
10573 * packet processing. Invoked with tp->lock held.
10575 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10577 /* Chip may have been just powered on. If so, the boot code may still
10578 * be running initialization. Wait for it to finish to avoid races in
10579 * accessing the hardware.
10581 tg3_enable_register_access(tp);
10582 tg3_poll_fw(tp);
10584 tg3_switch_clocks(tp);
10586 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10588 return tg3_reset_hw(tp, reset_phy);
10591 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10592 {
10593 int i;
10595 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10596 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10598 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10601 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10602 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10603 memset(ocir, 0, TG3_OCIR_LEN);
10607 /* sysfs attributes for hwmon */
10608 static ssize_t tg3_show_temp(struct device *dev,
10609 struct device_attribute *devattr, char *buf)
10611 struct pci_dev *pdev = to_pci_dev(dev);
10612 struct net_device *netdev = pci_get_drvdata(pdev);
10613 struct tg3 *tp = netdev_priv(netdev);
10614 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10615 u32 temperature;
10617 spin_lock_bh(&tp->lock);
10618 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10619 sizeof(temperature));
10620 spin_unlock_bh(&tp->lock);
10621 return sprintf(buf, "%u\n", temperature);
10625 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10626 TG3_TEMP_SENSOR_OFFSET);
10627 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10628 TG3_TEMP_CAUTION_OFFSET);
10629 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10630 TG3_TEMP_MAX_OFFSET);
10632 static struct attribute *tg3_attributes[] = {
10633 &sensor_dev_attr_temp1_input.dev_attr.attr,
10634 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10635 &sensor_dev_attr_temp1_max.dev_attr.attr,
10636 NULL
10637 };
10639 static const struct attribute_group tg3_group = {
10640 .attrs = tg3_attributes,
10643 static void tg3_hwmon_close(struct tg3 *tp)
10645 if (tp->hwmon_dev) {
10646 hwmon_device_unregister(tp->hwmon_dev);
10647 tp->hwmon_dev = NULL;
10648 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10652 static void tg3_hwmon_open(struct tg3 *tp)
10653 {
10654 int i, err;
10655 u32 size = 0;
10656 struct pci_dev *pdev = tp->pdev;
10657 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10659 tg3_sd_scan_scratchpad(tp, ocirs);
10661 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10662 if (!ocirs[i].src_data_length)
10663 continue;
10665 size += ocirs[i].src_hdr_length;
10666 size += ocirs[i].src_data_length;
10667 }
10669 if (!size)
10670 return;
10672 /* Register hwmon sysfs hooks */
10673 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10674 if (err) {
10675 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10676 return;
10677 }
10679 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10680 if (IS_ERR(tp->hwmon_dev)) {
10681 tp->hwmon_dev = NULL;
10682 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10683 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10684 }
10685 }
10688 #define TG3_STAT_ADD32(PSTAT, REG) \
10689 do { u32 __val = tr32(REG); \
10690 (PSTAT)->low += __val; \
10691 if ((PSTAT)->low < __val) \
10692 (PSTAT)->high += 1; \
10693 } while (0)
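/* Annotation: the low/high pair emulates a 64-bit counter from 32-bit
 * register reads. After the unsigned add, (PSTAT)->low < __val exactly
 * when the addition wrapped: e.g. low = 0xffffff00 plus __val = 0x200
 * gives low = 0x100, and 0x100 < 0x200, so the carry bumps high.
 */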
10695 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10697 struct tg3_hw_stats *sp = tp->hw_stats;
10699 if (!tp->link_up || tg3_flag(tp, RESET_TASK_PENDING))
10700 return;
10702 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10703 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10704 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10705 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10706 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10707 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10708 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10709 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10710 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10711 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10712 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10713 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10714 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10715 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10716 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10717 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10718 u32 val;
10720 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10721 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10722 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10723 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10726 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10727 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10728 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10729 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10730 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10731 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10732 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10733 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10734 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10735 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10736 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10737 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10738 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10739 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10741 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10742 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10743 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10744 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10745 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10746 } else {
10747 u32 val = tr32(HOSTCC_FLOW_ATTN);
10748 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10749 if (val) {
10750 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10751 sp->rx_discards.low += val;
10752 if (sp->rx_discards.low < val)
10753 sp->rx_discards.high += 1;
10754 }
10755 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10757 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10760 static void tg3_chk_missed_msi(struct tg3 *tp)
10764 for (i = 0; i < tp->irq_cnt; i++) {
10765 struct tg3_napi *tnapi = &tp->napi[i];
10767 if (tg3_has_work(tnapi)) {
10768 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10769 tnapi->last_tx_cons == tnapi->tx_cons) {
10770 if (tnapi->chk_msi_cnt < 1) {
10771 tnapi->chk_msi_cnt++;
10772 return;
10773 }
10774 tg3_msi(0, tnapi);
10775 }
10776 }
10777 tnapi->chk_msi_cnt = 0;
10778 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10779 tnapi->last_tx_cons = tnapi->tx_cons;
10780 }
10781 }
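/* Annotation: this is a lost-MSI workaround. If a vector still has work
 * pending and its consumer indices have not moved since the previous
 * timer tick, it is counted once (chk_msi_cnt); on the second
 * consecutive stalled tick the handler is invoked by hand to kick the
 * ring along.
 */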
10783 static void tg3_timer(unsigned long __opaque)
10785 struct tg3 *tp = (struct tg3 *) __opaque;
10787 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10788 goto restart_timer;
10790 spin_lock(&tp->lock);
10792 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10793 tg3_flag(tp, 57765_CLASS))
10794 tg3_chk_missed_msi(tp);
10796 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10797 /* BCM4785: Flush posted writes from GbE to host memory. */
10798 tr32(HOSTCC_MODE);
10799 }
10801 if (!tg3_flag(tp, TAGGED_STATUS)) {
10802 /* All of this garbage is because, when using non-tagged
10803 * IRQ status, the mailbox/status_block protocol the chip
10804 * uses with the CPU is race prone.
10805 */
10806 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10807 tw32(GRC_LOCAL_CTRL,
10808 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10809 } else {
10810 tw32(HOSTCC_MODE, tp->coalesce_mode |
10811 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10812 }
10813 }
10814 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10815 spin_unlock(&tp->lock);
10816 tg3_reset_task_schedule(tp);
10817 goto restart_timer;
10821 /* This part only runs once per second. */
10822 if (!--tp->timer_counter) {
10823 if (tg3_flag(tp, 5705_PLUS))
10824 tg3_periodic_fetch_stats(tp);
10826 if (tp->setlpicnt && !--tp->setlpicnt)
10827 tg3_phy_eee_enable(tp);
10829 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10830 u32 mac_stat;
10831 int phy_event;
10833 mac_stat = tr32(MAC_STATUS);
10835 phy_event = 0;
10836 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10837 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10838 phy_event = 1;
10839 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10840 phy_event = 1;
10842 if (phy_event)
10843 tg3_setup_phy(tp, false);
10844 } else if (tg3_flag(tp, POLL_SERDES)) {
10845 u32 mac_stat = tr32(MAC_STATUS);
10846 int need_setup = 0;
10848 if (tp->link_up &&
10849 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10850 need_setup = 1;
10851 }
10852 if (!tp->link_up &&
10853 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10854 MAC_STATUS_SIGNAL_DET))) {
10855 need_setup = 1;
10856 }
10857 if (need_setup) {
10858 if (!tp->serdes_counter) {
10859 tw32_f(MAC_MODE,
10860 (tp->mac_mode &
10861 ~MAC_MODE_PORT_MODE_MASK));
10862 udelay(40);
10863 tw32_f(MAC_MODE, tp->mac_mode);
10864 udelay(40);
10865 }
10866 tg3_setup_phy(tp, false);
10867 }
10868 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10869 tg3_flag(tp, 5780_CLASS)) {
10870 tg3_serdes_parallel_detect(tp);
10873 tp->timer_counter = tp->timer_multiplier;
10876 /* Heartbeat is only sent once every 2 seconds.
10878 * The heartbeat is to tell the ASF firmware that the host
10879 * driver is still alive. In the event that the OS crashes,
10880 * ASF needs to reset the hardware to free up the FIFO space
10881 * that may be filled with rx packets destined for the host.
10882 * If the FIFO is full, ASF will no longer function properly.
10884 * Unintended resets have been reported on real time kernels
10885 * where the timer doesn't run on time. Netpoll will also have
10886 * the same effect.
10888 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10889 * to check the ring condition when the heartbeat is expiring
10890 * before doing the reset. This will prevent most unintended
10891 * resets.
10892 */
10893 if (!--tp->asf_counter) {
10894 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10895 tg3_wait_for_event_ack(tp);
10897 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10898 FWCMD_NICDRV_ALIVE3);
10899 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10900 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10901 TG3_FW_UPDATE_TIMEOUT_SEC);
10903 tg3_generate_fw_event(tp);
10905 tp->asf_counter = tp->asf_multiplier;
10908 spin_unlock(&tp->lock);
10910 restart_timer:
10911 tp->timer.expires = jiffies + tp->timer_offset;
10912 add_timer(&tp->timer);
10915 static void tg3_timer_init(struct tg3 *tp)
10917 if (tg3_flag(tp, TAGGED_STATUS) &&
10918 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10919 !tg3_flag(tp, 57765_CLASS))
10920 tp->timer_offset = HZ;
10921 else
10922 tp->timer_offset = HZ / 10;
10924 BUG_ON(tp->timer_offset > HZ);
10926 tp->timer_multiplier = (HZ / tp->timer_offset);
10927 tp->asf_multiplier = (HZ / tp->timer_offset) *
10928 TG3_FW_UPDATE_FREQ_SEC;
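/* Annotation: timer_multiplier = HZ / timer_offset is the number of timer
 * firings per second. With tagged status the timer runs once a second
 * (multiplier 1); otherwise it runs every HZ/10 jiffies and the
 * once-per-second work in tg3_timer() executes every tenth invocation.
 * asf_multiplier stretches that to one firmware heartbeat every
 * TG3_FW_UPDATE_FREQ_SEC seconds.
 */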
10930 init_timer(&tp->timer);
10931 tp->timer.data = (unsigned long) tp;
10932 tp->timer.function = tg3_timer;
10935 static void tg3_timer_start(struct tg3 *tp)
10937 tp->asf_counter = tp->asf_multiplier;
10938 tp->timer_counter = tp->timer_multiplier;
10940 tp->timer.expires = jiffies + tp->timer_offset;
10941 add_timer(&tp->timer);
10944 static void tg3_timer_stop(struct tg3 *tp)
10946 del_timer_sync(&tp->timer);
10949 /* Restart hardware after configuration changes, self-test, etc.
10950 * Invoked with tp->lock held.
10952 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10953 __releases(tp->lock)
10954 __acquires(tp->lock)
10958 err = tg3_init_hw(tp, reset_phy);
10959 if (err) {
10960 netdev_err(tp->dev,
10961 "Failed to re-initialize device, aborting\n");
10962 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10963 tg3_full_unlock(tp);
10964 tg3_timer_stop(tp);
10965 tp->irq_sync = 0;
10966 tg3_napi_enable(tp);
10967 dev_close(tp->dev);
10968 tg3_full_lock(tp, 0);
10969 }
10970 return err;
10971 }
10973 static void tg3_reset_task(struct work_struct *work)
10975 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10978 tg3_full_lock(tp, 0);
10980 if (!netif_running(tp->dev)) {
10981 tg3_flag_clear(tp, RESET_TASK_PENDING);
10982 tg3_full_unlock(tp);
10983 return;
10984 }
10986 tg3_full_unlock(tp);
10988 tg3_phy_stop(tp);
10990 tg3_netif_stop(tp);
10992 tg3_full_lock(tp, 1);
10994 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10995 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10996 tp->write32_rx_mbox = tg3_write_flush_reg32;
10997 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10998 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11001 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11002 err = tg3_init_hw(tp, true);
11003 if (err)
11004 goto out;
11006 tg3_netif_start(tp);
11008 out:
11009 tg3_full_unlock(tp);
11011 if (!err)
11012 tg3_phy_start(tp);
11014 tg3_flag_clear(tp, RESET_TASK_PENDING);
11015 }
11017 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11018 {
11019 irq_handler_t fn;
11020 unsigned long flags;
11021 char *name;
11022 struct tg3_napi *tnapi = &tp->napi[irq_num];
11024 if (tp->irq_cnt == 1)
11025 name = tp->dev->name;
11026 else {
11027 name = &tnapi->irq_lbl[0];
11028 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
11029 name[IFNAMSIZ-1] = 0;
11030 }
11032 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11033 fn = tg3_msi;
11034 if (tg3_flag(tp, 1SHOT_MSI))
11035 fn = tg3_msi_1shot;
11036 flags = 0;
11037 } else {
11038 fn = tg3_interrupt;
11039 if (tg3_flag(tp, TAGGED_STATUS))
11040 fn = tg3_interrupt_tagged;
11041 flags = IRQF_SHARED;
11042 }
11044 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11047 static int tg3_test_interrupt(struct tg3 *tp)
11049 struct tg3_napi *tnapi = &tp->napi[0];
11050 struct net_device *dev = tp->dev;
11051 int err, i, intr_ok = 0;
11054 if (!netif_running(dev))
11055 return -ENODEV;
11057 tg3_disable_ints(tp);
11059 free_irq(tnapi->irq_vec, tnapi);
11062 * Turn off MSI one shot mode. Otherwise this test has no
11063 * observable way to know whether the interrupt was delivered.
11065 if (tg3_flag(tp, 57765_PLUS)) {
11066 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11067 tw32(MSGINT_MODE, val);
11070 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11071 IRQF_SHARED, dev->name, tnapi);
11072 if (err)
11073 return err;
11075 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11076 tg3_enable_ints(tp);
11078 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11081 for (i = 0; i < 5; i++) {
11082 u32 int_mbox, misc_host_ctrl;
11084 int_mbox = tr32_mailbox(tnapi->int_mbox);
11085 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11087 if ((int_mbox != 0) ||
11088 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11089 intr_ok = 1;
11090 break;
11091 }
11093 if (tg3_flag(tp, 57765_PLUS) &&
11094 tnapi->hw_status->status_tag != tnapi->last_tag)
11095 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11097 msleep(10);
11098 }
11100 tg3_disable_ints(tp);
11102 free_irq(tnapi->irq_vec, tnapi);
11104 err = tg3_request_irq(tp, 0);
11106 if (err)
11107 return err;
11109 if (intr_ok) {
11110 /* Reenable MSI one shot mode. */
11111 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11112 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11113 tw32(MSGINT_MODE, val);
11114 }
11115 return 0;
11116 }
11118 return -EIO;
11119 }
11121 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11122 * successfully restored.
11123 */
11124 static int tg3_test_msi(struct tg3 *tp)
11125 {
11126 int err;
11127 u16 pci_cmd;
11129 if (!tg3_flag(tp, USING_MSI))
11130 return 0;
11132 /* Turn off SERR reporting in case MSI terminates with Master
11133 * Abort.
11134 */
11135 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11136 pci_write_config_word(tp->pdev, PCI_COMMAND,
11137 pci_cmd & ~PCI_COMMAND_SERR);
11139 err = tg3_test_interrupt(tp);
11141 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11143 if (err == 0)
11144 return 0;
11146 /* other failures */
11147 if (err != -EIO)
11148 return err;
11150 /* MSI test failed, go back to INTx mode */
11151 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11152 "to INTx mode. Please report this failure to the PCI "
11153 "maintainer and include system chipset information\n");
11155 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11157 pci_disable_msi(tp->pdev);
11159 tg3_flag_clear(tp, USING_MSI);
11160 tp->napi[0].irq_vec = tp->pdev->irq;
11162 err = tg3_request_irq(tp, 0);
11163 if (err)
11164 return err;
11166 /* Need to reset the chip because the MSI cycle may have terminated
11167 * with Master Abort.
11169 tg3_full_lock(tp, 1);
11171 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11172 err = tg3_init_hw(tp, true);
11174 tg3_full_unlock(tp);
11176 if (err)
11177 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11179 return err;
11180 }
11182 static int tg3_reject_firmware(struct tg3 *tp)
11184 const struct tg3_firmware_hdr *fw_hdr;
11186 if (reject_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11187 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11188 tp->fw_needed);
11189 return -ENOENT;
11190 }
11192 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11194 /* Firmware blob starts with version numbers, followed by
11195 * start address and _full_ length including BSS sections
11196 * (which must be longer than the actual data, of course
11199 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11200 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11201 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11202 tp->fw_len, tp->fw_needed);
11203 release_firmware(tp->fw);
11204 tp->fw = NULL;
11205 return -EINVAL;
11206 }
11208 /* We no longer need firmware; we have it. */
11209 tp->fw_needed = NULL;
11213 static u32 tg3_irq_count(struct tg3 *tp)
11215 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11217 if (irq_cnt > 1) {
11218 /* We want as many rx rings enabled as there are cpus.
11219 * In multiqueue MSI-X mode, the first MSI-X vector
11220 * only deals with link interrupts, etc, so we add
11221 * one to the number of vectors we are requesting.
11223 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11224 }
11226 return irq_cnt;
11227 }
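/* Annotation, worked example: with 4 rx queues and 1 tx queue, irq_cnt
 * starts at max(4, 1) = 4; since the first MSI-X vector handles only
 * link/error events in multiqueue mode, one more is requested, giving
 * 5 vectors (capped at tp->irq_max).
 */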
11229 static bool tg3_enable_msix(struct tg3 *tp)
11232 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11234 tp->txq_cnt = tp->txq_req;
11235 tp->rxq_cnt = tp->rxq_req;
11236 if (!tp->rxq_cnt)
11237 tp->rxq_cnt = netif_get_num_default_rss_queues();
11238 if (tp->rxq_cnt > tp->rxq_max)
11239 tp->rxq_cnt = tp->rxq_max;
11241 /* Disable multiple TX rings by default. Simple round-robin hardware
11242 * scheduling of the TX rings can cause starvation of rings with
11243 * small packets when other rings have TSO or jumbo packets.
11244 */
11245 if (!tp->txq_req)
11246 tp->txq_cnt = 1;
11248 tp->irq_cnt = tg3_irq_count(tp);
11250 for (i = 0; i < tp->irq_max; i++) {
11251 msix_ent[i].entry = i;
11252 msix_ent[i].vector = 0;
11255 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11256 if (rc < 0) {
11257 return false;
11258 } else if (rc != 0) {
11259 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11260 return false;
11261 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11262 tp->irq_cnt, rc);
11263 tp->irq_cnt = rc;
11264 tp->rxq_cnt = max(rc - 1, 1);
11265 if (tp->txq_cnt)
11266 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11267 }
11269 for (i = 0; i < tp->irq_max; i++)
11270 tp->napi[i].irq_vec = msix_ent[i].vector;
11272 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11273 pci_disable_msix(tp->pdev);
11277 if (tp->irq_cnt == 1)
11278 return true;
11280 tg3_flag_set(tp, ENABLE_RSS);
11282 if (tp->txq_cnt > 1)
11283 tg3_flag_set(tp, ENABLE_TSS);
11285 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11287 return true;
11288 }
11290 static void tg3_ints_init(struct tg3 *tp)
11292 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11293 !tg3_flag(tp, TAGGED_STATUS)) {
11294 /* All MSI supporting chips should support tagged
11295 * status. Assert that this is the case.
11297 netdev_warn(tp->dev,
11298 "MSI without TAGGED_STATUS? Not using MSI\n");
11302 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11303 tg3_flag_set(tp, USING_MSIX);
11304 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11305 tg3_flag_set(tp, USING_MSI);
11307 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11308 u32 msi_mode = tr32(MSGINT_MODE);
11309 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11310 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11311 if (!tg3_flag(tp, 1SHOT_MSI))
11312 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11313 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11315 defcfg:
11316 if (!tg3_flag(tp, USING_MSIX)) {
11317 tp->irq_cnt = 1;
11318 tp->napi[0].irq_vec = tp->pdev->irq;
11319 }
11321 if (tp->irq_cnt == 1) {
11322 tp->txq_cnt = 1;
11323 tp->rxq_cnt = 1;
11324 netif_set_real_num_tx_queues(tp->dev, 1);
11325 netif_set_real_num_rx_queues(tp->dev, 1);
11329 static void tg3_ints_fini(struct tg3 *tp)
11331 if (tg3_flag(tp, USING_MSIX))
11332 pci_disable_msix(tp->pdev);
11333 else if (tg3_flag(tp, USING_MSI))
11334 pci_disable_msi(tp->pdev);
11335 tg3_flag_clear(tp, USING_MSI);
11336 tg3_flag_clear(tp, USING_MSIX);
11337 tg3_flag_clear(tp, ENABLE_RSS);
11338 tg3_flag_clear(tp, ENABLE_TSS);
11341 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11342 bool init)
11343 {
11344 struct net_device *dev = tp->dev;
11345 int i, err;
11347 /*
11348 * Setup interrupts first so we know how
11349 * many NAPI resources to allocate
11350 */
11351 tg3_ints_init(tp);
11353 tg3_rss_check_indir_tbl(tp);
11355 /* The placement of this call is tied
11356 * to the setup and use of Host TX descriptors.
11358 err = tg3_alloc_consistent(tp);
11359 if (err)
11360 goto out_ints_fini;
11362 tg3_napi_init(tp);
11364 tg3_napi_enable(tp);
11366 for (i = 0; i < tp->irq_cnt; i++) {
11367 struct tg3_napi *tnapi = &tp->napi[i];
11368 err = tg3_request_irq(tp, i);
11370 for (i--; i >= 0; i--) {
11371 tnapi = &tp->napi[i];
11372 free_irq(tnapi->irq_vec, tnapi);
11374 goto out_napi_fini;
11378 tg3_full_lock(tp, 0);
11380 if (init)
11381 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11383 err = tg3_init_hw(tp, reset_phy);
11384 if (err) {
11385 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11386 tg3_free_rings(tp);
11387 }
11389 tg3_full_unlock(tp);
11391 if (err)
11392 goto out_free_irq;
11394 if (test_irq && tg3_flag(tp, USING_MSI)) {
11395 err = tg3_test_msi(tp);
11397 if (err) {
11398 tg3_full_lock(tp, 0);
11399 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11400 tg3_free_rings(tp);
11401 tg3_full_unlock(tp);
11403 goto out_napi_fini;
11404 }
11406 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11407 u32 val = tr32(PCIE_TRANSACTION_CFG);
11409 tw32(PCIE_TRANSACTION_CFG,
11410 val | PCIE_TRANS_CFG_1SHOT_MSI);
11411 }
11413 tg3_phy_start(tp);
11416 tg3_hwmon_open(tp);
11418 tg3_full_lock(tp, 0);
11420 tg3_timer_start(tp);
11421 tg3_flag_set(tp, INIT_COMPLETE);
11422 tg3_enable_ints(tp);
11426 if (init)
11427 tg3_ptp_resume(tp);
11430 tg3_full_unlock(tp);
11432 netif_tx_start_all_queues(dev);
11435 * Reset loopback feature if it was turned on while the device was down
11436 * make sure that it's installed properly now.
11438 if (dev->features & NETIF_F_LOOPBACK)
11439 tg3_set_loopback(dev, dev->features);
11443 out_free_irq:
11444 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11445 struct tg3_napi *tnapi = &tp->napi[i];
11446 free_irq(tnapi->irq_vec, tnapi);
11449 out_napi_fini:
11450 tg3_napi_disable(tp);
11451 tg3_napi_fini(tp);
11452 tg3_free_consistent(tp);
11454 out_ints_fini:
11455 tg3_ints_fini(tp);
11457 return err;
11458 }
11460 static void tg3_stop(struct tg3 *tp)
11461 {
11462 int i;
11464 tg3_reset_task_cancel(tp);
11465 tg3_netif_stop(tp);
11467 tg3_timer_stop(tp);
11469 tg3_hwmon_close(tp);
11471 tg3_phy_stop(tp);
11473 tg3_full_lock(tp, 1);
11475 tg3_disable_ints(tp);
11477 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11478 tg3_free_rings(tp);
11479 tg3_flag_clear(tp, INIT_COMPLETE);
11481 tg3_full_unlock(tp);
11483 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11484 struct tg3_napi *tnapi = &tp->napi[i];
11485 free_irq(tnapi->irq_vec, tnapi);
11486 }
11488 tg3_ints_fini(tp);
11490 tg3_napi_fini(tp);
11492 tg3_free_consistent(tp);
11495 static int tg3_open(struct net_device *dev)
11497 struct tg3 *tp = netdev_priv(dev);
11498 int err;
11500 if (tp->fw_needed) {
11501 err = tg3_reject_firmware(tp);
11502 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11503 if (err) {
11504 netdev_warn(tp->dev, "EEE capability disabled\n");
11505 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11506 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11507 netdev_warn(tp->dev, "EEE capability restored\n");
11508 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11509 }
11510 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11511 if (err)
11512 return err;
11513 } else if (err) {
11514 netdev_warn(tp->dev, "TSO capability disabled\n");
11515 tg3_flag_clear(tp, TSO_CAPABLE);
11516 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11517 netdev_notice(tp->dev, "TSO capability restored\n");
11518 tg3_flag_set(tp, TSO_CAPABLE);
11522 tg3_carrier_off(tp);
11524 err = tg3_power_up(tp);
11525 if (err)
11526 return err;
11528 tg3_full_lock(tp, 0);
11530 tg3_disable_ints(tp);
11531 tg3_flag_clear(tp, INIT_COMPLETE);
11533 tg3_full_unlock(tp);
11535 err = tg3_start(tp,
11536 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11537 true, true);
11538 if (err) {
11539 tg3_frob_aux_power(tp, false);
11540 pci_set_power_state(tp->pdev, PCI_D3hot);
11543 if (tg3_flag(tp, PTP_CAPABLE)) {
11544 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11545 &tp->pdev->dev);
11546 if (IS_ERR(tp->ptp_clock))
11547 tp->ptp_clock = NULL;
11548 }
11550 return err;
11551 }
11553 static int tg3_close(struct net_device *dev)
11555 struct tg3 *tp = netdev_priv(dev);
11557 tg3_ptp_fini(tp);
11559 tg3_stop(tp);
11561 /* Clear stats across close / open calls */
11562 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11563 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11565 tg3_power_down_prepare(tp);
11567 tg3_carrier_off(tp);
11569 return 0;
11570 }
11572 static inline u64 get_stat64(tg3_stat64_t *val)
11574 return ((u64)val->high << 32) | ((u64)val->low);
11577 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11579 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11581 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11582 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11583 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11584 u32 val;
11586 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11587 tg3_writephy(tp, MII_TG3_TEST1,
11588 val | MII_TG3_TEST1_CRC_EN);
11589 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11590 } else
11591 val = 0;
11593 tp->phy_crc_errors += val;
11595 return tp->phy_crc_errors;
11596 }
11598 return get_stat64(&hw_stats->rx_fcs_errors);
11601 #define ESTAT_ADD(member) \
11602 estats->member = old_estats->member + \
11603 get_stat64(&hw_stats->member)
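/* Annotation: ESTAT_ADD() folds the live hardware counter into the
 * snapshot saved before the last reset, so totals survive chip resets.
 * For instance, ESTAT_ADD(rx_octets) expands to
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 */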
11605 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11607 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11608 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11610 ESTAT_ADD(rx_octets);
11611 ESTAT_ADD(rx_fragments);
11612 ESTAT_ADD(rx_ucast_packets);
11613 ESTAT_ADD(rx_mcast_packets);
11614 ESTAT_ADD(rx_bcast_packets);
11615 ESTAT_ADD(rx_fcs_errors);
11616 ESTAT_ADD(rx_align_errors);
11617 ESTAT_ADD(rx_xon_pause_rcvd);
11618 ESTAT_ADD(rx_xoff_pause_rcvd);
11619 ESTAT_ADD(rx_mac_ctrl_rcvd);
11620 ESTAT_ADD(rx_xoff_entered);
11621 ESTAT_ADD(rx_frame_too_long_errors);
11622 ESTAT_ADD(rx_jabbers);
11623 ESTAT_ADD(rx_undersize_packets);
11624 ESTAT_ADD(rx_in_length_errors);
11625 ESTAT_ADD(rx_out_length_errors);
11626 ESTAT_ADD(rx_64_or_less_octet_packets);
11627 ESTAT_ADD(rx_65_to_127_octet_packets);
11628 ESTAT_ADD(rx_128_to_255_octet_packets);
11629 ESTAT_ADD(rx_256_to_511_octet_packets);
11630 ESTAT_ADD(rx_512_to_1023_octet_packets);
11631 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11632 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11633 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11634 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11635 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11637 ESTAT_ADD(tx_octets);
11638 ESTAT_ADD(tx_collisions);
11639 ESTAT_ADD(tx_xon_sent);
11640 ESTAT_ADD(tx_xoff_sent);
11641 ESTAT_ADD(tx_flow_control);
11642 ESTAT_ADD(tx_mac_errors);
11643 ESTAT_ADD(tx_single_collisions);
11644 ESTAT_ADD(tx_mult_collisions);
11645 ESTAT_ADD(tx_deferred);
11646 ESTAT_ADD(tx_excessive_collisions);
11647 ESTAT_ADD(tx_late_collisions);
11648 ESTAT_ADD(tx_collide_2times);
11649 ESTAT_ADD(tx_collide_3times);
11650 ESTAT_ADD(tx_collide_4times);
11651 ESTAT_ADD(tx_collide_5times);
11652 ESTAT_ADD(tx_collide_6times);
11653 ESTAT_ADD(tx_collide_7times);
11654 ESTAT_ADD(tx_collide_8times);
11655 ESTAT_ADD(tx_collide_9times);
11656 ESTAT_ADD(tx_collide_10times);
11657 ESTAT_ADD(tx_collide_11times);
11658 ESTAT_ADD(tx_collide_12times);
11659 ESTAT_ADD(tx_collide_13times);
11660 ESTAT_ADD(tx_collide_14times);
11661 ESTAT_ADD(tx_collide_15times);
11662 ESTAT_ADD(tx_ucast_packets);
11663 ESTAT_ADD(tx_mcast_packets);
11664 ESTAT_ADD(tx_bcast_packets);
11665 ESTAT_ADD(tx_carrier_sense_errors);
11666 ESTAT_ADD(tx_discards);
11667 ESTAT_ADD(tx_errors);
11669 ESTAT_ADD(dma_writeq_full);
11670 ESTAT_ADD(dma_write_prioq_full);
11671 ESTAT_ADD(rxbds_empty);
11672 ESTAT_ADD(rx_discards);
11673 ESTAT_ADD(rx_errors);
11674 ESTAT_ADD(rx_threshold_hit);
11676 ESTAT_ADD(dma_readq_full);
11677 ESTAT_ADD(dma_read_prioq_full);
11678 ESTAT_ADD(tx_comp_queue_full);
11680 ESTAT_ADD(ring_set_send_prod_index);
11681 ESTAT_ADD(ring_status_update);
11682 ESTAT_ADD(nic_irqs);
11683 ESTAT_ADD(nic_avoided_irqs);
11684 ESTAT_ADD(nic_tx_threshold_hit);
11686 ESTAT_ADD(mbuf_lwm_thresh_hit);
11689 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11691 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11692 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11694 stats->rx_packets = old_stats->rx_packets +
11695 get_stat64(&hw_stats->rx_ucast_packets) +
11696 get_stat64(&hw_stats->rx_mcast_packets) +
11697 get_stat64(&hw_stats->rx_bcast_packets);
11699 stats->tx_packets = old_stats->tx_packets +
11700 get_stat64(&hw_stats->tx_ucast_packets) +
11701 get_stat64(&hw_stats->tx_mcast_packets) +
11702 get_stat64(&hw_stats->tx_bcast_packets);
11704 stats->rx_bytes = old_stats->rx_bytes +
11705 get_stat64(&hw_stats->rx_octets);
11706 stats->tx_bytes = old_stats->tx_bytes +
11707 get_stat64(&hw_stats->tx_octets);
11709 stats->rx_errors = old_stats->rx_errors +
11710 get_stat64(&hw_stats->rx_errors);
11711 stats->tx_errors = old_stats->tx_errors +
11712 get_stat64(&hw_stats->tx_errors) +
11713 get_stat64(&hw_stats->tx_mac_errors) +
11714 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11715 get_stat64(&hw_stats->tx_discards);
11717 stats->multicast = old_stats->multicast +
11718 get_stat64(&hw_stats->rx_mcast_packets);
11719 stats->collisions = old_stats->collisions +
11720 get_stat64(&hw_stats->tx_collisions);
11722 stats->rx_length_errors = old_stats->rx_length_errors +
11723 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11724 get_stat64(&hw_stats->rx_undersize_packets);
11726 stats->rx_over_errors = old_stats->rx_over_errors +
11727 get_stat64(&hw_stats->rxbds_empty);
11728 stats->rx_frame_errors = old_stats->rx_frame_errors +
11729 get_stat64(&hw_stats->rx_align_errors);
11730 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11731 get_stat64(&hw_stats->tx_discards);
11732 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11733 get_stat64(&hw_stats->tx_carrier_sense_errors);
11735 stats->rx_crc_errors = old_stats->rx_crc_errors +
11736 tg3_calc_crc_errors(tp);
11738 stats->rx_missed_errors = old_stats->rx_missed_errors +
11739 get_stat64(&hw_stats->rx_discards);
11741 stats->rx_dropped = tp->rx_dropped;
11742 stats->tx_dropped = tp->tx_dropped;
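/*
 * get_stat64(), used throughout tg3_get_nstats() above, folds the two
 * 32-bit halves that the hardware statistics block keeps per counter
 * into one u64.  A standalone sketch of the pattern (it mirrors, not
 * replaces, the driver's own helper defined earlier in the file):
 */
#if 0	/* illustrative only */
static u64 get_stat64_sketch(tg3_stat64_t *val)
{
	/* high half shifted up, low half filled in underneath */
	return ((u64)val->high << 32) | ((u64)val->low);
}
#endif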
11745 static int tg3_get_regs_len(struct net_device *dev)
11747 return TG3_REG_BLK_SIZE;
11750 static void tg3_get_regs(struct net_device *dev,
11751 struct ethtool_regs *regs, void *_p)
11753 struct tg3 *tp = netdev_priv(dev);
11757 memset(_p, 0, TG3_REG_BLK_SIZE);
11759 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11762 tg3_full_lock(tp, 0);
11764 tg3_dump_legacy_regs(tp, (u32 *)_p);
11766 tg3_full_unlock(tp);
11769 static int tg3_get_eeprom_len(struct net_device *dev)
11771 struct tg3 *tp = netdev_priv(dev);
11773 return tp->nvram_size;
11776 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11778 struct tg3 *tp = netdev_priv(dev);
11781 u32 i, offset, len, b_offset, b_count;
11784 if (tg3_flag(tp, NO_NVRAM))
11787 offset = eeprom->offset;
11791 eeprom->magic = TG3_EEPROM_MAGIC;
11794 /* adjustments to start on required 4 byte boundary */
11795 b_offset = offset & 3;
11796 b_count = 4 - b_offset;
11797 if (b_count > len) {
11798 /* i.e. offset=1 len=2 */
11801 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11804 memcpy(data, ((char *)&val) + b_offset, b_count);
11807 eeprom->len += b_count;
11810 /* read bytes up to the last 4 byte boundary */
11811 pd = &data[eeprom->len];
11812 for (i = 0; i < (len - (len & 3)); i += 4) {
11813 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11818 memcpy(pd + i, &val, 4);
11823 /* read last bytes not ending on 4 byte boundary */
11824 pd = &data[eeprom->len];
11826 b_offset = offset + len - b_count;
11827 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11830 memcpy(pd, &val, b_count);
11831 eeprom->len += b_count;
11836 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11838 struct tg3 *tp = netdev_priv(dev);
11840 u32 offset, len, b_offset, odd_len;
11844 if (tg3_flag(tp, NO_NVRAM) ||
11845 eeprom->magic != TG3_EEPROM_MAGIC)
11848 offset = eeprom->offset;
11851 if ((b_offset = (offset & 3))) {
11852 /* adjustments to start on required 4 byte boundary */
11853 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11864 /* adjustments to end on required 4 byte boundary */
11866 len = (len + 3) & ~3;
11867 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11873 if (b_offset || odd_len) {
11874 buf = kmalloc(len, GFP_KERNEL);
11878 memcpy(buf, &start, 4);
11880 memcpy(buf+len-4, &end, 4);
11881 memcpy(buf + b_offset, data, eeprom->len);
11884 ret = tg3_nvram_write_block(tp, offset, len, buf);
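/*
 * The byte-shuffling above is a word-granular read-modify-write: NVRAM
 * is written in 4-byte units, so an unaligned request is widened to
 * word boundaries, the boundary words are read back into start/end,
 * and the caller's bytes are spliced over them.  Worked example for an
 * (offset=5, len=6) request: b_offset = 5 & 3 = 1, so the write starts
 * at word offset 4; the end lands mid-word, so odd_len = 1 and the
 * span grows to 8 bytes -- the words at offsets 4 and 8 are rewritten
 * with bytes 5..10 replaced and bytes 4 and 11 preserved.
 */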
11892 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11894 struct tg3 *tp = netdev_priv(dev);
11896 if (tg3_flag(tp, USE_PHYLIB)) {
11897 struct phy_device *phydev;
11898 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11900 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11901 return phy_ethtool_gset(phydev, cmd);
11904 cmd->supported = (SUPPORTED_Autoneg);
11906 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11907 cmd->supported |= (SUPPORTED_1000baseT_Half |
11908 SUPPORTED_1000baseT_Full);
11910 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11911 cmd->supported |= (SUPPORTED_100baseT_Half |
11912 SUPPORTED_100baseT_Full |
11913 SUPPORTED_10baseT_Half |
11914 SUPPORTED_10baseT_Full |
11916 cmd->port = PORT_TP;
11918 cmd->supported |= SUPPORTED_FIBRE;
11919 cmd->port = PORT_FIBRE;
11922 cmd->advertising = tp->link_config.advertising;
11923 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11924 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11925 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11926 cmd->advertising |= ADVERTISED_Pause;
11928 cmd->advertising |= ADVERTISED_Pause |
11929 ADVERTISED_Asym_Pause;
11931 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11932 cmd->advertising |= ADVERTISED_Asym_Pause;
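	/*
	 * The ladder above encodes the standard 802.3 pause-advertisement
	 * mapping:
	 *
	 *	FLOW_CTRL_RX	FLOW_CTRL_TX	advertise
	 *	yes		yes		Pause
	 *	yes		no		Pause | Asym_Pause
	 *	no		yes		Asym_Pause
	 *	no		no		(neither bit)
	 */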
11935 if (netif_running(dev) && tp->link_up) {
11936 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11937 cmd->duplex = tp->link_config.active_duplex;
11938 cmd->lp_advertising = tp->link_config.rmt_adv;
11939 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11940 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11941 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11943 cmd->eth_tp_mdix = ETH_TP_MDI;
11946 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11947 cmd->duplex = DUPLEX_UNKNOWN;
11948 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11950 cmd->phy_address = tp->phy_addr;
11951 cmd->transceiver = XCVR_INTERNAL;
11952 cmd->autoneg = tp->link_config.autoneg;
11958 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11960 struct tg3 *tp = netdev_priv(dev);
11961 u32 speed = ethtool_cmd_speed(cmd);
11963 if (tg3_flag(tp, USE_PHYLIB)) {
11964 struct phy_device *phydev;
11965 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11967 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11968 return phy_ethtool_sset(phydev, cmd);
11971 if (cmd->autoneg != AUTONEG_ENABLE &&
11972 cmd->autoneg != AUTONEG_DISABLE)
11975 if (cmd->autoneg == AUTONEG_DISABLE &&
11976 cmd->duplex != DUPLEX_FULL &&
11977 cmd->duplex != DUPLEX_HALF)
11980 if (cmd->autoneg == AUTONEG_ENABLE) {
11981 u32 mask = ADVERTISED_Autoneg |
11983 ADVERTISED_Asym_Pause;
11985 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11986 mask |= ADVERTISED_1000baseT_Half |
11987 ADVERTISED_1000baseT_Full;
11989 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11990 mask |= ADVERTISED_100baseT_Half |
11991 ADVERTISED_100baseT_Full |
11992 ADVERTISED_10baseT_Half |
11993 ADVERTISED_10baseT_Full |
11996 mask |= ADVERTISED_FIBRE;
11998 if (cmd->advertising & ~mask)
12001 mask &= (ADVERTISED_1000baseT_Half |
12002 ADVERTISED_1000baseT_Full |
12003 ADVERTISED_100baseT_Half |
12004 ADVERTISED_100baseT_Full |
12005 ADVERTISED_10baseT_Half |
12006 ADVERTISED_10baseT_Full);
12008 cmd->advertising &= mask;
12010 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12011 if (speed != SPEED_1000)
12014 if (cmd->duplex != DUPLEX_FULL)
12017 if (speed != SPEED_100 &&
12023 tg3_full_lock(tp, 0);
12025 tp->link_config.autoneg = cmd->autoneg;
12026 if (cmd->autoneg == AUTONEG_ENABLE) {
12027 tp->link_config.advertising = (cmd->advertising |
12028 ADVERTISED_Autoneg);
12029 tp->link_config.speed = SPEED_UNKNOWN;
12030 tp->link_config.duplex = DUPLEX_UNKNOWN;
12032 tp->link_config.advertising = 0;
12033 tp->link_config.speed = speed;
12034 tp->link_config.duplex = cmd->duplex;
12037 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12039 tg3_warn_mgmt_link_flap(tp);
12041 if (netif_running(dev))
12042 tg3_setup_phy(tp, true);
12044 tg3_full_unlock(tp);
12049 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12051 struct tg3 *tp = netdev_priv(dev);
12053 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12054 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12055 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12056 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12059 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12061 struct tg3 *tp = netdev_priv(dev);
12063 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12064 wol->supported = WAKE_MAGIC;
12066 wol->supported = 0;
12068 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12069 wol->wolopts = WAKE_MAGIC;
12070 memset(&wol->sopass, 0, sizeof(wol->sopass));
12073 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12075 struct tg3 *tp = netdev_priv(dev);
12076 struct device *dp = &tp->pdev->dev;
12078 if (wol->wolopts & ~WAKE_MAGIC)
12080 if ((wol->wolopts & WAKE_MAGIC) &&
12081 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12084 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12086 spin_lock_bh(&tp->lock);
12087 if (device_may_wakeup(dp))
12088 tg3_flag_set(tp, WOL_ENABLE);
12090 tg3_flag_clear(tp, WOL_ENABLE);
12091 spin_unlock_bh(&tp->lock);
12096 static u32 tg3_get_msglevel(struct net_device *dev)
12098 struct tg3 *tp = netdev_priv(dev);
12099 return tp->msg_enable;
12102 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12104 struct tg3 *tp = netdev_priv(dev);
12105 tp->msg_enable = value;
12108 static int tg3_nway_reset(struct net_device *dev)
12110 struct tg3 *tp = netdev_priv(dev);
12113 if (!netif_running(dev))
12116 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12119 tg3_warn_mgmt_link_flap(tp);
12121 if (tg3_flag(tp, USE_PHYLIB)) {
12122 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12124 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
12128 spin_lock_bh(&tp->lock);
12130 tg3_readphy(tp, MII_BMCR, &bmcr);
12131 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12132 ((bmcr & BMCR_ANENABLE) ||
12133 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12134 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12138 spin_unlock_bh(&tp->lock);
12144 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12146 struct tg3 *tp = netdev_priv(dev);
12148 ering->rx_max_pending = tp->rx_std_ring_mask;
12149 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12150 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12152 ering->rx_jumbo_max_pending = 0;
12154 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12156 ering->rx_pending = tp->rx_pending;
12157 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12158 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12160 ering->rx_jumbo_pending = 0;
12162 ering->tx_pending = tp->napi[0].tx_pending;
12165 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12167 struct tg3 *tp = netdev_priv(dev);
12168 int i, irq_sync = 0, err = 0;
12170 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12171 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12172 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12173 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12174 (tg3_flag(tp, TSO_BUG) &&
12175 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
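	/*
	 * The TSO_BUG bound above keeps extra headroom: the software TSO
	 * workaround resegments a GSO packet, and each resulting segment
	 * can need up to MAX_SKB_FRAGS descriptors, so a ring smaller than
	 * a few worst-case packets could starve the transmit path.
	 */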
12178 if (netif_running(dev)) {
12180 tg3_netif_stop(tp);
12184 tg3_full_lock(tp, irq_sync);
12186 tp->rx_pending = ering->rx_pending;
12188 if (tg3_flag(tp, MAX_RXPEND_64) &&
12189 tp->rx_pending > 63)
12190 tp->rx_pending = 63;
12191 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12193 for (i = 0; i < tp->irq_max; i++)
12194 tp->napi[i].tx_pending = ering->tx_pending;
12196 if (netif_running(dev)) {
12197 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12198 err = tg3_restart_hw(tp, false);
12200 tg3_netif_start(tp);
12203 tg3_full_unlock(tp);
12205 if (irq_sync && !err)
12211 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12213 struct tg3 *tp = netdev_priv(dev);
12215 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12217 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12218 epause->rx_pause = 1;
12220 epause->rx_pause = 0;
12222 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12223 epause->tx_pause = 1;
12225 epause->tx_pause = 0;
12228 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12230 struct tg3 *tp = netdev_priv(dev);
12233 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12234 tg3_warn_mgmt_link_flap(tp);
12236 if (tg3_flag(tp, USE_PHYLIB)) {
12238 struct phy_device *phydev;
12240 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12242 if (!(phydev->supported & SUPPORTED_Pause) ||
12243 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12244 (epause->rx_pause != epause->tx_pause)))
12247 tp->link_config.flowctrl = 0;
12248 if (epause->rx_pause) {
12249 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12251 if (epause->tx_pause) {
12252 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12253 newadv = ADVERTISED_Pause;
12255 newadv = ADVERTISED_Pause |
12256 ADVERTISED_Asym_Pause;
12257 } else if (epause->tx_pause) {
12258 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12259 newadv = ADVERTISED_Asym_Pause;
12263 if (epause->autoneg)
12264 tg3_flag_set(tp, PAUSE_AUTONEG);
12266 tg3_flag_clear(tp, PAUSE_AUTONEG);
12268 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12269 u32 oldadv = phydev->advertising &
12270 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12271 if (oldadv != newadv) {
12272 phydev->advertising &=
12273 ~(ADVERTISED_Pause |
12274 ADVERTISED_Asym_Pause);
12275 phydev->advertising |= newadv;
12276 if (phydev->autoneg) {
12278 * Always renegotiate the link to
12279 * inform our link partner of our
12280 * flow control settings, even if the
12281 * flow control is forced. Let
12282 * tg3_adjust_link() do the final
12283 * flow control setup.
12285 return phy_start_aneg(phydev);
12289 if (!epause->autoneg)
12290 tg3_setup_flow_control(tp, 0, 0);
12292 tp->link_config.advertising &=
12293 ~(ADVERTISED_Pause |
12294 ADVERTISED_Asym_Pause);
12295 tp->link_config.advertising |= newadv;
12300 if (netif_running(dev)) {
12301 tg3_netif_stop(tp);
12305 tg3_full_lock(tp, irq_sync);
12307 if (epause->autoneg)
12308 tg3_flag_set(tp, PAUSE_AUTONEG);
12310 tg3_flag_clear(tp, PAUSE_AUTONEG);
12311 if (epause->rx_pause)
12312 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12314 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12315 if (epause->tx_pause)
12316 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12318 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12320 if (netif_running(dev)) {
12321 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12322 err = tg3_restart_hw(tp, false);
12324 tg3_netif_start(tp);
12327 tg3_full_unlock(tp);
12330 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12335 static int tg3_get_sset_count(struct net_device *dev, int sset)
12339 return TG3_NUM_TEST;
12341 return TG3_NUM_STATS;
12343 return -EOPNOTSUPP;
12347 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12348 u32 *rules __always_unused)
12350 struct tg3 *tp = netdev_priv(dev);
12352 if (!tg3_flag(tp, SUPPORT_MSIX))
12353 return -EOPNOTSUPP;
12355 switch (info->cmd) {
12356 case ETHTOOL_GRXRINGS:
12357 if (netif_running(tp->dev))
12358 info->data = tp->rxq_cnt;
12360 info->data = num_online_cpus();
12361 if (info->data > TG3_RSS_MAX_NUM_QS)
12362 info->data = TG3_RSS_MAX_NUM_QS;
12365 /* The first interrupt vector only
12366 * handles link interrupts.
12372 return -EOPNOTSUPP;
12376 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12379 struct tg3 *tp = netdev_priv(dev);
12381 if (tg3_flag(tp, SUPPORT_MSIX))
12382 size = TG3_RSS_INDIR_TBL_SIZE;
12387 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12389 struct tg3 *tp = netdev_priv(dev);
12392 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12393 indir[i] = tp->rss_ind_tbl[i];
12398 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12400 struct tg3 *tp = netdev_priv(dev);
12403 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12404 tp->rss_ind_tbl[i] = indir[i];
12406 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12409 /* It is legal to write the indirection
12410 * table while the device is running.
12412 tg3_full_lock(tp, 0);
12413 tg3_rss_write_indir_tbl(tp);
12414 tg3_full_unlock(tp);
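/*
 * How the table written above is consumed: on receive, the NIC hashes
 * each flow (an RSS hash over the packet's address/port tuple) and
 * uses the hash to index the indirection table, which names the
 * destination RX queue.  A hedged software-side sketch of the lookup:
 */
#if 0	/* illustrative only -- helper name is hypothetical */
static u32 rss_pick_queue_sketch(const u32 *ind_tbl, u32 flow_hash)
{
	/* TG3_RSS_INDIR_TBL_SIZE entries, each holding a queue index */
	return ind_tbl[flow_hash % TG3_RSS_INDIR_TBL_SIZE];
}
#endif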
12419 static void tg3_get_channels(struct net_device *dev,
12420 struct ethtool_channels *channel)
12422 struct tg3 *tp = netdev_priv(dev);
12423 u32 deflt_qs = netif_get_num_default_rss_queues();
12425 channel->max_rx = tp->rxq_max;
12426 channel->max_tx = tp->txq_max;
12428 if (netif_running(dev)) {
12429 channel->rx_count = tp->rxq_cnt;
12430 channel->tx_count = tp->txq_cnt;
12433 channel->rx_count = tp->rxq_req;
12435 channel->rx_count = min(deflt_qs, tp->rxq_max);
12438 channel->tx_count = tp->txq_req;
12440 channel->tx_count = min(deflt_qs, tp->txq_max);
12444 static int tg3_set_channels(struct net_device *dev,
12445 struct ethtool_channels *channel)
12447 struct tg3 *tp = netdev_priv(dev);
12449 if (!tg3_flag(tp, SUPPORT_MSIX))
12450 return -EOPNOTSUPP;
12452 if (channel->rx_count > tp->rxq_max ||
12453 channel->tx_count > tp->txq_max)
12456 tp->rxq_req = channel->rx_count;
12457 tp->txq_req = channel->tx_count;
12459 if (!netif_running(dev))
12464 tg3_carrier_off(tp);
12466 tg3_start(tp, true, false, false);
12471 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12473 switch (stringset) {
12475 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12478 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12481 WARN_ON(1); /* we need a WARN() */
12486 static int tg3_set_phys_id(struct net_device *dev,
12487 enum ethtool_phys_id_state state)
12489 struct tg3 *tp = netdev_priv(dev);
12491 if (!netif_running(tp->dev))
12495 case ETHTOOL_ID_ACTIVE:
12496 return 1; /* cycle on/off once per second */
12498 case ETHTOOL_ID_ON:
12499 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12500 LED_CTRL_1000MBPS_ON |
12501 LED_CTRL_100MBPS_ON |
12502 LED_CTRL_10MBPS_ON |
12503 LED_CTRL_TRAFFIC_OVERRIDE |
12504 LED_CTRL_TRAFFIC_BLINK |
12505 LED_CTRL_TRAFFIC_LED);
12508 case ETHTOOL_ID_OFF:
12509 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12510 LED_CTRL_TRAFFIC_OVERRIDE);
12513 case ETHTOOL_ID_INACTIVE:
12514 tw32(MAC_LED_CTRL, tp->led_ctrl);
12521 static void tg3_get_ethtool_stats(struct net_device *dev,
12522 struct ethtool_stats *estats, u64 *tmp_stats)
12524 struct tg3 *tp = netdev_priv(dev);
12527 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12529 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12532 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12536 u32 offset = 0, len = 0;
12539 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12542 if (magic == TG3_EEPROM_MAGIC) {
12543 for (offset = TG3_NVM_DIR_START;
12544 offset < TG3_NVM_DIR_END;
12545 offset += TG3_NVM_DIRENT_SIZE) {
12546 if (tg3_nvram_read(tp, offset, &val))
12549 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12550 TG3_NVM_DIRTYPE_EXTVPD)
12554 if (offset != TG3_NVM_DIR_END) {
12555 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12556 if (tg3_nvram_read(tp, offset + 4, &offset))
12559 offset = tg3_nvram_logical_addr(tp, offset);
12563 if (!offset || !len) {
12564 offset = TG3_NVM_VPD_OFF;
12565 len = TG3_NVM_VPD_LEN;
12568 buf = kmalloc(len, GFP_KERNEL);
12572 if (magic == TG3_EEPROM_MAGIC) {
12573 for (i = 0; i < len; i += 4) {
12574 /* The data is in little-endian format in NVRAM.
12575 * Use the big-endian read routines to preserve
12576 * the byte order as it exists in NVRAM.
12578 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12584 unsigned int pos = 0;
12586 ptr = (u8 *)&buf[0];
12587 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12588 cnt = pci_read_vpd(tp->pdev, pos,
12590 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12608 #define NVRAM_TEST_SIZE 0x100
12609 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12610 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12611 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12612 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12613 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12614 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12615 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12616 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12618 static int tg3_test_nvram(struct tg3 *tp)
12620 u32 csum, magic, len;
12622 int i, j, k, err = 0, size;
12624 if (tg3_flag(tp, NO_NVRAM))
12627 if (tg3_nvram_read(tp, 0, &magic) != 0)
12630 if (magic == TG3_EEPROM_MAGIC)
12631 size = NVRAM_TEST_SIZE;
12632 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12633 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12634 TG3_EEPROM_SB_FORMAT_1) {
12635 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12636 case TG3_EEPROM_SB_REVISION_0:
12637 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12639 case TG3_EEPROM_SB_REVISION_2:
12640 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12642 case TG3_EEPROM_SB_REVISION_3:
12643 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12645 case TG3_EEPROM_SB_REVISION_4:
12646 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12648 case TG3_EEPROM_SB_REVISION_5:
12649 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12651 case TG3_EEPROM_SB_REVISION_6:
12652 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12659 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12660 size = NVRAM_SELFBOOT_HW_SIZE;
12664 buf = kmalloc(size, GFP_KERNEL);
12669 for (i = 0, j = 0; i < size; i += 4, j++) {
12670 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12677 /* Selfboot format */
12678 magic = be32_to_cpu(buf[0]);
12679 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12680 TG3_EEPROM_MAGIC_FW) {
12681 u8 *buf8 = (u8 *) buf, csum8 = 0;
12683 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12684 TG3_EEPROM_SB_REVISION_2) {
12685 /* For rev 2, the csum doesn't include the MBA. */
12686 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12688 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12691 for (i = 0; i < size; i++)
12704 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12705 TG3_EEPROM_MAGIC_HW) {
12706 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12707 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12708 u8 *buf8 = (u8 *) buf;
12710 /* Separate the parity bits and the data bytes. */
12711 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12712 if ((i == 0) || (i == 8)) {
12716 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12717 parity[k++] = buf8[i] & msk;
12719 } else if (i == 16) {
12723 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12724 parity[k++] = buf8[i] & msk;
12727 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12728 parity[k++] = buf8[i] & msk;
12731 data[j++] = buf8[i];
12735 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12736 u8 hw8 = hweight8(data[i]);
12738 if ((hw8 & 0x1) && parity[i])
12740 else if (!(hw8 & 0x1) && !parity[i])
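			/*
			 * Odd-parity rule enforced above: the stored parity
			 * bit must be set exactly when the data byte has an
			 * even number of 1 bits, so byte plus parity always
			 * carry an odd bit count.  Equivalent one-liner:
			 * ok = (hweight8(data[i]) & 1) ^ !!parity[i];
			 */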
12749 /* Bootstrap checksum at offset 0x10 */
12750 csum = calc_crc((unsigned char *) buf, 0x10);
12751 if (csum != le32_to_cpu(buf[0x10/4]))
12754 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12755 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12756 if (csum != le32_to_cpu(buf[0xfc/4]))
12761 buf = tg3_vpd_readblock(tp, &len);
12765 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12767 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12771 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12774 i += PCI_VPD_LRDT_TAG_SIZE;
12775 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12776 PCI_VPD_RO_KEYWORD_CHKSUM);
12780 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12782 for (i = 0; i <= j; i++)
12783 csum8 += ((u8 *)buf)[i];
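	/*
	 * PCI VPD "RV" checksum rule checked here: the byte sum from the
	 * start of the VPD image through the stored checksum byte must be
	 * 0 mod 256, i.e. the loop above must leave csum8 == 0.  Example:
	 * if the preceding bytes sum to 0x...34, the stored byte is
	 * 0x100 - 0x34 = 0xcc, and 0x34 + 0xcc wraps to 0x00.
	 */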
12797 #define TG3_SERDES_TIMEOUT_SEC 2
12798 #define TG3_COPPER_TIMEOUT_SEC 6
12800 static int tg3_test_link(struct tg3 *tp)
12804 if (!netif_running(tp->dev))
12807 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12808 max = TG3_SERDES_TIMEOUT_SEC;
12810 max = TG3_COPPER_TIMEOUT_SEC;
12812 for (i = 0; i < max; i++) {
12816 if (msleep_interruptible(1000))
12823 /* Only test the commonly used registers */
12824 static int tg3_test_registers(struct tg3 *tp)
12826 int i, is_5705, is_5750;
12827 u32 offset, read_mask, write_mask, val, save_val, read_val;
12831 #define TG3_FL_5705 0x1
12832 #define TG3_FL_NOT_5705 0x2
12833 #define TG3_FL_NOT_5788 0x4
12834 #define TG3_FL_NOT_5750 0x8
12838 /* MAC Control Registers */
12839 { MAC_MODE, TG3_FL_NOT_5705,
12840 0x00000000, 0x00ef6f8c },
12841 { MAC_MODE, TG3_FL_5705,
12842 0x00000000, 0x01ef6b8c },
12843 { MAC_STATUS, TG3_FL_NOT_5705,
12844 0x03800107, 0x00000000 },
12845 { MAC_STATUS, TG3_FL_5705,
12846 0x03800100, 0x00000000 },
12847 { MAC_ADDR_0_HIGH, 0x0000,
12848 0x00000000, 0x0000ffff },
12849 { MAC_ADDR_0_LOW, 0x0000,
12850 0x00000000, 0xffffffff },
12851 { MAC_RX_MTU_SIZE, 0x0000,
12852 0x00000000, 0x0000ffff },
12853 { MAC_TX_MODE, 0x0000,
12854 0x00000000, 0x00000070 },
12855 { MAC_TX_LENGTHS, 0x0000,
12856 0x00000000, 0x00003fff },
12857 { MAC_RX_MODE, TG3_FL_NOT_5705,
12858 0x00000000, 0x000007fc },
12859 { MAC_RX_MODE, TG3_FL_5705,
12860 0x00000000, 0x000007dc },
12861 { MAC_HASH_REG_0, 0x0000,
12862 0x00000000, 0xffffffff },
12863 { MAC_HASH_REG_1, 0x0000,
12864 0x00000000, 0xffffffff },
12865 { MAC_HASH_REG_2, 0x0000,
12866 0x00000000, 0xffffffff },
12867 { MAC_HASH_REG_3, 0x0000,
12868 0x00000000, 0xffffffff },
12870 /* Receive Data and Receive BD Initiator Control Registers. */
12871 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12872 0x00000000, 0xffffffff },
12873 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12874 0x00000000, 0xffffffff },
12875 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12876 0x00000000, 0x00000003 },
12877 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12878 0x00000000, 0xffffffff },
12879 { RCVDBDI_STD_BD+0, 0x0000,
12880 0x00000000, 0xffffffff },
12881 { RCVDBDI_STD_BD+4, 0x0000,
12882 0x00000000, 0xffffffff },
12883 { RCVDBDI_STD_BD+8, 0x0000,
12884 0x00000000, 0xffff0002 },
12885 { RCVDBDI_STD_BD+0xc, 0x0000,
12886 0x00000000, 0xffffffff },
12888 /* Receive BD Initiator Control Registers. */
12889 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12890 0x00000000, 0xffffffff },
12891 { RCVBDI_STD_THRESH, TG3_FL_5705,
12892 0x00000000, 0x000003ff },
12893 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12894 0x00000000, 0xffffffff },
12896 /* Host Coalescing Control Registers. */
12897 { HOSTCC_MODE, TG3_FL_NOT_5705,
12898 0x00000000, 0x00000004 },
12899 { HOSTCC_MODE, TG3_FL_5705,
12900 0x00000000, 0x000000f6 },
12901 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12902 0x00000000, 0xffffffff },
12903 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12904 0x00000000, 0x000003ff },
12905 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12906 0x00000000, 0xffffffff },
12907 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12908 0x00000000, 0x000003ff },
12909 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12910 0x00000000, 0xffffffff },
12911 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12912 0x00000000, 0x000000ff },
12913 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12914 0x00000000, 0xffffffff },
12915 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12916 0x00000000, 0x000000ff },
12917 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12918 0x00000000, 0xffffffff },
12919 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12920 0x00000000, 0xffffffff },
12921 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12922 0x00000000, 0xffffffff },
12923 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12924 0x00000000, 0x000000ff },
12925 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12926 0x00000000, 0xffffffff },
12927 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12928 0x00000000, 0x000000ff },
12929 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12930 0x00000000, 0xffffffff },
12931 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12932 0x00000000, 0xffffffff },
12933 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12934 0x00000000, 0xffffffff },
12935 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12936 0x00000000, 0xffffffff },
12937 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12938 0x00000000, 0xffffffff },
12939 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12940 0xffffffff, 0x00000000 },
12941 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12942 0xffffffff, 0x00000000 },
12944 /* Buffer Manager Control Registers. */
12945 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12946 0x00000000, 0x007fff80 },
12947 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12948 0x00000000, 0x007fffff },
12949 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12950 0x00000000, 0x0000003f },
12951 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12952 0x00000000, 0x000001ff },
12953 { BUFMGR_MB_HIGH_WATER, 0x0000,
12954 0x00000000, 0x000001ff },
12955 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12956 0xffffffff, 0x00000000 },
12957 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12958 0xffffffff, 0x00000000 },
12960 /* Mailbox Registers */
12961 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12962 0x00000000, 0x000001ff },
12963 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12964 0x00000000, 0x000001ff },
12965 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12966 0x00000000, 0x000007ff },
12967 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12968 0x00000000, 0x000001ff },
12970 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12973 is_5705 = is_5750 = 0;
12974 if (tg3_flag(tp, 5705_PLUS)) {
12976 if (tg3_flag(tp, 5750_PLUS))
12980 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12981 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12984 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12987 if (tg3_flag(tp, IS_5788) &&
12988 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12991 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12994 offset = (u32) reg_tbl[i].offset;
12995 read_mask = reg_tbl[i].read_mask;
12996 write_mask = reg_tbl[i].write_mask;
12998 /* Save the original register content */
12999 save_val = tr32(offset);
13001 /* Determine the read-only value. */
13002 read_val = save_val & read_mask;
13004 /* Write zero to the register, then make sure the read-only bits
13005 * are not changed and the read/write bits are all zeros.
13009 val = tr32(offset);
13011 /* Test the read-only and read/write bits. */
13012 if (((val & read_mask) != read_val) || (val & write_mask))
13015 /* Write ones to all the bits defined by RdMask and WrMask, then
13016 * make sure the read-only bits are not changed and the
13017 * read/write bits are all ones.
13019 tw32(offset, read_mask | write_mask);
13021 val = tr32(offset);
13023 /* Test the read-only bits. */
13024 if ((val & read_mask) != read_val)
13027 /* Test the read/write bits. */
13028 if ((val & write_mask) != write_mask)
13031 tw32(offset, save_val);
13037 if (netif_msg_hw(tp))
13038 netdev_err(tp->dev,
13039 "Register test failed at offset %x\n", offset);
13040 tw32(offset, save_val);
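/*
 * Worked example of the mask protocol above, using the MAC_ADDR_0_HIGH
 * table entry (read_mask 0x00000000, write_mask 0x0000ffff): writing 0
 * must read back as 0 in the low 16 bits, and writing 0x0000ffff must
 * read back as 0x0000ffff there, with bits outside both masks ignored.
 * Any other result flags a stuck or unwritable register bit, and the
 * original value is restored before the failure is reported.
 */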
13044 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13046 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13050 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13051 for (j = 0; j < len; j += 4) {
13054 tg3_write_mem(tp, offset + j, test_pattern[i]);
13055 tg3_read_mem(tp, offset + j, &val);
13056 if (val != test_pattern[i])
13063 static int tg3_test_memory(struct tg3 *tp)
13065 static struct mem_entry {
13068 } mem_tbl_570x[] = {
13069 { 0x00000000, 0x00b50},
13070 { 0x00002000, 0x1c000},
13071 { 0xffffffff, 0x00000}
13072 }, mem_tbl_5705[] = {
13073 { 0x00000100, 0x0000c},
13074 { 0x00000200, 0x00008},
13075 { 0x00004000, 0x00800},
13076 { 0x00006000, 0x01000},
13077 { 0x00008000, 0x02000},
13078 { 0x00010000, 0x0e000},
13079 { 0xffffffff, 0x00000}
13080 }, mem_tbl_5755[] = {
13081 { 0x00000200, 0x00008},
13082 { 0x00004000, 0x00800},
13083 { 0x00006000, 0x00800},
13084 { 0x00008000, 0x02000},
13085 { 0x00010000, 0x0c000},
13086 { 0xffffffff, 0x00000}
13087 }, mem_tbl_5906[] = {
13088 { 0x00000200, 0x00008},
13089 { 0x00004000, 0x00400},
13090 { 0x00006000, 0x00400},
13091 { 0x00008000, 0x01000},
13092 { 0x00010000, 0x01000},
13093 { 0xffffffff, 0x00000}
13094 }, mem_tbl_5717[] = {
13095 { 0x00000200, 0x00008},
13096 { 0x00010000, 0x0a000},
13097 { 0x00020000, 0x13c00},
13098 { 0xffffffff, 0x00000}
13099 }, mem_tbl_57765[] = {
13100 { 0x00000200, 0x00008},
13101 { 0x00004000, 0x00800},
13102 { 0x00006000, 0x09800},
13103 { 0x00010000, 0x0a000},
13104 { 0xffffffff, 0x00000}
13106 struct mem_entry *mem_tbl;
13110 if (tg3_flag(tp, 5717_PLUS))
13111 mem_tbl = mem_tbl_5717;
13112 else if (tg3_flag(tp, 57765_CLASS) ||
13113 tg3_asic_rev(tp) == ASIC_REV_5762)
13114 mem_tbl = mem_tbl_57765;
13115 else if (tg3_flag(tp, 5755_PLUS))
13116 mem_tbl = mem_tbl_5755;
13117 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13118 mem_tbl = mem_tbl_5906;
13119 else if (tg3_flag(tp, 5705_PLUS))
13120 mem_tbl = mem_tbl_5705;
13122 mem_tbl = mem_tbl_570x;
13124 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13125 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13133 #define TG3_TSO_MSS 500
13135 #define TG3_TSO_IP_HDR_LEN 20
13136 #define TG3_TSO_TCP_HDR_LEN 20
13137 #define TG3_TSO_TCP_OPT_LEN 12
13139 static const u8 tg3_tso_header[] = {
13141 0x45, 0x00, 0x00, 0x00,
13142 0x00, 0x00, 0x40, 0x00,
13143 0x40, 0x06, 0x00, 0x00,
13144 0x0a, 0x00, 0x00, 0x01,
13145 0x0a, 0x00, 0x00, 0x02,
13146 0x0d, 0x00, 0xe0, 0x00,
13147 0x00, 0x00, 0x01, 0x00,
13148 0x00, 0x00, 0x02, 0x00,
13149 0x80, 0x10, 0x10, 0x00,
13150 0x14, 0x09, 0x00, 0x00,
13151 0x01, 0x01, 0x08, 0x0a,
13152 0x11, 0x11, 0x11, 0x11,
13153 0x11, 0x11, 0x11, 0x11,
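/*
 * Byte map of tg3_tso_header above (no Ethernet header included):
 * bytes  0..19  IPv4 header: version/IHL 0x45, protocol 0x06 (TCP),
 *               DF set, saddr 10.0.0.1, daddr 10.0.0.2; tot_len left
 *               zero for tg3_run_loopback() to fill in per test.
 * bytes 20..51  TCP header with 12 option bytes: ACK flag (0x10),
 *               window 0x1000, data offset 8 (32-byte header), and a
 *               trailing timestamp option padded with 0x11 filler.
 */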
13156 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13158 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13159 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13161 struct sk_buff *skb;
13162 u8 *tx_data, *rx_data;
13164 int num_pkts, tx_len, rx_len, i, err;
13165 struct tg3_rx_buffer_desc *desc;
13166 struct tg3_napi *tnapi, *rnapi;
13167 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13169 tnapi = &tp->napi[0];
13170 rnapi = &tp->napi[0];
13171 if (tp->irq_cnt > 1) {
13172 if (tg3_flag(tp, ENABLE_RSS))
13173 rnapi = &tp->napi[1];
13174 if (tg3_flag(tp, ENABLE_TSS))
13175 tnapi = &tp->napi[1];
13177 coal_now = tnapi->coal_now | rnapi->coal_now;
13182 skb = netdev_alloc_skb(tp->dev, tx_len);
13186 tx_data = skb_put(skb, tx_len);
13187 memcpy(tx_data, tp->dev->dev_addr, 6);
13188 memset(tx_data + 6, 0x0, 8);
13190 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13192 if (tso_loopback) {
13193 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13195 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13196 TG3_TSO_TCP_OPT_LEN;
13198 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13199 sizeof(tg3_tso_header));
13202 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13203 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13205 /* Set the total length field in the IP header */
13206 iph->tot_len = htons((u16)(mss + hdr_len));
13208 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13209 TXD_FLAG_CPU_POST_DMA);
13211 if (tg3_flag(tp, HW_TSO_1) ||
13212 tg3_flag(tp, HW_TSO_2) ||
13213 tg3_flag(tp, HW_TSO_3)) {
13215 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13216 th = (struct tcphdr *)&tx_data[val];
13219 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13221 if (tg3_flag(tp, HW_TSO_3)) {
13222 mss |= (hdr_len & 0xc) << 12;
13223 if (hdr_len & 0x10)
13224 base_flags |= 0x00000010;
13225 base_flags |= (hdr_len & 0x3e0) << 5;
13226 } else if (tg3_flag(tp, HW_TSO_2))
13227 mss |= hdr_len << 9;
13228 else if (tg3_flag(tp, HW_TSO_1) ||
13229 tg3_asic_rev(tp) == ASIC_REV_5705) {
13230 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13232 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13235 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13238 data_off = ETH_HLEN;
13240 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13241 tx_len > VLAN_ETH_FRAME_LEN)
13242 base_flags |= TXD_FLAG_JMB_PKT;
13245 for (i = data_off; i < tx_len; i++)
13246 tx_data[i] = (u8) (i & 0xff);
13248 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13249 if (pci_dma_mapping_error(tp->pdev, map)) {
13250 dev_kfree_skb(skb);
13254 val = tnapi->tx_prod;
13255 tnapi->tx_buffers[val].skb = skb;
13256 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13258 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13263 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13265 budget = tg3_tx_avail(tnapi);
13266 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13267 base_flags | TXD_FLAG_END, mss, 0)) {
13268 tnapi->tx_buffers[val].skb = NULL;
13269 dev_kfree_skb(skb);
13275 /* Sync BD data before updating mailbox */
13278 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13279 tr32_mailbox(tnapi->prodmbox);
13283 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13284 for (i = 0; i < 35; i++) {
13285 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13290 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13291 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13292 if ((tx_idx == tnapi->tx_prod) &&
13293 (rx_idx == (rx_start_idx + num_pkts)))
13297 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13298 dev_kfree_skb(skb);
13300 if (tx_idx != tnapi->tx_prod)
13303 if (rx_idx != rx_start_idx + num_pkts)
13307 while (rx_idx != rx_start_idx) {
13308 desc = &rnapi->rx_rcb[rx_start_idx++];
13309 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13310 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13312 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13313 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13316 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13319 if (!tso_loopback) {
13320 if (rx_len != tx_len)
13323 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13324 if (opaque_key != RXD_OPAQUE_RING_STD)
13327 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13330 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13331 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13332 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13336 if (opaque_key == RXD_OPAQUE_RING_STD) {
13337 rx_data = tpr->rx_std_buffers[desc_idx].data;
13338 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13340 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13341 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13342 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13347 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13348 PCI_DMA_FROMDEVICE);
13350 rx_data += TG3_RX_OFFSET(tp);
13351 for (i = data_off; i < rx_len; i++, val++) {
13352 if (*(rx_data + i) != (u8) (val & 0xff))
13359 /* tg3_free_rings will unmap and free the rx_data */
13364 #define TG3_STD_LOOPBACK_FAILED 1
13365 #define TG3_JMB_LOOPBACK_FAILED 2
13366 #define TG3_TSO_LOOPBACK_FAILED 4
13367 #define TG3_LOOPBACK_FAILED \
13368 (TG3_STD_LOOPBACK_FAILED | \
13369 TG3_JMB_LOOPBACK_FAILED | \
13370 TG3_TSO_LOOPBACK_FAILED)
13372 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13376 u32 jmb_pkt_sz = 9000;
13379 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13381 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13382 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13384 if (!netif_running(tp->dev)) {
13385 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13386 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13388 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13392 err = tg3_reset_hw(tp, true);
13394 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13395 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13397 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13401 if (tg3_flag(tp, ENABLE_RSS)) {
13404 /* Reroute all rx packets to the 1st queue */
13405 for (i = MAC_RSS_INDIR_TBL_0;
13406 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13410 /* HW errata - mac loopback fails in some cases on 5780.
13411 * Normal traffic and PHY loopback are not affected by
13412 * errata. Also, the MAC loopback test is deprecated for
13413 * all newer ASIC revisions.
13415 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13416 !tg3_flag(tp, CPMU_PRESENT)) {
13417 tg3_mac_loopback(tp, true);
13419 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13420 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13422 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13423 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13424 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13426 tg3_mac_loopback(tp, false);
13429 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13430 !tg3_flag(tp, USE_PHYLIB)) {
13433 tg3_phy_lpbk_set(tp, 0, false);
13435 /* Wait for link */
13436 for (i = 0; i < 100; i++) {
13437 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13442 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13443 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13444 if (tg3_flag(tp, TSO_CAPABLE) &&
13445 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13446 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13447 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13448 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13449 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13452 tg3_phy_lpbk_set(tp, 0, true);
13454 /* All link indications report up, but the hardware
13455 * isn't really ready for about 20 msec. Double it to be sure.
13460 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13461 data[TG3_EXT_LOOPB_TEST] |=
13462 TG3_STD_LOOPBACK_FAILED;
13463 if (tg3_flag(tp, TSO_CAPABLE) &&
13464 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13465 data[TG3_EXT_LOOPB_TEST] |=
13466 TG3_TSO_LOOPBACK_FAILED;
13467 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13468 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13469 data[TG3_EXT_LOOPB_TEST] |=
13470 TG3_JMB_LOOPBACK_FAILED;
13473 /* Re-enable gphy autopowerdown. */
13474 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13475 tg3_phy_toggle_apd(tp, true);
13478 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13479 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13482 tp->phy_flags |= eee_cap;
13487 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13490 struct tg3 *tp = netdev_priv(dev);
13491 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13493 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13494 if (tg3_power_up(tp)) {
13495 etest->flags |= ETH_TEST_FL_FAILED;
13496 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13499 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13502 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13504 if (tg3_test_nvram(tp) != 0) {
13505 etest->flags |= ETH_TEST_FL_FAILED;
13506 data[TG3_NVRAM_TEST] = 1;
13508 if (!doextlpbk && tg3_test_link(tp)) {
13509 etest->flags |= ETH_TEST_FL_FAILED;
13510 data[TG3_LINK_TEST] = 1;
13512 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13513 int err, err2 = 0, irq_sync = 0;
13515 if (netif_running(dev)) {
13517 tg3_netif_stop(tp);
13521 tg3_full_lock(tp, irq_sync);
13522 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13523 err = tg3_nvram_lock(tp);
13524 tg3_halt_cpu(tp, RX_CPU_BASE);
13525 if (!tg3_flag(tp, 5705_PLUS))
13526 tg3_halt_cpu(tp, TX_CPU_BASE);
13528 tg3_nvram_unlock(tp);
13530 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13533 if (tg3_test_registers(tp) != 0) {
13534 etest->flags |= ETH_TEST_FL_FAILED;
13535 data[TG3_REGISTER_TEST] = 1;
13538 if (tg3_test_memory(tp) != 0) {
13539 etest->flags |= ETH_TEST_FL_FAILED;
13540 data[TG3_MEMORY_TEST] = 1;
13544 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13546 if (tg3_test_loopback(tp, data, doextlpbk))
13547 etest->flags |= ETH_TEST_FL_FAILED;
13549 tg3_full_unlock(tp);
13551 if (tg3_test_interrupt(tp) != 0) {
13552 etest->flags |= ETH_TEST_FL_FAILED;
13553 data[TG3_INTERRUPT_TEST] = 1;
13556 tg3_full_lock(tp, 0);
13558 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13559 if (netif_running(dev)) {
13560 tg3_flag_set(tp, INIT_COMPLETE);
13561 err2 = tg3_restart_hw(tp, true);
13563 tg3_netif_start(tp);
13566 tg3_full_unlock(tp);
13568 if (irq_sync && !err2)
13571 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13572 tg3_power_down_prepare(tp);
13576 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13577 struct ifreq *ifr, int cmd)
13579 struct tg3 *tp = netdev_priv(dev);
13580 struct hwtstamp_config stmpconf;
13582 if (!tg3_flag(tp, PTP_CAPABLE))
13585 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13588 if (stmpconf.flags)
13591 switch (stmpconf.tx_type) {
13592 case HWTSTAMP_TX_ON:
13593 tg3_flag_set(tp, TX_TSTAMP_EN);
13595 case HWTSTAMP_TX_OFF:
13596 tg3_flag_clear(tp, TX_TSTAMP_EN);
13602 switch (stmpconf.rx_filter) {
13603 case HWTSTAMP_FILTER_NONE:
13606 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13607 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13608 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13610 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13611 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13612 TG3_RX_PTP_CTL_SYNC_EVNT;
13614 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13615 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13616 TG3_RX_PTP_CTL_DELAY_REQ;
13618 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13619 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13620 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13622 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13623 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13624 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13626 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13627 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13628 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13630 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13631 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13632 TG3_RX_PTP_CTL_SYNC_EVNT;
13634 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13635 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13636 TG3_RX_PTP_CTL_SYNC_EVNT;
13638 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13639 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13640 TG3_RX_PTP_CTL_SYNC_EVNT;
13642 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13643 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13644 TG3_RX_PTP_CTL_DELAY_REQ;
13646 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13647 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13648 TG3_RX_PTP_CTL_DELAY_REQ;
13650 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13651 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13652 TG3_RX_PTP_CTL_DELAY_REQ;
13658 if (netif_running(dev) && tp->rxptpctl)
13659 tw32(TG3_RX_PTP_CTL,
13660 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13662 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13666 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13668 struct mii_ioctl_data *data = if_mii(ifr);
13669 struct tg3 *tp = netdev_priv(dev);
13672 if (tg3_flag(tp, USE_PHYLIB)) {
13673 struct phy_device *phydev;
13674 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13676 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13677 return phy_mii_ioctl(phydev, ifr, cmd);
13682 data->phy_id = tp->phy_addr;
13685 case SIOCGMIIREG: {
13688 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13689 break; /* We have no PHY */
13691 if (!netif_running(dev))
13694 spin_lock_bh(&tp->lock);
13695 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13696 data->reg_num & 0x1f, &mii_regval);
13697 spin_unlock_bh(&tp->lock);
13699 data->val_out = mii_regval;
13705 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13706 break; /* We have no PHY */
13708 if (!netif_running(dev))
13711 spin_lock_bh(&tp->lock);
13712 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13713 data->reg_num & 0x1f, data->val_in);
13714 spin_unlock_bh(&tp->lock);
13718 case SIOCSHWTSTAMP:
13719 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13725 return -EOPNOTSUPP;
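/*
 * Userspace reaches tg3_hwtstamp_ioctl() above through the standard
 * SIOCSHWTSTAMP ioctl.  A hedged sketch of the calling convention
 * (userspace, not driver, code; the interface name "eth0" is only an
 * example, and the call requires CAP_NET_ADMIN):
 */
#if 0	/* illustrative only */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_ptp_timestamps(int fd /* any AF_INET socket */)
{
	struct ifreq ifr;
	struct hwtstamp_config cfg;

	memset(&ifr, 0, sizeof(ifr));
	memset(&cfg, 0, sizeof(cfg));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
	ifr.ifr_data = (void *)&cfg;

	/* lands in tg3_hwtstamp_ioctl() via tg3_ioctl()/ndo_do_ioctl */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}
#endif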
13728 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13730 struct tg3 *tp = netdev_priv(dev);
13732 memcpy(ec, &tp->coal, sizeof(*ec));
13736 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13738 struct tg3 *tp = netdev_priv(dev);
13739 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13740 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13742 if (!tg3_flag(tp, 5705_PLUS)) {
13743 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13744 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13745 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13746 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13749 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13750 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13751 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13752 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13753 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13754 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13755 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13756 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13757 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13758 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13761 /* No rx interrupts will be generated if both are zero */
13762 if ((ec->rx_coalesce_usecs == 0) &&
13763 (ec->rx_max_coalesced_frames == 0))
13766 /* No tx interrupts will be generated if both are zero */
13767 if ((ec->tx_coalesce_usecs == 0) &&
13768 (ec->tx_max_coalesced_frames == 0))
13771 /* Only copy relevant parameters, ignore all others. */
13772 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13773 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13774 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13775 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13776 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13777 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13778 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13779 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13780 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13782 if (netif_running(dev)) {
13783 tg3_full_lock(tp, 0);
13784 __tg3_set_coalesce(tp, &tp->coal);
13785 tg3_full_unlock(tp);
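/*
 * These fields map one-to-one onto ethtool's coalescing knobs; for
 * example "ethtool -C eth0 rx-usecs 20 rx-frames 5" arrives here as
 * ec->rx_coalesce_usecs and ec->rx_max_coalesced_frames.  The zero/zero
 * combinations rejected above would otherwise silence RX or TX
 * interrupts entirely.
 */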
13790 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13792 struct tg3 *tp = netdev_priv(dev);
13794 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13795 netdev_warn(tp->dev, "Board does not support EEE!\n");
13796 return -EOPNOTSUPP;
13799 if (edata->advertised != tp->eee.advertised) {
13800 netdev_warn(tp->dev,
13801 "Direct manipulation of EEE advertisement is not supported\n");
13805 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13806 netdev_warn(tp->dev,
13807 "Maximal Tx Lpi timer supported is %#x(u)\n",
13808 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13814 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13815 tg3_warn_mgmt_link_flap(tp);
13817 if (netif_running(tp->dev)) {
13818 tg3_full_lock(tp, 0);
13821 tg3_full_unlock(tp);
13827 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13829 struct tg3 *tp = netdev_priv(dev);
13831 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13832 netdev_warn(tp->dev,
13833 "Board does not support EEE!\n");
13834 return -EOPNOTSUPP;
13841 static const struct ethtool_ops tg3_ethtool_ops = {
13842 .get_settings = tg3_get_settings,
13843 .set_settings = tg3_set_settings,
13844 .get_drvinfo = tg3_get_drvinfo,
13845 .get_regs_len = tg3_get_regs_len,
13846 .get_regs = tg3_get_regs,
13847 .get_wol = tg3_get_wol,
13848 .set_wol = tg3_set_wol,
13849 .get_msglevel = tg3_get_msglevel,
13850 .set_msglevel = tg3_set_msglevel,
13851 .nway_reset = tg3_nway_reset,
13852 .get_link = ethtool_op_get_link,
13853 .get_eeprom_len = tg3_get_eeprom_len,
13854 .get_eeprom = tg3_get_eeprom,
13855 .set_eeprom = tg3_set_eeprom,
13856 .get_ringparam = tg3_get_ringparam,
13857 .set_ringparam = tg3_set_ringparam,
13858 .get_pauseparam = tg3_get_pauseparam,
13859 .set_pauseparam = tg3_set_pauseparam,
13860 .self_test = tg3_self_test,
13861 .get_strings = tg3_get_strings,
13862 .set_phys_id = tg3_set_phys_id,
13863 .get_ethtool_stats = tg3_get_ethtool_stats,
13864 .get_coalesce = tg3_get_coalesce,
13865 .set_coalesce = tg3_set_coalesce,
13866 .get_sset_count = tg3_get_sset_count,
13867 .get_rxnfc = tg3_get_rxnfc,
13868 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13869 .get_rxfh_indir = tg3_get_rxfh_indir,
13870 .set_rxfh_indir = tg3_set_rxfh_indir,
13871 .get_channels = tg3_get_channels,
13872 .set_channels = tg3_set_channels,
13873 .get_ts_info = tg3_get_ts_info,
13874 .get_eee = tg3_get_eee,
13875 .set_eee = tg3_set_eee,
13878 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13879 struct rtnl_link_stats64 *stats)
13881 struct tg3 *tp = netdev_priv(dev);
13883 spin_lock_bh(&tp->lock);
13884 if (!tp->hw_stats) {
13885 spin_unlock_bh(&tp->lock);
13886 return &tp->net_stats_prev;
13889 tg3_get_nstats(tp, stats);
13890 spin_unlock_bh(&tp->lock);
13895 static void tg3_set_rx_mode(struct net_device *dev)
13897 struct tg3 *tp = netdev_priv(dev);
13899 if (!netif_running(dev))
13902 tg3_full_lock(tp, 0);
13903 __tg3_set_rx_mode(dev);
13904 tg3_full_unlock(tp);
13907 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13910 dev->mtu = new_mtu;
13912 if (new_mtu > ETH_DATA_LEN) {
13913 if (tg3_flag(tp, 5780_CLASS)) {
13914 netdev_update_features(dev);
13915 tg3_flag_clear(tp, TSO_CAPABLE);
13917 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13920 if (tg3_flag(tp, 5780_CLASS)) {
13921 tg3_flag_set(tp, TSO_CAPABLE);
13922 netdev_update_features(dev);
13924 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13928 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13930 struct tg3 *tp = netdev_priv(dev);
13932 bool reset_phy = false;
13934 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13937 if (!netif_running(dev)) {
13938 /* We'll just catch it later when the device is brought up. */
13941 tg3_set_mtu(dev, tp, new_mtu);
13947 tg3_netif_stop(tp);
13949 tg3_set_mtu(dev, tp, new_mtu);
13951 tg3_full_lock(tp, 1);
13953 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13955 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13956 * breaks all requests to 256 bytes.
13958 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13961 err = tg3_restart_hw(tp, reset_phy);
13964 tg3_netif_start(tp);
13966 tg3_full_unlock(tp);
13974 static const struct net_device_ops tg3_netdev_ops = {
13975 .ndo_open = tg3_open,
13976 .ndo_stop = tg3_close,
13977 .ndo_start_xmit = tg3_start_xmit,
13978 .ndo_get_stats64 = tg3_get_stats64,
13979 .ndo_validate_addr = eth_validate_addr,
13980 .ndo_set_rx_mode = tg3_set_rx_mode,
13981 .ndo_set_mac_address = tg3_set_mac_addr,
13982 .ndo_do_ioctl = tg3_ioctl,
13983 .ndo_tx_timeout = tg3_tx_timeout,
13984 .ndo_change_mtu = tg3_change_mtu,
13985 .ndo_fix_features = tg3_fix_features,
13986 .ndo_set_features = tg3_set_features,
13987 #ifdef CONFIG_NET_POLL_CONTROLLER
13988 .ndo_poll_controller = tg3_poll_controller,
13992 static void tg3_get_eeprom_size(struct tg3 *tp)
13994 u32 cursize, val, magic;
13996 tp->nvram_size = EEPROM_CHIP_SIZE;
13998 if (tg3_nvram_read(tp, 0, &magic) != 0)
14001 if ((magic != TG3_EEPROM_MAGIC) &&
14002 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14003 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14007 * Size the chip by reading offsets at increasing powers of two.
14008 * When we encounter our validation signature, we know the addressing
14009 * has wrapped around, and thus have our chip size.
14013 while (cursize < tp->nvram_size) {
14014 if (tg3_nvram_read(tp, cursize, &val) != 0)
14023 tp->nvram_size = cursize;
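/*
 * The sizing loop above relies on address wrap-around: reads past the
 * end of a small EEPROM alias back to offset 0, so the first offset
 * that reads back the magic signature marks the device size.  A
 * standalone sketch of the idea:
 */
#if 0	/* illustrative only -- helper and names are hypothetical */
static u32 nvram_probe_size_sketch(u32 magic, u32 max_size)
{
	u32 cursize, val;

	for (cursize = 0x10; cursize < max_size; cursize <<= 1) {
		if (nvram_read32(cursize, &val))	/* hypothetical reader */
			break;
		if (val == magic)	/* wrapped around to offset 0 */
			return cursize;	/* chip is cursize bytes */
	}
	return max_size;
}
#endif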
14026 static void tg3_get_nvram_size(struct tg3 *tp)
14030 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14033 /* Selfboot format */
14034 if (val != TG3_EEPROM_MAGIC) {
14035 tg3_get_eeprom_size(tp);
14039 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14041 /* This is confusing. We want to operate on the
14042 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14043 * call will read from NVRAM and byteswap the data
14044 * according to the byteswapping settings for all
14045 * other register accesses. This ensures the data we
14046 * want will always reside in the lower 16-bits.
14047 * However, the data in NVRAM is in LE format, which
14048 * means the data from the NVRAM read will always be
14049 * opposite the endianness of the CPU. The 16-bit
14050 * byteswap then brings the data to CPU endianness.
14052 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
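		/*
		 * Worked example: a CPU-endian read yielding 0x0002 in the
		 * low 16 bits becomes swab16() -> 0x0200 = 512, so the
		 * computed size is 512 * 1024 bytes, i.e. a 512KB part.
		 */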
14056 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14059 static void tg3_get_nvram_info(struct tg3 *tp)
14063 nvcfg1 = tr32(NVRAM_CFG1);
14064 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14065 tg3_flag_set(tp, FLASH);
14067 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14068 tw32(NVRAM_CFG1, nvcfg1);
14071 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14072 tg3_flag(tp, 5780_CLASS)) {
14073 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14074 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14075 tp->nvram_jedecnum = JEDEC_ATMEL;
14076 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14077 tg3_flag_set(tp, NVRAM_BUFFERED);
14079 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14080 tp->nvram_jedecnum = JEDEC_ATMEL;
14081 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14083 case FLASH_VENDOR_ATMEL_EEPROM:
14084 tp->nvram_jedecnum = JEDEC_ATMEL;
14085 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14086 tg3_flag_set(tp, NVRAM_BUFFERED);
14088 case FLASH_VENDOR_ST:
14089 tp->nvram_jedecnum = JEDEC_ST;
14090 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14091 tg3_flag_set(tp, NVRAM_BUFFERED);
14093 case FLASH_VENDOR_SAIFUN:
14094 tp->nvram_jedecnum = JEDEC_SAIFUN;
14095 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14097 case FLASH_VENDOR_SST_SMALL:
14098 case FLASH_VENDOR_SST_LARGE:
14099 tp->nvram_jedecnum = JEDEC_SST;
14100 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14104 tp->nvram_jedecnum = JEDEC_ATMEL;
14105 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14106 tg3_flag_set(tp, NVRAM_BUFFERED);
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
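/* Per-ASIC NVRAM decoders.  Each tg3_get_57xx_nvram_info() routine below
 * reads the NVRAM_CFG1 strapping for its chip family and fills in the
 * JEDEC vendor, the page size, and, where the strapping encodes it, the
 * device size.
 */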
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
14234 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14238 nvcfg1 = tr32(NVRAM_CFG1);
14240 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14241 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14242 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14243 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14244 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14245 tp->nvram_jedecnum = JEDEC_ATMEL;
14246 tg3_flag_set(tp, NVRAM_BUFFERED);
14247 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14249 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14250 tw32(NVRAM_CFG1, nvcfg1);
14252 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14253 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14254 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14255 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14256 tp->nvram_jedecnum = JEDEC_ATMEL;
14257 tg3_flag_set(tp, NVRAM_BUFFERED);
14258 tg3_flag_set(tp, FLASH);
14259 tp->nvram_pagesize = 264;
14261 case FLASH_5752VENDOR_ST_M45PE10:
14262 case FLASH_5752VENDOR_ST_M45PE20:
14263 case FLASH_5752VENDOR_ST_M45PE40:
14264 tp->nvram_jedecnum = JEDEC_ST;
14265 tg3_flag_set(tp, NVRAM_BUFFERED);
14266 tg3_flag_set(tp, FLASH);
14267 tp->nvram_pagesize = 256;
14272 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14274 u32 nvcfg1, protect = 0;
14276 nvcfg1 = tr32(NVRAM_CFG1);
14278 /* NVRAM protection for TPM */
14279 if (nvcfg1 & (1 << 27)) {
14280 tg3_flag_set(tp, PROTECTED_NVRAM);
14284 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14286 case FLASH_5761VENDOR_ATMEL_ADB021D:
14287 case FLASH_5761VENDOR_ATMEL_ADB041D:
14288 case FLASH_5761VENDOR_ATMEL_ADB081D:
14289 case FLASH_5761VENDOR_ATMEL_ADB161D:
14290 case FLASH_5761VENDOR_ATMEL_MDB021D:
14291 case FLASH_5761VENDOR_ATMEL_MDB041D:
14292 case FLASH_5761VENDOR_ATMEL_MDB081D:
14293 case FLASH_5761VENDOR_ATMEL_MDB161D:
14294 tp->nvram_jedecnum = JEDEC_ATMEL;
14295 tg3_flag_set(tp, NVRAM_BUFFERED);
14296 tg3_flag_set(tp, FLASH);
14297 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14298 tp->nvram_pagesize = 256;
14300 case FLASH_5761VENDOR_ST_A_M45PE20:
14301 case FLASH_5761VENDOR_ST_A_M45PE40:
14302 case FLASH_5761VENDOR_ST_A_M45PE80:
14303 case FLASH_5761VENDOR_ST_A_M45PE16:
14304 case FLASH_5761VENDOR_ST_M_M45PE20:
14305 case FLASH_5761VENDOR_ST_M_M45PE40:
14306 case FLASH_5761VENDOR_ST_M_M45PE80:
14307 case FLASH_5761VENDOR_ST_M_M45PE16:
14308 tp->nvram_jedecnum = JEDEC_ST;
14309 tg3_flag_set(tp, NVRAM_BUFFERED);
14310 tg3_flag_set(tp, FLASH);
14311 tp->nvram_pagesize = 256;
14316 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14319 case FLASH_5761VENDOR_ATMEL_ADB161D:
14320 case FLASH_5761VENDOR_ATMEL_MDB161D:
14321 case FLASH_5761VENDOR_ST_A_M45PE16:
14322 case FLASH_5761VENDOR_ST_M_M45PE16:
14323 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14325 case FLASH_5761VENDOR_ATMEL_ADB081D:
14326 case FLASH_5761VENDOR_ATMEL_MDB081D:
14327 case FLASH_5761VENDOR_ST_A_M45PE80:
14328 case FLASH_5761VENDOR_ST_M_M45PE80:
14329 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14331 case FLASH_5761VENDOR_ATMEL_ADB041D:
14332 case FLASH_5761VENDOR_ATMEL_MDB041D:
14333 case FLASH_5761VENDOR_ST_A_M45PE40:
14334 case FLASH_5761VENDOR_ST_M_M45PE40:
14335 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14337 case FLASH_5761VENDOR_ATMEL_ADB021D:
14338 case FLASH_5761VENDOR_ATMEL_MDB021D:
14339 case FLASH_5761VENDOR_ST_A_M45PE20:
14340 case FLASH_5761VENDOR_ST_M_M45PE20:
14341 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
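/* On the 57780 and 57765-class layouts the vendor field also encodes the
 * device density, so tp->nvram_size can usually be set here directly
 * without probing.
 */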
14354 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14358 nvcfg1 = tr32(NVRAM_CFG1);
14360 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14361 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14362 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14363 tp->nvram_jedecnum = JEDEC_ATMEL;
14364 tg3_flag_set(tp, NVRAM_BUFFERED);
14365 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14367 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14368 tw32(NVRAM_CFG1, nvcfg1);
14370 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14371 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14372 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14373 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14374 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14375 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14376 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14377 tp->nvram_jedecnum = JEDEC_ATMEL;
14378 tg3_flag_set(tp, NVRAM_BUFFERED);
14379 tg3_flag_set(tp, FLASH);
14381 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14382 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14383 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14384 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14385 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14387 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14388 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14389 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14391 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14392 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14393 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14397 case FLASH_5752VENDOR_ST_M45PE10:
14398 case FLASH_5752VENDOR_ST_M45PE20:
14399 case FLASH_5752VENDOR_ST_M45PE40:
14400 tp->nvram_jedecnum = JEDEC_ST;
14401 tg3_flag_set(tp, NVRAM_BUFFERED);
14402 tg3_flag_set(tp, FLASH);
14404 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14405 case FLASH_5752VENDOR_ST_M45PE10:
14406 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14408 case FLASH_5752VENDOR_ST_M45PE20:
14409 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14411 case FLASH_5752VENDOR_ST_M45PE40:
14412 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14417 tg3_flag_set(tp, NO_NVRAM);
14421 tg3_nvram_get_pagesize(tp, nvcfg1);
14422 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14423 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14427 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14431 nvcfg1 = tr32(NVRAM_CFG1);
14433 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14434 case FLASH_5717VENDOR_ATMEL_EEPROM:
14435 case FLASH_5717VENDOR_MICRO_EEPROM:
14436 tp->nvram_jedecnum = JEDEC_ATMEL;
14437 tg3_flag_set(tp, NVRAM_BUFFERED);
14438 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14440 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14441 tw32(NVRAM_CFG1, nvcfg1);
14443 case FLASH_5717VENDOR_ATMEL_MDB011D:
14444 case FLASH_5717VENDOR_ATMEL_ADB011B:
14445 case FLASH_5717VENDOR_ATMEL_ADB011D:
14446 case FLASH_5717VENDOR_ATMEL_MDB021D:
14447 case FLASH_5717VENDOR_ATMEL_ADB021B:
14448 case FLASH_5717VENDOR_ATMEL_ADB021D:
14449 case FLASH_5717VENDOR_ATMEL_45USPT:
14450 tp->nvram_jedecnum = JEDEC_ATMEL;
14451 tg3_flag_set(tp, NVRAM_BUFFERED);
14452 tg3_flag_set(tp, FLASH);
14454 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14455 case FLASH_5717VENDOR_ATMEL_MDB021D:
14456 /* Detect size with tg3_nvram_get_size() */
14458 case FLASH_5717VENDOR_ATMEL_ADB021B:
14459 case FLASH_5717VENDOR_ATMEL_ADB021D:
14460 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14463 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14467 case FLASH_5717VENDOR_ST_M_M25PE10:
14468 case FLASH_5717VENDOR_ST_A_M25PE10:
14469 case FLASH_5717VENDOR_ST_M_M45PE10:
14470 case FLASH_5717VENDOR_ST_A_M45PE10:
14471 case FLASH_5717VENDOR_ST_M_M25PE20:
14472 case FLASH_5717VENDOR_ST_A_M25PE20:
14473 case FLASH_5717VENDOR_ST_M_M45PE20:
14474 case FLASH_5717VENDOR_ST_A_M45PE20:
14475 case FLASH_5717VENDOR_ST_25USPT:
14476 case FLASH_5717VENDOR_ST_45USPT:
14477 tp->nvram_jedecnum = JEDEC_ST;
14478 tg3_flag_set(tp, NVRAM_BUFFERED);
14479 tg3_flag_set(tp, FLASH);
14481 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14482 case FLASH_5717VENDOR_ST_M_M25PE20:
14483 case FLASH_5717VENDOR_ST_M_M45PE20:
14484 /* Detect size with tg3_nvram_get_size() */
14486 case FLASH_5717VENDOR_ST_A_M25PE20:
14487 case FLASH_5717VENDOR_ST_A_M45PE20:
14488 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14491 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14496 tg3_flag_set(tp, NO_NVRAM);
14500 tg3_nvram_get_pagesize(tp, nvcfg1);
14501 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14502 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14505 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14507 u32 nvcfg1, nvmpinstrp;
14509 nvcfg1 = tr32(NVRAM_CFG1);
14510 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14512 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14513 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14514 tg3_flag_set(tp, NO_NVRAM);
14518 switch (nvmpinstrp) {
14519 case FLASH_5762_EEPROM_HD:
14520 nvmpinstrp = FLASH_5720_EEPROM_HD;
14522 case FLASH_5762_EEPROM_LD:
14523 nvmpinstrp = FLASH_5720_EEPROM_LD;
14525 case FLASH_5720VENDOR_M_ST_M45PE20:
14526 /* This pinstrap supports multiple sizes, so force it
14527 * to read the actual size from location 0xf0.
14529 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14534 switch (nvmpinstrp) {
14535 case FLASH_5720_EEPROM_HD:
14536 case FLASH_5720_EEPROM_LD:
14537 tp->nvram_jedecnum = JEDEC_ATMEL;
14538 tg3_flag_set(tp, NVRAM_BUFFERED);
14540 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14541 tw32(NVRAM_CFG1, nvcfg1);
14542 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14543 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14545 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14547 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14548 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14549 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14550 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14551 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14552 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14553 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14554 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14555 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14556 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14557 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14558 case FLASH_5720VENDOR_ATMEL_45USPT:
14559 tp->nvram_jedecnum = JEDEC_ATMEL;
14560 tg3_flag_set(tp, NVRAM_BUFFERED);
14561 tg3_flag_set(tp, FLASH);
14563 switch (nvmpinstrp) {
14564 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14565 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14566 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14567 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14569 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14570 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14571 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14572 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14574 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14575 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14576 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14579 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14580 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14584 case FLASH_5720VENDOR_M_ST_M25PE10:
14585 case FLASH_5720VENDOR_M_ST_M45PE10:
14586 case FLASH_5720VENDOR_A_ST_M25PE10:
14587 case FLASH_5720VENDOR_A_ST_M45PE10:
14588 case FLASH_5720VENDOR_M_ST_M25PE20:
14589 case FLASH_5720VENDOR_M_ST_M45PE20:
14590 case FLASH_5720VENDOR_A_ST_M25PE20:
14591 case FLASH_5720VENDOR_A_ST_M45PE20:
14592 case FLASH_5720VENDOR_M_ST_M25PE40:
14593 case FLASH_5720VENDOR_M_ST_M45PE40:
14594 case FLASH_5720VENDOR_A_ST_M25PE40:
14595 case FLASH_5720VENDOR_A_ST_M45PE40:
14596 case FLASH_5720VENDOR_M_ST_M25PE80:
14597 case FLASH_5720VENDOR_M_ST_M45PE80:
14598 case FLASH_5720VENDOR_A_ST_M25PE80:
14599 case FLASH_5720VENDOR_A_ST_M45PE80:
14600 case FLASH_5720VENDOR_ST_25USPT:
14601 case FLASH_5720VENDOR_ST_45USPT:
14602 tp->nvram_jedecnum = JEDEC_ST;
14603 tg3_flag_set(tp, NVRAM_BUFFERED);
14604 tg3_flag_set(tp, FLASH);
14606 switch (nvmpinstrp) {
14607 case FLASH_5720VENDOR_M_ST_M25PE20:
14608 case FLASH_5720VENDOR_M_ST_M45PE20:
14609 case FLASH_5720VENDOR_A_ST_M25PE20:
14610 case FLASH_5720VENDOR_A_ST_M45PE20:
14611 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14613 case FLASH_5720VENDOR_M_ST_M25PE40:
14614 case FLASH_5720VENDOR_M_ST_M45PE40:
14615 case FLASH_5720VENDOR_A_ST_M25PE40:
14616 case FLASH_5720VENDOR_A_ST_M45PE40:
14617 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14619 case FLASH_5720VENDOR_M_ST_M25PE80:
14620 case FLASH_5720VENDOR_M_ST_M45PE80:
14621 case FLASH_5720VENDOR_A_ST_M25PE80:
14622 case FLASH_5720VENDOR_A_ST_M45PE80:
14623 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14626 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14627 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14632 tg3_flag_set(tp, NO_NVRAM);
14636 tg3_nvram_get_pagesize(tp, nvcfg1);
14637 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14638 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14640 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14643 if (tg3_nvram_read(tp, 0, &val))
14646 if (val != TG3_EEPROM_MAGIC &&
14647 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14648 tg3_flag_set(tp, NO_NVRAM);
14652 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}
14663 tw32_f(GRC_EEPROM_ADDR,
14664 (EEPROM_ADDR_FSM_RESET |
14665 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14666 EEPROM_ADDR_CLKPERD_SHIFT)));
	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	       tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);
14675 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14676 tg3_asic_rev(tp) != ASIC_REV_5701) {
14677 tg3_flag_set(tp, NVRAM);
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);
14687 tp->nvram_size = 0;
14689 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14690 tg3_get_5752_nvram_info(tp);
14691 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14692 tg3_get_5755_nvram_info(tp);
14693 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14694 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14695 tg3_asic_rev(tp) == ASIC_REV_5785)
14696 tg3_get_5787_nvram_info(tp);
14697 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14698 tg3_get_5761_nvram_info(tp);
14699 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14700 tg3_get_5906_nvram_info(tp);
14701 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14702 tg3_flag(tp, 57765_CLASS))
14703 tg3_get_57780_nvram_info(tp);
14704 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14705 tg3_asic_rev(tp) == ASIC_REV_5719)
14706 tg3_get_5717_nvram_info(tp);
14707 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14708 tg3_asic_rev(tp) == ASIC_REV_5762)
14709 tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);
14713 if (tp->nvram_size == 0)
14714 tg3_get_nvram_size(tp);
14716 tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
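/* Some early boards carry no valid signature in their EEPROM, so the PHY
 * type must be looked up from the PCI subsystem IDs instead.  A phy_id
 * of 0 in this table denotes a serdes (fiber) board.
 */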
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
14732 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14733 /* Broadcom boards. */
14734 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14735 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14736 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14737 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14738 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14739 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14740 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14741 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14742 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14743 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14744 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14745 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14746 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14747 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14748 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14749 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14750 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14751 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14752 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14753 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14754 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14755 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
14759 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14760 { TG3PCI_SUBVENDOR_ID_3COM,
14761 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14762 { TG3PCI_SUBVENDOR_ID_3COM,
14763 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14764 { TG3PCI_SUBVENDOR_ID_3COM,
14765 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14766 { TG3PCI_SUBVENDOR_ID_3COM,
14767 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
	/* Dell boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
14771 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14772 { TG3PCI_SUBVENDOR_ID_DELL,
14773 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14774 { TG3PCI_SUBVENDOR_ID_DELL,
14775 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14776 { TG3PCI_SUBVENDOR_ID_DELL,
14777 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14779 /* Compaq boards. */
14780 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14781 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14782 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14783 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14784 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14785 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14786 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14787 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14788 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14789 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}

	return NULL;
}
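/* Pull the bootcode-initialized hardware configuration out of NIC SRAM
 * (or out of the VCPU shadow registers on the 5906): the PHY ID, serdes
 * vs. copper, LED mode, and the WOL/ASF/APE enables.
 */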
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
14814 tp->phy_id = TG3_PHY_ID_INVALID;
14815 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14817 /* Assume an onboard device and WOL capable by default. */
14818 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14819 tg3_flag_set(tp, WOL_CAP);
14821 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14822 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14823 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
14826 val = tr32(VCPU_CFGSHDW);
14827 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14828 tg3_flag_set(tp, ASPM_WORKAROUND);
14829 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14830 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14831 tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}
14837 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14838 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14839 u32 nic_cfg, led_cfg;
14840 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14841 int eeprom_phy_serdes = 0;
14843 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14844 tp->nic_sram_data_cfg = nic_cfg;
14846 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14847 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14848 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14849 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14850 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14851 (ver > 0) && (ver < 0x100))
14852 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14854 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14855 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14857 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14858 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14859 eeprom_phy_serdes = 1;
14861 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14862 if (nic_phy_id != 0) {
14863 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14864 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14866 eeprom_phy_id = (id1 >> 16) << 10;
14867 eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;
14872 tp->phy_id = eeprom_phy_id;
14873 if (eeprom_phy_serdes) {
14874 if (!tg3_flag(tp, 5705_PLUS))
14875 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}
		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}
14929 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14930 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14931 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14932 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14934 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14935 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14937 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14938 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14939 if ((tp->pdev->subsystem_vendor ==
14940 PCI_VENDOR_ID_ARIMA) &&
14941 (tp->pdev->subsystem_device == 0x205a ||
14942 tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
14949 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14950 tg3_flag_set(tp, ENABLE_ASF);
14951 if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
14955 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14956 tg3_flag(tp, 5750_PLUS))
14957 tg3_flag_set(tp, ENABLE_APE);
14959 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14960 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14961 tg3_flag_clear(tp, WOL_CAP);
14963 if (tg3_flag(tp, WOL_CAP) &&
14964 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14965 tg3_flag_set(tp, WOL_ENABLE);
14966 device_set_wakeup_enable(&tp->pdev->dev, true);
14969 if (cfg2 & (1 << 17))
14970 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
		/* Serdes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14977 if ((tg3_flag(tp, 57765_PLUS) ||
14978 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14979 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14980 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14981 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;
14986 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14987 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14988 !tg3_flag(tp, 57765_PLUS) &&
14989 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14990 tg3_flag_set(tp, ASPM_WORKAROUND);
14991 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14992 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14993 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}
14997 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14998 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14999 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15000 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15001 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}

done:
15005 if (tg3_flag(tp, WOL_CAP))
15006 device_set_wakeup_enable(&tp->pdev->dev,
15007 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
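/* Read one 32-bit word from the APE OTP region via the APE OTP command
 * interface.  The completion poll is bounded, so a wedged command makes
 * this return -EBUSY instead of hanging.
 */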
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
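/* Seed the link configuration with everything this PHY can advertise;
 * autoneg is on by default and the active speed/duplex stay unknown
 * until a link is negotiated.
 */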
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
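/* Identify the PHY.  The ID is read from the MII registers when that is
 * safe, and otherwise taken from the EEPROM-provided value or, failing
 * that, from the subsystem-ID table above.
 */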
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;
15127 /* flow control autonegotiation is default behavior */
15128 tg3_flag_set(tp, PAUSE_AUTONEG);
15129 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15131 if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}
15148 if (!tg3_flag(tp, ENABLE_ASF) &&
15149 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15150 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15151 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15152 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15154 if (tg3_flag(tp, USE_PHYLIB))
15155 return tg3_phy_init(tp);
	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * first to the hard-coded PHY_ID table and, failing that,
		 * to the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}
15179 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15180 tp->phy_id = hw_phy_id;
15181 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15182 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* So far we have seen the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more.  For now, just assume the
				 * phy is supported when it is connected to
				 * an SSB core.
				 */
				return -ENODEV;
			}
		}

		if (!tp->phy_id ||
		    tp->phy_id == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
	}
15216 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15217 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15218 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15219 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15220 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15221 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15222 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15223 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15224 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15225 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15227 tp->eee.supported = SUPPORTED_100baseT_Full |
15228 SUPPORTED_1000baseT_Full;
15229 tp->eee.advertised = ADVERTISED_100baseT_Full |
15230 ADVERTISED_1000baseT_Full;
15231 tp->eee.eee_enabled = 1;
15232 tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}
15236 tg3_phy_init_link_config(tp);
	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
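/* Parse the PCI VPD block for the board part number and, on Dell (PCI
 * vendor id 1028) boards, a vendor-specific firmware version string.
 */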
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;
15291 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15292 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15293 i += PCI_VPD_LRDT_TAG_SIZE;
15295 if (block_end > vpdlen)
15296 goto out_not_found;
15298 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15299 PCI_VPD_RO_KEYWORD_MFR_ID);
15301 len = pci_vpd_info_field_size(&vpd_data[j]);
15303 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15304 if (j + len > block_end || len != 4 ||
15305 memcmp(&vpd_data[j], "1028", 4))
15308 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15309 PCI_VPD_RO_KEYWORD_VENDOR0);
15313 len = pci_vpd_info_field_size(&vpd_data[j]);
15315 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15316 if (j + len > block_end)
15319 if (len >= sizeof(tp->fw_ver))
15320 len = sizeof(tp->fw_ver) - 1;
15321 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15322 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15327 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15328 PCI_VPD_RO_KEYWORD_PARTNO);
15330 goto out_not_found;
15332 len = pci_vpd_info_field_size(&vpd_data[i]);
15334 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15335 if (len > TG3_BPN_SIZE ||
15336 (len + i) > vpdlen)
15337 goto out_not_found;
15339 memcpy(tp->board_part_number, &vpd_data[i], len);
15343 if (tp->board_part_number[0])
15347 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15348 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15349 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15350 strcpy(tp->board_part_number, "BCM5717");
15351 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15352 strcpy(tp->board_part_number, "BCM5718");
15355 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15356 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15357 strcpy(tp->board_part_number, "BCM57780");
15358 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15359 strcpy(tp->board_part_number, "BCM57760");
15360 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15361 strcpy(tp->board_part_number, "BCM57790");
15362 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15363 strcpy(tp->board_part_number, "BCM57788");
15366 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15367 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15368 strcpy(tp->board_part_number, "BCM57761");
15369 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15370 strcpy(tp->board_part_number, "BCM57765");
15371 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15372 strcpy(tp->board_part_number, "BCM57781");
15373 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15374 strcpy(tp->board_part_number, "BCM57785");
15375 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15376 strcpy(tp->board_part_number, "BCM57791");
15377 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15378 strcpy(tp->board_part_number, "BCM57795");
15381 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15382 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15383 strcpy(tp->board_part_number, "BCM57762");
15384 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15385 strcpy(tp->board_part_number, "BCM57766");
15386 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15387 strcpy(tp->board_part_number, "BCM57782");
15388 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15389 strcpy(tp->board_part_number, "BCM57786");
15392 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15393 strcpy(tp->board_part_number, "BCM95906");
15396 strcpy(tp->board_part_number, "none");
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0x20000000)
		return 0;

	return 1;
}
15413 static void tg3_read_bc_ver(struct tg3 *tp)
15415 u32 val, offset, start, ver_offset;
15417 bool newver = false;
15419 if (tg3_nvram_read(tp, 0xc, &offset) ||
15420 tg3_nvram_read(tp, 0x4, &start))
15423 offset = tg3_nvram_logical_addr(tp, offset);
15425 if (tg3_nvram_read(tp, offset, &val))
15428 if ((val & 0xfc000000) == 0x0c000000) {
15429 if (tg3_nvram_read(tp, offset + 4, &val))
15436 dst_off = strlen(tp->fw_ver);
15439 if (TG3_VER_SIZE - dst_off < 16 ||
15440 tg3_nvram_read(tp, offset + 8, &ver_offset))
15443 offset = offset + ver_offset - start;
15444 for (i = 0; i < 16; i += 4) {
15446 if (tg3_nvram_read_be32(tp, offset + i, &v))
15449 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15454 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15457 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15458 TG3_NVM_BCVER_MAJSFT;
15459 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15460 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15461 "v%d.%02d", major, minor);
15465 static void tg3_read_hwsb_ver(struct tg3 *tp)
15467 u32 val, major, minor;
15469 /* Use native endian representation */
15470 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15473 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15474 TG3_NVM_HWSB_CFG1_MAJSFT;
15475 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15476 TG3_NVM_HWSB_CFG1_MINSFT;
15478 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15481 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15483 u32 offset, major, minor, build;
15485 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15487 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15490 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15491 case TG3_EEPROM_SB_REVISION_0:
15492 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15494 case TG3_EEPROM_SB_REVISION_2:
15495 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15497 case TG3_EEPROM_SB_REVISION_3:
15498 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15500 case TG3_EEPROM_SB_REVISION_4:
15501 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15503 case TG3_EEPROM_SB_REVISION_5:
15504 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15506 case TG3_EEPROM_SB_REVISION_6:
15507 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15513 if (tg3_nvram_read(tp, offset, &val))
15516 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15517 TG3_EEPROM_SB_EDH_BLD_SHFT;
15518 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15519 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15520 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15522 if (minor > 99 || build > 26)
15525 offset = strlen(tp->fw_ver);
15526 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15527 " v%d.%02d", major, minor);
15530 offset = strlen(tp->fw_ver);
15531 if (offset < TG3_VER_SIZE - 1)
15532 tp->fw_ver[offset] = 'a' + build - 1;
15536 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15538 u32 val, offset, start;
15541 for (offset = TG3_NVM_DIR_START;
15542 offset < TG3_NVM_DIR_END;
15543 offset += TG3_NVM_DIRENT_SIZE) {
15544 if (tg3_nvram_read(tp, offset, &val))
15547 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15551 if (offset == TG3_NVM_DIR_END)
15554 if (!tg3_flag(tp, 5705_PLUS))
15555 start = 0x08000000;
15556 else if (tg3_nvram_read(tp, offset - 4, &start))
15559 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15560 !tg3_fw_img_is_valid(tp, offset) ||
15561 tg3_nvram_read(tp, offset + 8, &val))
15564 offset += val - start;
15566 vlen = strlen(tp->fw_ver);
15568 tp->fw_ver[vlen++] = ',';
15569 tp->fw_ver[vlen++] = ' ';
15571 for (i = 0; i < 4; i++) {
15573 if (tg3_nvram_read_be32(tp, offset, &v))
15576 offset += sizeof(v);
15578 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15579 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15583 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15588 static void tg3_probe_ncsi(struct tg3 *tp)
15592 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15593 if (apedata != APE_SEG_SIG_MAGIC)
15596 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15597 if (!(apedata & APE_FW_STATUS_READY))
15600 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15601 tg3_flag_set(tp, APE_HAS_NCSI);
15604 static void tg3_read_dash_ver(struct tg3 *tp)
15610 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15612 if (tg3_flag(tp, APE_HAS_NCSI))
15614 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15619 vlen = strlen(tp->fw_ver);
15621 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15623 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15624 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15625 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15626 (apedata & APE_FW_VERSION_BLDMSK));
15629 static void tg3_read_otp_ver(struct tg3 *tp)
15633 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15636 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15637 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15638 TG3_OTP_MAGIC0_VALID(val)) {
15639 u64 val64 = (u64) val << 32 | val2;
15643 for (i = 0; i < 7; i++) {
15644 if ((val64 & 0xff) == 0)
15646 ver = val64 & 0xff;
15649 vlen = strlen(tp->fw_ver);
15650 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15654 static void tg3_read_fw_ver(struct tg3 *tp)
15657 bool vpd_vers = false;
15659 if (tp->fw_ver[0] != 0)
15662 if (tg3_flag(tp, NO_NVRAM)) {
15663 strcat(tp->fw_ver, "sb");
15664 tg3_read_otp_ver(tp);
15668 if (tg3_nvram_read(tp, 0, &val))
15671 if (val == TG3_EEPROM_MAGIC)
15672 tg3_read_bc_ver(tp);
15673 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15674 tg3_read_sb_ver(tp, val);
15675 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15676 tg3_read_hwsb_ver(tp);
15678 if (tg3_flag(tp, ENABLE_ASF)) {
15679 if (tg3_flag(tp, ENABLE_APE)) {
15680 tg3_probe_ncsi(tp);
15682 tg3_read_dash_ver(tp);
15683 } else if (!vpd_vers) {
15684 tg3_read_mgmtfw_ver(tp);
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;

	return TG3_RX_RET_MAX_SIZE_5705;
}
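/* Host bridges known to reorder posted writes.  When one of these is
 * present and the NIC is not PCIe, every mailbox write is read back to
 * force ordering; see the MBOX_WRITE_REORDER handling below.
 */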
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}
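/* Resolve the chip revision.  Newer devices report a placeholder in the
 * usual MISC_HOST_CTRL field and supply the real ASIC revision through a
 * product-ID config register, which then drives the 57xx-class
 * capability flags set below.
 */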
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);
15747 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15748 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15749 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15750 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15751 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15752 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15753 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15754 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15755 reg = TG3PCI_GEN2_PRODID_ASICREV;
15756 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15757 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15758 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15759 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15760 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15761 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15762 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15763 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15764 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15765 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15766 reg = TG3PCI_GEN15_PRODID_ASICREV;
15768 reg = TG3PCI_PRODID_ASICREV;
		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}
	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
15776 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15777 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15779 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15780 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15782 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15783 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15784 tg3_asic_rev(tp) == ASIC_REV_5720)
15785 tg3_flag_set(tp, 5717_PLUS);
15787 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15788 tg3_asic_rev(tp) == ASIC_REV_57766)
15789 tg3_flag_set(tp, 57765_CLASS);
15791 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15792 tg3_asic_rev(tp) == ASIC_REV_5762)
15793 tg3_flag_set(tp, 57765_PLUS);
15795 /* Intentionally exclude ASIC_REV_5906 */
15796 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15797 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15798 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15799 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15800 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15801 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15802 tg3_flag(tp, 57765_PLUS))
15803 tg3_flag_set(tp, 5755_PLUS);
15805 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15806 tg3_asic_rev(tp) == ASIC_REV_5714)
15807 tg3_flag_set(tp, 5780_CLASS);
15809 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15810 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15811 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15812 tg3_flag(tp, 5755_PLUS) ||
15813 tg3_flag(tp, 5780_CLASS))
15814 tg3_flag_set(tp, 5750_PLUS);
15816 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15817 tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
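/* Decide whether this device is limited to 10/100 operation, whether by
 * board-ID strapping, by a FET-style phy, or by the driver_data flags in
 * the PCI ID table entry.
 */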
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;
	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
15858 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15859 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15860 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15862 /* Important! -- Make sure register accesses are byteswapped
15863 * correctly. Also, for those chips that require it, make
15864 * sure that indirect register accesses are enabled before
15865 * the first operation.
15867 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15869 tp->misc_host_ctrl |= (misc_ctrl_reg &
15870 MISC_HOST_CTRL_CHIPREV);
15871 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15872 tp->misc_host_ctrl);
15874 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15876 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15877 * we need to disable memory and use config. cycles
15878 * only to access all registers. The 5702/03 chips
15879 * can mistakenly decode the special cycles from the
15880 * ICH chipsets as memory write cycles, causing corruption
15881 * of register and memory space. Only certain ICH bridges
15882 * will drive special cycles with non-zero data during the
15883 * address phase which can fall within the 5703's address
15884 * range. This is not an ICH bug as the PCI spec allows
15885 * non-zero address during special cycles. However, only
15886 * these ICH bridges are known to drive non-zero addresses
15887 * during special cycles.
15889 * Since special cycles do not cross PCI bridges, we only
15890 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
15893 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15894 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
15910 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15911 struct pci_dev *bridge = NULL;
		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit.  This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
15972 if (tg3_flag(tp, 5780_CLASS)) {
15973 tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
15983 (bridge->subordinate->number <=
15984 tp->pdev->bus->number) &&
15985 (bridge->subordinate->busn_res.end >=
15986 tp->pdev->bus->number)) {
15987 tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}
15994 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15995 tg3_asic_rev(tp) == ASIC_REV_5714)
15996 tp->pdev_peer = tg3_find_peer(tp);
15998 /* Determine TSO capabilities */
15999 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16000 ; /* Do nothing. HW bug. */
16001 else if (tg3_flag(tp, 57765_PLUS))
16002 tg3_flag_set(tp, HW_TSO_3);
16003 else if (tg3_flag(tp, 5755_PLUS) ||
16004 tg3_asic_rev(tp) == ASIC_REV_5906)
16005 tg3_flag_set(tp, HW_TSO_2);
16006 else if (tg3_flag(tp, 5750_PLUS)) {
16007 tg3_flag_set(tp, HW_TSO_1);
16008 tg3_flag_set(tp, TSO_BUG);
16009 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16010 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16011 tg3_flag_clear(tp, TSO_BUG);
16012 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16013 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16014 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16015 tg3_flag_set(tp, FW_TSO);
16016 tg3_flag_set(tp, TSO_BUG);
16017 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16018 tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}
16023 /* Selectively allow TSO based on operating conditions */
16024 if (tg3_flag(tp, HW_TSO_1) ||
16025 tg3_flag(tp, HW_TSO_2) ||
16026 tg3_flag(tp, HW_TSO_3) ||
16027 tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}
16039 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16040 tp->fw_needed = FIRMWARE_TG3;
16042 if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;

	tp->irq_max = 1;
16047 if (tg3_flag(tp, 5750_PLUS)) {
16048 tg3_flag_set(tp, SUPPORT_MSI);
16049 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16050 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16051 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16052 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16053 tp->pdev_peer == tp->pdev))
16054 tg3_flag_clear(tp, SUPPORT_MSI);
16056 if (tg3_flag(tp, 5755_PLUS) ||
16057 tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}
16061 if (tg3_flag(tp, 57765_PLUS)) {
16062 tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
16069 if (tp->irq_max > 1) {
16070 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16071 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16073 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16074 tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}
16078 if (tg3_flag(tp, 5755_PLUS) ||
16079 tg3_asic_rev(tp) == ASIC_REV_5906)
16080 tg3_flag_set(tp, SHORT_DMA_BUG);
16082 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16083 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16085 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16086 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16087 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16088 tg3_asic_rev(tp) == ASIC_REV_5762)
16089 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16091 if (tg3_flag(tp, 57765_PLUS) &&
16092 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16093 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16095 if (!tg3_flag(tp, 5705_PLUS) ||
16096 tg3_flag(tp, 5780_CLASS) ||
16097 tg3_flag(tp, USE_JUMBO_BDFLAG))
16098 tg3_flag_set(tp, JUMBO_CAPABLE);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);
16108 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16109 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16110 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16111 tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
16114 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16115 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16116 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16117 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16118 tg3_flag_set(tp, CLKREQ_BUG);
16119 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
16122 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
16127 tg3_flag_set(tp, PCI_EXPRESS);
16128 } else if (!tg3_flag(tp, 5705_PLUS) ||
16129 tg3_flag(tp, 5780_CLASS)) {
16130 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16131 if (!tp->pcix_cap) {
16132 dev_err(&tp->pdev->dev,
16133 "Cannot find PCI-X capability, aborting\n");
16137 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}
16141 /* If we have an AMD 762 or VIA K8T800 chipset, write
16142 * reordering to the mailbox registers done by the host
16143 * controller can cause major troubles. We read back from
16144 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
16147 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16148 !tg3_flag(tp, PCI_EXPRESS))
16149 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16151 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16152 &tp->pci_cacheline_sz);
16153 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16154 &tp->pci_lat_timer);
16155 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16156 tp->pci_lat_timer < 64) {
16157 tp->pci_lat_timer = 64;
16158 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16159 tp->pci_lat_timer);
16162 /* Important! -- It is critical that the PCI-X hw workaround
16163 * situation is decided before the first MMIO register access.
16165 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16166 /* 5700 BX chips need to have their TX producer index
16167 * mailboxes written twice to workaround a bug.
16169 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16171 /* If we are in PCI-X mode, enable register write workaround.
16173 * The workaround is to use indirect register accesses
16174 * for all chip writes not to mailbox registers.
16176 if (tg3_flag(tp, PCIX_MODE)) {
16179 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16181 /* The chip can have its power management PCI config
16182 * space registers clobbered due to this bug.
16183 * So explicitly force the chip into D0 here.
16185 pci_read_config_dword(tp->pdev,
16186 tp->pdev->pm_cap + PCI_PM_CTRL,
16188 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16189 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16190 pci_write_config_dword(tp->pdev,
16191 tp->pdev->pm_cap + PCI_PM_CTRL,
16194 /* Also, force SERR#/PERR# in PCI command. */
16195 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16196 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16197 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16201 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16202 tg3_flag_set(tp, PCI_HIGH_SPEED);
16203 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16204 tg3_flag_set(tp, PCI_32BIT);
16206 /* Chip-specific fixup from Broadcom driver */
16207 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16208 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16209 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16210 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16213 /* Default fast path register access methods */
16214 tp->read32 = tg3_read32;
16215 tp->write32 = tg3_write32;
16216 tp->read32_mbox = tg3_read32;
16217 tp->write32_mbox = tg3_write32;
16218 tp->write32_tx_mbox = tg3_write32;
16219 tp->write32_rx_mbox = tg3_write32;
16221 /* Various workaround register access methods */
16222 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16223 tp->write32 = tg3_write_indirect_reg32;
16224 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16225 (tg3_flag(tp, PCI_EXPRESS) &&
16226 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16228 * Back-to-back register writes can cause problems on these
16229 * chips; the workaround is to read back all reg writes
16230 * except those to mailbox regs.
16232 * See tg3_write_indirect_reg32().
16234 tp->write32 = tg3_write_flush_reg32;
16237 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16238 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16239 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16240 tp->write32_rx_mbox = tg3_write_flush_reg32;
16243 if (tg3_flag(tp, ICH_WORKAROUND)) {
16244 tp->read32 = tg3_read_indirect_reg32;
16245 tp->write32 = tg3_write_indirect_reg32;
16246 tp->read32_mbox = tg3_read_indirect_mbox;
16247 tp->write32_mbox = tg3_write_indirect_mbox;
16248 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16249 tp->write32_rx_mbox = tg3_write_indirect_mbox;
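/* Every register and mailbox access now goes through the indirect
 * PCI config-space window, so memory-space decoding on the device
 * is no longer needed and is switched off below.
 */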
16254 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16255 pci_cmd &= ~PCI_COMMAND_MEMORY;
16256 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16258 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16259 tp->read32_mbox = tg3_read32_mbox_5906;
16260 tp->write32_mbox = tg3_write32_mbox_5906;
16261 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16262 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16265 if (tp->write32 == tg3_write_indirect_reg32 ||
16266 (tg3_flag(tp, PCIX_MODE) &&
16267 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16268 tg3_asic_rev(tp) == ASIC_REV_5701)))
16269 tg3_flag_set(tp, SRAM_USE_CONFIG);
16271 /* The memory arbiter has to be enabled in order for SRAM accesses
16272 * to succeed. Normally on powerup the tg3 chip firmware will make
16273 * sure it is enabled, but other entities such as system netboot
16274 * code might disable it.
16276 val = tr32(MEMARB_MODE);
16277 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
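/* Work out which PCI function this port is.  devfn is the default
 * answer, but PCI-X dual-MAC parts report it in the PCI-X status
 * register and 5717/5719/5720 report it through CPMU status.
 */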
16279 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16280 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16281 tg3_flag(tp, 5780_CLASS)) {
16282 if (tg3_flag(tp, PCIX_MODE)) {
16283 pci_read_config_dword(tp->pdev,
16284 tp->pcix_cap + PCI_X_STATUS,
16286 tp->pci_fn = val & 0x7;
16288 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16289 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16290 tg3_asic_rev(tp) == ASIC_REV_5720) {
16291 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16292 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16293 val = tr32(TG3_CPMU_STATUS);
16295 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16296 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16298 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16299 TG3_CPMU_STATUS_FSHFT_5719;
16302 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16303 tp->write32_tx_mbox = tg3_write_flush_reg32;
16304 tp->write32_rx_mbox = tg3_write_flush_reg32;
16307 /* Get eeprom hw config before calling tg3_set_power_state().
16308 * In particular, the TG3_FLAG_IS_NIC flag must be
16309 * determined before calling tg3_set_power_state() so that
16310 * we know whether or not to switch out of Vaux power.
16311 * When the flag is set, it means that GPIO1 is used for eeprom
16312 * write protect and also implies that it is a LOM where GPIOs
16313 * are not used to switch power.
16315 tg3_get_eeprom_hw_cfg(tp);
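/* Firmware-based TSO runs on the same on-chip CPU as the ASF
 * management firmware, so the two cannot be used together; drop
 * the TSO capability and the firmware request when ASF is enabled.
 */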
16317 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16318 tg3_flag_clear(tp, TSO_CAPABLE);
16319 tg3_flag_clear(tp, TSO_BUG);
16320 tp->fw_needed = NULL;
16323 if (tg3_flag(tp, ENABLE_APE)) {
16324 /* Allow reads and writes to the
16325 * APE register and memory space.
16327 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16328 PCISTATE_ALLOW_APE_SHMEM_WR |
16329 PCISTATE_ALLOW_APE_PSPACE_WR;
16330 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16333 tg3_ape_lock_init(tp);
16336 /* Set up tp->grc_local_ctrl before calling
16337 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16338 * will bring 5700's external PHY out of reset.
16339 * It is also used as eeprom write protect on LOMs.
16341 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16342 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16343 tg3_flag(tp, EEPROM_WRITE_PROT))
16344 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16345 GRC_LCLCTRL_GPIO_OUTPUT1);
16346 /* Unused GPIO3 must be driven as output on 5752 because there
16347 * are no pull-up resistors on unused GPIO pins.
16349 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16350 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16352 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16353 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16354 tg3_flag(tp, 57765_CLASS))
16355 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16357 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16358 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16359 /* Turn off the debug UART. */
16360 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16361 if (tg3_flag(tp, IS_NIC))
16362 /* Keep VMain power. */
16363 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16364 GRC_LCLCTRL_GPIO_OUTPUT0;
16367 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16368 tp->grc_local_ctrl |=
16369 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16371 /* Switch out of Vaux if it is a NIC */
16372 tg3_pwrsrc_switch_to_vmain(tp);
16374 /* Derive initial jumbo mode from MTU assigned in
16375 * ether_setup() via the alloc_etherdev() call
16377 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16378 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16380 /* Determine WakeOnLan speed to use. */
16381 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16382 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16383 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16384 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16385 tg3_flag_clear(tp, WOL_SPEED_100MB);
16387 tg3_flag_set(tp, WOL_SPEED_100MB);
16390 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16391 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16393 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16394 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16395 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16396 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16397 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16398 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16399 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16400 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16402 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16403 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16404 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16405 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16406 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16408 if (tg3_flag(tp, 5705_PLUS) &&
16409 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16410 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16411 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16412 !tg3_flag(tp, 57765_PLUS)) {
16413 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16414 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16415 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16416 tg3_asic_rev(tp) == ASIC_REV_5761) {
16417 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16418 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16419 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16420 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16421 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16423 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16426 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16427 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16428 tp->phy_otp = tg3_read_otp_phycfg(tp);
16429 if (tp->phy_otp == 0)
16430 tp->phy_otp = TG3_OTP_DEFAULT;
16433 if (tg3_flag(tp, CPMU_PRESENT))
16434 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16436 tp->mi_mode = MAC_MI_MODE_BASE;
16438 tp->coalesce_mode = 0;
16439 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16440 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16441 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16443 /* Set these bits to enable statistics workaround. */
16444 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16445 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16446 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16447 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16448 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16451 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16452 tg3_asic_rev(tp) == ASIC_REV_57780)
16453 tg3_flag_set(tp, USE_PHYLIB);
16455 err = tg3_mdio_init(tp);
16459 /* Initialize data/descriptor byte/word swapping. */
16460 val = tr32(GRC_MODE);
16461 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16462 tg3_asic_rev(tp) == ASIC_REV_5762)
16463 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16464 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16465 GRC_MODE_B2HRX_ENABLE |
16466 GRC_MODE_HTX2B_ENABLE |
16467 GRC_MODE_HOST_STACKUP);
16469 val &= GRC_MODE_HOST_STACKUP;
16471 tw32(GRC_MODE, val | tp->grc_mode);
16473 tg3_switch_clocks(tp);
16475 /* Clear this out for sanity. */
16476 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16478 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16479 tw32(TG3PCI_REG_BASE_ADDR, 0);
16481 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16483 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16484 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16485 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16486 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16487 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16488 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16489 void __iomem *sram_base;
16491 /* Write some dummy words into the SRAM status block
16492 * area and see if it reads back correctly. If the return
16493 * value is bad, force enable the PCIX workaround.
16495 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16497 writel(0x00000000, sram_base);
16498 writel(0x00000000, sram_base + 4);
16499 writel(0xffffffff, sram_base + 4);
16500 if (readl(sram_base) != 0x00000000)
16501 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16506 tg3_nvram_init(tp);
16508 /* If the device has an NVRAM, no need to load patch firmware */
16509 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16510 !tg3_flag(tp, NO_NVRAM))
16511 tp->fw_needed = NULL;
16513 grc_misc_cfg = tr32(GRC_MISC_CFG);
16514 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16516 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16517 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16518 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16519 tg3_flag_set(tp, IS_5788);
16521 if (!tg3_flag(tp, IS_5788) &&
16522 tg3_asic_rev(tp) != ASIC_REV_5700)
16523 tg3_flag_set(tp, TAGGED_STATUS);
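/* In tagged-status mode each status block update carries a tag that
 * the driver echoes back when acknowledging the interrupt, so stale
 * status blocks can be detected.  Clear the coalescing tick counters
 * on BD events and advertise the mode through MISC_HOST_CTRL.
 */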
16524 if (tg3_flag(tp, TAGGED_STATUS)) {
16525 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16526 HOSTCC_MODE_CLRTICK_TXBD);
16528 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16529 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16530 tp->misc_host_ctrl);
16533 /* Preserve the APE MAC_MODE bits */
16534 if (tg3_flag(tp, ENABLE_APE))
16535 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16539 if (tg3_10_100_only_device(tp, ent))
16540 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16542 err = tg3_phy_probe(tp);
16544 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16545 /* ... but do not return immediately ... */
16550 tg3_read_fw_ver(tp);
16552 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16553 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16555 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16556 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16558 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16561 /* 5700 {AX,BX} chips have a broken status block link
16562 * change bit implementation, so we must use the
16563 * status register in those cases.
16565 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16566 tg3_flag_set(tp, USE_LINKCHG_REG);
16568 tg3_flag_clear(tp, USE_LINKCHG_REG);
16570 /* The led_ctrl is set during tg3_phy_probe; here we might
16571 * have to force the link status polling mechanism based
16572 * upon subsystem IDs.
16574 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16575 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16576 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16577 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16578 tg3_flag_set(tp, USE_LINKCHG_REG);
16581 /* For all SERDES we poll the MAC status register. */
16582 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16583 tg3_flag_set(tp, POLL_SERDES);
16585 tg3_flag_clear(tp, POLL_SERDES);
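/* The 5701 in PCI-X mode cannot DMA receive packets to buffers that
 * are offset by 2 bytes, so the usual NET_IP_ALIGN offset is dropped
 * there; and when the CPU cannot do unaligned loads cheaply, every
 * packet is copied so the stack sees an aligned IP header.
 */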
16587 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16588 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16589 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16590 tg3_flag(tp, PCIX_MODE)) {
16591 tp->rx_offset = NET_SKB_PAD;
16592 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16593 tp->rx_copy_thresh = ~(u16)0;
16597 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16598 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16599 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16601 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16603 /* Increment the rx prod index on the rx std ring by at most
16604 * 8 for these chips to work around hw errata.
16606 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16607 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16608 tg3_asic_rev(tp) == ASIC_REV_5755)
16609 tp->rx_std_max_post = 8;
16611 if (tg3_flag(tp, ASPM_WORKAROUND))
16612 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16613 PCIE_PWR_MGMT_L1_THRESH_MSK;
16618 #ifdef CONFIG_SPARC
16619 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16621 struct net_device *dev = tp->dev;
16622 struct pci_dev *pdev = tp->pdev;
16623 struct device_node *dp = pci_device_to_OF_node(pdev);
16624 const unsigned char *addr;
16627 addr = of_get_property(dp, "local-mac-address", &len);
16628 if (addr && len == 6) {
16629 memcpy(dev->dev_addr, addr, 6);
16635 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16637 struct net_device *dev = tp->dev;
16639 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16644 static int tg3_get_device_address(struct tg3 *tp)
16646 struct net_device *dev = tp->dev;
16647 u32 hi, lo, mac_offset;
16651 #ifdef CONFIG_SPARC
16652 if (!tg3_get_macaddr_sparc(tp))
16656 if (tg3_flag(tp, IS_SSB_CORE)) {
16657 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16658 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16663 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16664 tg3_flag(tp, 5780_CLASS)) {
16665 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16667 if (tg3_nvram_lock(tp))
16668 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16670 tg3_nvram_unlock(tp);
16671 } else if (tg3_flag(tp, 5717_PLUS)) {
16672 if (tp->pci_fn & 1)
16674 if (tp->pci_fn > 1)
16675 mac_offset += 0x18c;
16676 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16679 /* First try to get it from MAC address mailbox. */
16680 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16681 if ((hi >> 16) == 0x484b) {
16682 dev->dev_addr[0] = (hi >> 8) & 0xff;
16683 dev->dev_addr[1] = (hi >> 0) & 0xff;
16685 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16686 dev->dev_addr[2] = (lo >> 24) & 0xff;
16687 dev->dev_addr[3] = (lo >> 16) & 0xff;
16688 dev->dev_addr[4] = (lo >> 8) & 0xff;
16689 dev->dev_addr[5] = (lo >> 0) & 0xff;
16691 /* Some old bootcode may report a 0 MAC address in SRAM */
16692 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16695 /* Next, try NVRAM. */
16696 if (!tg3_flag(tp, NO_NVRAM) &&
16697 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16698 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16699 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16700 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16702 /* Finally just fetch it out of the MAC control regs. */
16704 hi = tr32(MAC_ADDR_0_HIGH);
16705 lo = tr32(MAC_ADDR_0_LOW);
16707 dev->dev_addr[5] = lo & 0xff;
16708 dev->dev_addr[4] = (lo >> 8) & 0xff;
16709 dev->dev_addr[3] = (lo >> 16) & 0xff;
16710 dev->dev_addr[2] = (lo >> 24) & 0xff;
16711 dev->dev_addr[1] = hi & 0xff;
16712 dev->dev_addr[0] = (hi >> 8) & 0xff;
16716 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16717 #ifdef CONFIG_SPARC
16718 if (!tg3_get_default_macaddr_sparc(tp))
16726 #define BOUNDARY_SINGLE_CACHELINE 1
16727 #define BOUNDARY_MULTI_CACHELINE 2
16729 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16731 int cacheline_size;
16735 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16737 cacheline_size = 1024;
16739 cacheline_size = (int) byte * 4;
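/* PCI_CACHE_LINE_SIZE counts 32-bit words, hence the multiply by
 * four; a value of zero means the register was left unprogrammed,
 * in which case 1024 bytes is assumed as a conservative fallback.
 */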
16741 /* On 5703 and later chips, the boundary bits have no effect. */
16744 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16745 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16746 !tg3_flag(tp, PCI_EXPRESS))
16749 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16750 goal = BOUNDARY_MULTI_CACHELINE;
16752 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16753 goal = BOUNDARY_SINGLE_CACHELINE;
16759 if (tg3_flag(tp, 57765_PLUS)) {
16760 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16767 /* PCI controllers on most RISC systems tend to disconnect
16768 * when a device tries to burst across a cache-line boundary.
16769 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16771 * Unfortunately, for PCI-E there are only limited
16772 * write-side controls for this, and thus for reads
16773 * we will still get the disconnects. We'll also waste
16774 * these PCI cycles for both read and write for chips
16775 * other than 5700 and 5701 which do not implement the boundary bits. */
16778 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16779 switch (cacheline_size) {
16784 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16785 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16786 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16788 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16789 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16794 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16795 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16799 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16800 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16803 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16804 switch (cacheline_size) {
16808 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16809 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16810 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16816 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16817 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16821 switch (cacheline_size) {
16823 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16824 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16825 DMA_RWCTRL_WRITE_BNDRY_16);
16830 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16831 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16832 DMA_RWCTRL_WRITE_BNDRY_32);
16837 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16838 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16839 DMA_RWCTRL_WRITE_BNDRY_64);
16844 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16845 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16846 DMA_RWCTRL_WRITE_BNDRY_128);
16851 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16852 DMA_RWCTRL_WRITE_BNDRY_256);
16855 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16856 DMA_RWCTRL_WRITE_BNDRY_512);
16860 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16861 DMA_RWCTRL_WRITE_BNDRY_1024);
16870 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16871 int size, bool to_device)
16873 struct tg3_internal_buffer_desc test_desc;
16874 u32 sram_dma_descs;
16877 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16879 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16880 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16881 tw32(RDMAC_STATUS, 0);
16882 tw32(WDMAC_STATUS, 0);
16884 tw32(BUFMGR_MODE, 0);
16885 tw32(FTQ_RESET, 0);
16887 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16888 test_desc.addr_lo = buf_dma & 0xffffffff;
16889 test_desc.nic_mbuf = 0x00002100;
16890 test_desc.len = size;
16893 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16894 * the *second* time the tg3 driver was getting loaded after an initial scan.
16897 * Broadcom tells me:
16898 * ...the DMA engine is connected to the GRC block and a DMA
16899 * reset may affect the GRC block in some unpredictable way...
16900 * The behavior of resets to individual blocks has not been tested.
16902 * Broadcom noted the GRC reset will also reset all sub-components.
16905 test_desc.cqid_sqid = (13 << 8) | 2;
16907 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16910 test_desc.cqid_sqid = (16 << 8) | 7;
16912 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16915 test_desc.flags = 0x00000005;
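/* Copy the test descriptor into NIC SRAM one 32-bit word at a time
 * through the PCI memory window, then close the window again.
 */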
16917 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16920 val = *(((u32 *)&test_desc) + i);
16921 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16922 sram_dma_descs + (i * sizeof(u32)));
16923 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16925 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16928 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16930 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16933 for (i = 0; i < 40; i++) {
16937 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16939 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16940 if ((val & 0xffff) == sram_dma_descs) {
16951 #define TEST_BUFFER_SIZE 0x2000
16953 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16954 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16958 static int tg3_test_dma(struct tg3 *tp)
16960 dma_addr_t buf_dma;
16961 u32 *buf, saved_dma_rwctrl;
16964 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16965 &buf_dma, GFP_KERNEL);
16971 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16972 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16974 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
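/* The boundary bits are in place; the bus-specific branches below
 * OR in DMA read/write watermark values appropriate to PCIe, PCI-X
 * or plain PCI operation.
 */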
16976 if (tg3_flag(tp, 57765_PLUS))
16979 if (tg3_flag(tp, PCI_EXPRESS)) {
16980 /* DMA read watermark not used on PCIE */
16981 tp->dma_rwctrl |= 0x00180000;
16982 } else if (!tg3_flag(tp, PCIX_MODE)) {
16983 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16984 tg3_asic_rev(tp) == ASIC_REV_5750)
16985 tp->dma_rwctrl |= 0x003f0000;
16987 tp->dma_rwctrl |= 0x003f000f;
16989 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16990 tg3_asic_rev(tp) == ASIC_REV_5704) {
16991 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16992 u32 read_water = 0x7;
16994 /* If the 5704 is behind the EPB bridge, we can
16995 * do the less restrictive ONE_DMA workaround for
16996 * better performance.
16998 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16999 tg3_asic_rev(tp) == ASIC_REV_5704)
17000 tp->dma_rwctrl |= 0x8000;
17001 else if (ccval == 0x6 || ccval == 0x7)
17002 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17004 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17006 /* Set bit 23 to enable PCIX hw bug fix */
17008 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17009 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17011 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17012 /* 5780 always in PCIX mode */
17013 tp->dma_rwctrl |= 0x00144000;
17014 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17015 /* 5714 always in PCIX mode */
17016 tp->dma_rwctrl |= 0x00148000;
17018 tp->dma_rwctrl |= 0x001b000f;
17021 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17022 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17024 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17025 tg3_asic_rev(tp) == ASIC_REV_5704)
17026 tp->dma_rwctrl &= 0xfffffff0;
17028 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17029 tg3_asic_rev(tp) == ASIC_REV_5701) {
17030 /* Remove this if it causes problems for some boards. */
17031 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17033 /* On 5700/5701 chips, we need to set this bit.
17034 * Otherwise the chip will issue cacheline transactions
17035 * to streamable DMA memory with not all the byte
17036 * enables turned on. This is an error on several
17037 * RISC PCI controllers, in particular sparc64.
17039 * On 5703/5704 chips, this bit has been reassigned
17040 * a different meaning. In particular, it is used
17041 * on those chips to enable a PCI-X workaround.
17043 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17046 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17049 /* Unneeded, already done by tg3_get_invariants. */
17050 tg3_switch_clocks(tp);
17053 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17054 tg3_asic_rev(tp) != ASIC_REV_5701)
17057 /* It is best to perform the DMA test with maximum write burst size
17058 * to expose the 5700/5701 write DMA bug.
17060 saved_dma_rwctrl = tp->dma_rwctrl;
17061 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17062 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17067 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17068 p[i] = i;
17070 /* Send the buffer to the chip. */
17071 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17073 dev_err(&tp->pdev->dev,
17074 "%s: Buffer write failed. err = %d\n",
17080 /* validate data reached card RAM correctly. */
17081 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17083 tg3_read_mem(tp, 0x2100 + (i*4), &val);
17084 if (le32_to_cpu(val) != p[i]) {
17085 dev_err(&tp->pdev->dev,
17086 "%s: Buffer corrupted on device! "
17087 "(%d != %d)\n", __func__, val, i);
17088 /* ret = -ENODEV here? */
17093 /* Now read it back. */
17094 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17096 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17097 "err = %d\n", __func__, ret);
17102 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17106 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17107 DMA_RWCTRL_WRITE_BNDRY_16) {
17108 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17109 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17110 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17113 dev_err(&tp->pdev->dev,
17114 "%s: Buffer corrupted on read back! "
17115 "(%d != %d)\n", __func__, p[i], i);
17121 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17127 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17128 DMA_RWCTRL_WRITE_BNDRY_16) {
17129 /* DMA test passed without adjusting the DMA boundary;
17130 * now look for chipsets that are known to expose the
17131 * DMA bug without failing the test.
17133 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17134 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17135 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17137 /* Safe to use the calculated DMA boundary. */
17138 tp->dma_rwctrl = saved_dma_rwctrl;
17141 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17145 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17150 static void tg3_init_bufmgr_config(struct tg3 *tp)
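/* Pick MBUF memory-pool watermarks to match the chip generation;
 * the jumbo variants only come into play when jumbo rings are in
 * use.
 */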
17152 if (tg3_flag(tp, 57765_PLUS)) {
17153 tp->bufmgr_config.mbuf_read_dma_low_water =
17154 DEFAULT_MB_RDMA_LOW_WATER_5705;
17155 tp->bufmgr_config.mbuf_mac_rx_low_water =
17156 DEFAULT_MB_MACRX_LOW_WATER_57765;
17157 tp->bufmgr_config.mbuf_high_water =
17158 DEFAULT_MB_HIGH_WATER_57765;
17160 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17161 DEFAULT_MB_RDMA_LOW_WATER_5705;
17162 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17163 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17164 tp->bufmgr_config.mbuf_high_water_jumbo =
17165 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17166 } else if (tg3_flag(tp, 5705_PLUS)) {
17167 tp->bufmgr_config.mbuf_read_dma_low_water =
17168 DEFAULT_MB_RDMA_LOW_WATER_5705;
17169 tp->bufmgr_config.mbuf_mac_rx_low_water =
17170 DEFAULT_MB_MACRX_LOW_WATER_5705;
17171 tp->bufmgr_config.mbuf_high_water =
17172 DEFAULT_MB_HIGH_WATER_5705;
17173 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17174 tp->bufmgr_config.mbuf_mac_rx_low_water =
17175 DEFAULT_MB_MACRX_LOW_WATER_5906;
17176 tp->bufmgr_config.mbuf_high_water =
17177 DEFAULT_MB_HIGH_WATER_5906;
17180 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17181 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17182 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17183 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17184 tp->bufmgr_config.mbuf_high_water_jumbo =
17185 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17187 tp->bufmgr_config.mbuf_read_dma_low_water =
17188 DEFAULT_MB_RDMA_LOW_WATER;
17189 tp->bufmgr_config.mbuf_mac_rx_low_water =
17190 DEFAULT_MB_MACRX_LOW_WATER;
17191 tp->bufmgr_config.mbuf_high_water =
17192 DEFAULT_MB_HIGH_WATER;
17194 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17195 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17196 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17197 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17198 tp->bufmgr_config.mbuf_high_water_jumbo =
17199 DEFAULT_MB_HIGH_WATER_JUMBO;
17202 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17203 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17206 static char *tg3_phy_string(struct tg3 *tp)
17208 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17209 case TG3_PHY_ID_BCM5400: return "5400";
17210 case TG3_PHY_ID_BCM5401: return "5401";
17211 case TG3_PHY_ID_BCM5411: return "5411";
17212 case TG3_PHY_ID_BCM5701: return "5701";
17213 case TG3_PHY_ID_BCM5703: return "5703";
17214 case TG3_PHY_ID_BCM5704: return "5704";
17215 case TG3_PHY_ID_BCM5705: return "5705";
17216 case TG3_PHY_ID_BCM5750: return "5750";
17217 case TG3_PHY_ID_BCM5752: return "5752";
17218 case TG3_PHY_ID_BCM5714: return "5714";
17219 case TG3_PHY_ID_BCM5780: return "5780";
17220 case TG3_PHY_ID_BCM5755: return "5755";
17221 case TG3_PHY_ID_BCM5787: return "5787";
17222 case TG3_PHY_ID_BCM5784: return "5784";
17223 case TG3_PHY_ID_BCM5756: return "5722/5756";
17224 case TG3_PHY_ID_BCM5906: return "5906";
17225 case TG3_PHY_ID_BCM5761: return "5761";
17226 case TG3_PHY_ID_BCM5718C: return "5718C";
17227 case TG3_PHY_ID_BCM5718S: return "5718S";
17228 case TG3_PHY_ID_BCM57765: return "57765";
17229 case TG3_PHY_ID_BCM5719C: return "5719C";
17230 case TG3_PHY_ID_BCM5720C: return "5720C";
17231 case TG3_PHY_ID_BCM5762: return "5762C";
17232 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17233 case 0: return "serdes";
17234 default: return "unknown";
17238 static char *tg3_bus_string(struct tg3 *tp, char *str)
17240 if (tg3_flag(tp, PCI_EXPRESS)) {
17241 strcpy(str, "PCI Express");
17243 } else if (tg3_flag(tp, PCIX_MODE)) {
17244 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17246 strcpy(str, "PCIX:");
17248 if ((clock_ctrl == 7) ||
17249 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17250 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17251 strcat(str, "133MHz");
17252 else if (clock_ctrl == 0)
17253 strcat(str, "33MHz");
17254 else if (clock_ctrl == 2)
17255 strcat(str, "50MHz");
17256 else if (clock_ctrl == 4)
17257 strcat(str, "66MHz");
17258 else if (clock_ctrl == 6)
17259 strcat(str, "100MHz");
17261 strcpy(str, "PCI:");
17262 if (tg3_flag(tp, PCI_HIGH_SPEED))
17263 strcat(str, "66MHz");
17265 strcat(str, "33MHz");
17267 if (tg3_flag(tp, PCI_32BIT))
17268 strcat(str, ":32-bit");
17270 strcat(str, ":64-bit");
17274 static void tg3_init_coal(struct tg3 *tp)
17276 struct ethtool_coalesce *ec = &tp->coal;
17278 memset(ec, 0, sizeof(*ec));
17279 ec->cmd = ETHTOOL_GCOALESCE;
17280 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17281 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17282 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17283 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17284 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17285 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17286 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17287 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17288 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17290 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17291 HOSTCC_MODE_CLRTICK_TXBD)) {
17292 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17293 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17294 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17295 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17298 if (tg3_flag(tp, 5705_PLUS)) {
17299 ec->rx_coalesce_usecs_irq = 0;
17300 ec->tx_coalesce_usecs_irq = 0;
17301 ec->stats_block_coalesce_usecs = 0;
17305 static int tg3_init_one(struct pci_dev *pdev,
17306 const struct pci_device_id *ent)
17308 struct net_device *dev;
17311 u32 sndmbx, rcvmbx, intmbx;
17313 u64 dma_mask, persist_dma_mask;
17314 netdev_features_t features = 0;
17316 printk_once(KERN_INFO "%s\n", version);
17318 err = pci_enable_device(pdev);
17320 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17324 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17326 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17327 goto err_out_disable_pdev;
17330 pci_set_master(pdev);
17332 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17335 goto err_out_free_res;
17338 SET_NETDEV_DEV(dev, &pdev->dev);
17340 tp = netdev_priv(dev);
17343 tp->rx_mode = TG3_DEF_RX_MODE;
17344 tp->tx_mode = TG3_DEF_TX_MODE;
17348 tp->msg_enable = tg3_debug;
17350 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17352 if (pdev_is_ssb_gige_core(pdev)) {
17353 tg3_flag_set(tp, IS_SSB_CORE);
17354 if (ssb_gige_must_flush_posted_writes(pdev))
17355 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17356 if (ssb_gige_one_dma_at_once(pdev))
17357 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17358 if (ssb_gige_have_roboswitch(pdev))
17359 tg3_flag_set(tp, ROBOSWITCH);
17360 if (ssb_gige_is_rgmii(pdev))
17361 tg3_flag_set(tp, RGMII_MODE);
17364 /* The word/byte swap controls here control register access byte
17365 * swapping. DMA data byte swapping is controlled in the GRC_MODE setting below. */
17368 tp->misc_host_ctrl =
17369 MISC_HOST_CTRL_MASK_PCI_INT |
17370 MISC_HOST_CTRL_WORD_SWAP |
17371 MISC_HOST_CTRL_INDIR_ACCESS |
17372 MISC_HOST_CTRL_PCISTATE_RW;
17374 /* The NONFRM (non-frame) byte/word swap controls take effect
17375 * on descriptor entries, anything which isn't packet data.
17377 * The StrongARM chips on the board (one for tx, one for rx)
17378 * are running in big-endian mode.
17380 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17381 GRC_MODE_WSWAP_NONFRM_DATA);
17382 #ifdef __BIG_ENDIAN
17383 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17385 spin_lock_init(&tp->lock);
17386 spin_lock_init(&tp->indirect_lock);
17387 INIT_WORK(&tp->reset_task, tg3_reset_task);
17389 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17391 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17393 goto err_out_free_dev;
17396 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17397 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17398 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17399 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17400 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17401 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17402 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17403 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17404 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17405 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17406 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17407 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17408 tg3_flag_set(tp, ENABLE_APE);
17409 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17410 if (!tp->aperegs) {
17411 dev_err(&pdev->dev,
17412 "Cannot map APE registers, aborting\n");
17414 goto err_out_iounmap;
17418 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17419 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17421 dev->ethtool_ops = &tg3_ethtool_ops;
17422 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17423 dev->netdev_ops = &tg3_netdev_ops;
17424 dev->irq = pdev->irq;
17426 err = tg3_get_invariants(tp, ent);
17428 dev_err(&pdev->dev,
17429 "Problem fetching invariants of chip, aborting\n");
17430 goto err_out_apeunmap;
17433 /* The EPB bridge inside 5714, 5715, and 5780 and any
17434 * device behind the EPB cannot support DMA addresses > 40-bit.
17435 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17436 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17437 * do DMA address check in tg3_start_xmit().
17439 if (tg3_flag(tp, IS_5788))
17440 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17441 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17442 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17443 #ifdef CONFIG_HIGHMEM
17444 dma_mask = DMA_BIT_MASK(64);
17447 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17449 /* Configure DMA attributes. */
17450 if (dma_mask > DMA_BIT_MASK(32)) {
17451 err = pci_set_dma_mask(pdev, dma_mask);
17453 features |= NETIF_F_HIGHDMA;
17454 err = pci_set_consistent_dma_mask(pdev,
17457 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17458 "DMA for consistent allocations\n");
17459 goto err_out_apeunmap;
17463 if (err || dma_mask == DMA_BIT_MASK(32)) {
17464 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17466 dev_err(&pdev->dev,
17467 "No usable DMA configuration, aborting\n");
17468 goto err_out_apeunmap;
17472 tg3_init_bufmgr_config(tp);
17474 /* 5700 B0 chips do not support checksumming correctly due
17475 * to hardware bugs.
17477 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17478 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17480 if (tg3_flag(tp, 5755_PLUS))
17481 features |= NETIF_F_IPV6_CSUM;
17484 /* TSO is on by default on chips that support hardware TSO.
17485 * Firmware TSO on older chips gives lower performance, so it
17486 * is off by default, but can be enabled using ethtool.
17488 if ((tg3_flag(tp, HW_TSO_1) ||
17489 tg3_flag(tp, HW_TSO_2) ||
17490 tg3_flag(tp, HW_TSO_3)) &&
17491 (features & NETIF_F_IP_CSUM))
17492 features |= NETIF_F_TSO;
17493 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17494 if (features & NETIF_F_IPV6_CSUM)
17495 features |= NETIF_F_TSO6;
17496 if (tg3_flag(tp, HW_TSO_3) ||
17497 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17498 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17499 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17500 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17501 tg3_asic_rev(tp) == ASIC_REV_57780)
17502 features |= NETIF_F_TSO_ECN;
17505 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17506 NETIF_F_HW_VLAN_CTAG_RX;
17507 dev->vlan_features |= features;
17510 * Add loopback capability only for a subset of devices that support
17511 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17512 * loopback for the remaining devices.
17514 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17515 !tg3_flag(tp, CPMU_PRESENT))
17516 /* Add the loopback capability */
17517 features |= NETIF_F_LOOPBACK;
17519 dev->hw_features |= features;
17521 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17522 !tg3_flag(tp, TSO_CAPABLE) &&
17523 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17524 tg3_flag_set(tp, MAX_RXPEND_64);
17525 tp->rx_pending = 63;
17528 err = tg3_get_device_address(tp);
17530 dev_err(&pdev->dev,
17531 "Could not obtain valid ethernet address, aborting\n");
17532 goto err_out_apeunmap;
17536 * Reset the chip in case the UNDI or EFI driver did not shut it down;
17537 * the DMA self test will enable WDMAC and we'll see (spurious)
17538 * pending DMA on the PCI bus at that point.
17540 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17541 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17542 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17543 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17546 err = tg3_test_dma(tp);
17548 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17549 goto err_out_apeunmap;
17552 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17553 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17554 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
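/* Hand each NAPI context its interrupt, consumer and producer
 * mailboxes, plus the coalesce-now bit used to kick its vector.
 */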
17555 for (i = 0; i < tp->irq_max; i++) {
17556 struct tg3_napi *tnapi = &tp->napi[i];
17559 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17561 tnapi->int_mbox = intmbx;
17567 tnapi->consmbox = rcvmbx;
17568 tnapi->prodmbox = sndmbx;
17571 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17573 tnapi->coal_now = HOSTCC_MODE_NOW;
17575 if (!tg3_flag(tp, SUPPORT_MSIX))
17579 * If we support MSIX, we'll be using RSS. If we're using
17580 * RSS, the first vector only handles link interrupts and the
17581 * remaining vectors handle rx and tx interrupts. Reuse the
17582 * mailbox values for the next iteration. The values we set up
17583 * above are still useful for the single-vector mode.
17598 pci_set_drvdata(pdev, dev);
17600 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17601 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17602 tg3_asic_rev(tp) == ASIC_REV_5762)
17603 tg3_flag_set(tp, PTP_CAPABLE);
17605 tg3_timer_init(tp);
17607 tg3_carrier_off(tp);
17609 err = register_netdev(dev);
17611 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17612 goto err_out_apeunmap;
17615 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17616 tp->board_part_number,
17617 tg3_chip_rev_id(tp),
17618 tg3_bus_string(tp, str),
17621 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17622 struct phy_device *phydev;
17623 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17625 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17626 phydev->drv->name, dev_name(&phydev->dev));
17630 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17631 ethtype = "10/100Base-TX";
17632 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17633 ethtype = "1000Base-SX";
17635 ethtype = "10/100/1000Base-T";
17637 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17638 "(WireSpeed[%d], EEE[%d])\n",
17639 tg3_phy_string(tp), ethtype,
17640 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17641 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17644 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17645 (dev->features & NETIF_F_RXCSUM) != 0,
17646 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17647 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17648 tg3_flag(tp, ENABLE_ASF) != 0,
17649 tg3_flag(tp, TSO_CAPABLE) != 0);
17650 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17652 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17653 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17655 pci_save_state(pdev);
17661 iounmap(tp->aperegs);
17662 tp->aperegs = NULL;
17675 pci_release_regions(pdev);
17677 err_out_disable_pdev:
17678 if (pci_is_enabled(pdev))
17679 pci_disable_device(pdev);
17680 pci_set_drvdata(pdev, NULL);
17684 static void tg3_remove_one(struct pci_dev *pdev)
17686 struct net_device *dev = pci_get_drvdata(pdev);
17689 struct tg3 *tp = netdev_priv(dev);
17691 release_firmware(tp->fw);
17693 tg3_reset_task_cancel(tp);
17695 if (tg3_flag(tp, USE_PHYLIB)) {
17700 unregister_netdev(dev);
17702 iounmap(tp->aperegs);
17703 tp->aperegs = NULL;
17710 pci_release_regions(pdev);
17711 pci_disable_device(pdev);
17712 pci_set_drvdata(pdev, NULL);
17716 #ifdef CONFIG_PM_SLEEP
17717 static int tg3_suspend(struct device *device)
17719 struct pci_dev *pdev = to_pci_dev(device);
17720 struct net_device *dev = pci_get_drvdata(pdev);
17721 struct tg3 *tp = netdev_priv(dev);
17724 if (!netif_running(dev))
17727 tg3_reset_task_cancel(tp);
17729 tg3_netif_stop(tp);
17731 tg3_timer_stop(tp);
17733 tg3_full_lock(tp, 1);
17734 tg3_disable_ints(tp);
17735 tg3_full_unlock(tp);
17737 netif_device_detach(dev);
17739 tg3_full_lock(tp, 0);
17740 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17741 tg3_flag_clear(tp, INIT_COMPLETE);
17742 tg3_full_unlock(tp);
17744 err = tg3_power_down_prepare(tp);
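/* Recovery path: if preparing for power-down failed, restart the
 * hardware and bring the interface back to a running state.
 */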
17748 tg3_full_lock(tp, 0);
17750 tg3_flag_set(tp, INIT_COMPLETE);
17751 err2 = tg3_restart_hw(tp, true);
17755 tg3_timer_start(tp);
17757 netif_device_attach(dev);
17758 tg3_netif_start(tp);
17761 tg3_full_unlock(tp);
17770 static int tg3_resume(struct device *device)
17772 struct pci_dev *pdev = to_pci_dev(device);
17773 struct net_device *dev = pci_get_drvdata(pdev);
17774 struct tg3 *tp = netdev_priv(dev);
17777 if (!netif_running(dev))
17780 netif_device_attach(dev);
17782 tg3_full_lock(tp, 0);
17784 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17786 tg3_flag_set(tp, INIT_COMPLETE);
17787 err = tg3_restart_hw(tp,
17788 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17792 tg3_timer_start(tp);
17794 tg3_netif_start(tp);
17797 tg3_full_unlock(tp);
17804 #endif /* CONFIG_PM_SLEEP */
17806 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17808 static void tg3_shutdown(struct pci_dev *pdev)
17810 struct net_device *dev = pci_get_drvdata(pdev);
17811 struct tg3 *tp = netdev_priv(dev);
17814 netif_device_detach(dev);
17816 if (netif_running(dev))
17819 if (system_state == SYSTEM_POWER_OFF)
17820 tg3_power_down(tp);
17826 * tg3_io_error_detected - called when PCI error is detected
17827 * @pdev: Pointer to PCI device
17828 * @state: The current pci connection state
17830 * This function is called after a PCI bus error affecting
17831 * this device has been detected.
17833 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17834 pci_channel_state_t state)
17836 struct net_device *netdev = pci_get_drvdata(pdev);
17837 struct tg3 *tp = netdev_priv(netdev);
17838 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17840 netdev_info(netdev, "PCI I/O error detected\n");
17844 /* We probably don't have netdev yet */
17845 if (!netdev || !netif_running(netdev))
17850 tg3_netif_stop(tp);
17852 tg3_timer_stop(tp);
17854 /* Want to make sure that the reset task doesn't run */
17855 tg3_reset_task_cancel(tp);
17857 netif_device_detach(netdev);
17859 /* Clean up software state, even if MMIO is blocked */
17860 tg3_full_lock(tp, 0);
17861 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17862 tg3_full_unlock(tp);
17865 if (state == pci_channel_io_perm_failure) {
17867 tg3_napi_enable(tp);
17870 err = PCI_ERS_RESULT_DISCONNECT;
17872 pci_disable_device(pdev);
17881 * tg3_io_slot_reset - called after the pci bus has been reset.
17882 * @pdev: Pointer to PCI device
17884 * Restart the card from scratch, as if from a cold boot.
17885 * At this point, the card has experienced a hard reset,
17886 * followed by fixups by BIOS, and has its config space
17887 * set up identically to what it was at cold boot.
17889 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17891 struct net_device *netdev = pci_get_drvdata(pdev);
17892 struct tg3 *tp = netdev_priv(netdev);
17893 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17898 if (pci_enable_device(pdev)) {
17899 dev_err(&pdev->dev,
17900 "Cannot re-enable PCI device after reset.\n");
17904 pci_set_master(pdev);
17905 pci_restore_state(pdev);
17906 pci_save_state(pdev);
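/* Restore the config space captured at probe time, then re-save it
 * so any later recovery starts from this known-good state.
 */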
17908 if (!netdev || !netif_running(netdev)) {
17909 rc = PCI_ERS_RESULT_RECOVERED;
17913 err = tg3_power_up(tp);
17917 rc = PCI_ERS_RESULT_RECOVERED;
17920 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
17921 tg3_napi_enable(tp);
17930 * tg3_io_resume - called when traffic can start flowing again.
17931 * @pdev: Pointer to PCI device
17933 * This callback is called when the error recovery driver tells
17934 * us that it's OK to resume normal operation.
17936 static void tg3_io_resume(struct pci_dev *pdev)
17938 struct net_device *netdev = pci_get_drvdata(pdev);
17939 struct tg3 *tp = netdev_priv(netdev);
17944 if (!netif_running(netdev))
17947 tg3_full_lock(tp, 0);
17948 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17949 tg3_flag_set(tp, INIT_COMPLETE);
17950 err = tg3_restart_hw(tp, true);
17952 tg3_full_unlock(tp);
17953 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17957 netif_device_attach(netdev);
17959 tg3_timer_start(tp);
17961 tg3_netif_start(tp);
17963 tg3_full_unlock(tp);
17971 static const struct pci_error_handlers tg3_err_handler = {
17972 .error_detected = tg3_io_error_detected,
17973 .slot_reset = tg3_io_slot_reset,
17974 .resume = tg3_io_resume
17977 static struct pci_driver tg3_driver = {
17978 .name = DRV_MODULE_NAME,
17979 .id_table = tg3_pci_tbl,
17980 .probe = tg3_init_one,
17981 .remove = tg3_remove_one,
17982 .err_handler = &tg3_err_handler,
17983 .driver.pm = &tg3_pm_ops,
17984 .shutdown = tg3_shutdown,
17987 module_pci_driver(tg3_driver);