/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */

#ifndef _IGC_H_
#define _IGC_H_

#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/ethtool.h>
#include <linux/sctp.h>
/* forward declarations */
void igc_set_ethtool_ops(struct net_device *);

void igc_up(struct igc_adapter *adapter);
void igc_down(struct igc_adapter *adapter);
int igc_setup_tx_resources(struct igc_ring *ring);
int igc_setup_rx_resources(struct igc_ring *ring);
void igc_free_tx_resources(struct igc_ring *ring);
void igc_free_rx_resources(struct igc_ring *ring);
unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter);
void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
			      const u32 max_rss_queues);
int igc_reinit_queues(struct igc_adapter *adapter);
void igc_write_rss_indir_tbl(struct igc_adapter *adapter);
bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
int igc_add_mac_steering_filter(struct igc_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags);
int igc_del_mac_steering_filter(struct igc_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags);
void igc_update_stats(struct igc_adapter *adapter);
extern char igc_driver_name[];
extern char igc_driver_version[];
#define IGC_REGS_LEN			740
#define IGC_RETA_SIZE			128
/* Interrupt defines */
#define IGC_START_ITR			648 /* ~6000 ints/sec */
#define IGC_FLAG_HAS_MSI		BIT(0)
#define IGC_FLAG_QUEUE_PAIRS		BIT(3)
#define IGC_FLAG_DMAC			BIT(4)
#define IGC_FLAG_NEED_LINK_UPDATE	BIT(9)
#define IGC_FLAG_MEDIA_RESET		BIT(10)
#define IGC_FLAG_MAS_ENABLE		BIT(12)
#define IGC_FLAG_HAS_MSIX		BIT(13)
#define IGC_FLAG_VLAN_PROMISC		BIT(15)
#define IGC_FLAG_RX_LEGACY		BIT(16)
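/* These flags live in adapter->flags and are tested with plain bitwise ops;
 * a sketch of a typical consumer (interrupt setup choosing MSI-X vs MSI,
 * where igc_request_msix() names the helper in igc_main.c):
 *
 *	if (adapter->flags & IGC_FLAG_HAS_MSIX)
 *		err = igc_request_msix(adapter);
 *	else if (adapter->flags & IGC_FLAG_HAS_MSI)
 *		err = request_irq(pdev->irq, ...);
 */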
#define IGC_FLAG_RSS_FIELD_IPV4_UDP	BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP	BIT(7)

#define IGC_MRQC_ENABLE_RSS_MQ		0x00000002
#define IGC_MRQC_RSS_FIELD_IPV4_UDP	0x00400000
#define IGC_MRQC_RSS_FIELD_IPV6_UDP	0x00800000
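/* The two IGC_FLAG_RSS_FIELD_* bits select the matching IGC_MRQC_RSS_FIELD_*
 * bits when MRQC is programmed; a sketch of the mapping (in the style of an
 * igc_setup_mrqc()-type routine, not a verbatim copy):
 *
 *	u32 mrqc = IGC_MRQC_ENABLE_RSS_MQ;
 *
 *	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
 *		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
 *	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
 *		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
 */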
#define IGC_4K_ITR			980
#define IGC_20K_ITR			196
#define IGC_70K_ITR			56

#define IGC_DEFAULT_ITR			3 /* dynamic */
#define IGC_MAX_ITR_USECS		10000
#define IGC_MIN_ITR_USECS		10
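/* Simplified sketch of how an ethtool set_coalesce handler validates
 * user-supplied rx-usecs against these bounds (the in-tree validation also
 * special-cases the small "dynamic" ITR values):
 *
 *	if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS ||
 *	    (ec->rx_coalesce_usecs > IGC_DEFAULT_ITR &&
 *	     ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS))
 *		return -EINVAL;
 */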
#define NON_Q_VECTORS			1
#define MAX_MSIX_ENTRIES		10
/* TX/RX descriptor defines */
#define IGC_DEFAULT_TXD			256
#define IGC_DEFAULT_TX_WORK		128
#define IGC_MIN_TXD			80
#define IGC_MAX_TXD			4096

#define IGC_DEFAULT_RXD			256
#define IGC_MIN_RXD			80
#define IGC_MAX_RXD			4096
/* Transmit and receive queues */
#define IGC_MAX_RX_QUEUES		4
#define IGC_MAX_TX_QUEUES		4

#define MAX_Q_VECTORS			8
#define MAX_STD_JUMBO_FRAME_SIZE	9216
/* Supported Rx Buffer Sizes */
#define IGC_RXBUFFER_256		256
#define IGC_RXBUFFER_2048		2048
#define IGC_RXBUFFER_3072		3072

#define AUTO_ALL_MODES			0
#define IGC_RX_HDR_LEN			IGC_RXBUFFER_256
/* RX and TX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *           descriptors available in its onboard memory.
 *           Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *           available in host memory.
 *           If PTHRESH is 0, this should also be 0.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *           descriptors until either it has this many to write back, or the
 *           ITR timer expires.
 */
#define IGC_RX_PTHRESH			8
#define IGC_RX_HTHRESH			8
#define IGC_TX_PTHRESH			8
#define IGC_TX_HTHRESH			1
#define IGC_RX_WTHRESH			4
#define IGC_TX_WTHRESH			16
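/* The thresholds are packed into the per-queue TXDCTL/RXDCTL registers at
 * bit offsets 0, 8 and 16. A sketch of the Tx queue enable path
 * (IGC_TXDCTL_QUEUE_ENABLE is the enable bit from igc_defines.h):
 *
 *	u32 txdctl = 0;
 *
 *	txdctl |= IGC_TX_PTHRESH;
 *	txdctl |= IGC_TX_HTHRESH << 8;
 *	txdctl |= IGC_TX_WTHRESH << 16;
 *	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
 */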
#define IGC_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
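/* Both attributes tell the DMA layer that the driver syncs buffers itself
 * and can cope with weakly ordered writes; sketch of use when mapping a
 * receive page:
 *
 *	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
 *				 igc_rx_pg_size(rx_ring),
 *				 DMA_FROM_DEVICE, IGC_RX_DMA_ATTR);
 */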
#define IGC_TS_HDR_LEN			16

#define IGC_SKB_PAD			(NET_SKB_PAD + NET_IP_ALIGN)
#if (PAGE_SIZE < 8192)
#define IGC_MAX_FRAME_BUILD_SKB \
	(SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN)
#else
#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN)
#endif
/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IGC_RX_BUFFER_WRITE		16 /* Must be power of 2 */
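/* Sketch of the batching inside an Rx clean loop: the refill path (named
 * igc_alloc_rx_buffers() in igc_main.c) only bumps the tail register once
 * a batch of descriptors has been consumed:
 *
 *	if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
 *		igc_alloc_rx_buffers(rx_ring, cleaned_count);
 *		cleaned_count = 0;
 *	}
 */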
/* igc_test_staterr - tests bits within Rx descriptor status and error fields */
static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc,
				      const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
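/* Typical use: check the descriptor-done bit before reading a completed
 * buffer (IGC_RXD_STAT_DD comes from igc_defines.h):
 *
 *	if (!igc_test_staterr(rx_desc, IGC_RXD_STAT_DD))
 *		break;
 */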
/* driver state flags, used with test_bit()/set_bit() on adapter->state */
enum igc_state_t {
	__IGC_PTP_TX_IN_PROGRESS,
};
enum igc_tx_flags {
	/* cmd_type flags */
	IGC_TX_FLAGS_VLAN	= 0x01,
	IGC_TX_FLAGS_TSO	= 0x02,
	IGC_TX_FLAGS_TSTAMP	= 0x04,

	/* olinfo flags */
	IGC_TX_FLAGS_IPV4	= 0x10,
	IGC_TX_FLAGS_CSUM	= 0x20,
};
/* The largest size we can write to the descriptor is 65535. In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGC_MAX_TXD_PWR		15
#define IGC_MAX_DATA_PER_TXD	BIT(IGC_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
#define DESC_NEEDED		(MAX_SKB_FRAGS + 4)
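/* Worked example: a 60 KB payload needs TXD_USE_COUNT(61440) =
 * DIV_ROUND_UP(61440, 32768) = 2 data descriptors, while DESC_NEEDED keeps
 * MAX_SKB_FRAGS + 4 slots free so a maximally fragmented skb (plus context
 * and overflow descriptors) can always be posted before the queue is
 * stopped.
 */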
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct igc_tx_buffer {
	union igc_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	struct sk_buff *skb;
	unsigned int bytecount;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};
struct igc_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};
struct igc_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 restart_queue;
	u64 restart_queue2;
};

struct igc_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 csum_err;
	u64 alloc_failed;
};

struct igc_rx_packet_stats {
	u64 ipv4_packets;	/* IPv4 headers processed */
	u64 ipv4e_packets;	/* IPv4E headers with extensions processed */
	u64 ipv6_packets;	/* IPv6 headers processed */
	u64 ipv6e_packets;	/* IPv6E headers with extensions processed */
	u64 tcp_packets;	/* TCP headers processed */
	u64 udp_packets;	/* UDP headers processed */
	u64 sctp_packets;	/* SCTP headers processed */
	u64 nfs_packets;	/* NFS headers processed */
	u64 other_packets;
};
struct igc_ring_container {
	struct igc_ring *ring;		/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

struct igc_ring {
	struct igc_q_vector *q_vector;	/* backlink to q_vector */
	struct net_device *netdev;	/* back pointer to net_device */
	struct device *dev;		/* device for dma mapping */
	union {				/* array of buffer info structs */
		struct igc_tx_buffer *tx_buffer_info;
		struct igc_rx_buffer *rx_buffer_info;
	};
	void *desc;			/* descriptor ring memory */
	unsigned long flags;		/* ring specific flags */
	void __iomem *tail;		/* pointer to ring tail register */
	dma_addr_t dma;			/* phys address of the ring */
	unsigned int size;		/* length of desc. ring in bytes */

	u16 count;			/* number of desc. in the ring */
	u8 queue_index;			/* logical index of the ring */
	u8 reg_idx;			/* physical index of the ring */

	/* everything past this point is written often */
	u16 next_to_clean;
	u16 next_to_use;
	struct igc_tx_queue_stats tx_stats;
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync tx_syncp2;

	struct igc_rx_queue_stats rx_stats;
	struct igc_rx_packet_stats pkt_stats;
	struct u64_stats_sync rx_syncp;
} ____cacheline_internodealigned_in_smp;
struct igc_q_vector {
	struct igc_adapter *adapter;	/* backlink */
	void __iomem *itr_register;
	u32 eims_value;			/* EIMS mask value */

	struct igc_ring_container rx, tx;

	struct napi_struct napi;

	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];
	struct net_device poll_dev;

	/* for dynamic allocation of rings associated with this q_vector */
	struct igc_ring ring[0] ____cacheline_internodealigned_in_smp;
};
#define MAX_ETYPE_FILTER	(4 - 1)
enum igc_filter_match_flags {
	IGC_FILTER_FLAG_ETHER_TYPE   = 0x1,
	IGC_FILTER_FLAG_VLAN_TCI     = 0x2,
	IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4,
	IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8,
};
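/* match_flags is an OR of these bits; e.g. a filter matching on both
 * destination MAC and VLAN tag would carry (sketch):
 *
 *	input->filter.match_flags = IGC_FILTER_FLAG_DST_MAC_ADDR |
 *				    IGC_FILTER_FLAG_VLAN_TCI;
 */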
/* RX network flow classification data structure */
struct igc_nfc_input {
	/* Byte layout in order, all values with MSB first:
	 * match_flags - 1 byte
	 * etype - 2 bytes
	 * vlan_tci - 2 bytes
	 * src_addr - 6 bytes
	 * dst_addr - 6 bytes
	 */
	u8 match_flags;
	__be16 etype;
	__be16 vlan_tci;
	u8 src_addr[ETH_ALEN];
	u8 dst_addr[ETH_ALEN];
};
struct igc_nfc_filter {
	struct hlist_node nfc_node;
	struct igc_nfc_input filter;
	unsigned long cookie;
};
struct igc_mac_addr {
	u8 addr[ETH_ALEN];
	u8 queue;
	u8 state; /* bitmask */
};
#define IGC_MAC_STATE_DEFAULT		0x1
#define IGC_MAC_STATE_IN_USE		0x2
#define IGC_MAC_STATE_SRC_ADDR		0x4
#define IGC_MAC_STATE_QUEUE_STEERING	0x8
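/* Sketch of how the state bitmask gates a mac_table scan, in the style of
 * the igc_add_mac_steering_filter()/igc_del_mac_steering_filter() helpers
 * declared above:
 *
 *	if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
 *		continue;	// free slot, usable for a new entry
 */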
#define IGC_MAX_RXNFC_FILTERS		16
/* Board specific private data structure */
struct igc_adapter {
	struct net_device *netdev;
	unsigned int num_q_vectors;

	struct msix_entry *msix_entries;

	u32 tx_timeout_count;

	struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];

	struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];

	struct timer_list watchdog_timer;
	struct timer_list dma_err_timer;
	struct timer_list phy_info_timer;
	/* Interrupt Throttle Rate */
	u32 rx_itr_setting;
	u32 tx_itr_setting;

	struct work_struct reset_task;
	struct work_struct watchdog_task;
	struct work_struct dma_err_task;

	u8 tx_timeout_factor;
	/* OS defined structs */
	struct pci_dev *pdev;
	/* lock for statistics */
	spinlock_t stats64_lock;
	struct rtnl_link_stats64 stats64;
	/* structs defined in igc_hw.h */
	struct igc_hw hw;
	struct igc_hw_stats stats;
	struct igc_q_vector *q_vector[MAX_Q_VECTORS];
	u32 eims_enable_mask;

	u32 tx_hwtstamp_timeouts;
	u32 tx_hwtstamp_skipped;
	u32 rx_hwtstamp_cleared;
	u32 rss_indir_tbl_init;

	/* RX network flow classification support */
	struct hlist_head nfc_filter_list;
	struct hlist_head cls_flower_list;
	unsigned int nfc_filter_count;
	/* lock for RX network flow classification filter */
	spinlock_t nfc_lock;
	bool etype_bitmap[MAX_ETYPE_FILTER];
	struct igc_mac_addr *mac_table;

	u8 rss_indir_tbl[IGC_RETA_SIZE];

	unsigned long link_check_timeout;
};
/* igc_desc_unused - calculate if we have unused descriptors */
static inline u16 igc_desc_unused(const struct igc_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
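/* Example: with count = 256, next_to_clean = 5 and next_to_use = 10 this
 * yields 256 + 5 - 10 - 1 = 250; one slot is always left unused so a full
 * ring can be distinguished from an empty one.
 */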
static inline s32 igc_get_phy_info(struct igc_hw *hw)
{
	if (hw->phy.ops.get_phy_info)
		return hw->phy.ops.get_phy_info(hw);

	return 0;
}
static inline s32 igc_reset_phy(struct igc_hw *hw)
{
	if (hw->phy.ops.reset)
		return hw->phy.ops.reset(hw);

	return 0;
}
static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring)
{
	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}
enum igc_ring_flags_t {
	IGC_RING_FLAG_RX_3K_BUFFER,
	IGC_RING_FLAG_RX_BUILD_SKB_ENABLED,
	IGC_RING_FLAG_RX_SCTP_CSUM,
	IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
	IGC_RING_FLAG_TX_CTX_IDX,
	IGC_RING_FLAG_TX_DETECT_HANG
};
#define ring_uses_large_buffer(ring) \
	test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)

#define ring_uses_build_skb(ring) \
	test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
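/* The underlying flags are set with the regular bitops; a sketch of ring
 * configuration honoring the legacy-rx knob:
 *
 *	if (adapter->flags & IGC_FLAG_RX_LEGACY)
 *		clear_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &ring->flags);
 *	else
 *		set_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &ring->flags);
 */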
static inline unsigned int igc_rx_bufsz(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return IGC_RXBUFFER_3072;

	if (ring_uses_build_skb(ring))
		return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN;
#endif
	return IGC_RXBUFFER_2048;
}
static inline unsigned int igc_rx_pg_order(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return 1;
#endif
	return 0;
}
static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
{
	if (hw->phy.ops.read_reg)
		return hw->phy.ops.read_reg(hw, offset, data);

	return 0;
}
/* forward declarations */
void igc_reinit_locked(struct igc_adapter *);
int igc_add_filter(struct igc_adapter *adapter,
		   struct igc_nfc_filter *input);
int igc_erase_filter(struct igc_adapter *adapter,
		     struct igc_nfc_filter *input);
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))

#define IGC_TXD_DCMD	(IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)
#define IGC_RX_DESC(R, i)	\
	(&(((union igc_adv_rx_desc *)((R)->desc))[i]))
#define IGC_TX_DESC(R, i)	\
	(&(((union igc_adv_tx_desc *)((R)->desc))[i]))
#define IGC_TX_CTXTDESC(R, i)	\
	(&(((struct igc_adv_tx_context_desc *)((R)->desc))[i]))
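/* Sketch of the accessor pattern in a transmit path: fetch slot i, then
 * advance the index with manual wraparound:
 *
 *	union igc_adv_tx_desc *tx_desc = IGC_TX_DESC(tx_ring, i);
 *
 *	i++;
 *	if (i == tx_ring->count)
 *		i = 0;
 */

#endif /* _IGC_H_ */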