1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
4 #include <linux/etherdevice.h>
5 #include <linux/of_net.h>
11 #include "i40e_diag.h"
13 #include <net/udp_tunnel.h>
14 #include <net/xdp_sock.h>
15 /* All i40e tracepoints are defined by the include below, which
16 * must be included exactly once across the whole kernel with
17 * CREATE_TRACE_POINTS defined
19 #define CREATE_TRACE_POINTS
20 #include "i40e_trace.h"
22 const char i40e_driver_name[] = "i40e";
23 static const char i40e_driver_string[] =
24 "Intel(R) Ethernet Connection XL710 Network Driver";
28 #define DRV_VERSION_MAJOR 2
29 #define DRV_VERSION_MINOR 8
30 #define DRV_VERSION_BUILD 20
31 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
32 __stringify(DRV_VERSION_MINOR) "." \
33 __stringify(DRV_VERSION_BUILD) DRV_KERN
34 const char i40e_driver_version_str[] = DRV_VERSION;
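/* With the values above, __stringify() (from <linux/stringify.h>) pastes the
 * numeric components into one string literal, so DRV_VERSION expands to
 * "2.8.20" followed by whatever suffix DRV_KERN carries (defined elsewhere in
 * this file, typically "-k" for in-kernel builds). A minimal standalone
 * sketch of the same two-level stringification idiom:
 *
 *   #define __stringify_1(x) #x
 *   #define __stringify(x)   __stringify_1(x)
 *   #define VER_MAJOR 2
 *   #define VER_MINOR 8
 *   #define VER __stringify(VER_MAJOR) "." __stringify(VER_MINOR)
 *   // VER is the string literal "2.8"; the extra expansion level is what
 *   // lets the macro values (rather than the macro names) be stringified.
 */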
35 static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
37 /* forward declarations */
38 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
39 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
40 static int i40e_add_vsi(struct i40e_vsi *vsi);
41 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
42 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
43 static int i40e_setup_misc_vector(struct i40e_pf *pf);
44 static void i40e_determine_queue_usage(struct i40e_pf *pf);
45 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
46 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
47 static int i40e_reset(struct i40e_pf *pf);
48 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
49 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
50 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
51 static bool i40e_check_recovery_mode(struct i40e_pf *pf);
52 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
53 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
54 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
55 static int i40e_get_capabilities(struct i40e_pf *pf,
56 enum i40e_admin_queue_opc list_type);
59 /* i40e_pci_tbl - PCI Device ID Table
61 * Last entry must be all 0s
63 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
64 * Class, Class Mask, private data (not used) }
66 static const struct pci_device_id i40e_pci_tbl[] = {
67 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
68 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
78 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
79 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
80 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
81 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
82 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
83 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
84 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
85 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
86 {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
87 {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
88 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
89 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
90 /* required last entry */
93 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
95 #define I40E_MAX_VF_COUNT 128
96 static int debug = -1;
97 module_param(debug, uint, 0);
98 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
100 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
101 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
102 MODULE_LICENSE("GPL v2");
103 MODULE_VERSION(DRV_VERSION);
105 static struct workqueue_struct *i40e_wq;
108 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
109 * @hw: pointer to the HW structure
110 * @mem: ptr to mem struct to fill out
111 * @size: size of memory requested
112 * @alignment: what to align the allocation to
114 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
115 u64 size, u32 alignment)
117 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
119 mem->size = ALIGN(size, alignment);
120 mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
129 * i40e_free_dma_mem_d - OS specific memory free for shared code
130 * @hw: pointer to the HW structure
131 * @mem: ptr to mem struct to free
133 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
135 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
137 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
146 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
147 * @hw: pointer to the HW structure
148 * @mem: ptr to mem struct to fill out
149 * @size: size of memory requested
151 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
155 mem->va = kzalloc(size, GFP_KERNEL);
164 * i40e_free_virt_mem_d - OS specific memory free for shared code
165 * @hw: pointer to the HW structure
166 * @mem: ptr to mem struct to free
168 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
170 /* it's ok to kfree a NULL pointer */
179 * i40e_get_lump - find a lump of free generic resource
180 * @pf: board private structure
181 * @pile: the pile of resource to search
182 * @needed: the number of items needed
183 * @id: an owner id to stick on the items assigned
185 * Returns the base item index of the lump, or negative for error
187 * The search_hint trick and lack of advanced fit-finding only work
188 * because we're highly likely to have all the same size lump requests.
189 * Linear search time and any fragmentation should be minimal.
191 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
197 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
198 dev_info(&pf->pdev->dev,
199 "param err: pile=%s needed=%d id=0x%04x\n",
200 pile ? "<valid>" : "<null>", needed, id);
204 /* start the linear search with an imperfect hint */
205 i = pile->search_hint;
206 while (i < pile->num_entries) {
207 /* skip already allocated entries */
208 if (pile->list[i] & I40E_PILE_VALID_BIT) {
213 /* do we have enough in this lump? */
214 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
215 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
220 /* there was enough, so assign it to the requestor */
221 for (j = 0; j < needed; j++)
222 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
224 pile->search_hint = i + j;
228 /* not enough, so skip over it and continue looking */
236 * i40e_put_lump - return a lump of generic resource
237 * @pile: the pile of resource to search
238 * @index: the base item index
239 * @id: the owner id of the items assigned
241 * Returns the count of items in the lump
243 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
245 int valid_id = (id | I40E_PILE_VALID_BIT);
249 if (!pile || index >= pile->num_entries)
253 i < pile->num_entries && pile->list[i] == valid_id;
259 if (count && index < pile->search_hint)
260 pile->search_hint = index;
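/* Illustrative use of the pile allocator above (a sketch; id_a and id_b are
 * arbitrary owner ids, and the pile is assumed freshly initialized with 16
 * free entries):
 *
 *   base_a = i40e_get_lump(pf, pile, 4, id_a);  // returns 0, marks 0..3 used
 *   base_b = i40e_get_lump(pf, pile, 4, id_b);  // returns 4, marks 4..7 used
 *   i40e_put_lump(pile, base_a, id_a);          // frees 0..3 and rewinds
 *                                               // search_hint back to 0
 *
 * While allocated, each list[] entry holds the owner id with
 * I40E_PILE_VALID_BIT set, which is how the linear scan tells used slots
 * from free ones.
 */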
266 * i40e_find_vsi_from_id - searches for the vsi with the given id
267 * @pf: the pf structure to search for the vsi
268 * @id: id of the vsi it is searching for
270 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
274 for (i = 0; i < pf->num_alloc_vsi; i++)
275 if (pf->vsi[i] && (pf->vsi[i]->id == id))
282 * i40e_service_event_schedule - Schedule the service task to wake up
283 * @pf: board private structure
285 * If not already scheduled, this puts the task into the work queue
287 void i40e_service_event_schedule(struct i40e_pf *pf)
289 if ((!test_bit(__I40E_DOWN, pf->state) &&
290 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
291 test_bit(__I40E_RECOVERY_MODE, pf->state))
292 queue_work(i40e_wq, &pf->service_task);
296 * i40e_tx_timeout - Respond to a Tx Hang
297 * @netdev: network interface device structure
299 * If any port has noticed a Tx timeout, it is likely that the whole
300 * device is munged, not just the one netdev port, so go for the full reset.
303 static void i40e_tx_timeout(struct net_device *netdev)
305 struct i40e_netdev_priv *np = netdev_priv(netdev);
306 struct i40e_vsi *vsi = np->vsi;
307 struct i40e_pf *pf = vsi->back;
308 struct i40e_ring *tx_ring = NULL;
309 unsigned int i, hung_queue = 0;
312 pf->tx_timeout_count++;
314 /* find the stopped queue the same way the stack does */
315 for (i = 0; i < netdev->num_tx_queues; i++) {
316 struct netdev_queue *q;
317 unsigned long trans_start;
319 q = netdev_get_tx_queue(netdev, i);
320 trans_start = q->trans_start;
321 if (netif_xmit_stopped(q) &&
323 (trans_start + netdev->watchdog_timeo))) {
329 if (i == netdev->num_tx_queues) {
330 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
332 /* now that we have an index, find the tx_ring struct */
333 for (i = 0; i < vsi->num_queue_pairs; i++) {
334 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
336 vsi->tx_rings[i]->queue_index) {
337 tx_ring = vsi->tx_rings[i];
344 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
345 pf->tx_timeout_recovery_level = 1; /* reset after some time */
346 else if (time_before(jiffies,
347 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
348 return; /* don't do any new action before the next timeout */
350 /* don't kick off another recovery if one is already pending */
351 if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
355 head = i40e_get_head(tx_ring);
356 /* Read interrupt register */
357 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
359 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
360 tx_ring->vsi->base_vector - 1));
362 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
364 netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
365 vsi->seid, hung_queue, tx_ring->next_to_clean,
366 head, tx_ring->next_to_use,
367 readl(tx_ring->tail), val);
370 pf->tx_timeout_last_recovery = jiffies;
371 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
372 pf->tx_timeout_recovery_level, hung_queue);
374 switch (pf->tx_timeout_recovery_level) {
376 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
379 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
382 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
385 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
389 i40e_service_event_schedule(pf);
390 pf->tx_timeout_recovery_level++;
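/* To summarize the escalation above: repeated timeouts walk the recovery
 * level from a PF reset (level 1) to a CORE reset (level 2) to a GLOBAL
 * reset (level 3); anything beyond that only logs that recovery failed.
 * The level is rewound to 1 once 20 seconds (HZ * 20) pass without another
 * timeout, and a new timeout arriving within one watchdog period of the
 * last recovery is ignored entirely.
 */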
394 * i40e_get_vsi_stats_struct - Get System Network Statistics
395 * @vsi: the VSI we care about
397 * Returns the address of the device statistics structure.
398 * The statistics are actually updated from the service task.
400 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
402 return &vsi->net_stats;
406 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
407 * @ring: Tx ring to get statistics from
408 * @stats: statistics entry to be updated
410 static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
411 struct rtnl_link_stats64 *stats)
417 start = u64_stats_fetch_begin_irq(&ring->syncp);
418 packets = ring->stats.packets;
419 bytes = ring->stats.bytes;
420 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
422 stats->tx_packets += packets;
423 stats->tx_bytes += bytes;
427 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
428 * @netdev: network interface device structure
429 * @stats: data structure to store statistics
431 * Returns the address of the device statistics structure.
432 * The statistics are actually updated from the service task.
434 static void i40e_get_netdev_stats_struct(struct net_device *netdev,
435 struct rtnl_link_stats64 *stats)
437 struct i40e_netdev_priv *np = netdev_priv(netdev);
438 struct i40e_vsi *vsi = np->vsi;
439 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
440 struct i40e_ring *ring;
443 if (test_bit(__I40E_VSI_DOWN, vsi->state))
450 for (i = 0; i < vsi->num_queue_pairs; i++) {
454 ring = READ_ONCE(vsi->tx_rings[i]);
457 i40e_get_netdev_stats_struct_tx(ring, stats);
459 if (i40e_enabled_xdp_vsi(vsi)) {
461 i40e_get_netdev_stats_struct_tx(ring, stats);
466 start = u64_stats_fetch_begin_irq(&ring->syncp);
467 packets = ring->stats.packets;
468 bytes = ring->stats.bytes;
469 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
471 stats->rx_packets += packets;
472 stats->rx_bytes += bytes;
477 /* following stats updated by i40e_watchdog_subtask() */
478 stats->multicast = vsi_stats->multicast;
479 stats->tx_errors = vsi_stats->tx_errors;
480 stats->tx_dropped = vsi_stats->tx_dropped;
481 stats->rx_errors = vsi_stats->rx_errors;
482 stats->rx_dropped = vsi_stats->rx_dropped;
483 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
484 stats->rx_length_errors = vsi_stats->rx_length_errors;
488 * i40e_vsi_reset_stats - Resets all stats of the given vsi
489 * @vsi: the VSI to have its stats reset
491 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
493 struct rtnl_link_stats64 *ns;
499 ns = i40e_get_vsi_stats_struct(vsi);
500 memset(ns, 0, sizeof(*ns));
501 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
502 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
503 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
504 if (vsi->rx_rings && vsi->rx_rings[0]) {
505 for (i = 0; i < vsi->num_queue_pairs; i++) {
506 memset(&vsi->rx_rings[i]->stats, 0,
507 sizeof(vsi->rx_rings[i]->stats));
508 memset(&vsi->rx_rings[i]->rx_stats, 0,
509 sizeof(vsi->rx_rings[i]->rx_stats));
510 memset(&vsi->tx_rings[i]->stats, 0,
511 sizeof(vsi->tx_rings[i]->stats));
512 memset(&vsi->tx_rings[i]->tx_stats, 0,
513 sizeof(vsi->tx_rings[i]->tx_stats));
516 vsi->stat_offsets_loaded = false;
520 * i40e_pf_reset_stats - Reset all of the stats for the given PF
521 * @pf: the PF to be reset
523 void i40e_pf_reset_stats(struct i40e_pf *pf)
527 memset(&pf->stats, 0, sizeof(pf->stats));
528 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
529 pf->stat_offsets_loaded = false;
531 for (i = 0; i < I40E_MAX_VEB; i++) {
533 memset(&pf->veb[i]->stats, 0,
534 sizeof(pf->veb[i]->stats));
535 memset(&pf->veb[i]->stats_offsets, 0,
536 sizeof(pf->veb[i]->stats_offsets));
537 pf->veb[i]->stat_offsets_loaded = false;
540 pf->hw_csum_rx_error = 0;
544 * i40e_stat_update48 - read and update a 48 bit stat from the chip
545 * @hw: ptr to the hardware info
546 * @hireg: the high 32 bit reg to read
547 * @loreg: the low 32 bit reg to read
548 * @offset_loaded: has the initial offset been loaded yet
549 * @offset: ptr to current offset value
550 * @stat: ptr to the stat
552 * Since the device stats are not reset at PFReset, they likely will not
553 * be zeroed when the driver starts. We'll save the first values read
554 * and use them as offsets to be subtracted from the raw values in order
555 * to report stats that count from zero. In the process, we also manage
556 * the potential roll-over.
558 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
559 bool offset_loaded, u64 *offset, u64 *stat)
563 if (hw->device_id == I40E_DEV_ID_QEMU) {
564 new_data = rd32(hw, loreg);
565 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
567 new_data = rd64(hw, loreg);
571 if (likely(new_data >= *offset))
572 *stat = new_data - *offset;
574 *stat = (new_data + BIT_ULL(48)) - *offset;
575 *stat &= 0xFFFFFFFFFFFFULL;
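/* Worked example of the roll-over handling above, assuming *offset was
 * loaded as 0xFFFFFFFFFFF0 and the 48-bit counter has since wrapped so that
 * new_data reads back as 0x10:
 *
 *   *stat = (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0;  // = 0x20
 *   *stat &= 0xFFFFFFFFFFFFULL;                      // still 0x20
 *
 * i.e. 0x20 units were counted across the wrap, exactly what a monotonically
 * increasing 48-bit counter would have reported.
 */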
579 * i40e_stat_update32 - read and update a 32 bit stat from the chip
580 * @hw: ptr to the hardware info
581 * @reg: the hw reg to read
582 * @offset_loaded: has the initial offset been loaded yet
583 * @offset: ptr to current offset value
584 * @stat: ptr to the stat
586 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
587 bool offset_loaded, u64 *offset, u64 *stat)
591 new_data = rd32(hw, reg);
594 if (likely(new_data >= *offset))
595 *stat = (u32)(new_data - *offset);
597 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
601 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
602 * @hw: ptr to the hardware info
603 * @reg: the hw reg to read and clear
604 * @stat: ptr to the stat
606 static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
608 u32 new_data = rd32(hw, reg);
610 wr32(hw, reg, 1); /* must write a nonzero value to clear register */
615 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
616 * @vsi: the VSI to be updated
618 void i40e_update_eth_stats(struct i40e_vsi *vsi)
620 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
621 struct i40e_pf *pf = vsi->back;
622 struct i40e_hw *hw = &pf->hw;
623 struct i40e_eth_stats *oes;
624 struct i40e_eth_stats *es; /* device's eth stats */
626 es = &vsi->eth_stats;
627 oes = &vsi->eth_stats_offsets;
629 /* Gather up the stats that the hw collects */
630 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
631 vsi->stat_offsets_loaded,
632 &oes->tx_errors, &es->tx_errors);
633 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
634 vsi->stat_offsets_loaded,
635 &oes->rx_discards, &es->rx_discards);
636 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
637 vsi->stat_offsets_loaded,
638 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
640 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
641 I40E_GLV_GORCL(stat_idx),
642 vsi->stat_offsets_loaded,
643 &oes->rx_bytes, &es->rx_bytes);
644 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
645 I40E_GLV_UPRCL(stat_idx),
646 vsi->stat_offsets_loaded,
647 &oes->rx_unicast, &es->rx_unicast);
648 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
649 I40E_GLV_MPRCL(stat_idx),
650 vsi->stat_offsets_loaded,
651 &oes->rx_multicast, &es->rx_multicast);
652 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
653 I40E_GLV_BPRCL(stat_idx),
654 vsi->stat_offsets_loaded,
655 &oes->rx_broadcast, &es->rx_broadcast);
657 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
658 I40E_GLV_GOTCL(stat_idx),
659 vsi->stat_offsets_loaded,
660 &oes->tx_bytes, &es->tx_bytes);
661 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
662 I40E_GLV_UPTCL(stat_idx),
663 vsi->stat_offsets_loaded,
664 &oes->tx_unicast, &es->tx_unicast);
665 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
666 I40E_GLV_MPTCL(stat_idx),
667 vsi->stat_offsets_loaded,
668 &oes->tx_multicast, &es->tx_multicast);
669 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
670 I40E_GLV_BPTCL(stat_idx),
671 vsi->stat_offsets_loaded,
672 &oes->tx_broadcast, &es->tx_broadcast);
673 vsi->stat_offsets_loaded = true;
677 * i40e_update_veb_stats - Update Switch component statistics
678 * @veb: the VEB being updated
680 static void i40e_update_veb_stats(struct i40e_veb *veb)
682 struct i40e_pf *pf = veb->pf;
683 struct i40e_hw *hw = &pf->hw;
684 struct i40e_eth_stats *oes;
685 struct i40e_eth_stats *es; /* device's eth stats */
686 struct i40e_veb_tc_stats *veb_oes;
687 struct i40e_veb_tc_stats *veb_es;
690 idx = veb->stats_idx;
692 oes = &veb->stats_offsets;
693 veb_es = &veb->tc_stats;
694 veb_oes = &veb->tc_stats_offsets;
696 /* Gather up the stats that the hw collects */
697 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
698 veb->stat_offsets_loaded,
699 &oes->tx_discards, &es->tx_discards);
700 if (hw->revision_id > 0)
701 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
702 veb->stat_offsets_loaded,
703 &oes->rx_unknown_protocol,
704 &es->rx_unknown_protocol);
705 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
706 veb->stat_offsets_loaded,
707 &oes->rx_bytes, &es->rx_bytes);
708 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
709 veb->stat_offsets_loaded,
710 &oes->rx_unicast, &es->rx_unicast);
711 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
712 veb->stat_offsets_loaded,
713 &oes->rx_multicast, &es->rx_multicast);
714 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
715 veb->stat_offsets_loaded,
716 &oes->rx_broadcast, &es->rx_broadcast);
718 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
719 veb->stat_offsets_loaded,
720 &oes->tx_bytes, &es->tx_bytes);
721 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
722 veb->stat_offsets_loaded,
723 &oes->tx_unicast, &es->tx_unicast);
724 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
725 veb->stat_offsets_loaded,
726 &oes->tx_multicast, &es->tx_multicast);
727 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
728 veb->stat_offsets_loaded,
729 &oes->tx_broadcast, &es->tx_broadcast);
730 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
731 i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
732 I40E_GLVEBTC_RPCL(i, idx),
733 veb->stat_offsets_loaded,
734 &veb_oes->tc_rx_packets[i],
735 &veb_es->tc_rx_packets[i]);
736 i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
737 I40E_GLVEBTC_RBCL(i, idx),
738 veb->stat_offsets_loaded,
739 &veb_oes->tc_rx_bytes[i],
740 &veb_es->tc_rx_bytes[i]);
741 i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
742 I40E_GLVEBTC_TPCL(i, idx),
743 veb->stat_offsets_loaded,
744 &veb_oes->tc_tx_packets[i],
745 &veb_es->tc_tx_packets[i]);
746 i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
747 I40E_GLVEBTC_TBCL(i, idx),
748 veb->stat_offsets_loaded,
749 &veb_oes->tc_tx_bytes[i],
750 &veb_es->tc_tx_bytes[i]);
752 veb->stat_offsets_loaded = true;
756 * i40e_update_vsi_stats - Update the vsi statistics counters.
757 * @vsi: the VSI to be updated
759 * There are a few instances where we store the same stat in a
760 * couple of different structs. This is partly because we have
761 * the netdev stats that need to be filled out, which is slightly
762 * different from the "eth_stats" defined by the chip and used in
763 * VF communications. We sort it out here.
765 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
767 struct i40e_pf *pf = vsi->back;
768 struct rtnl_link_stats64 *ons;
769 struct rtnl_link_stats64 *ns; /* netdev stats */
770 struct i40e_eth_stats *oes;
771 struct i40e_eth_stats *es; /* device's eth stats */
772 u32 tx_restart, tx_busy;
783 if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
784 test_bit(__I40E_CONFIG_BUSY, pf->state))
787 ns = i40e_get_vsi_stats_struct(vsi);
788 ons = &vsi->net_stats_offsets;
789 es = &vsi->eth_stats;
790 oes = &vsi->eth_stats_offsets;
792 /* Gather up the netdev and vsi stats that the driver collects
793 * on the fly during packet processing
797 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
801 for (q = 0; q < vsi->num_queue_pairs; q++) {
803 p = READ_ONCE(vsi->tx_rings[q]);
806 start = u64_stats_fetch_begin_irq(&p->syncp);
807 packets = p->stats.packets;
808 bytes = p->stats.bytes;
809 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
812 tx_restart += p->tx_stats.restart_queue;
813 tx_busy += p->tx_stats.tx_busy;
814 tx_linearize += p->tx_stats.tx_linearize;
815 tx_force_wb += p->tx_stats.tx_force_wb;
817 /* Rx queue is part of the same block as Tx queue */
820 start = u64_stats_fetch_begin_irq(&p->syncp);
821 packets = p->stats.packets;
822 bytes = p->stats.bytes;
823 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
826 rx_buf += p->rx_stats.alloc_buff_failed;
827 rx_page += p->rx_stats.alloc_page_failed;
830 vsi->tx_restart = tx_restart;
831 vsi->tx_busy = tx_busy;
832 vsi->tx_linearize = tx_linearize;
833 vsi->tx_force_wb = tx_force_wb;
834 vsi->rx_page_failed = rx_page;
835 vsi->rx_buf_failed = rx_buf;
837 ns->rx_packets = rx_p;
839 ns->tx_packets = tx_p;
842 /* update netdev stats from eth stats */
843 i40e_update_eth_stats(vsi);
844 ons->tx_errors = oes->tx_errors;
845 ns->tx_errors = es->tx_errors;
846 ons->multicast = oes->rx_multicast;
847 ns->multicast = es->rx_multicast;
848 ons->rx_dropped = oes->rx_discards;
849 ns->rx_dropped = es->rx_discards;
850 ons->tx_dropped = oes->tx_discards;
851 ns->tx_dropped = es->tx_discards;
853 /* pull in a couple of PF stats if this is the main VSI */
854 if (vsi == pf->vsi[pf->lan_vsi]) {
855 ns->rx_crc_errors = pf->stats.crc_errors;
856 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
857 ns->rx_length_errors = pf->stats.rx_length_errors;
862 * i40e_update_pf_stats - Update the PF statistics counters.
863 * @pf: the PF to be updated
865 static void i40e_update_pf_stats(struct i40e_pf *pf)
867 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
868 struct i40e_hw_port_stats *nsd = &pf->stats;
869 struct i40e_hw *hw = &pf->hw;
873 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
874 I40E_GLPRT_GORCL(hw->port),
875 pf->stat_offsets_loaded,
876 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
877 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
878 I40E_GLPRT_GOTCL(hw->port),
879 pf->stat_offsets_loaded,
880 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
881 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
882 pf->stat_offsets_loaded,
883 &osd->eth.rx_discards,
884 &nsd->eth.rx_discards);
885 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
886 I40E_GLPRT_UPRCL(hw->port),
887 pf->stat_offsets_loaded,
888 &osd->eth.rx_unicast,
889 &nsd->eth.rx_unicast);
890 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
891 I40E_GLPRT_MPRCL(hw->port),
892 pf->stat_offsets_loaded,
893 &osd->eth.rx_multicast,
894 &nsd->eth.rx_multicast);
895 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
896 I40E_GLPRT_BPRCL(hw->port),
897 pf->stat_offsets_loaded,
898 &osd->eth.rx_broadcast,
899 &nsd->eth.rx_broadcast);
900 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
901 I40E_GLPRT_UPTCL(hw->port),
902 pf->stat_offsets_loaded,
903 &osd->eth.tx_unicast,
904 &nsd->eth.tx_unicast);
905 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
906 I40E_GLPRT_MPTCL(hw->port),
907 pf->stat_offsets_loaded,
908 &osd->eth.tx_multicast,
909 &nsd->eth.tx_multicast);
910 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
911 I40E_GLPRT_BPTCL(hw->port),
912 pf->stat_offsets_loaded,
913 &osd->eth.tx_broadcast,
914 &nsd->eth.tx_broadcast);
916 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
917 pf->stat_offsets_loaded,
918 &osd->tx_dropped_link_down,
919 &nsd->tx_dropped_link_down);
921 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
922 pf->stat_offsets_loaded,
923 &osd->crc_errors, &nsd->crc_errors);
925 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
926 pf->stat_offsets_loaded,
927 &osd->illegal_bytes, &nsd->illegal_bytes);
929 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
930 pf->stat_offsets_loaded,
931 &osd->mac_local_faults,
932 &nsd->mac_local_faults);
933 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
934 pf->stat_offsets_loaded,
935 &osd->mac_remote_faults,
936 &nsd->mac_remote_faults);
938 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
939 pf->stat_offsets_loaded,
940 &osd->rx_length_errors,
941 &nsd->rx_length_errors);
943 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
944 pf->stat_offsets_loaded,
945 &osd->link_xon_rx, &nsd->link_xon_rx);
946 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
947 pf->stat_offsets_loaded,
948 &osd->link_xon_tx, &nsd->link_xon_tx);
949 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
950 pf->stat_offsets_loaded,
951 &osd->link_xoff_rx, &nsd->link_xoff_rx);
952 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
953 pf->stat_offsets_loaded,
954 &osd->link_xoff_tx, &nsd->link_xoff_tx);
956 for (i = 0; i < 8; i++) {
957 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
958 pf->stat_offsets_loaded,
959 &osd->priority_xoff_rx[i],
960 &nsd->priority_xoff_rx[i]);
961 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
962 pf->stat_offsets_loaded,
963 &osd->priority_xon_rx[i],
964 &nsd->priority_xon_rx[i]);
965 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
966 pf->stat_offsets_loaded,
967 &osd->priority_xon_tx[i],
968 &nsd->priority_xon_tx[i]);
969 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
970 pf->stat_offsets_loaded,
971 &osd->priority_xoff_tx[i],
972 &nsd->priority_xoff_tx[i]);
973 i40e_stat_update32(hw,
974 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
975 pf->stat_offsets_loaded,
976 &osd->priority_xon_2_xoff[i],
977 &nsd->priority_xon_2_xoff[i]);
980 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
981 I40E_GLPRT_PRC64L(hw->port),
982 pf->stat_offsets_loaded,
983 &osd->rx_size_64, &nsd->rx_size_64);
984 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
985 I40E_GLPRT_PRC127L(hw->port),
986 pf->stat_offsets_loaded,
987 &osd->rx_size_127, &nsd->rx_size_127);
988 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
989 I40E_GLPRT_PRC255L(hw->port),
990 pf->stat_offsets_loaded,
991 &osd->rx_size_255, &nsd->rx_size_255);
992 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
993 I40E_GLPRT_PRC511L(hw->port),
994 pf->stat_offsets_loaded,
995 &osd->rx_size_511, &nsd->rx_size_511);
996 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
997 I40E_GLPRT_PRC1023L(hw->port),
998 pf->stat_offsets_loaded,
999 &osd->rx_size_1023, &nsd->rx_size_1023);
1000 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1001 I40E_GLPRT_PRC1522L(hw->port),
1002 pf->stat_offsets_loaded,
1003 &osd->rx_size_1522, &nsd->rx_size_1522);
1004 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1005 I40E_GLPRT_PRC9522L(hw->port),
1006 pf->stat_offsets_loaded,
1007 &osd->rx_size_big, &nsd->rx_size_big);
1009 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1010 I40E_GLPRT_PTC64L(hw->port),
1011 pf->stat_offsets_loaded,
1012 &osd->tx_size_64, &nsd->tx_size_64);
1013 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1014 I40E_GLPRT_PTC127L(hw->port),
1015 pf->stat_offsets_loaded,
1016 &osd->tx_size_127, &nsd->tx_size_127);
1017 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1018 I40E_GLPRT_PTC255L(hw->port),
1019 pf->stat_offsets_loaded,
1020 &osd->tx_size_255, &nsd->tx_size_255);
1021 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1022 I40E_GLPRT_PTC511L(hw->port),
1023 pf->stat_offsets_loaded,
1024 &osd->tx_size_511, &nsd->tx_size_511);
1025 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1026 I40E_GLPRT_PTC1023L(hw->port),
1027 pf->stat_offsets_loaded,
1028 &osd->tx_size_1023, &nsd->tx_size_1023);
1029 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1030 I40E_GLPRT_PTC1522L(hw->port),
1031 pf->stat_offsets_loaded,
1032 &osd->tx_size_1522, &nsd->tx_size_1522);
1033 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1034 I40E_GLPRT_PTC9522L(hw->port),
1035 pf->stat_offsets_loaded,
1036 &osd->tx_size_big, &nsd->tx_size_big);
1038 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1039 pf->stat_offsets_loaded,
1040 &osd->rx_undersize, &nsd->rx_undersize);
1041 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1042 pf->stat_offsets_loaded,
1043 &osd->rx_fragments, &nsd->rx_fragments);
1044 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1045 pf->stat_offsets_loaded,
1046 &osd->rx_oversize, &nsd->rx_oversize);
1047 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1048 pf->stat_offsets_loaded,
1049 &osd->rx_jabber, &nsd->rx_jabber);
1052 i40e_stat_update_and_clear32(hw,
1053 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
1054 &nsd->fd_atr_match);
1055 i40e_stat_update_and_clear32(hw,
1056 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
1058 i40e_stat_update_and_clear32(hw,
1059 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
1060 &nsd->fd_atr_tunnel_match);
1062 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1063 nsd->tx_lpi_status =
1064 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1065 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1066 nsd->rx_lpi_status =
1067 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1068 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1069 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1070 pf->stat_offsets_loaded,
1071 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1072 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1073 pf->stat_offsets_loaded,
1074 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1076 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1077 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
1078 nsd->fd_sb_status = true;
1080 nsd->fd_sb_status = false;
1082 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1083 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
1084 nsd->fd_atr_status = true;
1086 nsd->fd_atr_status = false;
1088 pf->stat_offsets_loaded = true;
1092 * i40e_update_stats - Update the various statistics counters.
1093 * @vsi: the VSI to be updated
1095 * Update the various stats for this VSI and its related entities.
1097 void i40e_update_stats(struct i40e_vsi *vsi)
1099 struct i40e_pf *pf = vsi->back;
1101 if (vsi == pf->vsi[pf->lan_vsi])
1102 i40e_update_pf_stats(pf);
1104 i40e_update_vsi_stats(vsi);
1108 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1109 * @vsi: the VSI to be searched
1110 * @macaddr: the MAC address
1113 * Returns ptr to the filter object or NULL
1115 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1116 const u8 *macaddr, s16 vlan)
1118 struct i40e_mac_filter *f;
1121 if (!vsi || !macaddr)
1124 key = i40e_addr_to_hkey(macaddr);
1125 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1126 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1134 * i40e_find_mac - Find a mac addr in the macvlan filters list
1135 * @vsi: the VSI to be searched
1136 * @macaddr: the MAC address we are searching for
1138 * Returns the first filter with the provided MAC address or NULL if
1139 * MAC address was not found
1141 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1143 struct i40e_mac_filter *f;
1146 if (!vsi || !macaddr)
1149 key = i40e_addr_to_hkey(macaddr);
1150 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1151 if ((ether_addr_equal(macaddr, f->macaddr)))
1158 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1159 * @vsi: the VSI to be searched
1161 * Returns true if VSI is in vlan mode or false otherwise
1163 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1165 /* If we have a PVID, always operate in VLAN mode */
1169 /* We need to operate in VLAN mode whenever we have any filters with
1170 * a VLAN other than I40E_VLAN_ANY. We could check the table each
1171 * time, incurring search cost repeatedly. However, we can notice two things:
1174 * 1) the only place where we can gain a VLAN filter is in
1177 * 2) the only place where filters are actually removed is in
1178 * i40e_sync_filters_subtask.
1180 * Thus, we can simply use a boolean value, has_vlan_filters which we
1181 * will set to true when we add a VLAN filter in i40e_add_filter. Then
1182 * we have to perform the full search after deleting filters in
1183 * i40e_sync_filters_subtask, but we already have to search
1184 * filters here and can perform the check at the same time. This
1185 * results in avoiding embedding a loop for VLAN mode inside another
1186 * loop over all the filters, and should maintain correctness as noted above.
1189 return vsi->has_vlan_filter;
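/* A sketch of how the flag tracks the table in practice (assuming the add
 * and sync paths described above):
 *
 *   i40e_add_filter(vsi, mac, 100);   // VLAN filter added, has_vlan_filter
 *                                     // becomes true
 *   i40e_is_vsi_in_vlan(vsi);         // true, with no table walk
 *
 * When filters are later deleted, the sync path re-derives has_vlan_filter
 * while it is already walking the table (see i40e_correct_mac_vlan_filters
 * below), so this fast path never goes stale.
 */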
1193 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
1194 * @vsi: the VSI to configure
1195 * @tmp_add_list: list of filters ready to be added
1196 * @tmp_del_list: list of filters ready to be deleted
1197 * @vlan_filters: the number of active VLAN filters
1199 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
1200 * behave as expected. If we have any active VLAN filters remaining or about
1201 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
1202 * so that they only match against untagged traffic. If we no longer have any
1203 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
1204 * so that they match against both tagged and untagged traffic. In this way,
1205 * we ensure that we correctly receive the desired traffic. This ensures that
1206 * when we have an active VLAN we will receive only untagged traffic and
1207 * traffic matching active VLANs. If we have no active VLANs then we will
1208 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
1210 * Finally, in a similar fashion, this function also corrects filters when
1211 * there is an active PVID assigned to this VSI.
1213 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1215 * This function is only expected to be called from within
1216 * i40e_sync_vsi_filters.
1218 * NOTE: This function expects to be called while under the
1219 * mac_filter_hash_lock
1221 static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1222 struct hlist_head *tmp_add_list,
1223 struct hlist_head *tmp_del_list,
1226 s16 pvid = le16_to_cpu(vsi->info.pvid);
1227 struct i40e_mac_filter *f, *add_head;
1228 struct i40e_new_mac_filter *new;
1229 struct hlist_node *h;
1232 /* To determine if a particular filter needs to be replaced we
1233 * have the three following conditions:
1235 * a) if we have a PVID assigned, then all filters which are
1236 * not marked as VLAN=PVID must be replaced with filters that
1237 * are marked as VLAN=PVID
1238 * b) otherwise, if we have any active VLANs, all filters
1239 * which are marked as VLAN=-1 must be replaced with
1240 * filters marked as VLAN=0
1241 * c) finally, if we do not have any active VLANs, all filters
1242 * which are marked as VLAN=0 must be replaced with filters
1243 * marked as VLAN=-1
1246 /* Update the filters about to be added in place */
1247 hlist_for_each_entry(new, tmp_add_list, hlist) {
1248 if (pvid && new->f->vlan != pvid)
1249 new->f->vlan = pvid;
1250 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1251 new->f->vlan = 0;
1252 else if (!vlan_filters && new->f->vlan == 0)
1253 new->f->vlan = I40E_VLAN_ANY;
1256 /* Update the remaining active filters */
1257 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1258 /* Combine the checks for whether a filter needs to be changed
1259 * and then determine the new VLAN inside the if block, in
1260 * order to avoid duplicating code for adding the new filter
1261 * then deleting the old filter.
1263 if ((pvid && f->vlan != pvid) ||
1264 (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1265 (!vlan_filters && f->vlan == 0)) {
1266 /* Determine the new vlan we will be adding */
1267 if (pvid)
1268 new_vlan = pvid;
1269 else if (vlan_filters)
1270 new_vlan = 0;
1271 else
1272 new_vlan = I40E_VLAN_ANY;
1274 /* Create the new filter */
1275 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1279 /* Create a temporary i40e_new_mac_filter */
1280 new = kzalloc(sizeof(*new), GFP_ATOMIC);
1285 new->state = add_head->state;
1287 /* Add the new filter to the tmp list */
1288 hlist_add_head(&new->hlist, tmp_add_list);
1290 /* Put the original filter into the delete list */
1291 f->state = I40E_FILTER_REMOVE;
1292 hash_del(&f->hlist);
1293 hlist_add_head(&f->hlist, tmp_del_list);
1297 vsi->has_vlan_filter = !!vlan_filters;
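/* Summary of the VLAN rewrites performed above (a sketch of the a/b/c rules
 * from the function comment):
 *
 *   existing filter    PVID assigned   VLANs active   no VLANs, no PVID
 *   ----------------   -------------   ------------   -----------------
 *   VLAN=-1 (ANY)      -> VLAN=PVID    -> VLAN=0      unchanged
 *   VLAN=0             -> VLAN=PVID    unchanged      -> VLAN=-1
 *
 * so untagged-only (VLAN=0) filters exist while VLAN filters are active, and
 * wildcard (VLAN=-1) filters exist only when the VSI has neither active
 * VLANs nor a PVID.
 */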
1303 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1304 * @vsi: the PF Main VSI - inappropriate for any other VSI
1305 * @macaddr: the MAC address
1307 * Remove whatever filter the firmware set up so the driver can manage
1308 * its own filtering intelligently.
1310 static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1312 struct i40e_aqc_remove_macvlan_element_data element;
1313 struct i40e_pf *pf = vsi->back;
1315 /* Only appropriate for the PF main VSI */
1316 if (vsi->type != I40E_VSI_MAIN)
1319 memset(&element, 0, sizeof(element));
1320 ether_addr_copy(element.mac_addr, macaddr);
1321 element.vlan_tag = 0;
1322 /* Ignore error returns, some firmware does it this way... */
1323 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1324 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1326 memset(&element, 0, sizeof(element));
1327 ether_addr_copy(element.mac_addr, macaddr);
1328 element.vlan_tag = 0;
1329 /* ...and some firmware does it this way. */
1330 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1331 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1332 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1336 * i40e_add_filter - Add a mac/vlan filter to the VSI
1337 * @vsi: the VSI to be searched
1338 * @macaddr: the MAC address
1341 * Returns ptr to the filter object or NULL when no memory available.
1343 * NOTE: This function is expected to be called with mac_filter_hash_lock held.
1346 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1347 const u8 *macaddr, s16 vlan)
1349 struct i40e_mac_filter *f;
1352 if (!vsi || !macaddr)
1355 f = i40e_find_filter(vsi, macaddr, vlan);
1357 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1361 /* Update the boolean indicating if we need to function in
1365 vsi->has_vlan_filter = true;
1367 ether_addr_copy(f->macaddr, macaddr);
1369 f->state = I40E_FILTER_NEW;
1370 INIT_HLIST_NODE(&f->hlist);
1372 key = i40e_addr_to_hkey(macaddr);
1373 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1375 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1376 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1379 /* If we're asked to add a filter that has been marked for removal, it
1380 * is safe to simply restore it to active state. __i40e_del_filter
1381 * will have simply deleted any filters which were previously marked
1382 * NEW or FAILED, so if it is currently marked REMOVE it must have
1383 * previously been ACTIVE. Since we haven't yet run the sync filters
1384 * task, just restore this filter to the ACTIVE state so that the
1385 * sync task leaves it in place
1387 if (f->state == I40E_FILTER_REMOVE)
1388 f->state = I40E_FILTER_ACTIVE;
1394 * __i40e_del_filter - Remove a specific filter from the VSI
1395 * @vsi: VSI to remove from
1396 * @f: the filter to remove from the list
1398 * This function should be called instead of i40e_del_filter only if you know
1399 * the exact filter you will remove already, such as via i40e_find_filter or i40e_find_mac.
1402 * NOTE: This function is expected to be called with mac_filter_hash_lock held.
1404 * ANOTHER NOTE: This function MUST be called from within the context of
1405 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1406 * instead of list_for_each_entry().
1408 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1413 /* If the filter was never added to firmware then we can just delete it
1414 * directly and we don't want to set the status to remove or else an
1415 * admin queue command will unnecessarily fire.
1417 if ((f->state == I40E_FILTER_FAILED) ||
1418 (f->state == I40E_FILTER_NEW)) {
1419 hash_del(&f->hlist);
1422 f->state = I40E_FILTER_REMOVE;
1425 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1426 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
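/* Filter state transitions used by the add/del helpers above (sketch):
 *
 *   i40e_add_filter()                   -> I40E_FILTER_NEW
 *   firmware sync succeeds              -> I40E_FILTER_ACTIVE
 *   firmware sync fails                 -> I40E_FILTER_FAILED
 *   __i40e_del_filter() on NEW/FAILED   -> freed immediately (never reached
 *                                          the hardware)
 *   __i40e_del_filter() on ACTIVE       -> I40E_FILTER_REMOVE, deleted from
 *                                          hardware by the next filter sync
 *   i40e_add_filter() on REMOVE         -> restored straight to ACTIVE
 */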
1430 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
1431 * @vsi: the VSI to be searched
1432 * @macaddr: the MAC address
1435 * NOTE: This function is expected to be called with mac_filter_hash_lock held.
1437 * ANOTHER NOTE: This function MUST be called from within the context of
1438 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1439 * instead of list_for_each_entry().
1441 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1443 struct i40e_mac_filter *f;
1445 if (!vsi || !macaddr)
1448 f = i40e_find_filter(vsi, macaddr, vlan);
1449 __i40e_del_filter(vsi, f);
1453 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
1454 * @vsi: the VSI to be searched
1455 * @macaddr: the mac address to be filtered
1457 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
1458 * go through all the macvlan filters and add a macvlan filter for each
1459 * unique vlan that already exists. If a PVID has been assigned, instead only
1460 * add the macaddr to that VLAN.
1462 * Returns last filter added on success, else NULL
1464 struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1467 struct i40e_mac_filter *f, *add = NULL;
1468 struct hlist_node *h;
1472 return i40e_add_filter(vsi, macaddr,
1473 le16_to_cpu(vsi->info.pvid));
1475 if (!i40e_is_vsi_in_vlan(vsi))
1476 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1478 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1479 if (f->state == I40E_FILTER_REMOVE)
1481 add = i40e_add_filter(vsi, macaddr, f->vlan);
1490 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
1491 * @vsi: the VSI to be searched
1492 * @macaddr: the mac address to be removed
1494 * Removes a given MAC address from a VSI regardless of what VLAN it has been associated with.
1497 * Returns 0 for success, or error
1499 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1501 struct i40e_mac_filter *f;
1502 struct hlist_node *h;
1506 lockdep_assert_held(&vsi->mac_filter_hash_lock);
1507 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1508 if (ether_addr_equal(macaddr, f->macaddr)) {
1509 __i40e_del_filter(vsi, f);
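/* Typical caller pattern for the two helpers above: hold the hash lock
 * around the table manipulation and leave the hardware update to the
 * service task. This is a sketch of what i40e_set_mac() below does when the
 * netdev address changes:
 *
 *   spin_lock_bh(&vsi->mac_filter_hash_lock);
 *   i40e_del_mac_filter(vsi, old_addr);
 *   i40e_add_mac_filter(vsi, new_addr);
 *   spin_unlock_bh(&vsi->mac_filter_hash_lock);
 *   i40e_service_event_schedule(pf);
 */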
1521 * i40e_set_mac - NDO callback to set mac address
1522 * @netdev: network interface device structure
1523 * @p: pointer to an address structure
1525 * Returns 0 on success, negative on failure
1527 static int i40e_set_mac(struct net_device *netdev, void *p)
1529 struct i40e_netdev_priv *np = netdev_priv(netdev);
1530 struct i40e_vsi *vsi = np->vsi;
1531 struct i40e_pf *pf = vsi->back;
1532 struct i40e_hw *hw = &pf->hw;
1533 struct sockaddr *addr = p;
1535 if (!is_valid_ether_addr(addr->sa_data))
1536 return -EADDRNOTAVAIL;
1538 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1539 netdev_info(netdev, "already using mac address %pM\n",
1544 if (test_bit(__I40E_DOWN, pf->state) ||
1545 test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
1546 return -EADDRNOTAVAIL;
1548 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1549 netdev_info(netdev, "returning to hw mac address %pM\n",
1552 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1554 /* Copy the address first, so that we avoid a possible race with
1556 * - Remove old address from MAC filter
1557 * - Copy new address
1558 * - Add new address to MAC filter
1560 spin_lock_bh(&vsi->mac_filter_hash_lock);
1561 i40e_del_mac_filter(vsi, netdev->dev_addr);
1562 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1563 i40e_add_mac_filter(vsi, netdev->dev_addr);
1564 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1566 if (vsi->type == I40E_VSI_MAIN) {
1569 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
1570 addr->sa_data, NULL);
1572 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1573 i40e_stat_str(hw, ret),
1574 i40e_aq_str(hw, hw->aq.asq_last_status));
1577 /* schedule our worker thread which will take care of
1578 * applying the new filter changes
1580 i40e_service_event_schedule(pf);
1585 * i40e_config_rss_aq - Prepare for RSS using AQ commands
1586 * @vsi: vsi structure
1587 * @seed: RSS hash seed
1589 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1590 u8 *lut, u16 lut_size)
1592 struct i40e_pf *pf = vsi->back;
1593 struct i40e_hw *hw = &pf->hw;
1597 struct i40e_aqc_get_set_rss_key_data *seed_dw =
1598 (struct i40e_aqc_get_set_rss_key_data *)seed;
1599 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1601 dev_info(&pf->pdev->dev,
1602 "Cannot set RSS key, err %s aq_err %s\n",
1603 i40e_stat_str(hw, ret),
1604 i40e_aq_str(hw, hw->aq.asq_last_status));
1609 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
1611 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1613 dev_info(&pf->pdev->dev,
1614 "Cannot set RSS lut, err %s aq_err %s\n",
1615 i40e_stat_str(hw, ret),
1616 i40e_aq_str(hw, hw->aq.asq_last_status));
1624 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
1625 * @vsi: VSI structure
1627 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
1629 struct i40e_pf *pf = vsi->back;
1630 u8 seed[I40E_HKEY_ARRAY_SIZE];
1634 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
1637 vsi->rss_size = min_t(int, pf->alloc_rss_size,
1638 vsi->num_queue_pairs);
1641 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1645 /* Use the user configured hash keys and lookup table if there is one,
1646 * otherwise use default
1648 if (vsi->rss_lut_user)
1649 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1651 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
1652 if (vsi->rss_hkey_user)
1653 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
1655 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
1656 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
1662 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
1663 * @vsi: the VSI being configured
1664 * @ctxt: VSI context structure
1665 * @enabled_tc: bitmap of enabled traffic classes
1667 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
1669 static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1670 struct i40e_vsi_context *ctxt,
1673 u16 qcount = 0, max_qcount, qmap, sections = 0;
1674 int i, override_q, pow, num_qps, ret;
1675 u8 netdev_tc = 0, offset = 0;
1677 if (vsi->type != I40E_VSI_MAIN)
1679 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1680 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1681 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1682 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1683 num_qps = vsi->mqprio_qopt.qopt.count[0];
1685 /* find the next higher power-of-2 of num queue pairs */
1686 pow = ilog2(num_qps);
1687 if (!is_power_of_2(num_qps))
1688 pow++;
1689 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1690 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1692 /* Setup queue offset/count for all TCs for given VSI */
1693 max_qcount = vsi->mqprio_qopt.qopt.count[0];
1694 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1695 /* See if the given TC is enabled for the given VSI */
1696 if (vsi->tc_config.enabled_tc & BIT(i)) {
1697 offset = vsi->mqprio_qopt.qopt.offset[i];
1698 qcount = vsi->mqprio_qopt.qopt.count[i];
1699 if (qcount > max_qcount)
1700 max_qcount = qcount;
1701 vsi->tc_config.tc_info[i].qoffset = offset;
1702 vsi->tc_config.tc_info[i].qcount = qcount;
1703 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1705 /* TC is not enabled so set the offset to
1706 * default queue and allocate one queue
1709 vsi->tc_config.tc_info[i].qoffset = 0;
1710 vsi->tc_config.tc_info[i].qcount = 1;
1711 vsi->tc_config.tc_info[i].netdev_tc = 0;
1715 /* Set actual Tx/Rx queue pairs */
1716 vsi->num_queue_pairs = offset + qcount;
1718 /* Setup queue TC[0].qmap for given VSI context */
1719 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1720 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1721 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1722 ctxt->info.valid_sections |= cpu_to_le16(sections);
1724 /* Reconfigure RSS for main VSI with max queue count */
1725 vsi->rss_size = max_qcount;
1726 ret = i40e_vsi_config_rss(vsi);
1728 dev_info(&vsi->back->pdev->dev,
1729 "Failed to reconfig rss for num_queues (%u)\n",
1733 vsi->reconfig_rss = true;
1734 dev_dbg(&vsi->back->pdev->dev,
1735 "Reconfigured rss with num_queues (%u)\n", max_qcount);
1737 /* Find queue count available for channel VSIs and starting offset for them */
1740 override_q = vsi->mqprio_qopt.qopt.count[0];
1741 if (override_q && override_q < vsi->num_queue_pairs) {
1742 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1743 vsi->next_base_queue = override_q;
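/* Worked example of the TC0 qmap encoding above, assuming the mqprio
 * configuration asks for 6 queue pairs on TC0:
 *
 *   num_qps = 6;
 *   pow = ilog2(6);   // 2, but 6 is not a power of two...
 *   pow++;            // ...so round up to 3 (2^3 = 8 hardware queues)
 *   qmap = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *          (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * The hardware is told TC0 starts at queue offset 0 and spans a power-of-two
 * range, while vsi->num_queue_pairs keeps the exact count that was
 * requested.
 */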
1749 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1750 * @vsi: the VSI being setup
1751 * @ctxt: VSI context structure
1752 * @enabled_tc: Enabled TCs bitmap
1753 * @is_add: True if called before Add VSI
1755 * Setup VSI queue mapping for enabled traffic classes.
1757 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1758 struct i40e_vsi_context *ctxt,
1762 struct i40e_pf *pf = vsi->back;
1772 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1775 /* Number of queues per enabled TC */
1776 num_tc_qps = vsi->alloc_queue_pairs;
1777 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1778 /* Find numtc from enabled TC bitmap */
1779 for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1780 if (enabled_tc & BIT(i)) /* TC is enabled */
1784 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1787 num_tc_qps = num_tc_qps / numtc;
1788 num_tc_qps = min_t(int, num_tc_qps,
1789 i40e_pf_get_max_q_per_tc(pf));
1792 vsi->tc_config.numtc = numtc;
1793 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1795 /* Do not allow more TC queue pairs than there are MSI-X vectors */
1796 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1797 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
1799 /* Setup queue offset/count for all TCs for given VSI */
1800 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1801 /* See if the given TC is enabled for the given VSI */
1802 if (vsi->tc_config.enabled_tc & BIT(i)) {
1806 switch (vsi->type) {
1808 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
1809 I40E_FLAG_FD_ATR_ENABLED)) ||
1810 vsi->tc_config.enabled_tc != 1) {
1811 qcount = min_t(int, pf->alloc_rss_size,
1817 case I40E_VSI_SRIOV:
1818 case I40E_VSI_VMDQ2:
1820 qcount = num_tc_qps;
1824 vsi->tc_config.tc_info[i].qoffset = offset;
1825 vsi->tc_config.tc_info[i].qcount = qcount;
1827 /* find the next higher power-of-2 of num queue pairs */
1830 while (num_qps && (BIT_ULL(pow) < qcount)) {
1835 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1837 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1838 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1842 /* TC is not enabled so set the offset to
1843 * default queue and allocate one queue
1846 vsi->tc_config.tc_info[i].qoffset = 0;
1847 vsi->tc_config.tc_info[i].qcount = 1;
1848 vsi->tc_config.tc_info[i].netdev_tc = 0;
1852 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1855 /* Set actual Tx/Rx queue pairs */
1856 vsi->num_queue_pairs = offset;
1857 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1858 if (vsi->req_queue_pairs > 0)
1859 vsi->num_queue_pairs = vsi->req_queue_pairs;
1860 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1861 vsi->num_queue_pairs = pf->num_lan_msix;
1864 /* Scheduler section valid can only be set for ADD VSI */
1866 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1868 ctxt->info.up_enable_bits = enabled_tc;
1870 if (vsi->type == I40E_VSI_SRIOV) {
1871 ctxt->info.mapping_flags |=
1872 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1873 for (i = 0; i < vsi->num_queue_pairs; i++)
1874 ctxt->info.queue_mapping[i] =
1875 cpu_to_le16(vsi->base_queue + i);
1877 ctxt->info.mapping_flags |=
1878 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1879 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1881 ctxt->info.valid_sections |= cpu_to_le16(sections);
1885 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1886 * @netdev: the netdevice
1887 * @addr: address to add
1889 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1890 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1892 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1894 struct i40e_netdev_priv *np = netdev_priv(netdev);
1895 struct i40e_vsi *vsi = np->vsi;
1897 if (i40e_add_mac_filter(vsi, addr))
1904 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1905 * @netdev: the netdevice
1906 * @addr: address to remove
1908 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1909 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1911 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1913 struct i40e_netdev_priv *np = netdev_priv(netdev);
1914 struct i40e_vsi *vsi = np->vsi;
1916 /* Under some circumstances, we might receive a request to delete
1917 * our own device address from our uc list. Because we store the
1918 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1919 * such requests and not delete our device address from this list.
1921 if (ether_addr_equal(addr, netdev->dev_addr))
1924 i40e_del_mac_filter(vsi, addr);
1930 * i40e_set_rx_mode - NDO callback to set the netdev filters
1931 * @netdev: network interface device structure
1933 static void i40e_set_rx_mode(struct net_device *netdev)
1935 struct i40e_netdev_priv *np = netdev_priv(netdev);
1936 struct i40e_vsi *vsi = np->vsi;
1938 spin_lock_bh(&vsi->mac_filter_hash_lock);
1940 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1941 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1943 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1945 /* check for other flag changes */
1946 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1947 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1948 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1953 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1954 * @vsi: Pointer to VSI struct
1955 * @from: Pointer to list which contains MAC filter entries - changes to
1956 * those entries needs to be undone.
1958 * MAC filter entries from this list were slated for deletion.
1960 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1961 struct hlist_head *from)
1963 struct i40e_mac_filter *f;
1964 struct hlist_node *h;
1966 hlist_for_each_entry_safe(f, h, from, hlist) {
1967 u64 key = i40e_addr_to_hkey(f->macaddr);
1969 /* Move the element back into MAC filter list */
1970 hlist_del(&f->hlist);
1971 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1976 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
1977 * @vsi: Pointer to vsi struct
1978 * @from: Pointer to list which contains MAC filter entries - changes to
1979 * those entries need to be undone.
1981 * MAC filter entries from this list were slated for addition.
1983 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
1984 struct hlist_head *from)
1986 struct i40e_new_mac_filter *new;
1987 struct hlist_node *h;
1989 hlist_for_each_entry_safe(new, h, from, hlist) {
1990 /* We can simply free the wrapper structure */
1991 hlist_del(&new->hlist);
1997 * i40e_next_filter - Get the next non-broadcast filter from a list
1998 * @next: pointer to filter in list
2000 * Returns the next non-broadcast filter in the list. Required so that we
2001 * ignore broadcast filters within the list, since these are not handled via
2002 * the normal firmware update path.
2005 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2007 hlist_for_each_entry_continue(next, hlist) {
2008 if (!is_broadcast_ether_addr(next->f->macaddr))
2016 * i40e_update_filter_state - Update filter state based on return data
2018 * @count: Number of filters added
2019 * @add_list: return data from fw
2020 * @add_head: pointer to first filter in current batch
2022 * MAC filter entries from list were slated to be added to device. Returns
2023 * number of successful filters. Note that 0 does NOT mean success!
2026 i40e_update_filter_state(int count,
2027 struct i40e_aqc_add_macvlan_element_data *add_list,
2028 struct i40e_new_mac_filter *add_head)
2033 for (i = 0; i < count; i++) {
2034 /* Always check status of each filter. We don't need to check
2035 * the firmware return status because we pre-set the filter
2036 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2037 * request to the adminq. Thus, if it no longer matches then
2038 * we know the filter is active.
2040 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2041 add_head->state = I40E_FILTER_FAILED;
2043 add_head->state = I40E_FILTER_ACTIVE;
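/* Step to the next non-broadcast filter; broadcast entries are handled
 * separately through the broadcast promiscuous flag.
 */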
2047 add_head = i40e_next_filter(add_head);
2056 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2057 * @vsi: ptr to the VSI
2058 * @vsi_name: name to display in messages
2059 * @list: the list of filters to send to firmware
2060 * @num_del: the number of filters to delete
2061 * @retval: Set to -EIO on failure to delete
2063 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2064 * *retval instead of a return value so that success does not force *retval to
2065 * be set to 0. This ensures that a sequence of calls to this function
2066 * preserves the previous value of *retval on successful delete.
2069 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2070 struct i40e_aqc_remove_macvlan_element_data *list,
2071 int num_del, int *retval)
2073 struct i40e_hw *hw = &vsi->back->hw;
2077 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2078 aq_err = hw->aq.asq_last_status;
2080 /* Explicitly ignore and do not report when firmware returns ENOENT */
2081 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2083 dev_info(&vsi->back->pdev->dev,
2084 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2085 vsi_name, i40e_stat_str(hw, aq_ret),
2086 i40e_aq_str(hw, aq_err));
2091 * i40e_aqc_add_filters - Request firmware to add a set of filters
2092 * @vsi: ptr to the VSI
2093 * @vsi_name: name to display in messages
2094 * @list: the list of filters to send to firmware
2095 * @add_head: Position in the add hlist
2096 * @num_add: the number of filters to add
2098 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2099 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2100 * space for more filters.
2103 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2104 struct i40e_aqc_add_macvlan_element_data *list,
2105 struct i40e_new_mac_filter *add_head,
2108 struct i40e_hw *hw = &vsi->back->hw;
2111 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2112 aq_err = hw->aq.asq_last_status;
2113 fcnt = i40e_update_filter_state(num_add, list, add_head);
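/* If firmware could not program every filter, the main VSI falls back to
 * overflow promiscuous mode; other VSI types can only warn and ask the
 * operator to enable promiscuous mode manually.
 */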
2115 if (fcnt != num_add) {
2116 if (vsi->type == I40E_VSI_MAIN) {
2117 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2118 dev_warn(&vsi->back->pdev->dev,
2119 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2120 i40e_aq_str(hw, aq_err), vsi_name);
2121 } else if (vsi->type == I40E_VSI_SRIOV ||
2122 vsi->type == I40E_VSI_VMDQ1 ||
2123 vsi->type == I40E_VSI_VMDQ2) {
2124 dev_warn(&vsi->back->pdev->dev,
2125 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2126 i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
2128 dev_warn(&vsi->back->pdev->dev,
2129 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2130 i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
2136 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2137 * @vsi: pointer to the VSI
2138 * @vsi_name: the VSI name
2141 * This function sets or clears the promiscuous broadcast flags for VLAN
2142 * filters in order to properly receive broadcast frames. Assumes that only
2143 * broadcast filters are passed.
2145 * Returns status indicating success or failure.
2148 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2149 struct i40e_mac_filter *f)
2151 bool enable = f->state == I40E_FILTER_NEW;
2152 struct i40e_hw *hw = &vsi->back->hw;
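/* I40E_VLAN_ANY means the broadcast flag applies to the whole VSI;
 * otherwise enable broadcast promiscuous only for the given VLAN.
 */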
2155 if (f->vlan == I40E_VLAN_ANY) {
2156 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2161 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2169 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2170 dev_warn(&vsi->back->pdev->dev,
2171 "Error %s, forcing overflow promiscuous on %s\n",
2172 i40e_aq_str(hw, hw->aq.asq_last_status),
2180 * i40e_set_promiscuous - set promiscuous mode
2181 * @pf: board private structure
2182 * @promisc: promisc on or off
2184 * There are different ways of setting promiscuous mode on a PF depending on
2185 * what state/environment we're in. This identifies and sets it appropriately.
2186 * Returns 0 on success.
2188 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2190 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2191 struct i40e_hw *hw = &pf->hw;
2194 if (vsi->type == I40E_VSI_MAIN &&
2195 pf->lan_veb != I40E_NO_VEB &&
2196 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2197 /* set defport ON for Main VSI instead of true promisc
2198 * this way we will get all unicast/multicast and VLAN
2199 * promisc behavior but will not get VF or VMDq traffic
2200 * replicated on the Main VSI.
2203 aq_ret = i40e_aq_set_default_vsi(hw,
2207 aq_ret = i40e_aq_clear_default_vsi(hw,
2211 dev_info(&pf->pdev->dev,
2212 "Set default VSI failed, err %s, aq_err %s\n",
2213 i40e_stat_str(hw, aq_ret),
2214 i40e_aq_str(hw, hw->aq.asq_last_status));
2217 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2223 dev_info(&pf->pdev->dev,
2224 "set unicast promisc failed, err %s, aq_err %s\n",
2225 i40e_stat_str(hw, aq_ret),
2226 i40e_aq_str(hw, hw->aq.asq_last_status));
2228 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2233 dev_info(&pf->pdev->dev,
2234 "set multicast promisc failed, err %s, aq_err %s\n",
2235 i40e_stat_str(hw, aq_ret),
2236 i40e_aq_str(hw, hw->aq.asq_last_status));
2241 pf->cur_promisc = promisc;
2247 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2248 * @vsi: ptr to the VSI
2250 * Push any outstanding VSI filter changes through the AdminQ.
2252 * Returns 0 or error value
2254 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2256 struct hlist_head tmp_add_list, tmp_del_list;
2257 struct i40e_mac_filter *f;
2258 struct i40e_new_mac_filter *new, *add_head = NULL;
2259 struct i40e_hw *hw = &vsi->back->hw;
2260 bool old_overflow, new_overflow;
2261 unsigned int failed_filters = 0;
2262 unsigned int vlan_filters = 0;
2263 char vsi_name[16] = "PF";
2264 int filter_list_len = 0;
2265 i40e_status aq_ret = 0;
2266 u32 changed_flags = 0;
2267 struct hlist_node *h;
2276 /* empty array typed pointers, kcalloc later */
2277 struct i40e_aqc_add_macvlan_element_data *add_list;
2278 struct i40e_aqc_remove_macvlan_element_data *del_list;
2280 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2281 usleep_range(1000, 2000);
2284 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2287 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2288 vsi->current_netdev_flags = vsi->netdev->flags;
2291 INIT_HLIST_HEAD(&tmp_add_list);
2292 INIT_HLIST_HEAD(&tmp_del_list);
2294 if (vsi->type == I40E_VSI_SRIOV)
2295 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2296 else if (vsi->type != I40E_VSI_MAIN)
2297 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2299 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2300 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2302 spin_lock_bh(&vsi->mac_filter_hash_lock);
2303 /* Create a list of filters to delete. */
2304 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2305 if (f->state == I40E_FILTER_REMOVE) {
2306 /* Move the element into temporary del_list */
2307 hash_del(&f->hlist);
2308 hlist_add_head(&f->hlist, &tmp_del_list);
2310 /* Avoid counting removed filters */
2313 if (f->state == I40E_FILTER_NEW) {
2314 /* Create a temporary i40e_new_mac_filter */
2315 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2317 goto err_no_memory_locked;
2319 /* Store pointer to the real filter */
2321 new->state = f->state;
2323 /* Add it to the hash list */
2324 hlist_add_head(&new->hlist, &tmp_add_list);
2327 /* Count the number of active (current and new) VLAN
2328 * filters we have now. Does not count filters which
2329 * are marked for deletion.
2335 retval = i40e_correct_mac_vlan_filters(vsi,
2340 goto err_no_memory_locked;
2342 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2345 /* Now process 'del_list' outside the lock */
2346 if (!hlist_empty(&tmp_del_list)) {
2347 filter_list_len = hw->aq.asq_buf_size /
2348 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2349 list_size = filter_list_len *
2350 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2351 del_list = kzalloc(list_size, GFP_ATOMIC);
2355 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2358 /* handle broadcast filters by updating the broadcast
2359 * promiscuous flag and release filter list.
2361 if (is_broadcast_ether_addr(f->macaddr)) {
2362 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2364 hlist_del(&f->hlist);
2369 /* add to delete list */
2370 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2371 if (f->vlan == I40E_VLAN_ANY) {
2372 del_list[num_del].vlan_tag = 0;
2373 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2375 del_list[num_del].vlan_tag =
2376 cpu_to_le16((u16)(f->vlan));
2379 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2380 del_list[num_del].flags = cmd_flags;
2383 /* flush a full buffer */
2384 if (num_del == filter_list_len) {
2385 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2387 memset(del_list, 0, list_size);
2390 /* Release memory for MAC filter entries which were
2391 * synced up with HW.
2393 hlist_del(&f->hlist);
2398 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2406 if (!hlist_empty(&tmp_add_list)) {
2407 /* Do all the adds now. */
2408 filter_list_len = hw->aq.asq_buf_size /
2409 sizeof(struct i40e_aqc_add_macvlan_element_data);
2410 list_size = filter_list_len *
2411 sizeof(struct i40e_aqc_add_macvlan_element_data);
2412 add_list = kzalloc(list_size, GFP_ATOMIC);
2417 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2418 /* handle broadcast filters by updating the broadcast
2419 * promiscuous flag instead of adding a MAC filter.
2421 if (is_broadcast_ether_addr(new->f->macaddr)) {
2422 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2424 new->state = I40E_FILTER_FAILED;
2426 new->state = I40E_FILTER_ACTIVE;
2430 /* add to add array */
2434 ether_addr_copy(add_list[num_add].mac_addr,
2436 if (new->f->vlan == I40E_VLAN_ANY) {
2437 add_list[num_add].vlan_tag = 0;
2438 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2440 add_list[num_add].vlan_tag =
2441 cpu_to_le16((u16)(new->f->vlan));
2443 add_list[num_add].queue_number = 0;
2444 /* set invalid match method for later detection */
2445 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2446 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2447 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2450 /* flush a full buffer */
2451 if (num_add == filter_list_len) {
2452 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2454 memset(add_list, 0, list_size);
2459 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2462 /* Now move all of the filters from the temp add list back to
2465 spin_lock_bh(&vsi->mac_filter_hash_lock);
2466 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2467 /* Only update the state if we're still NEW */
2468 if (new->f->state == I40E_FILTER_NEW)
2469 new->f->state = new->state;
2470 hlist_del(&new->hlist);
2473 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2478 /* Determine the number of active and failed filters. */
2479 spin_lock_bh(&vsi->mac_filter_hash_lock);
2480 vsi->active_filters = 0;
2481 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2482 if (f->state == I40E_FILTER_ACTIVE)
2483 vsi->active_filters++;
2484 else if (f->state == I40E_FILTER_FAILED)
2487 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2489 /* Check if we are able to exit overflow promiscuous mode. We can
2490 * safely exit if we didn't just enter, we no longer have any failed
2491 * filters, and we have reduced filters below the threshold value.
2493 if (old_overflow && !failed_filters &&
2494 vsi->active_filters < vsi->promisc_threshold) {
2495 dev_info(&pf->pdev->dev,
2496 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2498 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2499 vsi->promisc_threshold = 0;
2502 /* if the VF is not trusted do not do promisc */
2503 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2504 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2508 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2510 /* If we are entering overflow promiscuous, we need to calculate a new
2511 * threshold for when we are safe to exit
2513 if (!old_overflow && new_overflow)
2514 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
2516 /* check for changes in promiscuous modes */
2517 if (changed_flags & IFF_ALLMULTI) {
2518 bool cur_multipromisc;
2520 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2521 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2526 retval = i40e_aq_rc_to_posix(aq_ret,
2527 hw->aq.asq_last_status);
2528 dev_info(&pf->pdev->dev,
2529 "set multi promisc failed on %s, err %s aq_err %s\n",
2531 i40e_stat_str(hw, aq_ret),
2532 i40e_aq_str(hw, hw->aq.asq_last_status));
2536 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2539 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2541 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2543 retval = i40e_aq_rc_to_posix(aq_ret,
2544 hw->aq.asq_last_status);
2545 dev_info(&pf->pdev->dev,
2546 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2547 cur_promisc ? "on" : "off",
2549 i40e_stat_str(hw, aq_ret),
2550 i40e_aq_str(hw, hw->aq.asq_last_status));
2554 /* if something went wrong then set the changed flag so we try again */
2556 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2558 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2562 /* Restore elements on the temporary add and delete lists */
2563 spin_lock_bh(&vsi->mac_filter_hash_lock);
2564 err_no_memory_locked:
2565 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2566 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2567 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2569 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2570 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2575 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2576 * @pf: board private structure
2578 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2584 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2586 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
2587 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2591 for (v = 0; v < pf->num_alloc_vsi; v++) {
2593 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2594 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2597 /* come back and try again later */
2598 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2604 clear_bit(__I40E_VF_DISABLE, pf->state);
2608 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2611 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
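/* With 4K pages and the default Rx path, each frame is backed by a 3K
 * buffer, so XDP can accept a larger MTU; legacy-rx and large-page
 * systems are limited to 2K buffers.
 */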
2613 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2614 return I40E_RXBUFFER_2048;
2616 return I40E_RXBUFFER_3072;
2620 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2621 * @netdev: network interface device structure
2622 * @new_mtu: new value for maximum frame size
2624 * Returns 0 on success, negative on failure
2626 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2628 struct i40e_netdev_priv *np = netdev_priv(netdev);
2629 struct i40e_vsi *vsi = np->vsi;
2630 struct i40e_pf *pf = vsi->back;
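/* With XDP enabled, the full Ethernet frame for the new MTU must still
 * fit in a single Rx buffer, so reject anything beyond that limit.
 */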
2632 if (i40e_enabled_xdp_vsi(vsi)) {
2633 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2635 if (frame_size > i40e_max_xdp_frame_size(vsi))
2639 netdev_info(netdev, "changing MTU from %d to %d\n",
2640 netdev->mtu, new_mtu);
2641 netdev->mtu = new_mtu;
2642 if (netif_running(netdev))
2643 i40e_vsi_reinit_locked(vsi);
2644 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2645 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2650 * i40e_ioctl - Access the hwtstamp interface
2651 * @netdev: network interface device structure
2652 * @ifr: interface request data
2653 * @cmd: ioctl command
2655 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2657 struct i40e_netdev_priv *np = netdev_priv(netdev);
2658 struct i40e_pf *pf = np->vsi->back;
2662 return i40e_ptp_get_ts_config(pf, ifr);
2664 return i40e_ptp_set_ts_config(pf, ifr);
2671 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2672 * @vsi: the vsi being adjusted
2674 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2676 struct i40e_vsi_context ctxt;
2679 /* Don't modify stripping options if a port VLAN is active */
2683 if ((vsi->info.valid_sections &
2684 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2685 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2686 return; /* already enabled */
2688 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2689 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2690 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2692 ctxt.seid = vsi->seid;
2693 ctxt.info = vsi->info;
2694 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2696 dev_info(&vsi->back->pdev->dev,
2697 "update vlan stripping failed, err %s aq_err %s\n",
2698 i40e_stat_str(&vsi->back->hw, ret),
2699 i40e_aq_str(&vsi->back->hw,
2700 vsi->back->hw.aq.asq_last_status));
2705 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2706 * @vsi: the vsi being adjusted
2708 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2710 struct i40e_vsi_context ctxt;
2713 /* Don't modify stripping options if a port VLAN is active */
2717 if ((vsi->info.valid_sections &
2718 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2719 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2720 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2721 return; /* already disabled */
2723 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2724 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2725 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2727 ctxt.seid = vsi->seid;
2728 ctxt.info = vsi->info;
2729 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2731 dev_info(&vsi->back->pdev->dev,
2732 "update vlan stripping failed, err %s aq_err %s\n",
2733 i40e_stat_str(&vsi->back->hw, ret),
2734 i40e_aq_str(&vsi->back->hw,
2735 vsi->back->hw.aq.asq_last_status));
2740 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2741 * @vsi: the vsi being configured
2742 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2744 * This is a helper function for adding a new MAC/VLAN filter with the
2745 * specified VLAN for each existing MAC address already in the hash table.
2746 * This function does *not* perform any accounting to update filters based on
2749 * NOTE: this function expects to be called while under the
2750 * mac_filter_hash_lock
2752 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2754 struct i40e_mac_filter *f, *add_f;
2755 struct hlist_node *h;
2758 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2759 if (f->state == I40E_FILTER_REMOVE)
2761 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2763 dev_info(&vsi->back->pdev->dev,
2764 "Could not add vlan filter %d for %pM\n",
2774 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2775 * @vsi: the VSI being configured
2776 * @vid: VLAN id to be added
2778 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2785 /* The network stack will attempt to add VID=0, with the intention to
2786 * receive priority tagged packets with a VLAN of 0. Our HW receives
2787 * these packets by default when configured to receive untagged
2788 * packets, so we don't need to add a filter for this case.
2789 * Additionally, HW interprets adding a VID=0 filter as meaning to
2790 * receive *only* tagged traffic and stops receiving untagged traffic.
2791 * Thus, we do not want to actually add a filter for VID=0
2796 /* Lock once because all functions invoked below iterate the list */
2797 spin_lock_bh(&vsi->mac_filter_hash_lock);
2798 err = i40e_add_vlan_all_mac(vsi, vid);
2799 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2803 /* schedule our worker thread which will take care of
2804 * applying the new filter changes
2806 i40e_service_event_schedule(vsi->back);
2811 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2812 * @vsi: the vsi being configured
2813 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2815 * This function should be used to remove all VLAN filters which match the
2816 * given VID. It does not schedule the service event and does not take the
2817 * mac_filter_hash_lock so it may be combined with other operations under
2818 * a single invocation of the mac_filter_hash_lock.
2820 * NOTE: this function expects to be called while under the
2821 * mac_filter_hash_lock
2823 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2825 struct i40e_mac_filter *f;
2826 struct hlist_node *h;
2829 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2831 __i40e_del_filter(vsi, f);
2836 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2837 * @vsi: the VSI being configured
2838 * @vid: VLAN id to be removed
2840 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2842 if (!vid || vsi->info.pvid)
2845 spin_lock_bh(&vsi->mac_filter_hash_lock);
2846 i40e_rm_vlan_all_mac(vsi, vid);
2847 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2849 /* schedule our worker thread which will take care of
2850 * applying the new filter changes
2852 i40e_service_event_schedule(vsi->back);
2856 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2857 * @netdev: network interface to be adjusted
2858 * @proto: unused protocol value
2859 * @vid: vlan id to be added
2861 * net_device_ops implementation for adding vlan ids
2863 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2864 __always_unused __be16 proto, u16 vid)
2866 struct i40e_netdev_priv *np = netdev_priv(netdev);
2867 struct i40e_vsi *vsi = np->vsi;
2870 if (vid >= VLAN_N_VID)
2873 ret = i40e_vsi_add_vlan(vsi, vid);
2875 set_bit(vid, vsi->active_vlans);
2881 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
2882 * @netdev: network interface to be adjusted
2883 * @proto: unused protocol value
2884 * @vid: vlan id to be added
2886 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2887 __always_unused __be16 proto, u16 vid)
2889 struct i40e_netdev_priv *np = netdev_priv(netdev);
2890 struct i40e_vsi *vsi = np->vsi;
2892 if (vid >= VLAN_N_VID)
2894 set_bit(vid, vsi->active_vlans);
2898 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2899 * @netdev: network interface to be adjusted
2900 * @proto: unused protocol value
2901 * @vid: vlan id to be removed
2903 * net_device_ops implementation for removing vlan ids
2905 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2906 __always_unused __be16 proto, u16 vid)
2908 struct i40e_netdev_priv *np = netdev_priv(netdev);
2909 struct i40e_vsi *vsi = np->vsi;
2911 /* return code is ignored as there is nothing a user
2912 * can do about failure to remove and a log message was
2913 * already printed from the other function
2915 i40e_vsi_kill_vlan(vsi, vid);
2917 clear_bit(vid, vsi->active_vlans);
2923 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2924 * @vsi: the vsi being brought back up
2926 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2933 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2934 i40e_vlan_stripping_enable(vsi);
2936 i40e_vlan_stripping_disable(vsi);
2938 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2939 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
2944 * i40e_vsi_add_pvid - Add pvid for the VSI
2945 * @vsi: the vsi being adjusted
2946 * @vid: the vlan id to set as a PVID
2948 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2950 struct i40e_vsi_context ctxt;
2953 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2954 vsi->info.pvid = cpu_to_le16(vid);
2955 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2956 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2957 I40E_AQ_VSI_PVLAN_EMOD_STR;
2959 ctxt.seid = vsi->seid;
2960 ctxt.info = vsi->info;
2961 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2963 dev_info(&vsi->back->pdev->dev,
2964 "add pvid failed, err %s aq_err %s\n",
2965 i40e_stat_str(&vsi->back->hw, ret),
2966 i40e_aq_str(&vsi->back->hw,
2967 vsi->back->hw.aq.asq_last_status));
2975 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2976 * @vsi: the vsi being adjusted
2978 * Just use the vlan_rx_register() service to put it back to normal
2980 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2984 i40e_vlan_stripping_disable(vsi);
2988 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2989 * @vsi: ptr to the VSI
2991 * If this function returns with an error, then it's possible one or
2992 * more of the rings is populated (while the rest are not). It is the
2993 * caller's duty to clean those orphaned rings.
2995 * Return 0 on success, negative on failure
2997 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3001 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3002 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3004 if (!i40e_enabled_xdp_vsi(vsi))
3007 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3008 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3014 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3015 * @vsi: ptr to the VSI
3017 * Free VSI's transmit software resources
3019 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3023 if (vsi->tx_rings) {
3024 for (i = 0; i < vsi->num_queue_pairs; i++)
3025 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3026 i40e_free_tx_resources(vsi->tx_rings[i]);
3029 if (vsi->xdp_rings) {
3030 for (i = 0; i < vsi->num_queue_pairs; i++)
3031 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3032 i40e_free_tx_resources(vsi->xdp_rings[i]);
3037 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3038 * @vsi: ptr to the VSI
3040 * If this function returns with an error, then it's possible one or
3041 * more of the rings is populated (while the rest are not). It is the
3042 * caller's duty to clean those orphaned rings.
3044 * Return 0 on success, negative on failure
3046 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3050 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3051 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3056 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3057 * @vsi: ptr to the VSI
3059 * Free all receive software resources
3061 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3068 for (i = 0; i < vsi->num_queue_pairs; i++)
3069 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3070 i40e_free_rx_resources(vsi->rx_rings[i]);
3074 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3075 * @ring: The Tx ring to configure
3077 * This enables/disables XPS for a given Tx descriptor ring
3078 * based on the TCs enabled for the VSI that ring belongs to.
3080 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3084 if (!ring->q_vector || !ring->netdev || ring->ch)
3087 /* We only initialize XPS once, so as not to overwrite user settings */
3088 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3091 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3092 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3097 * i40e_xsk_umem - Retrieve the AF_XDP UMEM if XDP zero-copy is enabled
3098 * @ring: The Tx or Rx ring
3100 * Returns the UMEM or NULL.
3102 static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
3104 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3105 int qid = ring->queue_index;
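/* XDP Tx rings are numbered after the regular queue pairs, so map an XDP
 * ring back to its queue pair index before looking up the UMEM.
 */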
3107 if (ring_is_xdp(ring))
3108 qid -= ring->vsi->alloc_queue_pairs;
3110 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3113 return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
3117 * i40e_configure_tx_ring - Configure a transmit ring context
3118 * @ring: The Tx ring to configure
3120 * Configure the Tx descriptor ring in the HMC context.
3122 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3124 struct i40e_vsi *vsi = ring->vsi;
3125 u16 pf_q = vsi->base_queue + ring->queue_index;
3126 struct i40e_hw *hw = &vsi->back->hw;
3127 struct i40e_hmc_obj_txq tx_ctx;
3128 i40e_status err = 0;
3131 if (ring_is_xdp(ring))
3132 ring->xsk_umem = i40e_xsk_umem(ring);
3134 /* some ATR related tx ring init */
3135 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3136 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3137 ring->atr_count = 0;
3139 ring->atr_sample_rate = 0;
3143 i40e_config_xps_tx_ring(ring);
3145 /* clear the context structure first */
3146 memset(&tx_ctx, 0, sizeof(tx_ctx));
3148 tx_ctx.new_context = 1;
3149 tx_ctx.base = (ring->dma / 128);
3150 tx_ctx.qlen = ring->count;
3151 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3152 I40E_FLAG_FD_ATR_ENABLED));
3153 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3154 /* FDIR VSI tx ring can still use RS bit and writebacks */
3155 if (vsi->type != I40E_VSI_FDIR)
3156 tx_ctx.head_wb_ena = 1;
3157 tx_ctx.head_wb_addr = ring->dma +
3158 (ring->count * sizeof(struct i40e_tx_desc));
3160 /* As part of VSI creation/update, FW allocates certain
3161 * Tx arbitration queue sets for each TC enabled for
3162 * the VSI. The FW returns the handles to these queue
3163 * sets as part of the response buffer to Add VSI,
3164 * Update VSI, etc. AQ commands. It is expected that
3165 * these queue set handles be associated with the Tx
3166 * queues by the driver as part of the TX queue context
3167 * initialization. This has to be done regardless of
3168 * DCB as by default everything is mapped to TC0.
3173 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3176 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3178 tx_ctx.rdylist_act = 0;
3180 /* clear the context in the HMC */
3181 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3183 dev_info(&vsi->back->pdev->dev,
3184 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3185 ring->queue_index, pf_q, err);
3189 /* set the context in the HMC */
3190 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3192 dev_info(&vsi->back->pdev->dev,
3193 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3194 ring->queue_index, pf_q, err);
3198 /* Now associate this queue with this PCI function */
3200 if (ring->ch->type == I40E_VSI_VMDQ2)
3201 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3205 qtx_ctl |= (ring->ch->vsi_number <<
3206 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3207 I40E_QTX_CTL_VFVM_INDX_MASK;
3209 if (vsi->type == I40E_VSI_VMDQ2) {
3210 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3211 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3212 I40E_QTX_CTL_VFVM_INDX_MASK;
3214 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3218 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3219 I40E_QTX_CTL_PF_INDX_MASK);
3220 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3223 /* cache tail offset for easier writes later */
3224 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3230 * i40e_configure_rx_ring - Configure a receive ring context
3231 * @ring: The Rx ring to configure
3233 * Configure the Rx descriptor ring in the HMC context.
3235 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3237 struct i40e_vsi *vsi = ring->vsi;
3238 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3239 u16 pf_q = vsi->base_queue + ring->queue_index;
3240 struct i40e_hw *hw = &vsi->back->hw;
3241 struct i40e_hmc_obj_rxq rx_ctx;
3242 i40e_status err = 0;
3246 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3248 /* clear the context structure first */
3249 memset(&rx_ctx, 0, sizeof(rx_ctx));
3251 if (ring->vsi->type == I40E_VSI_MAIN)
3252 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3254 ring->xsk_umem = i40e_xsk_umem(ring);
3255 if (ring->xsk_umem) {
3256 ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
3257 XDP_PACKET_HEADROOM;
3258 /* For AF_XDP ZC, we disallow packets to span
3259 * multiple buffers, thus letting us skip that
3260 * handling in the fast-path.
3263 ring->zca.free = i40e_zca_free;
3264 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3269 dev_info(&vsi->back->pdev->dev,
3270 "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
3274 ring->rx_buf_len = vsi->rx_buf_len;
3275 if (ring->vsi->type == I40E_VSI_MAIN) {
3276 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3277 MEM_TYPE_PAGE_SHARED,
3284 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3285 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3287 rx_ctx.base = (ring->dma / 128);
3288 rx_ctx.qlen = ring->count;
3290 /* use 32 byte descriptors */
3293 /* descriptor type is always zero
3296 rx_ctx.hsplit_0 = 0;
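/* Cap the largest receivable frame to what a full buffer chain can hold. */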
3298 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3299 if (hw->revision_id == 0)
3300 rx_ctx.lrxqthresh = 0;
3302 rx_ctx.lrxqthresh = 1;
3303 rx_ctx.crcstrip = 1;
3305 /* this controls whether VLAN is stripped from inner headers */
3307 /* set the prefena field to 1 because the manual says to */
3310 /* clear the context in the HMC */
3311 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3313 dev_info(&vsi->back->pdev->dev,
3314 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3315 ring->queue_index, pf_q, err);
3319 /* set the context in the HMC */
3320 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3322 dev_info(&vsi->back->pdev->dev,
3323 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3324 ring->queue_index, pf_q, err);
3328 /* configure Rx buffer alignment */
3329 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3330 clear_ring_build_skb_enabled(ring);
3332 set_ring_build_skb_enabled(ring);
3334 /* cache tail for quicker writes, and clear the reg before use */
3335 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3336 writel(0, ring->tail);
3338 ok = ring->xsk_umem ?
3339 i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
3340 !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3342 /* Log this in case the user has forgotten to give the kernel
3343 * any buffers, even later in the application.
3345 dev_info(&vsi->back->pdev->dev,
3346 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3347 ring->xsk_umem ? "UMEM enabled " : "",
3348 ring->queue_index, pf_q);
3355 * i40e_vsi_configure_tx - Configure the VSI for Tx
3356 * @vsi: VSI structure describing this set of rings and resources
3358 * Configure the Tx VSI for operation.
3360 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3365 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3366 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3368 if (!i40e_enabled_xdp_vsi(vsi))
3371 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3372 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3378 * i40e_vsi_configure_rx - Configure the VSI for Rx
3379 * @vsi: the VSI being configured
3381 * Configure the Rx VSI for operation.
3383 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
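/* Pick the Rx buffer size: legacy-rx (or no netdev) uses 2K buffers;
 * small MTUs can use 1536-byte buffers when padding permits; otherwise
 * use 3K buffers on 4K-page systems and 2K buffers elsewhere.
 */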
3388 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3389 vsi->max_frame = I40E_MAX_RXBUFFER;
3390 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3391 #if (PAGE_SIZE < 8192)
3392 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3393 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3394 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3395 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3398 vsi->max_frame = I40E_MAX_RXBUFFER;
3399 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3403 /* set up individual rings */
3404 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3405 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3411 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3412 * @vsi: ptr to the VSI
3414 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3416 struct i40e_ring *tx_ring, *rx_ring;
3417 u16 qoffset, qcount;
3420 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3421 /* Reset the TC information */
3422 for (i = 0; i < vsi->num_queue_pairs; i++) {
3423 rx_ring = vsi->rx_rings[i];
3424 tx_ring = vsi->tx_rings[i];
3425 rx_ring->dcb_tc = 0;
3426 tx_ring->dcb_tc = 0;
3431 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3432 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3435 qoffset = vsi->tc_config.tc_info[n].qoffset;
3436 qcount = vsi->tc_config.tc_info[n].qcount;
3437 for (i = qoffset; i < (qoffset + qcount); i++) {
3438 rx_ring = vsi->rx_rings[i];
3439 tx_ring = vsi->tx_rings[i];
3440 rx_ring->dcb_tc = n;
3441 tx_ring->dcb_tc = n;
3447 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3448 * @vsi: ptr to the VSI
3450 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3453 i40e_set_rx_mode(vsi->netdev);
3457 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3458 * @vsi: Pointer to the targeted VSI
3460 * This function replays the hlist on the hw where all the SB Flow Director
3461 * filters were saved.
3463 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3465 struct i40e_fdir_filter *filter;
3466 struct i40e_pf *pf = vsi->back;
3467 struct hlist_node *node;
3469 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3472 /* Reset FDir counters as we're replaying all existing filters */
3473 pf->fd_tcp4_filter_cnt = 0;
3474 pf->fd_udp4_filter_cnt = 0;
3475 pf->fd_sctp4_filter_cnt = 0;
3476 pf->fd_ip4_filter_cnt = 0;
3478 hlist_for_each_entry_safe(filter, node,
3479 &pf->fdir_filter_list, fdir_node) {
3480 i40e_add_del_fdir(vsi, filter, true);
3485 * i40e_vsi_configure - Set up the VSI for action
3486 * @vsi: the VSI being configured
3488 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3492 i40e_set_vsi_rx_mode(vsi);
3493 i40e_restore_vlan(vsi);
3494 i40e_vsi_config_dcb_rings(vsi);
3495 err = i40e_vsi_configure_tx(vsi);
3497 err = i40e_vsi_configure_rx(vsi);
3503 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3504 * @vsi: the VSI being configured
3506 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3508 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3509 struct i40e_pf *pf = vsi->back;
3510 struct i40e_hw *hw = &pf->hw;
3515 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3516 * and PFINT_LNKLSTn registers, e.g.:
3517 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3519 qp = vsi->base_queue;
3520 vector = vsi->base_vector;
3521 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3522 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3524 q_vector->rx.next_update = jiffies + 1;
3525 q_vector->rx.target_itr =
3526 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3527 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3528 q_vector->rx.target_itr);
3529 q_vector->rx.current_itr = q_vector->rx.target_itr;
3531 q_vector->tx.next_update = jiffies + 1;
3532 q_vector->tx.target_itr =
3533 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3534 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3535 q_vector->tx.target_itr);
3536 q_vector->tx.current_itr = q_vector->tx.target_itr;
3538 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3539 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3541 /* Linked list for the queuepairs assigned to this vector */
3542 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3543 for (q = 0; q < q_vector->num_ringpairs; q++) {
3544 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
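/* With XDP enabled, the Rx cause chains to the matching XDP Tx queue
 * first; that queue then points at the regular Tx queue, which in turn
 * points at the next Rx queue or terminates the list.
 */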
3547 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3548 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3549 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3550 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3551 (I40E_QUEUE_TYPE_TX <<
3552 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3554 wr32(hw, I40E_QINT_RQCTL(qp), val);
3557 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3558 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3559 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3560 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3561 (I40E_QUEUE_TYPE_TX <<
3562 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3564 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3567 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3568 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3569 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3570 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3571 (I40E_QUEUE_TYPE_RX <<
3572 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3574 /* Terminate the linked list */
3575 if (q == (q_vector->num_ringpairs - 1))
3576 val |= (I40E_QUEUE_END_OF_LIST <<
3577 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3579 wr32(hw, I40E_QINT_TQCTL(qp), val);
3588 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3589 * @pf: pointer to private device data structure
3591 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3593 struct i40e_hw *hw = &pf->hw;
3596 /* clear things first */
3597 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3598 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3600 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3601 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3602 I40E_PFINT_ICR0_ENA_GRST_MASK |
3603 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3604 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3605 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3606 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3607 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3609 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3610 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3612 if (pf->flags & I40E_FLAG_PTP)
3613 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3615 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3617 /* SW_ITR_IDX = 0, but don't change INTENA */
3618 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3619 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3621 /* OTHER_ITR_IDX = 0 */
3622 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3626 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3627 * @vsi: the VSI being configured
3629 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3631 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3632 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3633 struct i40e_pf *pf = vsi->back;
3634 struct i40e_hw *hw = &pf->hw;
3637 /* set the ITR configuration */
3638 q_vector->rx.next_update = jiffies + 1;
3639 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3640 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
3641 q_vector->rx.current_itr = q_vector->rx.target_itr;
3642 q_vector->tx.next_update = jiffies + 1;
3643 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3644 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
3645 q_vector->tx.current_itr = q_vector->tx.target_itr;
3647 i40e_enable_misc_int_causes(pf);
3649 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3650 wr32(hw, I40E_PFINT_LNKLST0, 0);
3652 /* Associate the queue pair to the vector and enable the queue int */
3653 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3654 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3655 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3656 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3658 wr32(hw, I40E_QINT_RQCTL(0), val);
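/* In MSI/legacy mode only queue pair 0 is wired to the interrupt; when
 * XDP is enabled, its Tx queue is inserted into the chain ahead of the
 * regular Tx queue.
 */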
3660 if (i40e_enabled_xdp_vsi(vsi)) {
3661 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3662 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3664 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3666 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3669 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3670 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3671 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3673 wr32(hw, I40E_QINT_TQCTL(0), val);
3678 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3679 * @pf: board private structure
3681 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3683 struct i40e_hw *hw = &pf->hw;
3685 wr32(hw, I40E_PFINT_DYN_CTL0,
3686 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3691 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3692 * @pf: board private structure
3694 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3696 struct i40e_hw *hw = &pf->hw;
3699 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3700 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3701 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3703 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3708 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3709 * @irq: interrupt number
3710 * @data: pointer to a q_vector
3712 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3714 struct i40e_q_vector *q_vector = data;
3716 if (!q_vector->tx.ring && !q_vector->rx.ring)
3719 napi_schedule_irqoff(&q_vector->napi);
3725 * i40e_irq_affinity_notify - Callback for affinity changes
3726 * @notify: context as to what irq was changed
3727 * @mask: the new affinity mask
3729 * This is a callback function used by the irq_set_affinity_notifier function
3730 * so that we may register to receive changes to the irq affinity masks.
3732 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3733 const cpumask_t *mask)
3735 struct i40e_q_vector *q_vector =
3736 container_of(notify, struct i40e_q_vector, affinity_notify);
3738 cpumask_copy(&q_vector->affinity_mask, mask);
3742 * i40e_irq_affinity_release - Callback for affinity notifier release
3743 * @ref: internal core kernel usage
3745 * This is a callback function used by the irq_set_affinity_notifier function
3746 * to inform the current notification subscriber that they will no longer
3747 * receive notifications.
3749 static void i40e_irq_affinity_release(struct kref *ref) {}
3752 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3753 * @vsi: the VSI being configured
3754 * @basename: name for the vector
3756 * Allocates MSI-X vectors and requests interrupts from the kernel.
3758 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3760 int q_vectors = vsi->num_q_vectors;
3761 struct i40e_pf *pf = vsi->back;
3762 int base = vsi->base_vector;
3769 for (vector = 0; vector < q_vectors; vector++) {
3770 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3772 irq_num = pf->msix_entries[base + vector].vector;
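/* Name the vector after the rings it services so it is easy to identify
 * in /proc/interrupts.
 */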
3774 if (q_vector->tx.ring && q_vector->rx.ring) {
3775 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3776 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3778 } else if (q_vector->rx.ring) {
3779 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3780 "%s-%s-%d", basename, "rx", rx_int_idx++);
3781 } else if (q_vector->tx.ring) {
3782 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3783 "%s-%s-%d", basename, "tx", tx_int_idx++);
3785 /* skip this unused q_vector */
3788 err = request_irq(irq_num,
3794 dev_info(&pf->pdev->dev,
3795 "MSIX request_irq failed, error: %d\n", err);
3796 goto free_queue_irqs;
3799 /* register for affinity change notifications */
3800 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3801 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3802 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3803 /* Spread affinity hints out across online CPUs.
3805 * get_cpu_mask returns a static constant mask with
3806 * a permanent lifetime so it's ok to pass to
3807 * irq_set_affinity_hint without making a copy.
3809 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3810 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3813 vsi->irqs_ready = true;
3819 irq_num = pf->msix_entries[base + vector].vector;
3820 irq_set_affinity_notifier(irq_num, NULL);
3821 irq_set_affinity_hint(irq_num, NULL);
3822 free_irq(irq_num, &vsi->q_vectors[vector]);
3828 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3829 * @vsi: the VSI being un-configured
3831 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3833 struct i40e_pf *pf = vsi->back;
3834 struct i40e_hw *hw = &pf->hw;
3835 int base = vsi->base_vector;
3838 /* disable interrupt causation from each queue */
3839 for (i = 0; i < vsi->num_queue_pairs; i++) {
3842 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3843 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3844 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3846 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3847 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3848 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3850 if (!i40e_enabled_xdp_vsi(vsi))
3852 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3855 /* disable each interrupt */
3856 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3857 for (i = vsi->base_vector;
3858 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3859 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3862 for (i = 0; i < vsi->num_q_vectors; i++)
3863 synchronize_irq(pf->msix_entries[i + base].vector);
3865 /* Legacy and MSI mode - this stops all interrupt handling */
3866 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3867 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3869 synchronize_irq(pf->pdev->irq);
3874 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3875 * @vsi: the VSI being configured
3877 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3879 struct i40e_pf *pf = vsi->back;
3882 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3883 for (i = 0; i < vsi->num_q_vectors; i++)
3884 i40e_irq_dynamic_enable(vsi, i);
3886 i40e_irq_dynamic_enable_icr0(pf);
3889 i40e_flush(&pf->hw);
3894 * i40e_free_misc_vector - Free the vector that handles non-queue events
3895 * @pf: board private structure
3897 static void i40e_free_misc_vector(struct i40e_pf *pf)
3900 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3901 i40e_flush(&pf->hw);
3903 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3904 synchronize_irq(pf->msix_entries[0].vector);
3905 free_irq(pf->msix_entries[0].vector, pf);
3906 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3911 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3912 * @irq: interrupt number
3913 * @data: pointer to a q_vector
3915 * This is the handler used for all MSI/Legacy interrupts, and deals
3916 * with both queue and non-queue interrupts. This is also used in
3917 * MSIX mode to handle the non-queue interrupts.
3919 static irqreturn_t i40e_intr(int irq, void *data)
3921 struct i40e_pf *pf = (struct i40e_pf *)data;
3922 struct i40e_hw *hw = &pf->hw;
3923 irqreturn_t ret = IRQ_NONE;
3924 u32 icr0, icr0_remaining;
3927 icr0 = rd32(hw, I40E_PFINT_ICR0);
3928 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3930 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3931 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3934 /* if interrupt but no bits showing, must be SWINT */
3935 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3936 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3939 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3940 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3941 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3942 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3943 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3946 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3947 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3948 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3949 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3951 /* We do not have a way to disarm Queue causes while leaving
3952 * interrupt enabled for all other causes, ideally
3953 * interrupt should be disabled while we are in NAPI but
3954 * this is not a performance path and napi_schedule()
3955 * can deal with rescheduling.
3957 if (!test_bit(__I40E_DOWN, pf->state))
3958 napi_schedule_irqoff(&q_vector->napi);
3961 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3962 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3963 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
3964 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3967 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3968 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3969 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3972 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3973 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3974 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3977 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3978 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3979 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3980 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3981 val = rd32(hw, I40E_GLGEN_RSTAT);
3982 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3983 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3984 if (val == I40E_RESET_CORER) {
3986 } else if (val == I40E_RESET_GLOBR) {
3988 } else if (val == I40E_RESET_EMPR) {
3990 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
3994 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3995 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3996 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3997 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3998 rd32(hw, I40E_PFHMC_ERRORINFO),
3999 rd32(hw, I40E_PFHMC_ERRORDATA));
4002 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4003 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4005 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
4006 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4007 i40e_ptp_tx_hwtstamp(pf);
4011 /* If a critical error is pending we have no choice but to reset the device.
4013 * Report and mask out any remaining unexpected interrupts.
4015 icr0_remaining = icr0 & ena_mask;
4016 if (icr0_remaining) {
4017 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4019 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4020 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4021 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4022 dev_info(&pf->pdev->dev, "device will be reset\n");
4023 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4024 i40e_service_event_schedule(pf);
4026 ena_mask &= ~icr0_remaining;
4031 /* re-enable interrupt causes */
4032 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4033 if (!test_bit(__I40E_DOWN, pf->state) ||
4034 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4035 i40e_service_event_schedule(pf);
4036 i40e_irq_dynamic_enable_icr0(pf);
4043 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4044 * @tx_ring: tx ring to clean
4045 * @budget: how many cleans we're allowed
4047 * Returns true if there's any budget left (i.e. the clean is finished)
4049 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4051 struct i40e_vsi *vsi = tx_ring->vsi;
4052 u16 i = tx_ring->next_to_clean;
4053 struct i40e_tx_buffer *tx_buf;
4054 struct i40e_tx_desc *tx_desc;
4056 tx_buf = &tx_ring->tx_bi[i];
4057 tx_desc = I40E_TX_DESC(tx_ring, i);
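/* Bias the index negative so that reaching zero signals a ring wrap
 * without a modulo in the loop; the true index is restored at the end.
 */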
4058 i -= tx_ring->count;
4061 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4063 /* if next_to_watch is not set then there is no work pending */
4067 /* prevent any other reads prior to eop_desc */
4070 /* if the descriptor isn't done, no work yet to do */
4071 if (!(eop_desc->cmd_type_offset_bsz &
4072 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4075 /* clear next_to_watch to prevent false hangs */
4076 tx_buf->next_to_watch = NULL;
4078 tx_desc->buffer_addr = 0;
4079 tx_desc->cmd_type_offset_bsz = 0;
4080 /* move past filter desc */
4085 i -= tx_ring->count;
4086 tx_buf = tx_ring->tx_bi;
4087 tx_desc = I40E_TX_DESC(tx_ring, 0);
4089 /* unmap skb header data */
4090 dma_unmap_single(tx_ring->dev,
4091 dma_unmap_addr(tx_buf, dma),
4092 dma_unmap_len(tx_buf, len),
4094 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4095 kfree(tx_buf->raw_buf);
4097 tx_buf->raw_buf = NULL;
4098 tx_buf->tx_flags = 0;
4099 tx_buf->next_to_watch = NULL;
4100 dma_unmap_len_set(tx_buf, len, 0);
4101 tx_desc->buffer_addr = 0;
4102 tx_desc->cmd_type_offset_bsz = 0;
4104 /* move us past the eop_desc for start of next FD desc */
4109 i -= tx_ring->count;
4110 tx_buf = tx_ring->tx_bi;
4111 tx_desc = I40E_TX_DESC(tx_ring, 0);
4114 /* update budget accounting */
4116 } while (likely(budget));
4118 i += tx_ring->count;
4119 tx_ring->next_to_clean = i;
4121 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4122 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4128 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4129 * @irq: interrupt number
4130 * @data: pointer to a q_vector
4132 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4134 struct i40e_q_vector *q_vector = data;
4135 struct i40e_vsi *vsi;
4137 if (!q_vector->tx.ring)
4140 vsi = q_vector->tx.ring->vsi;
4141 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4147 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4148 * @vsi: the VSI being configured
4149 * @v_idx: vector index
4150 * @qp_idx: queue pair index
4152 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4154 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4155 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4156 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4158 tx_ring->q_vector = q_vector;
4159 tx_ring->next = q_vector->tx.ring;
4160 q_vector->tx.ring = tx_ring;
4161 q_vector->tx.count++;
4163 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4164 if (i40e_enabled_xdp_vsi(vsi)) {
4165 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4167 xdp_ring->q_vector = q_vector;
4168 xdp_ring->next = q_vector->tx.ring;
4169 q_vector->tx.ring = xdp_ring;
4170 q_vector->tx.count++;
4173 rx_ring->q_vector = q_vector;
4174 rx_ring->next = q_vector->rx.ring;
4175 q_vector->rx.ring = rx_ring;
4176 q_vector->rx.count++;
4180 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4181 * @vsi: the VSI being configured
4183 * This function maps descriptor rings to the queue-specific vectors
4184 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4185 * one vector per queue pair, but on a constrained vector budget, we
4186 * group the queue pairs as "efficiently" as possible.
4188 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4190 int qp_remaining = vsi->num_queue_pairs;
4191 int q_vectors = vsi->num_q_vectors;
4196 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4197 * group them so there are multiple queues per vector.
4198 * It is also important to go through all the available vectors, so
4199 * that any vectors we do not end up using are cleared. This is
4200 * especially important when decreasing the number of queues
4201 * in use.
4203 for (; v_start < q_vectors; v_start++) {
4204 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4206 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
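/* Spread the remaining queue pairs as evenly as possible across the vectors
 * that are left; assuming qp_remaining is decremented as pairs are mapped
 * (not shown here), e.g. 10 queue pairs over 4 vectors come out as 3, 3, 2, 2.
 */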
4208 q_vector->num_ringpairs = num_ringpairs;
4209 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
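/* reg_idx selects the PFINT_*N register set for this vector; vector 0 of the
 * PF is likely the misc/"other causes" vector using the non-indexed PFINT_*
 * registers, hence the base_vector - 1 offset (compare the LNKLSTN(vector - 1)
 * accesses in i40e_vsi_free_irq()).
 */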
4211 q_vector->rx.count = 0;
4212 q_vector->tx.count = 0;
4213 q_vector->rx.ring = NULL;
4214 q_vector->tx.ring = NULL;
4216 while (num_ringpairs--) {
4217 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4225 * i40e_vsi_request_irq - Request IRQ from the OS
4226 * @vsi: the VSI being configured
4227 * @basename: name for the vector
4229 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4231 struct i40e_pf *pf = vsi->back;
4234 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4235 err = i40e_vsi_request_irq_msix(vsi, basename);
4236 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4237 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4240 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4244 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4249 #ifdef CONFIG_NET_POLL_CONTROLLER
4251 * i40e_netpoll - A Polling 'interrupt' handler
4252 * @netdev: network interface device structure
4254 * This is used by netconsole to send skbs without having to re-enable
4255 * interrupts. It's not called while the normal interrupt routine is executing.
4257 static void i40e_netpoll(struct net_device *netdev)
4259 struct i40e_netdev_priv *np = netdev_priv(netdev);
4260 struct i40e_vsi *vsi = np->vsi;
4261 struct i40e_pf *pf = vsi->back;
4264 /* if interface is down do nothing */
4265 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4268 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4269 for (i = 0; i < vsi->num_q_vectors; i++)
4270 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4272 i40e_intr(pf->pdev->irq, netdev);
4277 #define I40E_QTX_ENA_WAIT_COUNT 50
4280 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4281 * @pf: the PF being configured
4282 * @pf_q: the PF queue
4283 * @enable: enable or disable state of the queue
4285 * This routine will wait for the given Tx queue of the PF to reach the
4286 * enabled or disabled state.
4287 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4288 * multiple retries; else will return 0 in case of success.
4290 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4295 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4296 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4297 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4300 usleep_range(10, 20);
4302 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4309 * i40e_control_tx_q - Start or stop a particular Tx queue
4310 * @pf: the PF structure
4311 * @pf_q: the PF queue to configure
4312 * @enable: start or stop the queue
4314 * This function enables or disables a single queue. Note that any delay
4315 * required after the operation is expected to be handled by the caller of this function.
4318 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4320 struct i40e_hw *hw = &pf->hw;
4324 /* warn the TX unit of coming changes */
4325 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4327 usleep_range(10, 20);
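/* Poll until QENA_REQ (last requested state) and QENA_STAT (current hardware
 * state) agree, i.e. any previously requested change has completed, before a
 * new request is written below.
 */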
4329 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4330 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4331 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4332 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4334 usleep_range(1000, 2000);
4337 /* Skip if the queue is already in the requested state */
4338 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4341 /* turn on/off the queue */
4343 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4344 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4346 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4349 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4353 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4355 * @pf: the PF structure
4356 * @pf_q: the PF queue to configure
4357 * @is_xdp: true if the queue is used for XDP
4358 * @enable: start or stop the queue
4360 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4361 bool is_xdp, bool enable)
4365 i40e_control_tx_q(pf, pf_q, enable);
4367 /* wait for the change to finish */
4368 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4370 dev_info(&pf->pdev->dev,
4371 "VSI seid %d %sTx ring %d %sable timeout\n",
4372 seid, (is_xdp ? "XDP " : ""), pf_q,
4373 (enable ? "en" : "dis"));
4380 * i40e_vsi_control_tx - Start or stop a VSI's rings
4381 * @vsi: the VSI being configured
4382 * @enable: start or stop the rings
4384 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4386 struct i40e_pf *pf = vsi->back;
4387 int i, pf_q, ret = 0;
4389 pf_q = vsi->base_queue;
4390 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4391 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4393 false /*is xdp*/, enable);
4397 if (!i40e_enabled_xdp_vsi(vsi))
4400 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4401 pf_q + vsi->alloc_queue_pairs,
4402 true /*is xdp*/, enable);
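/* XDP Tx queues occupy the PF queue range directly after the regular queue
 * pairs of this VSI, hence the pf_q + vsi->alloc_queue_pairs offset used here
 * and in i40e_vsi_wait_queues_disabled().
 */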
4410 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4411 * @pf: the PF being configured
4412 * @pf_q: the PF queue
4413 * @enable: enable or disable state of the queue
4415 * This routine will wait for the given Rx queue of the PF to reach the
4416 * enabled or disabled state.
4417 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4418 * multiple retries; else will return 0 in case of success.
4420 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4425 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4426 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4427 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4430 usleep_range(10, 20);
4432 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4439 * i40e_control_rx_q - Start or stop a particular Rx queue
4440 * @pf: the PF structure
4441 * @pf_q: the PF queue to configure
4442 * @enable: start or stop the queue
4444 * This function enables or disables a single queue. Note that
4445 * any delay required after the operation is expected to be
4446 * handled by the caller of this function.
4448 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4450 struct i40e_hw *hw = &pf->hw;
4454 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4455 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4456 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4457 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4459 usleep_range(1000, 2000);
4462 /* Skip if the queue is already in the requested state */
4463 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4466 /* turn on/off the queue */
4468 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4470 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4472 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4476 * i40e_control_wait_rx_q
4477 * @pf: the PF structure
4478 * @pf_q: queue being configured
4479 * @enable: start or stop the rings
4481 * This function enables or disables a single queue along with waiting
4482 * for the change to finish. The caller of this function should handle
4483 * the delays needed in the case of disabling queues.
4485 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4489 i40e_control_rx_q(pf, pf_q, enable);
4491 /* wait for the change to finish */
4492 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4500 * i40e_vsi_control_rx - Start or stop a VSI's rings
4501 * @vsi: the VSI being configured
4502 * @enable: start or stop the rings
4504 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4506 struct i40e_pf *pf = vsi->back;
4507 int i, pf_q, ret = 0;
4509 pf_q = vsi->base_queue;
4510 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4511 ret = i40e_control_wait_rx_q(pf, pf_q, enable);
4513 dev_info(&pf->pdev->dev,
4514 "VSI seid %d Rx ring %d %sable timeout\n",
4515 vsi->seid, pf_q, (enable ? "en" : "dis"));
4520 /* Due to HW errata, on Rx disable only, the register can indicate done
4521 * before it really is. Needs 50ms to be sure
4530 * i40e_vsi_start_rings - Start a VSI's rings
4531 * @vsi: the VSI being configured
4533 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4537 /* do rx first for enable and last for disable */
4538 ret = i40e_vsi_control_rx(vsi, true);
4541 ret = i40e_vsi_control_tx(vsi, true);
4547 * i40e_vsi_stop_rings - Stop a VSI's rings
4548 * @vsi: the VSI being configured
4550 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4552 /* When port TX is suspended, don't wait */
4553 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4554 return i40e_vsi_stop_rings_no_wait(vsi);
4556 /* Tx is stopped first and Rx last (the reverse of the enable order);
4557 * ignore return values, we need to shut down whatever we can
4559 i40e_vsi_control_tx(vsi, false);
4560 i40e_vsi_control_rx(vsi, false);
4564 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4565 * @vsi: the VSI being shutdown
4567 * This function stops all the rings for a VSI but does not delay to verify
4568 * that rings have been disabled. It is expected that the caller is shutting
4569 * down multiple VSIs at once and will delay together for all the VSIs after
4570 * initiating the shutdown. This is particularly useful for shutting down lots
4571 * of VFs together. Otherwise, a large delay can be incurred while configuring
4572 * each VSI in serial.
4574 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4576 struct i40e_pf *pf = vsi->back;
4579 pf_q = vsi->base_queue;
4580 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4581 i40e_control_tx_q(pf, pf_q, false);
4582 i40e_control_rx_q(pf, pf_q, false);
4587 * i40e_vsi_free_irq - Free the irq association with the OS
4588 * @vsi: the VSI being configured
4590 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4592 struct i40e_pf *pf = vsi->back;
4593 struct i40e_hw *hw = &pf->hw;
4594 int base = vsi->base_vector;
4598 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4599 if (!vsi->q_vectors)
4602 if (!vsi->irqs_ready)
4605 vsi->irqs_ready = false;
4606 for (i = 0; i < vsi->num_q_vectors; i++) {
4611 irq_num = pf->msix_entries[vector].vector;
4613 /* free only the irqs that were actually requested */
4614 if (!vsi->q_vectors[i] ||
4615 !vsi->q_vectors[i]->num_ringpairs)
4618 /* clear the affinity notifier in the IRQ descriptor */
4619 irq_set_affinity_notifier(irq_num, NULL);
4620 /* remove our suggested affinity mask for this IRQ */
4621 irq_set_affinity_hint(irq_num, NULL);
4622 synchronize_irq(irq_num);
4623 free_irq(irq_num, vsi->q_vectors[i]);
4625 /* Tear down the interrupt queue link list
4627 * We know that they come in pairs and always
4628 * the Rx first, then the Tx. To clear the
4629 * link list, stick the EOL value into the
4630 * next_q field of the registers.
4632 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4633 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4634 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4635 val |= I40E_QUEUE_END_OF_LIST
4636 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4637 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4639 while (qp != I40E_QUEUE_END_OF_LIST) {
4642 val = rd32(hw, I40E_QINT_RQCTL(qp));
4644 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4645 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4646 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4647 I40E_QINT_RQCTL_INTEVENT_MASK);
4649 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4650 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4652 wr32(hw, I40E_QINT_RQCTL(qp), val);
4654 val = rd32(hw, I40E_QINT_TQCTL(qp));
4656 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4657 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4659 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4660 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4661 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4662 I40E_QINT_TQCTL_INTEVENT_MASK);
4664 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4665 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4667 wr32(hw, I40E_QINT_TQCTL(qp), val);
4672 free_irq(pf->pdev->irq, pf);
4674 val = rd32(hw, I40E_PFINT_LNKLST0);
4675 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4676 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4677 val |= I40E_QUEUE_END_OF_LIST
4678 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4679 wr32(hw, I40E_PFINT_LNKLST0, val);
4681 val = rd32(hw, I40E_QINT_RQCTL(qp));
4682 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4683 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4684 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4685 I40E_QINT_RQCTL_INTEVENT_MASK);
4687 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4688 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4690 wr32(hw, I40E_QINT_RQCTL(qp), val);
4692 val = rd32(hw, I40E_QINT_TQCTL(qp));
4694 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4695 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4696 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4697 I40E_QINT_TQCTL_INTEVENT_MASK);
4699 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4700 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4702 wr32(hw, I40E_QINT_TQCTL(qp), val);
4707 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4708 * @vsi: the VSI being configured
4709 * @v_idx: Index of vector to be freed
4711 * This function frees the memory allocated to the q_vector. In addition if
4712 * NAPI is enabled it will delete any references to the NAPI struct prior
4713 * to freeing the q_vector.
4715 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4717 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4718 struct i40e_ring *ring;
4723 /* disassociate q_vector from rings */
4724 i40e_for_each_ring(ring, q_vector->tx)
4725 ring->q_vector = NULL;
4727 i40e_for_each_ring(ring, q_vector->rx)
4728 ring->q_vector = NULL;
4730 /* only VSI w/ an associated netdev is set up w/ NAPI */
4732 netif_napi_del(&q_vector->napi);
4734 vsi->q_vectors[v_idx] = NULL;
4736 kfree_rcu(q_vector, rcu);
4740 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4741 * @vsi: the VSI being un-configured
4743 * This frees the memory allocated to the q_vectors and
4744 * deletes references to the NAPI struct.
4746 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4750 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4751 i40e_free_q_vector(vsi, v_idx);
4755 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4756 * @pf: board private structure
4758 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4760 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4761 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4762 pci_disable_msix(pf->pdev);
4763 kfree(pf->msix_entries);
4764 pf->msix_entries = NULL;
4765 kfree(pf->irq_pile);
4766 pf->irq_pile = NULL;
4767 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4768 pci_disable_msi(pf->pdev);
4770 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4774 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4775 * @pf: board private structure
4777 * We go through and clear interrupt specific resources and reset the structure
4778 * to pre-load conditions
4780 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4784 i40e_free_misc_vector(pf);
4786 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4787 I40E_IWARP_IRQ_PILE_ID);
4789 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4790 for (i = 0; i < pf->num_alloc_vsi; i++)
4792 i40e_vsi_free_q_vectors(pf->vsi[i]);
4793 i40e_reset_interrupt_capability(pf);
4797 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4798 * @vsi: the VSI being configured
4800 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4807 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4808 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4810 if (q_vector->rx.ring || q_vector->tx.ring)
4811 napi_enable(&q_vector->napi);
4816 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4817 * @vsi: the VSI being configured
4819 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4826 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4827 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4829 if (q_vector->rx.ring || q_vector->tx.ring)
4830 napi_disable(&q_vector->napi);
4835 * i40e_vsi_close - Shut down a VSI
4836 * @vsi: the vsi to be quelled
4838 static void i40e_vsi_close(struct i40e_vsi *vsi)
4840 struct i40e_pf *pf = vsi->back;
4841 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4843 i40e_vsi_free_irq(vsi);
4844 i40e_vsi_free_tx_resources(vsi);
4845 i40e_vsi_free_rx_resources(vsi);
4846 vsi->current_netdev_flags = 0;
4847 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
4848 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4849 set_bit(__I40E_CLIENT_RESET, pf->state);
4853 * i40e_quiesce_vsi - Pause a given VSI
4854 * @vsi: the VSI being paused
4856 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4858 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4861 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
4862 if (vsi->netdev && netif_running(vsi->netdev))
4863 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4865 i40e_vsi_close(vsi);
4869 * i40e_unquiesce_vsi - Resume a given VSI
4870 * @vsi: the VSI being resumed
4872 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4874 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4877 if (vsi->netdev && netif_running(vsi->netdev))
4878 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4880 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4884 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4887 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4891 for (v = 0; v < pf->num_alloc_vsi; v++) {
4893 i40e_quiesce_vsi(pf->vsi[v]);
4898 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4901 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4905 for (v = 0; v < pf->num_alloc_vsi; v++) {
4907 i40e_unquiesce_vsi(pf->vsi[v]);
4912 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4913 * @vsi: the VSI being configured
4915 * Wait until all queues on a given VSI have been disabled.
4917 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4919 struct i40e_pf *pf = vsi->back;
4922 pf_q = vsi->base_queue;
4923 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4924 /* Check and wait for the Tx queue */
4925 ret = i40e_pf_txq_wait(pf, pf_q, false);
4927 dev_info(&pf->pdev->dev,
4928 "VSI seid %d Tx ring %d disable timeout\n",
4933 if (!i40e_enabled_xdp_vsi(vsi))
4936 /* Check and wait for the XDP Tx queue */
4937 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4940 dev_info(&pf->pdev->dev,
4941 "VSI seid %d XDP Tx ring %d disable timeout\n",
4946 /* Check and wait for the Rx queue */
4947 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4949 dev_info(&pf->pdev->dev,
4950 "VSI seid %d Rx ring %d disable timeout\n",
4959 #ifdef CONFIG_I40E_DCB
4961 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4964 * This function waits for the queues to be in disabled state for all the
4965 * VSIs that are managed by this PF.
4967 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4971 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4973 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4985 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4986 * @pf: pointer to PF
4988 * Get TC map for iSCSI PF type that will include iSCSI TC
4991 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4993 struct i40e_dcb_app_priority_table app;
4994 struct i40e_hw *hw = &pf->hw;
4995 u8 enabled_tc = 1; /* TC0 is always enabled */
4997 /* Get the iSCSI APP TLV */
4998 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5000 for (i = 0; i < dcbcfg->numapps; i++) {
5001 app = dcbcfg->app[i];
5002 if (app.selector == I40E_APP_SEL_TCPIP &&
5003 app.protocolid == I40E_APP_PROTOID_ISCSI) {
5004 tc = dcbcfg->etscfg.prioritytable[app.priority];
5005 enabled_tc |= BIT(tc);
5014 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
5015 * @dcbcfg: the corresponding DCBx configuration structure
5017 * Return the number of TCs from given DCBx configuration
5019 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5021 int i, tc_unused = 0;
5025 /* Scan the ETS Config Priority Table to find
5026 * traffic class enabled for a given priority
5027 * and create a bitmask of enabled TCs
5029 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5030 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
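/* etscfg.prioritytable[] maps each user priority to a TC, so OR-ing BIT(tc)
 * for every priority yields a bitmask of all TCs that are actually referenced.
 */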
5032 /* Now scan the bitmask to check for
5033 * contiguous TCs starting with TC0
5035 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5036 if (num_tc & BIT(i)) {
5040 pr_err("Non-contiguous TC - Disabling DCB\n");
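/* e.g. a map of 0b0101 (TC0 and TC2 set but TC1 skipped) is rejected here as
 * non-contiguous and the function presumably falls back to a single TC (TC0).
 */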
5048 /* There is always at least TC0 */
5056 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5057 * @dcbcfg: the corresponding DCBx configuration structure
5059 * Query the current DCB configuration and return the number of
5060 * traffic classes enabled from the given DCBX config
5062 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5064 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5068 for (i = 0; i < num_tc; i++)
5069 enabled_tc |= BIT(i);
5075 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5076 * @pf: PF being queried
5078 * Query the current MQPRIO configuration and return the number of
5079 * traffic classes enabled.
5081 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5083 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5084 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5085 u8 enabled_tc = 1, i;
5087 for (i = 1; i < num_tc; i++)
5088 enabled_tc |= BIT(i);
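/* mqprio TCs are numbered contiguously from 0, so the enabled map is simply
 * the low num_tc bits, e.g. num_tc = 3 yields 0x7.
 */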
5093 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5094 * @pf: PF being queried
5096 * Return number of traffic classes enabled for the given PF
5098 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5100 struct i40e_hw *hw = &pf->hw;
5101 u8 i, enabled_tc = 1;
5103 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5105 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5106 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5108 /* If neither MQPRIO nor DCB is enabled, then always use single TC */
5109 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5112 /* SFP mode will be enabled for all TCs on port */
5113 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5114 return i40e_dcb_get_num_tc(dcbcfg);
5116 /* MFP mode return count of enabled TCs for this PF */
5117 if (pf->hw.func_caps.iscsi)
5118 enabled_tc = i40e_get_iscsi_tc_map(pf);
5120 return 1; /* Only TC0 */
5122 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5123 if (enabled_tc & BIT(i))
5130 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5131 * @pf: PF being queried
5133 * Return a bitmap for enabled traffic classes for this PF.
5135 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5137 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5138 return i40e_mqprio_get_enabled_tc(pf);
5140 /* If neither MQPRIO nor DCB is enabled for this PF then just return
5143 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5144 return I40E_DEFAULT_TRAFFIC_CLASS;
5146 /* SFP mode we want PF to be enabled for all TCs */
5147 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5148 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5150 /* MFP enabled and iSCSI PF type */
5151 if (pf->hw.func_caps.iscsi)
5152 return i40e_get_iscsi_tc_map(pf);
5154 return I40E_DEFAULT_TRAFFIC_CLASS;
5158 * i40e_vsi_get_bw_info - Query VSI BW Information
5159 * @vsi: the VSI being queried
5161 * Returns 0 on success, negative value on failure
5163 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5165 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5166 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5167 struct i40e_pf *pf = vsi->back;
5168 struct i40e_hw *hw = &pf->hw;
5173 /* Get the VSI level BW configuration */
5174 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5176 dev_info(&pf->pdev->dev,
5177 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5178 i40e_stat_str(&pf->hw, ret),
5179 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5183 /* Get the VSI level BW configuration per TC */
5184 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5187 dev_info(&pf->pdev->dev,
5188 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5189 i40e_stat_str(&pf->hw, ret),
5190 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5194 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5195 dev_info(&pf->pdev->dev,
5196 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5197 bw_config.tc_valid_bits,
5198 bw_ets_config.tc_valid_bits);
5199 /* Still continuing */
5202 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5203 vsi->bw_max_quanta = bw_config.max_bw;
5204 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5205 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
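/* The two 16-bit tc_bw_max words are combined into one 32-bit value that
 * carries a 4-bit field per TC (8 TCs x 4 bits); only the low 3 bits of each
 * field are consumed below.
 */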
5206 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5207 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5208 vsi->bw_ets_limit_credits[i] =
5209 le16_to_cpu(bw_ets_config.credits[i]);
5210 /* 3 bits out of 4 for each TC */
5211 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5218 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5219 * @vsi: the VSI being configured
5220 * @enabled_tc: TC bitmap
5221 * @bw_share: BW shared credits per TC
5223 * Returns 0 on success, negative value on failure
5225 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5228 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5229 struct i40e_pf *pf = vsi->back;
5233 /* There is no need to reset BW when mqprio mode is on. */
5234 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5236 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5237 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5239 dev_info(&pf->pdev->dev,
5240 "Failed to reset tx rate for vsi->seid %u\n",
5244 bw_data.tc_valid_bits = enabled_tc;
5245 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5246 bw_data.tc_bw_credits[i] = bw_share[i];
5248 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5250 dev_info(&pf->pdev->dev,
5251 "AQ command Config VSI BW allocation per TC failed = %d\n",
5252 pf->hw.aq.asq_last_status);
5256 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5257 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5263 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5264 * @vsi: the VSI being configured
5265 * @enabled_tc: TC map to be enabled
5268 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5270 struct net_device *netdev = vsi->netdev;
5271 struct i40e_pf *pf = vsi->back;
5272 struct i40e_hw *hw = &pf->hw;
5275 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5281 netdev_reset_tc(netdev);
5285 /* Set up actual enabled TCs on the VSI */
5286 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5289 /* set per TC queues for the VSI */
5290 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5291 /* Only set TC queues for enabled TCs
5293 * e.g. For a VSI that has TC0 and TC3 enabled the
5294 * enabled_tc bitmap would be 0x09 (binary 1001); the driver
5295 * will set numtc for the netdev to 2, which the netdev layer
5296 * will reference as TC 0 and 1.
5298 if (vsi->tc_config.enabled_tc & BIT(i))
5299 netdev_set_tc_queue(netdev,
5300 vsi->tc_config.tc_info[i].netdev_tc,
5301 vsi->tc_config.tc_info[i].qcount,
5302 vsi->tc_config.tc_info[i].qoffset);
5305 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5308 /* Assign UP2TC map for the VSI */
5309 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5310 /* Get the actual TC# for the UP */
5311 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5312 /* Get the mapped netdev TC# for the UP */
5313 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5314 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5319 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
5320 * @vsi: the VSI being configured
5321 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5323 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5324 struct i40e_vsi_context *ctxt)
5326 /* copy just the sections touched not the entire info
5327 * since not all sections are valid as returned by the AQ update VSI params command
5330 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5331 memcpy(&vsi->info.queue_mapping,
5332 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5333 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5334 sizeof(vsi->info.tc_mapping));
5338 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5339 * @vsi: VSI to be configured
5340 * @enabled_tc: TC bitmap
5342 * This configures a particular VSI for TCs that are mapped to the
5343 * given TC bitmap. It uses default bandwidth share for TCs across
5344 * VSIs to configure TC for a particular VSI.
5347 * It is expected that the VSI queues have been quiesced before calling this function.
5350 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5352 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5353 struct i40e_pf *pf = vsi->back;
5354 struct i40e_hw *hw = &pf->hw;
5355 struct i40e_vsi_context ctxt;
5359 /* Check if enabled_tc is same as existing or new TCs */
5360 if (vsi->tc_config.enabled_tc == enabled_tc &&
5361 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5364 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5365 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5366 if (enabled_tc & BIT(i))
5370 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5372 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5374 dev_info(&pf->pdev->dev,
5375 "Failed configuring TC map %d for VSI %d\n",
5376 enabled_tc, vsi->seid);
5377 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5380 dev_info(&pf->pdev->dev,
5381 "Failed querying vsi bw info, err %s aq_err %s\n",
5382 i40e_stat_str(hw, ret),
5383 i40e_aq_str(hw, hw->aq.asq_last_status));
5386 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5387 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5390 valid_tc = bw_config.tc_valid_bits;
5391 /* Always enable TC0, no matter what */
5393 dev_info(&pf->pdev->dev,
5394 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5395 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5396 enabled_tc = valid_tc;
5399 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5401 dev_err(&pf->pdev->dev,
5402 "Unable to configure TC map %d for VSI %d\n",
5403 enabled_tc, vsi->seid);
5408 /* Update Queue Pairs Mapping for currently enabled UPs */
5409 ctxt.seid = vsi->seid;
5410 ctxt.pf_num = vsi->back->hw.pf_id;
5412 ctxt.uplink_seid = vsi->uplink_seid;
5413 ctxt.info = vsi->info;
5414 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5415 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5419 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5422 /* On destroying the qdisc, reset vsi->rss_size, as the number of enabled queues has changed.
5425 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5426 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5427 vsi->num_queue_pairs);
5428 ret = i40e_vsi_config_rss(vsi);
5430 dev_info(&vsi->back->pdev->dev,
5431 "Failed to reconfig rss for num_queues\n");
5434 vsi->reconfig_rss = false;
5436 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5437 ctxt.info.valid_sections |=
5438 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5439 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5442 /* Update the VSI after updating the VSI queue-mapping
5445 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5447 dev_info(&pf->pdev->dev,
5448 "Update vsi tc config failed, err %s aq_err %s\n",
5449 i40e_stat_str(hw, ret),
5450 i40e_aq_str(hw, hw->aq.asq_last_status));
5453 /* update the local VSI info with updated queue map */
5454 i40e_vsi_update_queue_map(vsi, &ctxt);
5455 vsi->info.valid_sections = 0;
5457 /* Update current VSI BW information */
5458 ret = i40e_vsi_get_bw_info(vsi);
5460 dev_info(&pf->pdev->dev,
5461 "Failed updating vsi bw info, err %s aq_err %s\n",
5462 i40e_stat_str(hw, ret),
5463 i40e_aq_str(hw, hw->aq.asq_last_status));
5467 /* Update the netdev TC setup */
5468 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5474 * i40e_get_link_speed - Returns link speed for the interface
5475 * @vsi: VSI to be configured
5478 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5480 struct i40e_pf *pf = vsi->back;
5482 switch (pf->hw.phy.link_info.link_speed) {
5483 case I40E_LINK_SPEED_40GB:
5485 case I40E_LINK_SPEED_25GB:
5487 case I40E_LINK_SPEED_20GB:
5489 case I40E_LINK_SPEED_10GB:
5491 case I40E_LINK_SPEED_1GB:
5499 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5500 * @vsi: VSI to be configured
5501 * @seid: seid of the channel/VSI
5502 * @max_tx_rate: max TX rate to be configured as BW limit
5504 * Helper function to set BW limit for a given VSI
5506 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5508 struct i40e_pf *pf = vsi->back;
5513 speed = i40e_get_link_speed(vsi);
5514 if (max_tx_rate > speed) {
5515 dev_err(&pf->pdev->dev,
5516 "Invalid max tx rate %llu specified for VSI seid %d.",
5520 if (max_tx_rate && max_tx_rate < 50) {
5521 dev_warn(&pf->pdev->dev,
5522 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5526 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5527 credits = max_tx_rate;
5528 do_div(credits, I40E_BW_CREDIT_DIVISOR);
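/* With 50 Mbps per credit, e.g. a requested 975 Mbps becomes 975 / 50 = 19
 * credits, i.e. an effective cap of 950 Mbps (integer division rounds down).
 */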
5529 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5530 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5532 dev_err(&pf->pdev->dev,
5533 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5534 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5535 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5540 * i40e_remove_queue_channels - Remove queue channels for the TCs
5541 * @vsi: VSI to be configured
5543 * Remove queue channels for the TCs
5545 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5547 enum i40e_admin_queue_err last_aq_status;
5548 struct i40e_cloud_filter *cfilter;
5549 struct i40e_channel *ch, *ch_tmp;
5550 struct i40e_pf *pf = vsi->back;
5551 struct hlist_node *node;
5554 /* Reset rss size that was stored when reconfiguring rss for
5555 * channel VSIs with non-power-of-2 queue count.
5557 vsi->current_rss_size = 0;
5559 /* perform cleanup for channels if they exist */
5560 if (list_empty(&vsi->ch_list))
5563 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5564 struct i40e_vsi *p_vsi;
5566 list_del(&ch->list);
5567 p_vsi = ch->parent_vsi;
5568 if (!p_vsi || !ch->initialized) {
5572 /* Reset queue contexts */
5573 for (i = 0; i < ch->num_queue_pairs; i++) {
5574 struct i40e_ring *tx_ring, *rx_ring;
5577 pf_q = ch->base_queue + i;
5578 tx_ring = vsi->tx_rings[pf_q];
5581 rx_ring = vsi->rx_rings[pf_q];
5585 /* Reset BW configured for this VSI via mqprio */
5586 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5588 dev_info(&vsi->back->pdev->dev,
5589 "Failed to reset tx rate for ch->seid %u\n",
5592 /* delete cloud filters associated with this channel */
5593 hlist_for_each_entry_safe(cfilter, node,
5594 &pf->cloud_filter_list, cloud_node) {
5595 if (cfilter->seid != ch->seid)
5598 hash_del(&cfilter->cloud_node);
5599 if (cfilter->dst_port)
5600 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5604 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5606 last_aq_status = pf->hw.aq.asq_last_status;
5608 dev_info(&pf->pdev->dev,
5609 "Failed to delete cloud filter, err %s aq_err %s\n",
5610 i40e_stat_str(&pf->hw, ret),
5611 i40e_aq_str(&pf->hw, last_aq_status));
5615 /* delete VSI from FW */
5616 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5619 dev_err(&vsi->back->pdev->dev,
5620 "unable to remove channel (%d) for parent VSI(%d)\n",
5621 ch->seid, p_vsi->seid);
5624 INIT_LIST_HEAD(&vsi->ch_list);
5628 * i40e_is_any_channel - check whether any channel exists for the VSI
5629 * @vsi: ptr to the VSI with which channels are associated
5631 * Returns true if at least one channel has been created for the VSI, false otherwise
5633 static bool i40e_is_any_channel(struct i40e_vsi *vsi)
5635 struct i40e_channel *ch, *ch_tmp;
5637 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5638 if (ch->initialized)
5646 * i40e_get_max_queues_for_channel
5647 * @vsi: ptr to VSI to which channels are associated with
5649 * Helper function which returns max value among the queue counts set on the
5650 * channels/TCs created.
5652 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5654 struct i40e_channel *ch, *ch_tmp;
5657 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5658 if (!ch->initialized)
5660 if (ch->num_queue_pairs > max)
5661 max = ch->num_queue_pairs;
5668 * i40e_validate_num_queues - validate num_queues w.r.t channel
5669 * @pf: ptr to PF device
5670 * @num_queues: number of queues
5671 * @vsi: the parent VSI
5672 * @reconfig_rss: indicates should the RSS be reconfigured or not
5674 * This function validates number of queues in the context of new channel
5675 * which is being established and determines if RSS should be reconfigured
5676 * or not for parent VSI.
5678 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5679 struct i40e_vsi *vsi, bool *reconfig_rss)
5686 *reconfig_rss = false;
5687 if (vsi->current_rss_size) {
5688 if (num_queues > vsi->current_rss_size) {
5689 dev_dbg(&pf->pdev->dev,
5690 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5691 num_queues, vsi->current_rss_size);
5693 } else if ((num_queues < vsi->current_rss_size) &&
5694 (!is_power_of_2(num_queues))) {
5695 dev_dbg(&pf->pdev->dev,
5696 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5697 num_queues, vsi->current_rss_size);
5702 if (!is_power_of_2(num_queues)) {
5703 /* Find the max num_queues configured across existing channels;
5705 * if any channel exists, enforce that 'num_queues' is no less than
5706 * the largest queue count ever configured for a channel.
5708 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5709 if (num_queues < max_ch_queues) {
5710 dev_dbg(&pf->pdev->dev,
5711 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5712 num_queues, max_ch_queues);
5715 *reconfig_rss = true;
5722 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5723 * @vsi: the VSI being setup
5724 * @rss_size: size of RSS; the LUT is reprogrammed accordingly
5726 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
5728 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5730 struct i40e_pf *pf = vsi->back;
5731 u8 seed[I40E_HKEY_ARRAY_SIZE];
5732 struct i40e_hw *hw = &pf->hw;
5740 if (rss_size > vsi->rss_size)
5743 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5744 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5748 /* Ignoring user configured lut if there is one */
5749 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5751 /* Use the user-configured hash key if there is one; otherwise fall back to the kernel's global RSS key
5754 if (vsi->rss_hkey_user)
5755 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5757 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
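/* netdev_rss_key_fill() copies the kernel's global, once-randomized RSS key
 * rather than generating a fresh key on every call, so all interfaces without
 * a user-supplied key end up sharing the same seed.
 */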
5759 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5761 dev_info(&pf->pdev->dev,
5762 "Cannot set RSS lut, err %s aq_err %s\n",
5763 i40e_stat_str(hw, ret),
5764 i40e_aq_str(hw, hw->aq.asq_last_status));
5770 /* Do the update w.r.t. storing rss_size */
5771 if (!vsi->orig_rss_size)
5772 vsi->orig_rss_size = vsi->rss_size;
5773 vsi->current_rss_size = local_rss_size;
5779 * i40e_channel_setup_queue_map - Setup a channel queue map
5780 * @pf: ptr to PF device
5781 * @vsi: the VSI being setup
5782 * @ctxt: VSI context structure
5783 * @ch: ptr to channel structure
5785 * Setup queue map for a specific channel
5787 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5788 struct i40e_vsi_context *ctxt,
5789 struct i40e_channel *ch)
5791 u16 qcount, qmap, sections = 0;
5795 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5796 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5798 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5799 ch->num_queue_pairs = qcount;
5801 /* find the next higher power-of-2 of num queue pairs */
5802 pow = ilog2(qcount);
5803 if (!is_power_of_2(qcount))
5806 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5807 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
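/* The TC queue map advertises the queue count as a power-of-2 exponent
 * (pow is presumably bumped to the next exponent above when qcount is not a
 * power of 2), so e.g. a channel requesting 6 queue pairs is exposed to the
 * hardware as 2^3 = 8, starting at the given queue offset.
 */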
5809 /* Setup queue TC[0].qmap for given VSI context */
5810 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5812 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
5813 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5814 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
5815 ctxt->info.valid_sections |= cpu_to_le16(sections);
5819 * i40e_add_channel - add a channel by adding VSI
5820 * @pf: ptr to PF device
5821 * @uplink_seid: underlying HW switching element (VEB) ID
5822 * @ch: ptr to channel structure
5824 * Add a channel (VSI) using add_vsi and queue_map
5826 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
5827 struct i40e_channel *ch)
5829 struct i40e_hw *hw = &pf->hw;
5830 struct i40e_vsi_context ctxt;
5831 u8 enabled_tc = 0x1; /* TC0 enabled */
5834 if (ch->type != I40E_VSI_VMDQ2) {
5835 dev_info(&pf->pdev->dev,
5836 "add new vsi failed, ch->type %d\n", ch->type);
5840 memset(&ctxt, 0, sizeof(ctxt));
5841 ctxt.pf_num = hw->pf_id;
5843 ctxt.uplink_seid = uplink_seid;
5844 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5845 if (ch->type == I40E_VSI_VMDQ2)
5846 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5848 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
5849 ctxt.info.valid_sections |=
5850 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5851 ctxt.info.switch_id =
5852 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5855 /* Set queue map for a given VSI context */
5856 i40e_channel_setup_queue_map(pf, &ctxt, ch);
5858 /* Now time to create VSI */
5859 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5861 dev_info(&pf->pdev->dev,
5862 "add new vsi failed, err %s aq_err %s\n",
5863 i40e_stat_str(&pf->hw, ret),
5864 i40e_aq_str(&pf->hw,
5865 pf->hw.aq.asq_last_status));
5869 /* Success, update channel, set enabled_tc only if the channel is not a macvlan
5872 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
5873 ch->seid = ctxt.seid;
5874 ch->vsi_number = ctxt.vsi_number;
5875 ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
5877 /* copy just the sections touched not the entire info
5878 * since not all sections are valid as returned by the AQ command
5881 ch->info.mapping_flags = ctxt.info.mapping_flags;
5882 memcpy(&ch->info.queue_mapping,
5883 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
5884 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
5885 sizeof(ctxt.info.tc_mapping));
5890 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
5893 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5897 bw_data.tc_valid_bits = ch->enabled_tc;
5898 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5899 bw_data.tc_bw_credits[i] = bw_share[i];
5901 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
5904 dev_info(&vsi->back->pdev->dev,
5905 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
5906 vsi->back->hw.aq.asq_last_status, ch->seid);
5910 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5911 ch->info.qs_handle[i] = bw_data.qs_handles[i];
5917 * i40e_channel_config_tx_ring - config TX ring associated with new channel
5918 * @pf: ptr to PF device
5919 * @vsi: the VSI being setup
5920 * @ch: ptr to channel structure
5922 * Configure Tx rings associated with the channel (VSI), since its queues are being taken from the main VSI.
5925 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
5926 struct i40e_vsi *vsi,
5927 struct i40e_channel *ch)
5931 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5933 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5934 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5935 if (ch->enabled_tc & BIT(i))
5939 /* configure BW for new VSI */
5940 ret = i40e_channel_config_bw(vsi, ch, bw_share);
5942 dev_info(&vsi->back->pdev->dev,
5943 "Failed configuring TC map %d for channel (seid %u)\n",
5944 ch->enabled_tc, ch->seid);
5948 for (i = 0; i < ch->num_queue_pairs; i++) {
5949 struct i40e_ring *tx_ring, *rx_ring;
5952 pf_q = ch->base_queue + i;
5954 /* Get the Tx ring ptr of the main VSI, to re-setup the Tx queue context
5957 tx_ring = vsi->tx_rings[pf_q];
5960 /* Get the RX ring ptr */
5961 rx_ring = vsi->rx_rings[pf_q];
5969 * i40e_setup_hw_channel - setup new channel
5970 * @pf: ptr to PF device
5971 * @vsi: the VSI being setup
5972 * @ch: ptr to channel structure
5973 * @uplink_seid: underlying HW switching element (VEB) ID
5974 * @type: type of channel to be created (VMDq2/VF)
5976 * Set up a new channel (VSI) based on the specified type (VMDq2/VF)
5977 * and configure its Tx rings accordingly
5979 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
5980 struct i40e_vsi *vsi,
5981 struct i40e_channel *ch,
5982 u16 uplink_seid, u8 type)
5986 ch->initialized = false;
5987 ch->base_queue = vsi->next_base_queue;
5990 /* Proceed with creation of channel (VMDq2) VSI */
5991 ret = i40e_add_channel(pf, uplink_seid, ch);
5993 dev_info(&pf->pdev->dev,
5994 "failed to add_channel using uplink_seid %u\n",
5999 /* Mark the successful creation of channel */
6000 ch->initialized = true;
6002 /* Reconfigure TX queues using QTX_CTL register */
6003 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6005 dev_info(&pf->pdev->dev,
6006 "failed to configure TX rings for channel %u\n",
6011 /* update 'next_base_queue' */
6012 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6013 dev_dbg(&pf->pdev->dev,
6014 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6015 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6016 ch->num_queue_pairs,
6017 vsi->next_base_queue);
6022 * i40e_setup_channel - setup new channel using uplink element
6023 * @pf: ptr to PF device
6024 * @vsi: the VSI being configured (parent of the channel)
6026 * @ch: ptr to channel structure
6028 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6029 * and uplink switching element (uplink_seid)
6031 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6032 struct i40e_channel *ch)
6038 if (vsi->type == I40E_VSI_MAIN) {
6039 vsi_type = I40E_VSI_VMDQ2;
6041 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6046 /* underlying switching element */
6047 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6049 /* create channel (VSI), configure TX rings */
6050 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6052 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6056 return ch->initialized;
6060 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6061 * @vsi: ptr to VSI which has PF backing
6063 * Sets up the switch mode correctly if it needs to be changed, restricted
6064 * to the modes that are allowed.
6066 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6069 struct i40e_pf *pf = vsi->back;
6070 struct i40e_hw *hw = &pf->hw;
6073 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6077 if (hw->dev_caps.switch_mode) {
6078 /* if switch mode is set, support mode2 (non-tunneled for
6079 * cloud filter) for now
6081 u32 switch_mode = hw->dev_caps.switch_mode &
6082 I40E_SWITCH_MODE_MASK;
6083 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6084 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6086 dev_err(&pf->pdev->dev,
6087 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6088 hw->dev_caps.switch_mode);
6093 /* Set Bit 7 to be valid */
6094 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6096 /* Set L4type for TCP support */
6097 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6099 /* Set cloud filter mode */
6100 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6102 /* Prep mode field for set_switch_config */
6103 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6104 pf->last_sw_conf_valid_flags,
6106 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6107 dev_err(&pf->pdev->dev,
6108 "couldn't set switch config bits, err %s aq_err %s\n",
6109 i40e_stat_str(hw, ret),
6111 hw->aq.asq_last_status));
6117 * i40e_create_queue_channel - function to create channel
6118 * @vsi: VSI to be configured
6119 * @ch: ptr to channel (it contains channel specific params)
6121 * This function creates a channel (VSI) using the num_queues specified by
6122 * the user and reconfigures RSS if needed.
6124 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6125 struct i40e_channel *ch)
6127 struct i40e_pf *pf = vsi->back;
6134 if (!ch->num_queue_pairs) {
6135 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6136 ch->num_queue_pairs);
6140 /* validate user requested num_queues for channel */
6141 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6144 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6145 ch->num_queue_pairs);
6149 /* By default we are in VEPA mode; if this is the first VF/VMDq
6150 * VSI to be added, switch to VEB mode.
6152 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
6153 (!i40e_is_any_channel(vsi))) {
6154 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
6155 dev_dbg(&pf->pdev->dev,
6156 "Failed to create channel. Override queues (%u) not power of 2\n",
6157 vsi->tc_config.tc_info[0].qcount);
6161 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6162 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6164 if (vsi->type == I40E_VSI_MAIN) {
6165 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6166 i40e_do_reset(pf, I40E_PF_RESET_FLAG,
6169 i40e_do_reset_safe(pf,
6170 I40E_PF_RESET_FLAG);
6173 /* from now on, for the main VSI the number of queues will be the
6174 * value of TC0's queue count
6178 /* By this time, vsi->cnt_q_avail shall be set to non-zero and
6179 * it should be more than num_queues
6181 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6182 dev_dbg(&pf->pdev->dev,
6183 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6184 vsi->cnt_q_avail, ch->num_queue_pairs);
6188 /* reconfig_rss only if vsi type is MAIN_VSI */
6189 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6190 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6192 dev_info(&pf->pdev->dev,
6193 "Error: unable to reconfig rss for num_queues (%u)\n",
6194 ch->num_queue_pairs);
6199 if (!i40e_setup_channel(pf, vsi, ch)) {
6200 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6204 dev_info(&pf->pdev->dev,
6205 "Setup channel (id:%u) utilizing num_queues %d\n",
6206 ch->seid, ch->num_queue_pairs);
6208 /* configure VSI for BW limit */
6209 if (ch->max_tx_rate) {
6210 u64 credits = ch->max_tx_rate;
6212 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6215 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6216 dev_dbg(&pf->pdev->dev,
6217 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6223 /* in case of VF, this will be main SRIOV VSI */
6224 ch->parent_vsi = vsi;
6226 /* and update main_vsi's count for queue_available to use */
6227 vsi->cnt_q_avail -= ch->num_queue_pairs;
6233 * i40e_configure_queue_channels - Add queue channel for the given TCs
6234 * @vsi: VSI to be configured
6236 * Configures queue channel mapping to the given TCs
6238 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6240 struct i40e_channel *ch;
6244 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6245 vsi->tc_seid_map[0] = vsi->seid;
6246 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6247 if (vsi->tc_config.enabled_tc & BIT(i)) {
6248 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6254 INIT_LIST_HEAD(&ch->list);
6255 ch->num_queue_pairs =
6256 vsi->tc_config.tc_info[i].qcount;
6258 vsi->tc_config.tc_info[i].qoffset;
6260 /* Bandwidth limit through the tc interface is in bytes/s; convert it to Mbps for the hardware.
6263 max_rate = vsi->mqprio_qopt.max_rate[i];
6264 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
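/* Assuming I40E_BW_MBPS_DIVISOR is 125000 (bytes per second in one Mbit/s,
 * i.e. 1000000 / 8), this turns the byte rate supplied by the tc interface
 * into whole Mbps for i40e_set_bw_limit().
 */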
6265 ch->max_tx_rate = max_rate;
6267 list_add_tail(&ch->list, &vsi->ch_list);
6269 ret = i40e_create_queue_channel(vsi, ch);
6271 dev_err(&vsi->back->pdev->dev,
6272 "Failed creating queue channel with TC%d: queues %d\n",
6273 i, ch->num_queue_pairs);
6276 vsi->tc_seid_map[i] = ch->seid;
6282 i40e_remove_queue_channels(vsi);
6287 * i40e_veb_config_tc - Configure TCs for given VEB
6289 * @enabled_tc: TC bitmap
6291 * Configures given TC bitmap for VEB (switching) element
6293 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6295 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6296 struct i40e_pf *pf = veb->pf;
6300 /* No TCs or already enabled TCs just return */
6301 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6304 bw_data.tc_valid_bits = enabled_tc;
6305 /* bw_data.absolute_credits is not set (relative) */
6307 /* Enable ETS TCs with equal BW Share for now */
6308 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6309 if (enabled_tc & BIT(i))
6310 bw_data.tc_bw_share_credits[i] = 1;
6313 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6316 dev_info(&pf->pdev->dev,
6317 "VEB bw config failed, err %s aq_err %s\n",
6318 i40e_stat_str(&pf->hw, ret),
6319 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6323 /* Update the BW information */
6324 ret = i40e_veb_get_bw_info(veb);
6326 dev_info(&pf->pdev->dev,
6327 "Failed getting veb bw config, err %s aq_err %s\n",
6328 i40e_stat_str(&pf->hw, ret),
6329 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6336 #ifdef CONFIG_I40E_DCB
6338 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6341 * Reconfigure VEB/VSIs on a given PF; it is assumed that
6342 * the caller has already quiesced all the VSIs before calling this function.
6345 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6351 /* Enable the TCs available on PF to all VEBs */
6352 tc_map = i40e_pf_get_tc_map(pf);
6353 for (v = 0; v < I40E_MAX_VEB; v++) {
6356 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6358 dev_info(&pf->pdev->dev,
6359 "Failed configuring TC for VEB seid=%d\n",
6361 /* Will try to configure as many components */
6365 /* Update each VSI */
6366 for (v = 0; v < pf->num_alloc_vsi; v++) {
6370 /* - Enable all TCs for the LAN VSI
6371 * - For all others keep them at TC0 for now
6373 if (v == pf->lan_vsi)
6374 tc_map = i40e_pf_get_tc_map(pf);
6376 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6378 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6380 dev_info(&pf->pdev->dev,
6381 "Failed configuring TC for VSI seid=%d\n",
6383 /* Will try to configure as many components as possible */
6385 /* Re-configure VSI vectors based on updated TC map */
6386 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6387 if (pf->vsi[v]->netdev)
6388 i40e_dcbnl_set_all(pf->vsi[v]);
6394 * i40e_resume_port_tx - Resume port Tx
6397 * Resume a port's Tx and issue a PF reset in case of failure to resume
6400 static int i40e_resume_port_tx(struct i40e_pf *pf)
6402 struct i40e_hw *hw = &pf->hw;
6405 ret = i40e_aq_resume_port_tx(hw, NULL);
6407 dev_info(&pf->pdev->dev,
6408 "Resume Port Tx failed, err %s aq_err %s\n",
6409 i40e_stat_str(&pf->hw, ret),
6410 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6411 /* Schedule PF reset to recover */
6412 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6413 i40e_service_event_schedule(pf);
6420 * i40e_update_dcb_config
6421 * @hw: pointer to the HW struct
6422 * @enable_mib_change: enable MIB change event
6424 * Update DCB configuration from the firmware
6426 static enum i40e_status_code
6427 i40e_update_dcb_config(struct i40e_hw *hw, bool enable_mib_change)
6429 struct i40e_lldp_variables lldp_cfg;
6432 if (!hw->func_caps.dcb)
6433 return I40E_NOT_SUPPORTED;
6435 /* Read LLDP NVM area */
6436 ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
6438 return I40E_ERR_NOT_READY;
6440 /* Get DCBX status */
6441 ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
6445 /* Check the DCBX Status */
6446 if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
6447 hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
6448 /* Get current DCBX configuration */
6449 ret = i40e_get_dcb_config(hw);
6452 } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
6453 return I40E_ERR_NOT_READY;
6456 /* Configure the LLDP MIB change event */
6457 if (enable_mib_change)
6458 ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
6464 * i40e_init_pf_dcb - Initialize DCB configuration
6465 * @pf: PF being configured
6467 * Query the current DCB configuration and cache it
6468 * in the hardware structure
6470 static int i40e_init_pf_dcb(struct i40e_pf *pf)
6472 struct i40e_hw *hw = &pf->hw;
6475 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable
6476 * Also do not enable DCBx if FW LLDP agent is disabled
6478 if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
6479 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) {
6480 dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n");
6481 err = I40E_NOT_SUPPORTED;
6485 err = i40e_update_dcb_config(hw, true);
6487 /* Device/Function is not DCBX capable */
6488 if ((!hw->func_caps.dcb) ||
6489 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6490 dev_info(&pf->pdev->dev,
6491 "DCBX offload is not supported or is disabled for this PF.\n");
6493 /* When status is not DISABLED, DCBX is managed in FW */
6494 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6495 DCB_CAP_DCBX_VER_IEEE;
6497 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6498 /* Enable DCB tagging only when more than one TC
6499 * or explicitly disable if only one TC
6501 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6502 pf->flags |= I40E_FLAG_DCB_ENABLED;
6504 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6505 dev_dbg(&pf->pdev->dev,
6506 "DCBX offload is supported for this PF.\n");
6508 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6509 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6510 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6512 dev_info(&pf->pdev->dev,
6513 "Query for DCB configuration failed, err %s aq_err %s\n",
6514 i40e_stat_str(&pf->hw, err),
6515 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6521 #endif /* CONFIG_I40E_DCB */
6522 #define SPEED_SIZE 14
6525 * i40e_print_link_message - print link up or down
6526 * @vsi: the VSI for which link needs a message
6527 * @isup: true if link is up, false otherwise
6529 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6531 enum i40e_aq_link_speed new_speed;
6532 struct i40e_pf *pf = vsi->back;
6533 char *speed = "Unknown";
6534 char *fc = "Unknown";
char *fec = "";
char *req_fec = "";
char *an = "";
if (isup)
6540 new_speed = pf->hw.phy.link_info.link_speed;
else
6542 new_speed = I40E_LINK_SPEED_UNKNOWN;
6544 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
return;
6546 vsi->current_isup = isup;
6547 vsi->current_speed = new_speed;
if (!isup) {
6549 netdev_info(vsi->netdev, "NIC Link is Down\n");
return;
}
6553 /* Warn user if link speed on NPAR enabled partition is not at least 10GB */
6556 if (pf->hw.func_caps.npar_enable &&
6557 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
6558 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
6559 netdev_warn(vsi->netdev,
6560 "The partition detected link speed that is less than 10Gbps\n");
6562 switch (pf->hw.phy.link_info.link_speed) {
6563 case I40E_LINK_SPEED_40GB: speed = "40 G"; break;
6566 case I40E_LINK_SPEED_20GB: speed = "20 G"; break;
6569 case I40E_LINK_SPEED_25GB: speed = "25 G"; break;
6572 case I40E_LINK_SPEED_10GB: speed = "10 G"; break;
6575 case I40E_LINK_SPEED_5GB: speed = "5 G"; break;
6578 case I40E_LINK_SPEED_2_5GB: speed = "2.5 G"; break;
6581 case I40E_LINK_SPEED_1GB: speed = "1000 M"; break;
6584 case I40E_LINK_SPEED_100MB: speed = "100 M"; break;
default: break;
}
6591 switch (pf->hw.fc.current_mode) {
case I40E_FC_FULL: fc = "RX/TX"; break;
6595 case I40E_FC_TX_PAUSE: fc = "TX"; break;
6598 case I40E_FC_RX_PAUSE: fc = "RX"; break;
default: fc = "None"; break;
}
6606 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
6607 req_fec = ", Requested FEC: None";
6608 fec = ", FEC: None";
6609 an = ", Autoneg: False";
6611 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6612 an = ", Autoneg: True";
6614 if (pf->hw.phy.link_info.fec_info &
6615 I40E_AQ_CONFIG_FEC_KR_ENA)
6616 fec = ", FEC: CL74 FC-FEC/BASE-R";
6617 else if (pf->hw.phy.link_info.fec_info &
6618 I40E_AQ_CONFIG_FEC_RS_ENA)
6619 fec = ", FEC: CL108 RS-FEC";
6621 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
6622 * both RS and FC are requested
6624 if (vsi->back->hw.phy.link_info.req_fec_info &
6625 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
6626 if (vsi->back->hw.phy.link_info.req_fec_info &
6627 I40E_AQ_REQUEST_FEC_RS)
6628 req_fec = ", Requested FEC: CL108 RS-FEC";
6630 req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
6634 netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
6635 speed, req_fec, fec, an, fc);
6639 * i40e_up_complete - Finish the last steps of bringing up a connection
6640 * @vsi: the VSI being configured
6642 static int i40e_up_complete(struct i40e_vsi *vsi)
6644 struct i40e_pf *pf = vsi->back;
6647 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6648 i40e_vsi_configure_msix(vsi);
6650 i40e_configure_msi_and_legacy(vsi);
6653 err = i40e_vsi_start_rings(vsi);
6657 clear_bit(__I40E_VSI_DOWN, vsi->state);
6658 i40e_napi_enable_all(vsi);
6659 i40e_vsi_enable_irq(vsi);
6661 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
6663 i40e_print_link_message(vsi, true);
6664 netif_tx_start_all_queues(vsi->netdev);
6665 netif_carrier_on(vsi->netdev);
6668 /* replay FDIR SB filters */
6669 if (vsi->type == I40E_VSI_FDIR) {
6670 /* reset fd counters */
6673 i40e_fdir_filter_restore(vsi);
6676 /* On the next run of the service_task, notify any clients of the newly opened netdev */
6679 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6680 i40e_service_event_schedule(pf);
6686 * i40e_vsi_reinit_locked - Reset the VSI
6687 * @vsi: the VSI being configured
6689 * Rebuild the ring structs after some configuration
6690 * has changed, e.g. MTU size.
6692 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
6694 struct i40e_pf *pf = vsi->back;
6696 WARN_ON(in_interrupt());
6697 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
6698 usleep_range(1000, 2000);
6702 clear_bit(__I40E_CONFIG_BUSY, pf->state);
6706 * i40e_up - Bring the connection back up after being down
6707 * @vsi: the VSI being configured
6709 int i40e_up(struct i40e_vsi *vsi)
6713 err = i40e_vsi_configure(vsi);
if (!err)
6715 err = i40e_up_complete(vsi);
6721 * i40e_force_link_state - Force the link status
6722 * @pf: board private structure
6723 * @is_up: whether the link state should be forced up or down
6725 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
6727 struct i40e_aq_get_phy_abilities_resp abilities;
6728 struct i40e_aq_set_phy_config config = {0};
6729 struct i40e_hw *hw = &pf->hw;
6734 /* Card might've been put in an unstable state by other drivers
6735 * and applications, which causes incorrect speed values being
6736 * set on startup. In order to clear speed registers, we call
6737 * get_phy_capabilities twice, once to get initial state of
6738 * available speeds, and once to get current PHY config.
6740 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
6743 dev_err(&pf->pdev->dev,
6744 "failed to get phy cap., ret = %s last_status = %s\n",
6745 i40e_stat_str(hw, err),
6746 i40e_aq_str(hw, hw->aq.asq_last_status));
6749 speed = abilities.link_speed;
6751 /* Get the current phy config */
6752 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
6755 dev_err(&pf->pdev->dev,
6756 "failed to get phy cap., ret = %s last_status = %s\n",
6757 i40e_stat_str(hw, err),
6758 i40e_aq_str(hw, hw->aq.asq_last_status));
6762 /* If link needs to go up, but was not forced to go down,
6763 * and its speed values are OK, no need for a flap
6765 if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
6766 return I40E_SUCCESS;
6768 /* To force link we need to set bits for all supported PHY types,
6769 * but there are now more than 32, so we need to split the bitmap
6770 * across two fields.
6772 mask = I40E_PHY_TYPES_BITMASK;
6773 config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
6774 config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
6775 /* Copy the old settings, except for phy_type */
6776 config.abilities = abilities.abilities;
6777 if (abilities.link_speed != 0)
6778 config.link_speed = abilities.link_speed;
6780 config.link_speed = speed;
6781 config.eee_capability = abilities.eee_capability;
6782 config.eeer = abilities.eeer_val;
6783 config.low_power_ctrl = abilities.d3_lpan;
6784 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
6785 I40E_AQ_PHY_FEC_CONFIG_MASK;
6786 err = i40e_aq_set_phy_config(hw, &config, NULL);
6789 dev_err(&pf->pdev->dev,
6790 "set phy config ret = %s last_status = %s\n",
6791 i40e_stat_str(&pf->hw, err),
6792 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6796 /* Update the link info */
6797 err = i40e_update_link_info(hw);
6799 /* Wait a little bit (on 40G cards it sometimes takes a really
6800 * long time for link to come back from the atomic reset)
6804 i40e_update_link_info(hw);
6807 i40e_aq_set_link_restart_an(hw, true, NULL);
6809 return I40E_SUCCESS;
6813 * i40e_down - Shutdown the connection processing
6814 * @vsi: the VSI being stopped
6816 void i40e_down(struct i40e_vsi *vsi)
6820 /* It is assumed that the caller of this function
6821 * sets the vsi->state __I40E_VSI_DOWN bit.
6824 netif_carrier_off(vsi->netdev);
6825 netif_tx_disable(vsi->netdev);
6827 i40e_vsi_disable_irq(vsi);
6828 i40e_vsi_stop_rings(vsi);
6829 if (vsi->type == I40E_VSI_MAIN &&
6830 vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
6831 i40e_force_link_state(vsi->back, false);
6832 i40e_napi_disable_all(vsi);
6834 for (i = 0; i < vsi->num_queue_pairs; i++) {
6835 i40e_clean_tx_ring(vsi->tx_rings[i]);
6836 if (i40e_enabled_xdp_vsi(vsi)) {
6837 /* Make sure that in-progress ndo_xdp_xmit
6838 * calls are completed.
*/
synchronize_rcu();
6841 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6843 i40e_clean_rx_ring(vsi->rx_rings[i]);
6849 * i40e_validate_mqprio_qopt - validate queue mapping info
6850 * @vsi: the VSI being configured
6851 * @mqprio_qopt: queue parameters
6853 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
6854 struct tc_mqprio_qopt_offload *mqprio_qopt)
6856 u64 sum_max_rate = 0;
6860 if (mqprio_qopt->qopt.offset[0] != 0 ||
6861 mqprio_qopt->qopt.num_tc < 1 ||
6862 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
6864 for (i = 0; ; i++) {
6865 if (!mqprio_qopt->qopt.count[i])
return -EINVAL;
6867 if (mqprio_qopt->min_rate[i]) {
6868 dev_err(&vsi->back->pdev->dev,
6869 "Invalid min tx rate (greater than 0) specified\n");
6872 max_rate = mqprio_qopt->max_rate[i];
6873 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6874 sum_max_rate += max_rate;
6876 if (i >= mqprio_qopt->qopt.num_tc - 1)
break;
6878 if (mqprio_qopt->qopt.offset[i + 1] !=
6879 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
return -EINVAL;
6882 if (vsi->num_queue_pairs <
6883 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
6886 if (sum_max_rate > i40e_get_link_speed(vsi)) {
6887 dev_err(&vsi->back->pdev->dev,
6888 "Invalid max tx rate specified\n");
6895 * i40e_vsi_set_default_tc_config - set default values for tc configuration
6896 * @vsi: the VSI being configured
6898 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
6903 /* Only TC0 is enabled */
6904 vsi->tc_config.numtc = 1;
6905 vsi->tc_config.enabled_tc = 1;
6906 qcount = min_t(int, vsi->alloc_queue_pairs,
6907 i40e_pf_get_max_q_per_tc(vsi->back));
6908 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6909 /* For the TC that is not enabled set the offset to the default
6910 * queue and allocate one queue for the given TC.
6912 vsi->tc_config.tc_info[i].qoffset = 0;
if (i == 0)
6914 vsi->tc_config.tc_info[i].qcount = qcount;
else
6916 vsi->tc_config.tc_info[i].qcount = 1;
6917 vsi->tc_config.tc_info[i].netdev_tc = 0;
6922 * i40e_del_macvlan_filter
6923 * @hw: pointer to the HW structure
6924 * @seid: seid of the channel VSI
6925 * @macaddr: the mac address to apply as a filter
6926 * @aq_err: store the admin Q error
6928 * This function deletes a mac filter on the channel VSI which serves as the
6929 * macvlan. Returns 0 on success.
6931 static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
6932 const u8 *macaddr, int *aq_err)
6934 struct i40e_aqc_remove_macvlan_element_data element;
6937 memset(&element, 0, sizeof(element));
6938 ether_addr_copy(element.mac_addr, macaddr);
6939 element.vlan_tag = 0;
6940 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6941 status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
6942 *aq_err = hw->aq.asq_last_status;
6948 * i40e_add_macvlan_filter
6949 * @hw: pointer to the HW structure
6950 * @seid: seid of the channel VSI
6951 * @macaddr: the mac address to apply as a filter
6952 * @aq_err: store the admin Q error
6954 * This function adds a mac filter on the channel VSI which serves as the
6955 * macvlan. Returns 0 on success.
6957 static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
6958 const u8 *macaddr, int *aq_err)
6960 struct i40e_aqc_add_macvlan_element_data element;
6964 ether_addr_copy(element.mac_addr, macaddr);
6965 element.vlan_tag = 0;
6966 element.queue_number = 0;
6967 element.match_method = I40E_AQC_MM_ERR_NO_RES;
6968 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6969 element.flags = cpu_to_le16(cmd_flags);
6970 status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
6971 *aq_err = hw->aq.asq_last_status;
6977 * i40e_reset_ch_rings - Reset the queue contexts in a channel
6978 * @vsi: the VSI we want to access
6979 * @ch: the channel we want to access
6981 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
6983 struct i40e_ring *tx_ring, *rx_ring;
6987 for (i = 0; i < ch->num_queue_pairs; i++) {
6988 pf_q = ch->base_queue + i;
6989 tx_ring = vsi->tx_rings[pf_q];
6991 rx_ring = vsi->rx_rings[pf_q];
6997 * i40e_free_macvlan_channels
6998 * @vsi: the VSI we want to access
7000 * This function frees the Qs of the channel VSI from
7001 * the stack and also deletes the channel VSIs which
7002 * serve as macvlans.
7004 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
7006 struct i40e_channel *ch, *ch_tmp;
7009 if (list_empty(&vsi->macvlan_list))
7012 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7013 struct i40e_vsi *parent_vsi;
7015 if (i40e_is_channel_macvlan(ch)) {
7016 i40e_reset_ch_rings(vsi, ch);
7017 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7018 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
7019 netdev_set_sb_channel(ch->fwd->netdev, 0);
7024 list_del(&ch->list);
7025 parent_vsi = ch->parent_vsi;
7026 if (!parent_vsi || !ch->initialized) {
7031 /* remove the VSI */
7032 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7035 dev_err(&vsi->back->pdev->dev,
7036 "unable to remove channel (%d) for parent VSI(%d)\n",
7037 ch->seid, parent_vsi->seid);
7040 vsi->macvlan_cnt = 0;
7044 * i40e_fwd_ring_up - bring the macvlan device up
7045 * @vsi: the VSI we want to access
7046 * @vdev: macvlan netdevice
7047 * @fwd: the private fwd structure
7049 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7050 struct i40e_fwd_adapter *fwd)
7052 int ret = 0, num_tc = 1, i, aq_err;
7053 struct i40e_channel *ch, *ch_tmp;
7054 struct i40e_pf *pf = vsi->back;
7055 struct i40e_hw *hw = &pf->hw;
7057 if (list_empty(&vsi->macvlan_list))
7060 /* Go through the list and find an available channel */
7061 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7062 if (!i40e_is_channel_macvlan(ch)) {
7064 /* record configuration for macvlan interface in vdev */
7065 for (i = 0; i < num_tc; i++)
7066 netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7068 ch->num_queue_pairs,
7070 for (i = 0; i < ch->num_queue_pairs; i++) {
7071 struct i40e_ring *tx_ring, *rx_ring;
7074 pf_q = ch->base_queue + i;
7076 /* Get to TX ring ptr */
7077 tx_ring = vsi->tx_rings[pf_q];
7080 /* Get the RX ring ptr */
7081 rx_ring = vsi->rx_rings[pf_q];
7088 /* Guarantee all rings are updated before we update the
7089 * MAC address filter.
7093 /* Add a mac filter */
7094 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7096 /* if we cannot add the MAC rule then disable the offload */
7097 macvlan_release_l2fw_offload(vdev);
7098 for (i = 0; i < ch->num_queue_pairs; i++) {
7099 struct i40e_ring *rx_ring;
7102 pf_q = ch->base_queue + i;
7103 rx_ring = vsi->rx_rings[pf_q];
7104 rx_ring->netdev = NULL;
7106 dev_info(&pf->pdev->dev,
7107 "Error adding mac filter on macvlan err %s, aq_err %s\n",
7108 i40e_stat_str(hw, ret),
7109 i40e_aq_str(hw, aq_err));
7110 netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");
7117 * i40e_setup_macvlans - create the channels which will be macvlans
7118 * @vsi: the VSI we want to access
7119 * @macvlan_cnt: no. of macvlans to be setup
7120 * @qcnt: no. of Qs per macvlan
7121 * @vdev: macvlan netdevice
7123 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7124 struct net_device *vdev)
7126 struct i40e_pf *pf = vsi->back;
7127 struct i40e_hw *hw = &pf->hw;
7128 struct i40e_vsi_context ctxt;
7129 u16 sections, qmap, num_qps;
7130 struct i40e_channel *ch;
7131 int i, pow, ret = 0;
7134 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7137 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7139 /* find the next higher power-of-2 of num queue pairs */
7140 pow = fls(roundup_pow_of_two(num_qps) - 1);
7142 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7143 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
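/* qmap describes one contiguous queue region for TC0: the offset field
 * carries the first queue index and the "number of queues" field carries
 * the power-of-two exponent computed above (assumed encoding of the
 * I40E_AQ_VSI_TC_QUE_* fields).
 */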
7145 /* Setup context bits for the main VSI */
7146 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7147 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7148 memset(&ctxt, 0, sizeof(ctxt));
7149 ctxt.seid = vsi->seid;
7150 ctxt.pf_num = vsi->back->hw.pf_id;
7152 ctxt.uplink_seid = vsi->uplink_seid;
7153 ctxt.info = vsi->info;
7154 ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7155 ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7156 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7157 ctxt.info.valid_sections |= cpu_to_le16(sections);
7159 /* Reconfigure RSS for main VSI with new max queue count */
7160 vsi->rss_size = max_t(u16, num_qps, qcnt);
7161 ret = i40e_vsi_config_rss(vsi);
7163 dev_info(&pf->pdev->dev,
7164 "Failed to reconfig RSS for num_queues (%u)\n",
7168 vsi->reconfig_rss = true;
7169 dev_dbg(&vsi->back->pdev->dev,
7170 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7171 vsi->next_base_queue = num_qps;
7172 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
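/* The first num_qps queues (and the RSS spread) stay with the main VSI;
 * the queues from next_base_queue onwards are left available for the
 * macvlan channels created below.
 */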
7174 /* Update the VSI after updating the VSI queue-mapping
7177 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7179 dev_info(&pf->pdev->dev,
7180 "Update vsi tc config failed, err %s aq_err %s\n",
7181 i40e_stat_str(hw, ret),
7182 i40e_aq_str(hw, hw->aq.asq_last_status));
7185 /* update the local VSI info with updated queue map */
7186 i40e_vsi_update_queue_map(vsi, &ctxt);
7187 vsi->info.valid_sections = 0;
7189 /* Create channels for macvlans */
7190 INIT_LIST_HEAD(&vsi->macvlan_list);
7191 for (i = 0; i < macvlan_cnt; i++) {
7192 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7197 INIT_LIST_HEAD(&ch->list);
7198 ch->num_queue_pairs = qcnt;
7199 if (!i40e_setup_channel(pf, vsi, ch)) {
7203 ch->parent_vsi = vsi;
7204 vsi->cnt_q_avail -= ch->num_queue_pairs;
7206 list_add_tail(&ch->list, &vsi->macvlan_list);
7212 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7213 i40e_free_macvlan_channels(vsi);
7219 * i40e_fwd_add - configure macvlans
7220 * @netdev: net device to configure
7221 * @vdev: macvlan netdevice
7223 static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7225 struct i40e_netdev_priv *np = netdev_priv(netdev);
7226 u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7227 struct i40e_vsi *vsi = np->vsi;
7228 struct i40e_pf *pf = vsi->back;
7229 struct i40e_fwd_adapter *fwd;
7230 int avail_macvlan, ret;
7232 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
7233 netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7234 return ERR_PTR(-EINVAL);
7236 if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
7237 netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7238 return ERR_PTR(-EINVAL);
7240 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7241 netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7242 return ERR_PTR(-EINVAL);
7245 /* The macvlan device has to be a single Q device so that the
7246 * tc_to_txq field can be reused to pick the tx queue.
7248 if (netif_is_multiqueue(vdev))
7249 return ERR_PTR(-ERANGE);
7251 if (!vsi->macvlan_cnt) {
7252 /* reserve bit 0 for the pf device */
7253 set_bit(0, vsi->fwd_bitmask);
7255 /* Try to reserve as many queues as possible for macvlans. First
7256 * reserve 3/4th of max vectors, then half, then quarter and
7257 * calculate Qs per macvlan as you go
7259 vectors = pf->num_lan_msix;
7260 if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
7261 /* allocate 4 Qs per macvlan and 32 Qs to the PF*/
q_per_macvlan = 4;
7263 macvlan_cnt = (vectors - 32) / 4;
7264 } else if (vectors <= 64 && vectors > 32) {
7265 /* allocate 2 Qs per macvlan and 16 Qs to the PF*/
q_per_macvlan = 2;
7267 macvlan_cnt = (vectors - 16) / 2;
7268 } else if (vectors <= 32 && vectors > 16) {
7269 /* allocate 1 Q per macvlan and 16 Qs to the PF*/
q_per_macvlan = 1;
7271 macvlan_cnt = vectors - 16;
7272 } else if (vectors <= 16 && vectors > 8) {
7273 /* allocate 1 Q per macvlan and 8 Qs to the PF */
q_per_macvlan = 1;
7275 macvlan_cnt = vectors - 8;
} else {
7277 /* allocate 1 Q per macvlan and 1 Q to the PF */
q_per_macvlan = 1;
7279 macvlan_cnt = vectors - 1;
}
7282 if (macvlan_cnt == 0)
7283 return ERR_PTR(-EBUSY);
7285 /* Quiesce VSI queues */
7286 i40e_quiesce_vsi(vsi);
7288 /* sets up the macvlans but does not "enable" them */
7289 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan, vdev);
if (ret)
7292 return ERR_PTR(ret);
7295 i40e_unquiesce_vsi(vsi);
7297 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
7299 if (avail_macvlan >= I40E_MAX_MACVLANS)
7300 return ERR_PTR(-EBUSY);
7302 /* create the fwd struct */
7303 fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
7305 return ERR_PTR(-ENOMEM);
7307 set_bit(avail_macvlan, vsi->fwd_bitmask);
7308 fwd->bit_no = avail_macvlan;
7309 netdev_set_sb_channel(vdev, avail_macvlan);
7312 if (!netif_running(netdev))
7315 /* Set fwd ring up */
7316 ret = i40e_fwd_ring_up(vsi, vdev, fwd);
7318 /* unbind the queues and drop the subordinate channel config */
7319 netdev_unbind_sb_channel(netdev, vdev);
7320 netdev_set_sb_channel(vdev, 0);
7323 return ERR_PTR(-EINVAL);
7330 * i40e_del_all_macvlans - Delete all the mac filters on the channels
7331 * @vsi: the VSI we want to access
7333 static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
7335 struct i40e_channel *ch, *ch_tmp;
7336 struct i40e_pf *pf = vsi->back;
7337 struct i40e_hw *hw = &pf->hw;
7338 int aq_err, ret = 0;
7340 if (list_empty(&vsi->macvlan_list))
7343 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7344 if (i40e_is_channel_macvlan(ch)) {
7345 ret = i40e_del_macvlan_filter(hw, ch->seid,
7346 i40e_channel_mac(ch),
7349 /* Reset queue contexts */
7350 i40e_reset_ch_rings(vsi, ch);
7351 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7352 netdev_unbind_sb_channel(vsi->netdev,
7354 netdev_set_sb_channel(ch->fwd->netdev, 0);
7363 * i40e_fwd_del - delete macvlan interfaces
7364 * @netdev: net device to configure
7365 * @vdev: macvlan netdevice
7367 static void i40e_fwd_del(struct net_device *netdev, void *vdev)
7369 struct i40e_netdev_priv *np = netdev_priv(netdev);
7370 struct i40e_fwd_adapter *fwd = vdev;
7371 struct i40e_channel *ch, *ch_tmp;
7372 struct i40e_vsi *vsi = np->vsi;
7373 struct i40e_pf *pf = vsi->back;
7374 struct i40e_hw *hw = &pf->hw;
7375 int aq_err, ret = 0;
7377 /* Find the channel associated with the macvlan and del mac filter */
7378 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7379 if (i40e_is_channel_macvlan(ch) &&
7380 ether_addr_equal(i40e_channel_mac(ch),
7381 fwd->netdev->dev_addr)) {
7382 ret = i40e_del_macvlan_filter(hw, ch->seid,
7383 i40e_channel_mac(ch),
7386 /* Reset queue contexts */
7387 i40e_reset_ch_rings(vsi, ch);
7388 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7389 netdev_unbind_sb_channel(netdev, fwd->netdev);
7390 netdev_set_sb_channel(fwd->netdev, 0);
7394 dev_info(&pf->pdev->dev,
7395 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
7396 i40e_stat_str(hw, ret),
7397 i40e_aq_str(hw, aq_err));
7405 * i40e_setup_tc - configure multiple traffic classes
7406 * @netdev: net device to configure
7407 * @type_data: tc offload data
7409 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
7411 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
7412 struct i40e_netdev_priv *np = netdev_priv(netdev);
7413 struct i40e_vsi *vsi = np->vsi;
7414 struct i40e_pf *pf = vsi->back;
7415 u8 enabled_tc = 0, num_tc, hw;
7416 bool need_reset = false;
7417 int old_queue_pairs;
7422 old_queue_pairs = vsi->num_queue_pairs;
7423 num_tc = mqprio_qopt->qopt.num_tc;
7424 hw = mqprio_qopt->qopt.hw;
7425 mode = mqprio_qopt->mode;
7427 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7428 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
7432 /* Check if MFP enabled */
7433 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
7435 "Configuring TC not supported in MFP mode\n");
7439 case TC_MQPRIO_MODE_DCB:
7440 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7442 /* Check if DCB enabled to continue */
7443 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
7445 "DCB is not enabled for adapter\n");
7449 /* Check whether tc count is within enabled limit */
7450 if (num_tc > i40e_pf_get_num_tc(pf)) {
7452 "TC count greater than enabled on link for adapter\n");
7456 case TC_MQPRIO_MODE_CHANNEL:
7457 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
7459 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
7462 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7464 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
7467 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
7468 sizeof(*mqprio_qopt));
7469 pf->flags |= I40E_FLAG_TC_MQPRIO;
7470 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7477 /* Generate TC map for number of tc requested */
7478 for (i = 0; i < num_tc; i++)
7479 enabled_tc |= BIT(i);
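/* Builds a contiguous TC bitmap, e.g. num_tc = 3 yields enabled_tc = 0x7. */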
7481 /* Requesting same TC configuration as already enabled */
7482 if (enabled_tc == vsi->tc_config.enabled_tc &&
7483 mode != TC_MQPRIO_MODE_CHANNEL)
7486 /* Quiesce VSI queues */
7487 i40e_quiesce_vsi(vsi);
7489 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
7490 i40e_remove_queue_channels(vsi);
7492 /* Configure VSI for enabled TCs */
7493 ret = i40e_vsi_config_tc(vsi, enabled_tc);
7495 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
7500 dev_info(&vsi->back->pdev->dev,
7501 "Setup channel (id:%u) utilizing num_queues %d\n",
7502 vsi->seid, vsi->tc_config.tc_info[0].qcount);
7505 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
7506 if (vsi->mqprio_qopt.max_rate[0]) {
7507 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
7509 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
7510 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
7512 u64 credits = max_tx_rate;
7514 do_div(credits, I40E_BW_CREDIT_DIVISOR);
7515 dev_dbg(&vsi->back->pdev->dev,
7516 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
7525 ret = i40e_configure_queue_channels(vsi);
7527 vsi->num_queue_pairs = old_queue_pairs;
7529 "Failed configuring queue channels\n");
7536 /* Reset the configuration data to defaults, only TC0 is enabled */
7538 i40e_vsi_set_default_tc_config(vsi);
7543 i40e_unquiesce_vsi(vsi);
7548 * i40e_set_cld_element - sets cloud filter element data
7549 * @filter: cloud filter rule
7550 * @cld: ptr to cloud filter element data
7552 * This is helper function to copy data into cloud filter element
7555 i40e_set_cld_element(struct i40e_cloud_filter *filter,
7556 struct i40e_aqc_cloud_filters_element_data *cld)
7561 memset(cld, 0, sizeof(*cld));
7562 ether_addr_copy(cld->outer_mac, filter->dst_mac);
7563 ether_addr_copy(cld->inner_mac, filter->src_mac);
7565 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
7568 if (filter->n_proto == ETH_P_IPV6) {
7569 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
7570 for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
7572 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
7573 ipa = cpu_to_le32(ipa);
7574 memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
7577 ipa = be32_to_cpu(filter->dst_ipv4);
7578 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
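/* The IPv4 address is copied host-ordered into ipaddr.v4, while the IPv6
 * words above are copied in reverse order; both appear to match the layout
 * the admin queue expects for cloud filter elements.
 */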
7581 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
7583 /* tenant_id is not supported by FW now, once the support is enabled
7584 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
7586 if (filter->tenant_id)
7591 * i40e_add_del_cloud_filter - Add/del cloud filter
7592 * @vsi: pointer to VSI
7593 * @filter: cloud filter rule
7594 * @add: if true, add, if false, delete
7596 * Add or delete a cloud filter for a specific flow spec.
7597 * Returns 0 if the filter was successfully added.
7599 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
7600 struct i40e_cloud_filter *filter, bool add)
7602 struct i40e_aqc_cloud_filters_element_data cld_filter;
7603 struct i40e_pf *pf = vsi->back;
7605 static const u16 flag_table[128] = {
7606 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
7607 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
7608 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
7609 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
7610 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
7611 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
7612 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
7613 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
7614 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
7615 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
7616 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
7617 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
7618 [I40E_CLOUD_FILTER_FLAGS_IIP] =
7619 I40E_AQC_ADD_CLOUD_FILTER_IIP,
7622 if (filter->flags >= ARRAY_SIZE(flag_table))
7623 return I40E_ERR_CONFIG;
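/* flag_table maps the driver's I40E_CLOUD_FILTER_FLAGS_* combinations to
 * the admin queue ADD_CLOUD_FILTER_* type codes; the bounds check above
 * keeps the lookup inside the 128-entry table.
 */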
7625 /* copy element needed to add cloud filter from filter */
7626 i40e_set_cld_element(filter, &cld_filter);
7628 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
7629 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
7630 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
7632 if (filter->n_proto == ETH_P_IPV6)
7633 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7634 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7636 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7637 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7640 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
7643 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
7646 dev_dbg(&pf->pdev->dev,
7647 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
7648 add ? "add" : "delete", filter->dst_port, ret,
7649 pf->hw.aq.asq_last_status);
7651 dev_info(&pf->pdev->dev,
7652 "%s cloud filter for VSI: %d\n",
7653 add ? "Added" : "Deleted", filter->seid);
7658 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
7659 * @vsi: pointer to VSI
7660 * @filter: cloud filter rule
7661 * @add: if true, add, if false, delete
7663 * Add or delete a cloud filter for a specific flow spec using big buffer.
7664 * Returns 0 if the filter was successfully added.
7666 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
7667 struct i40e_cloud_filter *filter,
7670 struct i40e_aqc_cloud_filters_element_bb cld_filter;
7671 struct i40e_pf *pf = vsi->back;
7674 /* Both (src/dst) valid mac_addr are not supported */
7675 if ((is_valid_ether_addr(filter->dst_mac) &&
7676 is_valid_ether_addr(filter->src_mac)) ||
7677 (is_multicast_ether_addr(filter->dst_mac) &&
7678 is_multicast_ether_addr(filter->src_mac)))
7681 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
7682 * ports are not supported via big buffer now.
7684 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
7687 /* adding filter using src_port/src_ip is not supported at this stage */
7688 if (filter->src_port || filter->src_ipv4 ||
7689 !ipv6_addr_any(&filter->ip.v6.src_ip6))
7692 /* copy element needed to add cloud filter from filter */
7693 i40e_set_cld_element(filter, &cld_filter.element);
7695 if (is_valid_ether_addr(filter->dst_mac) ||
7696 is_valid_ether_addr(filter->src_mac) ||
7697 is_multicast_ether_addr(filter->dst_mac) ||
7698 is_multicast_ether_addr(filter->src_mac)) {
7699 /* MAC + IP : unsupported mode */
7700 if (filter->dst_ipv4)
7703 /* since we validated that L4 port must be valid before
7704 * we get here, start with respective "flags" value
7705 * and update if vlan is present or not
7707 cld_filter.element.flags =
7708 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
7710 if (filter->vlan_id) {
7711 cld_filter.element.flags =
7712 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
7715 } else if (filter->dst_ipv4 ||
7716 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
7717 cld_filter.element.flags =
7718 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
7719 if (filter->n_proto == ETH_P_IPV6)
7720 cld_filter.element.flags |=
7721 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7723 cld_filter.element.flags |=
7724 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7726 dev_err(&pf->pdev->dev,
7727 "either mac or ip has to be valid for cloud filter\n");
7731 /* Now copy L4 port in Byte 6..7 in general fields */
7732 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
7733 be16_to_cpu(filter->dst_port);
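/* Big-buffer filters carry the L4 destination port in word 0 of the
 * 0x16 flex field block; the port was already validated as non-zero
 * earlier in this function.
 */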
7736 /* Validate current device switch mode, change if necessary */
7737 ret = i40e_validate_and_set_switch_mode(vsi);
7739 dev_err(&pf->pdev->dev,
7740 "failed to set switch mode, ret %d\n",
7745 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
7748 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
7753 dev_dbg(&pf->pdev->dev,
7754 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
7755 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
7757 dev_info(&pf->pdev->dev,
7758 "%s cloud filter for VSI: %d, L4 port: %d\n",
7759 add ? "add" : "delete", filter->seid,
7760 ntohs(filter->dst_port));
7765 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
7766 * @vsi: Pointer to VSI
7767 * @cls_flower: Pointer to struct flow_cls_offload
7768 * @filter: Pointer to cloud filter structure
7771 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7772 struct flow_cls_offload *f,
7773 struct i40e_cloud_filter *filter)
7775 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
7776 struct flow_dissector *dissector = rule->match.dissector;
7777 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
7778 struct i40e_pf *pf = vsi->back;
7781 if (dissector->used_keys &
7782 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7783 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7784 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7785 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7786 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7787 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7788 BIT(FLOW_DISSECTOR_KEY_PORTS) |
7789 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
7790 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
7791 dissector->used_keys);
7795 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
7796 struct flow_match_enc_keyid match;
7798 flow_rule_match_enc_keyid(rule, &match);
7799 if (match.mask->keyid != 0)
7800 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
7802 filter->tenant_id = be32_to_cpu(match.key->keyid);
7805 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
7806 struct flow_match_basic match;
7808 flow_rule_match_basic(rule, &match);
7809 n_proto_key = ntohs(match.key->n_proto);
7810 n_proto_mask = ntohs(match.mask->n_proto);
7812 if (n_proto_key == ETH_P_ALL) {
7816 filter->n_proto = n_proto_key & n_proto_mask;
7817 filter->ip_proto = match.key->ip_proto;
7820 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7821 struct flow_match_eth_addrs match;
7823 flow_rule_match_eth_addrs(rule, &match);
7825 /* use is_broadcast and is_zero to check for all 0xff or all 0 */
7826 if (!is_zero_ether_addr(match.mask->dst)) {
7827 if (is_broadcast_ether_addr(match.mask->dst)) {
7828 field_flags |= I40E_CLOUD_FIELD_OMAC;
7830 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
7832 return I40E_ERR_CONFIG;
7836 if (!is_zero_ether_addr(match.mask->src)) {
7837 if (is_broadcast_ether_addr(match.mask->src)) {
7838 field_flags |= I40E_CLOUD_FIELD_IMAC;
7840 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
7842 return I40E_ERR_CONFIG;
7845 ether_addr_copy(filter->dst_mac, match.key->dst);
7846 ether_addr_copy(filter->src_mac, match.key->src);
7849 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
7850 struct flow_match_vlan match;
7852 flow_rule_match_vlan(rule, &match);
7853 if (match.mask->vlan_id) {
7854 if (match.mask->vlan_id == VLAN_VID_MASK) {
7855 field_flags |= I40E_CLOUD_FIELD_IVLAN;
7858 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
7859 match.mask->vlan_id);
7860 return I40E_ERR_CONFIG;
7864 filter->vlan_id = cpu_to_be16(match.key->vlan_id);
7867 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
7868 struct flow_match_control match;
7870 flow_rule_match_control(rule, &match);
7871 addr_type = match.key->addr_type;
7874 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7875 struct flow_match_ipv4_addrs match;
7877 flow_rule_match_ipv4_addrs(rule, &match);
7878 if (match.mask->dst) {
7879 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
7880 field_flags |= I40E_CLOUD_FIELD_IIP;
7882 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
7884 return I40E_ERR_CONFIG;
7888 if (match.mask->src) {
7889 if (match.mask->src == cpu_to_be32(0xffffffff)) {
7890 field_flags |= I40E_CLOUD_FIELD_IIP;
7892 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
7894 return I40E_ERR_CONFIG;
7898 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
7899 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
7900 return I40E_ERR_CONFIG;
7902 filter->dst_ipv4 = match.key->dst;
7903 filter->src_ipv4 = match.key->src;
7906 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7907 struct flow_match_ipv6_addrs match;
7909 flow_rule_match_ipv6_addrs(rule, &match);
7911 /* src and dest IPV6 address should not be LOOPBACK
7912 * (0:0:0:0:0:0:0:1), which can be represented as ::1
7914 if (ipv6_addr_loopback(&match.key->dst) ||
7915 ipv6_addr_loopback(&match.key->src)) {
7916 dev_err(&pf->pdev->dev,
7917 "Bad ipv6, addr is LOOPBACK\n");
7918 return I40E_ERR_CONFIG;
7920 if (!ipv6_addr_any(&match.mask->dst) ||
7921 !ipv6_addr_any(&match.mask->src))
7922 field_flags |= I40E_CLOUD_FIELD_IIP;
7924 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
7925 sizeof(filter->src_ipv6));
7926 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
7927 sizeof(filter->dst_ipv6));
7930 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
7931 struct flow_match_ports match;
7933 flow_rule_match_ports(rule, &match);
7934 if (match.mask->src) {
7935 if (match.mask->src == cpu_to_be16(0xffff)) {
7936 field_flags |= I40E_CLOUD_FIELD_IIP;
7938 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
7939 be16_to_cpu(match.mask->src));
7940 return I40E_ERR_CONFIG;
7944 if (match.mask->dst) {
7945 if (match.mask->dst == cpu_to_be16(0xffff)) {
7946 field_flags |= I40E_CLOUD_FIELD_IIP;
7948 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
7949 be16_to_cpu(match.mask->dst));
7950 return I40E_ERR_CONFIG;
7954 filter->dst_port = match.key->dst;
7955 filter->src_port = match.key->src;
7957 switch (filter->ip_proto) {
7962 dev_err(&pf->pdev->dev,
7963 "Only UDP and TCP transport are supported\n");
7967 filter->flags = field_flags;
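/* field_flags records which headers were matched exactly; the add/del
 * paths later translate it into an admin queue filter type (see
 * flag_table in i40e_add_del_cloud_filter()).
 */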
7972 * i40e_handle_tclass - Forward to a traffic class on the device
7973 * @vsi: Pointer to VSI
7974 * @tc: traffic class index on the device
7975 * @filter: Pointer to cloud filter structure
7978 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
7979 struct i40e_cloud_filter *filter)
7981 struct i40e_channel *ch, *ch_tmp;
7983 /* direct to a traffic class on the same device */
7985 filter->seid = vsi->seid;
7987 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
7988 if (!filter->dst_port) {
7989 dev_err(&vsi->back->pdev->dev,
7990 "Specify destination port to direct to traffic class that is not default\n");
7993 if (list_empty(&vsi->ch_list))
return -EINVAL;
7995 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
7997 if (ch->seid == vsi->tc_seid_map[tc])
7998 filter->seid = ch->seid;
8002 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
8007 * i40e_configure_clsflower - Configure tc flower filters
8008 * @vsi: Pointer to VSI
8009 * @cls_flower: Pointer to struct flow_cls_offload
8012 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
8013 struct flow_cls_offload *cls_flower)
8015 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
8016 struct i40e_cloud_filter *filter = NULL;
8017 struct i40e_pf *pf = vsi->back;
8021 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
8025 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
8026 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;
8029 if (pf->fdir_pf_active_filters ||
8030 (!hlist_empty(&pf->fdir_filter_list))) {
8031 dev_err(&vsi->back->pdev->dev,
8032 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
8036 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
8037 dev_err(&vsi->back->pdev->dev,
8038 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8039 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8040 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8043 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8047 filter->cookie = cls_flower->cookie;
8049 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8053 err = i40e_handle_tclass(vsi, tc, filter);
8057 /* Add cloud filter */
8058 if (filter->dst_port)
8059 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8061 err = i40e_add_del_cloud_filter(vsi, filter, true);
8064 dev_err(&pf->pdev->dev,
8065 "Failed to add cloud filter, err %s\n",
8066 i40e_stat_str(&pf->hw, err));
8070 /* add filter to the ordered list */
8071 INIT_HLIST_NODE(&filter->cloud_node);
8073 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8075 pf->num_cloud_filters++;
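/* Filters are tracked on pf->cloud_filter_list so a later FLOW_CLS_DESTROY
 * can find them again by cookie (see i40e_find_cloud_filter()).
 */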
8084 * i40e_find_cloud_filter - Find the cloud filter in the list
8085 * @vsi: Pointer to VSI
8086 * @cookie: filter specific cookie
8089 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8090 unsigned long *cookie)
8092 struct i40e_cloud_filter *filter = NULL;
8093 struct hlist_node *node2;
8095 hlist_for_each_entry_safe(filter, node2,
8096 &vsi->back->cloud_filter_list, cloud_node)
8097 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8103 * i40e_delete_clsflower - Remove tc flower filters
8104 * @vsi: Pointer to VSI
8105 * @cls_flower: Pointer to struct flow_cls_offload
8108 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8109 struct flow_cls_offload *cls_flower)
8111 struct i40e_cloud_filter *filter = NULL;
8112 struct i40e_pf *pf = vsi->back;
8115 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8120 hash_del(&filter->cloud_node);
8122 if (filter->dst_port)
8123 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8125 err = i40e_add_del_cloud_filter(vsi, filter, false);
8129 dev_err(&pf->pdev->dev,
8130 "Failed to delete cloud filter, err %s\n",
8131 i40e_stat_str(&pf->hw, err));
8132 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8135 pf->num_cloud_filters--;
8136 if (!pf->num_cloud_filters)
8137 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8138 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8139 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8140 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8141 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8147 * i40e_setup_tc_cls_flower - flower classifier offloads
8148 * @netdev: net device to configure
8149 * @type_data: offload data
8151 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8152 struct flow_cls_offload *cls_flower)
8154 struct i40e_vsi *vsi = np->vsi;
8156 switch (cls_flower->command) {
8157 case FLOW_CLS_REPLACE:
8158 return i40e_configure_clsflower(vsi, cls_flower);
8159 case FLOW_CLS_DESTROY:
8160 return i40e_delete_clsflower(vsi, cls_flower);
8161 case FLOW_CLS_STATS:
8168 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8171 struct i40e_netdev_priv *np = cb_priv;
8173 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8177 case TC_SETUP_CLSFLOWER:
8178 return i40e_setup_tc_cls_flower(np, type_data);
8185 static LIST_HEAD(i40e_block_cb_list);
8187 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8190 struct i40e_netdev_priv *np = netdev_priv(netdev);
8193 case TC_SETUP_QDISC_MQPRIO:
8194 return i40e_setup_tc(netdev, type_data);
8195 case TC_SETUP_BLOCK:
8196 return flow_block_cb_setup_simple(type_data,
8197 &i40e_block_cb_list,
8198 i40e_setup_tc_block_cb,
8206 * i40e_open - Called when a network interface is made active
8207 * @netdev: network interface device structure
8209 * The open entry point is called when a network interface is made
8210 * active by the system (IFF_UP). At this point all resources needed
8211 * for transmit and receive operations are allocated, the interrupt
8212 * handler is registered with the OS, the netdev watchdog subtask is
8213 * enabled, and the stack is notified that the interface is ready.
8215 * Returns 0 on success, negative value on failure
8217 int i40e_open(struct net_device *netdev)
8219 struct i40e_netdev_priv *np = netdev_priv(netdev);
8220 struct i40e_vsi *vsi = np->vsi;
8221 struct i40e_pf *pf = vsi->back;
8224 /* disallow open during test or if eeprom is broken */
8225 if (test_bit(__I40E_TESTING, pf->state) ||
8226 test_bit(__I40E_BAD_EEPROM, pf->state))
8229 netif_carrier_off(netdev);
8231 if (i40e_force_link_state(pf, true))
8234 err = i40e_vsi_open(vsi);
8238 /* configure global TSO hardware offload settings */
8239 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
8240 TCP_FLAG_FIN) >> 16);
8241 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
8243 TCP_FLAG_CWR) >> 16);
8244 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
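/* The three GLLAN_TSOMSK registers appear to select which TCP flags the
 * hardware masks on the first (_F), middle (_M) and last (_L) segments of
 * a TSO, hence the PSH/FIN/CWR split above.
 */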
8246 udp_tunnel_get_rx_info(netdev);
8253 * @vsi: the VSI to open
8255 * Finish initialization of the VSI.
8257 * Returns 0 on success, negative value on failure
8259 * Note: expects to be called while under rtnl_lock()
8261 int i40e_vsi_open(struct i40e_vsi *vsi)
8263 struct i40e_pf *pf = vsi->back;
8264 char int_name[I40E_INT_NAME_STR_LEN];
8267 /* allocate descriptors */
8268 err = i40e_vsi_setup_tx_resources(vsi);
8271 err = i40e_vsi_setup_rx_resources(vsi);
8275 err = i40e_vsi_configure(vsi);
8280 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
8281 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
8282 err = i40e_vsi_request_irq(vsi, int_name);
8286 /* Notify the stack of the actual queue counts. */
8287 err = netif_set_real_num_tx_queues(vsi->netdev,
8288 vsi->num_queue_pairs);
8290 goto err_set_queues;
8292 err = netif_set_real_num_rx_queues(vsi->netdev,
8293 vsi->num_queue_pairs);
8295 goto err_set_queues;
8297 } else if (vsi->type == I40E_VSI_FDIR) {
8298 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
8299 dev_driver_string(&pf->pdev->dev),
8300 dev_name(&pf->pdev->dev));
8301 err = i40e_vsi_request_irq(vsi, int_name);
8308 err = i40e_up_complete(vsi);
8310 goto err_up_complete;
8317 i40e_vsi_free_irq(vsi);
8319 i40e_vsi_free_rx_resources(vsi);
8321 i40e_vsi_free_tx_resources(vsi);
8322 if (vsi == pf->vsi[pf->lan_vsi])
8323 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
8329 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
8330 * @pf: Pointer to PF
8332 * This function destroys the hlist where all the Flow Director
8333 * filters were saved.
8335 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
8337 struct i40e_fdir_filter *filter;
8338 struct i40e_flex_pit *pit_entry, *tmp;
8339 struct hlist_node *node2;
8341 hlist_for_each_entry_safe(filter, node2,
8342 &pf->fdir_filter_list, fdir_node) {
8343 hlist_del(&filter->fdir_node);
8347 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
8348 list_del(&pit_entry->list);
8351 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
8353 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
8354 list_del(&pit_entry->list);
8357 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
8359 pf->fdir_pf_active_filters = 0;
8360 pf->fd_tcp4_filter_cnt = 0;
8361 pf->fd_udp4_filter_cnt = 0;
8362 pf->fd_sctp4_filter_cnt = 0;
8363 pf->fd_ip4_filter_cnt = 0;
8365 /* Reprogram the default input set for TCP/IPv4 */
8366 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8367 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8368 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8370 /* Reprogram the default input set for UDP/IPv4 */
8371 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
8372 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8373 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8375 /* Reprogram the default input set for SCTP/IPv4 */
8376 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
8377 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8378 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8380 /* Reprogram the default input set for Other/IPv4 */
8381 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
8382 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8384 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
8385 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
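/* With every sideband filter removed, the input set for each flow type is
 * back to its default of L3 src/dst (plus L4 src/dst where applicable).
 */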
8389 * i40e_cloud_filter_exit - Cleans up the cloud filters
8390 * @pf: Pointer to PF
8392 * This function destroys the hlist where all the cloud filters
8395 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
8397 struct i40e_cloud_filter *cfilter;
8398 struct hlist_node *node;
8400 hlist_for_each_entry_safe(cfilter, node,
8401 &pf->cloud_filter_list, cloud_node) {
8402 hlist_del(&cfilter->cloud_node);
8405 pf->num_cloud_filters = 0;
8407 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8408 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8409 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8410 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8411 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8416 * i40e_close - Disables a network interface
8417 * @netdev: network interface device structure
8419 * The close entry point is called when an interface is de-activated
8420 * by the OS. The hardware is still under the driver's control, but
8421 * this netdev interface is disabled.
8423 * Returns 0, this is not allowed to fail
8425 int i40e_close(struct net_device *netdev)
8427 struct i40e_netdev_priv *np = netdev_priv(netdev);
8428 struct i40e_vsi *vsi = np->vsi;
8430 i40e_vsi_close(vsi);
8436 * i40e_do_reset - Start a PF or Core Reset sequence
8437 * @pf: board private structure
8438 * @reset_flags: which reset is requested
8439 * @lock_acquired: indicates whether or not the lock has been acquired
8440 * before this function was called.
8442 * The essential difference in resets is that the PF Reset
8443 * doesn't clear the packet buffers, doesn't reset the PE
8444 * firmware, and doesn't bother the other PFs on the chip.
8446 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
8450 WARN_ON(in_interrupt());
8453 /* do the biggest reset indicated */
8454 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
8456 /* Request a Global Reset
8458 * This will start the chip's countdown to the actual full
8459 * chip reset event, and a warning interrupt to be sent
8460 * to all PFs, including the requestor. Our handler
8461 * for the warning interrupt will deal with the shutdown
8462 * and recovery of the switch setup.
8464 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
8465 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8466 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
8467 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8469 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
8471 /* Request a Core Reset
8473 * Same as Global Reset, except does *not* include the MAC/PHY
8475 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
8476 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8477 val |= I40E_GLGEN_RTRIG_CORER_MASK;
8478 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8479 i40e_flush(&pf->hw);
8481 } else if (reset_flags & I40E_PF_RESET_FLAG) {
8483 /* Request a PF Reset
8485 * Resets only the PF-specific registers
8487 * This goes directly to the tear-down and rebuild of
8488 * the switch, since we need to do all the recovery as
8489 * for the Core Reset.
8491 dev_dbg(&pf->pdev->dev, "PFR requested\n");
8492 i40e_handle_reset_warning(pf, lock_acquired);
8494 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
8497 /* Find the VSI(s) that requested a re-init */
8498 dev_info(&pf->pdev->dev,
8499 "VSI reinit requested\n");
8500 for (v = 0; v < pf->num_alloc_vsi; v++) {
8501 struct i40e_vsi *vsi = pf->vsi[v];
8504 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
8506 i40e_vsi_reinit_locked(pf->vsi[v]);
8508 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
8511 /* Find the VSI(s) that needs to be brought down */
8512 dev_info(&pf->pdev->dev, "VSI down requested\n");
8513 for (v = 0; v < pf->num_alloc_vsi; v++) {
8514 struct i40e_vsi *vsi = pf->vsi[v];
8517 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
8519 set_bit(__I40E_VSI_DOWN, vsi->state);
8524 dev_info(&pf->pdev->dev,
8525 "bad reset request 0x%08x\n", reset_flags);
8529 #ifdef CONFIG_I40E_DCB
8531 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
8532 * @pf: board private structure
8533 * @old_cfg: current DCB config
8534 * @new_cfg: new DCB config
8536 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
8537 struct i40e_dcbx_config *old_cfg,
8538 struct i40e_dcbx_config *new_cfg)
8540 bool need_reconfig = false;
8542 /* Check if ETS configuration has changed */
8543 if (memcmp(&new_cfg->etscfg,
8545 sizeof(new_cfg->etscfg))) {
8546 /* If Priority Table has changed reconfig is needed */
8547 if (memcmp(&new_cfg->etscfg.prioritytable,
8548 &old_cfg->etscfg.prioritytable,
8549 sizeof(new_cfg->etscfg.prioritytable))) {
8550 need_reconfig = true;
8551 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
8554 if (memcmp(&new_cfg->etscfg.tcbwtable,
8555 &old_cfg->etscfg.tcbwtable,
8556 sizeof(new_cfg->etscfg.tcbwtable)))
8557 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
8559 if (memcmp(&new_cfg->etscfg.tsatable,
8560 &old_cfg->etscfg.tsatable,
8561 sizeof(new_cfg->etscfg.tsatable)))
8562 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
8565 /* Check if PFC configuration has changed */
8566 if (memcmp(&new_cfg->pfc,
8568 sizeof(new_cfg->pfc))) {
8569 need_reconfig = true;
8570 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
8573 /* Check if APP Table has changed */
8574 if (memcmp(&new_cfg->app,
8576 sizeof(new_cfg->app))) {
8577 need_reconfig = true;
8578 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
8581 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
8582 return need_reconfig;
8586 * i40e_handle_lldp_event - Handle LLDP Change MIB event
8587 * @pf: board private structure
8588 * @e: event info posted on ARQ
8590 static int i40e_handle_lldp_event(struct i40e_pf *pf,
8591 struct i40e_arq_event_info *e)
8593 struct i40e_aqc_lldp_get_mib *mib =
8594 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
8595 struct i40e_hw *hw = &pf->hw;
8596 struct i40e_dcbx_config tmp_dcbx_cfg;
8597 bool need_reconfig = false;
8601 /* Not DCB capable or capability disabled */
8602 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
8605 /* Ignore if event is not for Nearest Bridge */
8606 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
8607 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
8608 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
8609 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
8612 /* Check MIB Type and return if event for Remote MIB update */
8613 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
8614 dev_dbg(&pf->pdev->dev,
8615 "LLDP event mib type %s\n", type ? "remote" : "local");
8616 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
8617 /* Update the remote cached instance and return */
8618 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
8619 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
8620 &hw->remote_dcbx_config);
8624 /* Store the old configuration */
8625 tmp_dcbx_cfg = hw->local_dcbx_config;
8627 /* Reset the old DCBx configuration data */
8628 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
8629 /* Get updated DCBX data from firmware */
8630 ret = i40e_get_dcb_config(&pf->hw);
8632 dev_info(&pf->pdev->dev,
8633 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
8634 i40e_stat_str(&pf->hw, ret),
8635 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8639 /* No change detected in DCBX configs */
8640 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
8641 sizeof(tmp_dcbx_cfg))) {
8642 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
8646 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
8647 &hw->local_dcbx_config);
8649 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
8654 /* Enable DCB tagging only when more than one TC */
8655 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
8656 pf->flags |= I40E_FLAG_DCB_ENABLED;
8658 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8660 set_bit(__I40E_PORT_SUSPENDED, pf->state);
8661 /* Reconfiguration needed quiesce all VSIs */
8662 i40e_pf_quiesce_all_vsi(pf);
8664 /* Changes in configuration update VEB/VSI */
8665 i40e_dcb_reconfigure(pf);
8667 ret = i40e_resume_port_tx(pf);
8669 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
8670 /* In case of error no point in resuming VSIs */
8674 /* Wait for the PF's queues to be disabled */
8675 ret = i40e_pf_wait_queues_disabled(pf);
8677 /* Schedule PF reset to recover */
8678 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8679 i40e_service_event_schedule(pf);
8681 i40e_pf_unquiesce_all_vsi(pf);
8682 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
8683 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
8689 #endif /* CONFIG_I40E_DCB */
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags, true);
	rtnl_unlock();
}

/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VFs such that a PF can process it to configure changes.
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}

/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}

/**
 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
 * @pf: board private structure
 **/
u32 i40e_get_current_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_get_global_fd_count - Get total FD filters programmed on device
 * @pf: board private structure
 **/
u32 i40e_get_global_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
		      I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_reenable_fdir_sb - Restore FDir SB capability
 * @pf: board private structure
 **/
static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
{
	if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}

/**
 * i40e_reenable_fdir_atr - Restore FDir ATR capability
 * @pf: board private structure
 **/
static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
{
	if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
		/* ATR uses the same filtering logic as SB rules. It only
		 * functions properly if the input set mask is at the default
		 * settings. It is safe to restore the default input set
		 * because there are no active TCPv4 filter rules.
		 */
		i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
					I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
					I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
	}
}

/**
 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
 * @pf: board private structure
 * @filter: FDir filter to remove
 */
static void i40e_delete_invalid_filter(struct i40e_pf *pf,
				       struct i40e_fdir_filter *filter)
{
	/* Update counters */
	pf->fdir_pf_active_filters--;
	pf->fd_inv = 0;

	switch (filter->flow_type) {
	case TCP_V4_FLOW:
		pf->fd_tcp4_filter_cnt--;
		break;
	case UDP_V4_FLOW:
		pf->fd_udp4_filter_cnt--;
		break;
	case SCTP_V4_FLOW:
		pf->fd_sctp4_filter_cnt--;
		break;
	case IP_USER_FLOW:
		switch (filter->ip4_proto) {
		case IPPROTO_TCP:
			pf->fd_tcp4_filter_cnt--;
			break;
		case IPPROTO_UDP:
			pf->fd_udp4_filter_cnt--;
			break;
		case IPPROTO_SCTP:
			pf->fd_sctp4_filter_cnt--;
			break;
		case IPPROTO_IP:
			pf->fd_ip4_filter_cnt--;
			break;
		}
		break;
	}

	/* Remove the filter from the list and free memory */
	hlist_del(&filter->fdir_node);
	kfree(filter);
}

/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		return;

	/* Check if we have enough room to re-enable FDir SB capability. */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
		i40e_reenable_fdir_sb(pf);

	/* We should wait for even more space before re-enabling ATR.
	 * Additionally, we cannot enable ATR as long as we still have TCP SB
	 * rules active.
	 */
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
	    (pf->fd_tcp4_filter_cnt == 0))
		i40e_reenable_fdir_atr(pf);

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node)
			if (filter->fd_id == pf->fd_inv)
				i40e_delete_invalid_filter(pf, filter);
	}
}

8900 #define I40E_MIN_FD_FLUSH_INTERVAL 10
8901 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
8903 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
8904 * @pf: board private structure
8906 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
8908 unsigned long min_flush_time;
8909 int flush_wait_retry = 50;
8910 bool disable_atr = false;
8914 if (!time_after(jiffies, pf->fd_flush_timestamp +
8915 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
8918 /* If the flush is happening too quick and we have mostly SB rules we
8919 * should not re-enable ATR for some time.
8921 min_flush_time = pf->fd_flush_timestamp +
8922 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
8923 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
8925 if (!(time_after(jiffies, min_flush_time)) &&
8926 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
8927 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8928 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
8932 pf->fd_flush_timestamp = jiffies;
8933 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8934 /* flush all filters */
8935 wr32(&pf->hw, I40E_PFQF_CTL_1,
8936 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
8937 i40e_flush(&pf->hw);
8941 /* Check FD flush status every 5-6msec */
8942 usleep_range(5000, 6000);
8943 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
8944 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
8946 } while (flush_wait_retry--);
8947 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
8948 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
8950 /* replay sideband filters */
8951 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
8952 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
8953 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8954 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
8955 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8956 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}
}

/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}

/* We can see up to 256 filter programming descriptors in transit if the
 * filters are being applied really fast, before we see the first filter
 * miss error on Rx queue 0. Accumulating enough error messages before
 * reacting will make sure we don't cause a flush too often.
 */
#define I40E_MAX_FD_PROGRAM_ERROR 256

/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state))
		return;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		i40e_fdir_flush_and_replay(pf);

	i40e_fdir_check_and_reenable(pf);
}

/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_IWARP:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}

/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
	struct i40e_pf *pf;
	int i;

	if (!veb || !veb->pf)
		return;
	pf = veb->pf;

	/* depth first... */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
			i40e_veb_link_event(pf->veb[i], link_up);

	/* ... now the local VSIs */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
			i40e_vsi_link_event(pf->vsi[i], link_up);
}

9055 * i40e_link_event - Update netif_carrier status
9056 * @pf: board private structure
9058 static void i40e_link_event(struct i40e_pf *pf)
9060 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9061 u8 new_link_speed, old_link_speed;
9063 bool new_link, old_link;
9065 /* set this to force the get_link_status call to refresh state */
9066 pf->hw.phy.get_link_info = true;
9067 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9068 status = i40e_get_link_status(&pf->hw, &new_link);
9070 /* On success, disable temp link polling */
9071 if (status == I40E_SUCCESS) {
9072 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9074 /* Enable link polling temporarily until i40e_get_link_status
9075 * returns I40E_SUCCESS
9077 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9078 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9083 old_link_speed = pf->hw.phy.link_info_old.link_speed;
9084 new_link_speed = pf->hw.phy.link_info.link_speed;
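	/* Nothing to do if neither the link state nor the link speed changed
	 * and either the VSI is already down or the netdev carrier already
	 * matches the new state.
	 */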
9086 if (new_link == old_link &&
9087 new_link_speed == old_link_speed &&
9088 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9089 new_link == netif_carrier_ok(vsi->netdev)))
9092 i40e_print_link_message(vsi, new_link);
9094 /* Notify the base of the switch tree connected to
9095 * the link. Floating VEBs are not notified.
9097 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9098 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9100 i40e_vsi_link_event(vsi, new_link);
9103 i40e_vc_notify_link_state(pf);
9105 if (pf->flags & I40E_FLAG_PTP)
9106 i40e_ptp_set_increment(pf);
9110 * i40e_watchdog_subtask - periodic checks not using event driven response
9111 * @pf: board private structure
9113 static void i40e_watchdog_subtask(struct i40e_pf *pf)
9117 /* if interface is down do nothing */
9118 if (test_bit(__I40E_DOWN, pf->state) ||
9119 test_bit(__I40E_CONFIG_BUSY, pf->state))
9122 /* make sure we don't do these things too often */
9123 if (time_before(jiffies, (pf->service_timer_previous +
9124 pf->service_timer_period)))
9126 pf->service_timer_previous = jiffies;
9128 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
9129 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9130 i40e_link_event(pf);
9132 /* Update the stats for active netdevs so the network stack
9133 * can look at updated numbers whenever it cares to
9135 for (i = 0; i < pf->num_alloc_vsi; i++)
9136 if (pf->vsi[i] && pf->vsi[i]->netdev)
9137 i40e_update_stats(pf->vsi[i]);
9139 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
9140 /* Update the stats for the active switching components */
		for (i = 0; i < I40E_MAX_VEB; i++)
			if (pf->veb[i])
				i40e_update_veb_stats(pf->veb[i]);
	}

	i40e_ptp_rx_hang(pf);
9147 i40e_ptp_tx_hang(pf);
9151 * i40e_reset_subtask - Set up for resetting the device and driver
9152 * @pf: board private structure
9154 static void i40e_reset_subtask(struct i40e_pf *pf)
9156 u32 reset_flags = 0;
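	/* Collect all outstanding reset requests into a single flags word,
	 * clearing each request bit as it is claimed.
	 */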
9158 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
9159 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
9160 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
9162 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
9163 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
9164 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9166 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
9167 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
9168 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
9170 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
9171 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
9172 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
9174 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
9175 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
9176 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
		i40e_prep_for_reset(pf, false);
		i40e_reset(pf);
		i40e_rebuild(pf, false, false);
	}

	/* If we're already down or resetting, just bail */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
		i40e_do_reset(pf, reset_flags, false);
	}
}
9197 * i40e_handle_link_event - Handle link event
9198 * @pf: board private structure
9199 * @e: event info posted on ARQ
9201 static void i40e_handle_link_event(struct i40e_pf *pf,
9202 struct i40e_arq_event_info *e)
9204 struct i40e_aqc_get_link_status *status =
9205 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
9207 /* Do a new status request to re-enable LSE reporting
9208 * and load new status information into the hw struct
9209 * This completely ignores any state information
9210 * in the ARQ event info, instead choosing to always
9211 * issue the AQ update link status command.
9213 i40e_link_event(pf);
9215 /* Check if module meets thermal requirements */
9216 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
9217 dev_err(&pf->pdev->dev,
9218 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
9219 dev_err(&pf->pdev->dev,
9220 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9222 /* check for unqualified module, if link is down, suppress
9223 * the message if link was forced to be down.
9225 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
9226 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
9227 (!(status->link_info & I40E_AQ_LINK_UP)) &&
9228 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
9229 dev_err(&pf->pdev->dev,
9230 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
9231 dev_err(&pf->pdev->dev,
9232 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9238 * i40e_clean_adminq_subtask - Clean the AdminQ rings
9239 * @pf: board private structure
9241 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
9243 struct i40e_arq_event_info event;
9244 struct i40e_hw *hw = &pf->hw;
9251 /* Do not run clean AQ when PF reset fails */
9252 if (test_bit(__I40E_RESET_FAILED, pf->state))
9255 /* check for error indications */
9256 val = rd32(&pf->hw, pf->hw.aq.arq.len);
9258 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
9259 if (hw->debug_mask & I40E_DEBUG_AQ)
9260 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
9261 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
9263 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
9264 if (hw->debug_mask & I40E_DEBUG_AQ)
9265 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
9266 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
9267 pf->arq_overflows++;
9269 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
9270 if (hw->debug_mask & I40E_DEBUG_AQ)
9271 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
9272 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
9275 wr32(&pf->hw, pf->hw.aq.arq.len, val);
9277 val = rd32(&pf->hw, pf->hw.aq.asq.len);
9279 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
9280 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9281 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
9282 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
9284 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
9285 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9286 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
9287 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
9289 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
9290 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9291 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
9292 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
9295 wr32(&pf->hw, pf->hw.aq.asq.len, val);
9297 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
9298 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
9303 ret = i40e_clean_arq_element(hw, &event, &pending);
9304 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
9307 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
9311 opcode = le16_to_cpu(event.desc.opcode);
9314 case i40e_aqc_opc_get_link_status:
9315 i40e_handle_link_event(pf, &event);
9317 case i40e_aqc_opc_send_msg_to_pf:
9318 ret = i40e_vc_process_vf_msg(pf,
9319 le16_to_cpu(event.desc.retval),
9320 le32_to_cpu(event.desc.cookie_high),
9321 le32_to_cpu(event.desc.cookie_low),
9325 case i40e_aqc_opc_lldp_update_mib:
9326 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
9327 #ifdef CONFIG_I40E_DCB
9329 ret = i40e_handle_lldp_event(pf, &event);
9331 #endif /* CONFIG_I40E_DCB */
9333 case i40e_aqc_opc_event_lan_overflow:
9334 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
9335 i40e_handle_lan_overflow_event(pf, &event);
9337 case i40e_aqc_opc_send_msg_to_peer:
9338 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
9340 case i40e_aqc_opc_nvm_erase:
9341 case i40e_aqc_opc_nvm_update:
9342 case i40e_aqc_opc_oem_post_update:
9343 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
9344 "ARQ NVM operation 0x%04x completed\n",
9348 dev_info(&pf->pdev->dev,
9349 "ARQ: Unknown event 0x%04x ignored\n",
9353 } while (i++ < pf->adminq_work_limit);
9355 if (i < pf->adminq_work_limit)
9356 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
9358 /* re-enable Admin queue interrupt cause */
9359 val = rd32(hw, I40E_PFINT_ICR0_ENA);
9360 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
9361 wr32(hw, I40E_PFINT_ICR0_ENA, val);
9364 kfree(event.msg_buf);
}

/**
 * i40e_verify_eeprom - make sure eeprom is good to use
 * @pf: board private structure
 **/
static void i40e_verify_eeprom(struct i40e_pf *pf)
{
	int err;

	err = i40e_diag_eeprom_test(&pf->hw);
	if (err) {
		/* retry in case of garbage read */
		err = i40e_diag_eeprom_test(&pf->hw);
		if (err) {
			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
				 err);
			set_bit(__I40E_BAD_EEPROM, pf->state);
		}
	}

	if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
		clear_bit(__I40E_BAD_EEPROM, pf->state);
	}
}

9393 * i40e_enable_pf_switch_lb
9394 * @pf: pointer to the PF structure
9396 * enable switch loop back or die - no point in a return value
9398 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
9400 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9401 struct i40e_vsi_context ctxt;
9404 ctxt.seid = pf->main_vsi_seid;
9405 ctxt.pf_num = pf->hw.pf_id;
9407 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9409 dev_info(&pf->pdev->dev,
9410 "couldn't get PF vsi config, err %s aq_err %s\n",
9411 i40e_stat_str(&pf->hw, ret),
9412 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9415 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9416 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9417 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9419 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
9421 dev_info(&pf->pdev->dev,
9422 "update vsi switch failed, err %s aq_err %s\n",
9423 i40e_stat_str(&pf->hw, ret),
9424 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9429 * i40e_disable_pf_switch_lb
9430 * @pf: pointer to the PF structure
9432 * disable switch loop back or die - no point in a return value
9434 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
9436 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9437 struct i40e_vsi_context ctxt;
9440 ctxt.seid = pf->main_vsi_seid;
9441 ctxt.pf_num = pf->hw.pf_id;
9443 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9445 dev_info(&pf->pdev->dev,
9446 "couldn't get PF vsi config, err %s aq_err %s\n",
9447 i40e_stat_str(&pf->hw, ret),
9448 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9451 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9452 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9453 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9455 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
9457 dev_info(&pf->pdev->dev,
9458 "update vsi switch failed, err %s aq_err %s\n",
9459 i40e_stat_str(&pf->hw, ret),
9460 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9465 * i40e_config_bridge_mode - Configure the HW bridge mode
9466 * @veb: pointer to the bridge instance
9468 * Configure the loop back mode for the LAN VSI that is downlink to the
9469 * specified HW bridge instance. It is expected this function is called
9470 * when a new HW bridge is instantiated.
9472 static void i40e_config_bridge_mode(struct i40e_veb *veb)
9474 struct i40e_pf *pf = veb->pf;
9476 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
9477 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
9478 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9479 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
9480 i40e_disable_pf_switch_lb(pf);
9482 i40e_enable_pf_switch_lb(pf);
9486 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
9487 * @veb: pointer to the VEB instance
9489 * This is a recursive function that first builds the attached VSIs then
9490 * recurses in to build the next layer of VEB. We track the connections
9491 * through our own index numbers because the seid's from the HW could
9492 * change across the reset.
9494 static int i40e_reconstitute_veb(struct i40e_veb *veb)
9496 struct i40e_vsi *ctl_vsi = NULL;
9497 struct i40e_pf *pf = veb->pf;
9501 /* build VSI that owns this VEB, temporarily attached to base VEB */
9502 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
9504 pf->vsi[v]->veb_idx == veb->idx &&
9505 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
9506 ctl_vsi = pf->vsi[v];
9511 dev_info(&pf->pdev->dev,
9512 "missing owner VSI for veb_idx %d\n", veb->idx);
9514 goto end_reconstitute;
9516 if (ctl_vsi != pf->vsi[pf->lan_vsi])
9517 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
9518 ret = i40e_add_vsi(ctl_vsi);
9520 dev_info(&pf->pdev->dev,
9521 "rebuild of veb_idx %d owner VSI failed: %d\n",
9523 goto end_reconstitute;
9525 i40e_vsi_reset_stats(ctl_vsi);
9527 /* create the VEB in the switch and move the VSI onto the VEB */
9528 ret = i40e_add_veb(veb, ctl_vsi);
9530 goto end_reconstitute;
9532 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
9533 veb->bridge_mode = BRIDGE_MODE_VEB;
9535 veb->bridge_mode = BRIDGE_MODE_VEPA;
9536 i40e_config_bridge_mode(veb);
9538 /* create the remaining VSIs attached to this VEB */
9539 for (v = 0; v < pf->num_alloc_vsi; v++) {
9540 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
9543 if (pf->vsi[v]->veb_idx == veb->idx) {
9544 struct i40e_vsi *vsi = pf->vsi[v];
9546 vsi->uplink_seid = veb->seid;
9547 ret = i40e_add_vsi(vsi);
9549 dev_info(&pf->pdev->dev,
9550 "rebuild of vsi_idx %d failed: %d\n",
9552 goto end_reconstitute;
9554 i40e_vsi_reset_stats(vsi);
9558 /* create any VEBs attached to this VEB - RECURSION */
9559 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
9560 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
9561 pf->veb[veb_idx]->uplink_seid = veb->seid;
9562 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
9573 * i40e_get_capabilities - get info about the HW
9574 * @pf: the PF struct
9576 static int i40e_get_capabilities(struct i40e_pf *pf,
9577 enum i40e_admin_queue_opc list_type)
9579 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
9584 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
9586 cap_buf = kzalloc(buf_len, GFP_KERNEL);
9590 /* this loads the data into the hw struct for us */
9591 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
9592 &data_size, list_type,
9594 /* data loaded, buffer no longer needed */
9597 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
9598 /* retry with a larger buffer */
9599 buf_len = data_size;
9600 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
9601 dev_info(&pf->pdev->dev,
9602 "capability discovery failed, err %s aq_err %s\n",
9603 i40e_stat_str(&pf->hw, err),
9604 i40e_aq_str(&pf->hw,
9605 pf->hw.aq.asq_last_status));
9610 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
9611 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9612 dev_info(&pf->pdev->dev,
9613 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
9614 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
9615 pf->hw.func_caps.num_msix_vectors,
9616 pf->hw.func_caps.num_msix_vectors_vf,
9617 pf->hw.func_caps.fd_filters_guaranteed,
9618 pf->hw.func_caps.fd_filters_best_effort,
9619 pf->hw.func_caps.num_tx_qp,
9620 pf->hw.func_caps.num_vsis);
9621 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
9622 dev_info(&pf->pdev->dev,
9623 "switch_mode=0x%04x, function_valid=0x%08x\n",
9624 pf->hw.dev_caps.switch_mode,
9625 pf->hw.dev_caps.valid_functions);
9626 dev_info(&pf->pdev->dev,
9627 "SR-IOV=%d, num_vfs for all function=%u\n",
9628 pf->hw.dev_caps.sr_iov_1_1,
9629 pf->hw.dev_caps.num_vfs);
9630 dev_info(&pf->pdev->dev,
9631 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
9632 pf->hw.dev_caps.num_vsis,
9633 pf->hw.dev_caps.num_rx_qp,
9634 pf->hw.dev_caps.num_tx_qp);
9637 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9638 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
9639 + pf->hw.func_caps.num_vfs)
9640 if (pf->hw.revision_id == 0 &&
9641 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
9642 dev_info(&pf->pdev->dev,
9643 "got num_vsis %d, setting num_vsis to %d\n",
9644 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
9645 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
9651 static int i40e_vsi_clear(struct i40e_vsi *vsi);
9654 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
9655 * @pf: board private structure
9657 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
9659 struct i40e_vsi *vsi;
	/* quick workaround for an NVM issue that leaves a critical register
	 * uninitialized
	 */
9664 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
9665 static const u32 hkey[] = {
9666 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
9667 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
9668 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
9672 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
9673 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
9676 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
9679 /* find existing VSI and see if it needs configuring */
9680 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9682 /* create a new VSI if none exists */
9684 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
9685 pf->vsi[pf->lan_vsi]->seid, 0);
9687 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
9688 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9689 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
9694 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/
static void i40e_fdir_teardown(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	i40e_fdir_filter_exit(pf);
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (vsi)
		i40e_vsi_release(vsi);
}

9712 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
9714 * @seid: seid of main or channel VSIs
9716 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
9717 * existed before reset
9719 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
9721 struct i40e_cloud_filter *cfilter;
9722 struct i40e_pf *pf = vsi->back;
9723 struct hlist_node *node;
9726 /* Add cloud filters back if they exist */
9727 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
9729 if (cfilter->seid != seid)
9732 if (cfilter->dst_port)
9733 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
9736 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
9739 dev_dbg(&pf->pdev->dev,
9740 "Failed to rebuild cloud filter, err %s aq_err %s\n",
9741 i40e_stat_str(&pf->hw, ret),
9742 i40e_aq_str(&pf->hw,
9743 pf->hw.aq.asq_last_status));
9751 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
9754 * Rebuilds channel VSIs if they existed before reset
9756 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
9758 struct i40e_channel *ch, *ch_tmp;
9761 if (list_empty(&vsi->ch_list))
9764 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
9765 if (!ch->initialized)
9767 /* Proceed with creation of channel (VMDq2) VSI */
9768 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
9770 dev_info(&vsi->back->pdev->dev,
9771 "failed to rebuild channels using uplink_seid %u\n",
9775 /* Reconfigure TX queues using QTX_CTL register */
9776 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
9778 dev_info(&vsi->back->pdev->dev,
9779 "failed to configure TX rings for channel %u\n",
9783 /* update 'next_base_queue' */
9784 vsi->next_base_queue = vsi->next_base_queue +
9785 ch->num_queue_pairs;
9786 if (ch->max_tx_rate) {
9787 u64 credits = ch->max_tx_rate;
9789 if (i40e_set_bw_limit(vsi, ch->seid,
9793 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9794 dev_dbg(&vsi->back->pdev->dev,
9795 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9800 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
9802 dev_dbg(&vsi->back->pdev->dev,
9803 "Failed to rebuild cloud filters for channel VSI %u\n",
9812 * i40e_prep_for_reset - prep for the core to reset
9813 * @pf: board private structure
9814 * @lock_acquired: indicates whether or not the lock has been acquired
9815 * before this function was called.
9817 * Close up the VFs and other things in prep for PF Reset.
9819 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
9821 struct i40e_hw *hw = &pf->hw;
9822 i40e_status ret = 0;
9825 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
9826 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9828 if (i40e_check_asq_alive(&pf->hw))
9829 i40e_vc_notify_reset(pf);
9831 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
9833 /* quiesce the VSIs and their queues that are not already DOWN */
9834 /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
9837 i40e_pf_quiesce_all_vsi(pf);
9841 for (v = 0; v < pf->num_alloc_vsi; v++) {
9843 pf->vsi[v]->seid = 0;
9846 i40e_shutdown_adminq(&pf->hw);
9848 /* call shutdown HMC */
9849 if (hw->hmc.hmc_obj) {
9850 ret = i40e_shutdown_lan_hmc(hw);
9852 dev_warn(&pf->pdev->dev,
9853 "shutdown_lan_hmc failed: %d\n", ret);
	}

	/* Save the current PTP time so that we can restore the time after the
	 * reset completes.
	 */
	i40e_ptp_save_hw_time(pf);
}

/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 **/
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}

9879 * i40e_get_oem_version - get OEM specific version information
9880 * @hw: pointer to the hardware structure
9882 static void i40e_get_oem_version(struct i40e_hw *hw)
9884 u16 block_offset = 0xffff;
9885 u16 block_length = 0;
9886 u16 capabilities = 0;
9890 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
9891 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00
9892 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
9893 #define I40E_NVM_OEM_GEN_OFFSET 0x02
9894 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03
9895 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
9896 #define I40E_NVM_OEM_LENGTH 3
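
	/* The OEM version block referenced by I40E_SR_NVM_OEM_VERSION_PTR is
	 * laid out as four consecutive words: length, capabilities, gen/snap
	 * and release.
	 */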
9898 /* Check if pointer to OEM version block is valid. */
9899 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
9900 if (block_offset == 0xffff)
9903 /* Check if OEM version block has correct length. */
9904 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
9906 if (block_length < I40E_NVM_OEM_LENGTH)
9909 /* Check if OEM version format is as expected. */
9910 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
9912 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
9915 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
9917 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
9919 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
}

/**
 * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
 * @pf: board private structure
 **/
static int i40e_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;

	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, pf->state);
		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	} else {
		pf->pfr_count++;
	}
	return ret;
}

9944 * i40e_rebuild - rebuild using a saved config
9945 * @pf: board private structure
9946 * @reinit: if the Main VSI needs to re-initialized.
9947 * @lock_acquired: indicates whether or not the lock has been acquired
9948 * before this function was called.
9950 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
9952 int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
9953 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9954 struct i40e_hw *hw = &pf->hw;
9955 u8 set_fc_aq_fail = 0;
9960 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
9961 i40e_check_recovery_mode(pf)) {
9962 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
9965 if (test_bit(__I40E_DOWN, pf->state) &&
9966 !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
9967 !old_recovery_mode_bit)
9968 goto clear_recovery;
9969 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
9971 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
9972 ret = i40e_init_adminq(&pf->hw);
9974 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
9975 i40e_stat_str(&pf->hw, ret),
9976 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9977 goto clear_recovery;
9979 i40e_get_oem_version(&pf->hw);
9981 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
9982 ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
9983 hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
9984 /* The following delay is necessary for 4.33 firmware and older
9985 * to recover after EMP reset. 200 ms should suffice but we
		 * put here 300 ms to be sure that FW is ready to operate
		 * after reset.
		 */
		mdelay(300);
	}
9992 /* re-verify the eeprom if we just had an EMP reset */
9993 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
9994 i40e_verify_eeprom(pf);
9996 /* if we are going out of or into recovery mode we have to act
9997 * accordingly with regard to resources initialization
9998 * and deinitialization
10000 if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
10001 old_recovery_mode_bit) {
10002 if (i40e_get_capabilities(pf,
10003 i40e_aqc_opc_list_func_capabilities))
10006 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10007 /* we're staying in recovery mode so we'll reinitialize
10010 if (i40e_setup_misc_vector_for_recovery_mode(pf))
10013 if (!lock_acquired)
10015 /* we're going out of recovery mode so we'll free
10016 * the IRQ allocated specifically for recovery mode
10017 * and restore the interrupt scheme
10019 free_irq(pf->pdev->irq, pf);
10020 i40e_clear_interrupt_scheme(pf);
10021 if (i40e_restore_interrupt_scheme(pf))
10025 /* tell the firmware that we're starting */
10026 i40e_send_version(pf);
10028 /* bail out in case recovery mode was detected, as there is
10029 * no need for further configuration.
10034 i40e_clear_pxe_mode(hw);
10035 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
10037 goto end_core_reset;
10039 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10040 hw->func_caps.num_rx_qp, 0, 0);
10042 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
10043 goto end_core_reset;
10045 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10047 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
10048 goto end_core_reset;
10051 /* Enable FW to write a default DCB config on link-up */
10052 i40e_aq_set_dcb_parameters(hw, true, NULL);
10054 #ifdef CONFIG_I40E_DCB
10055 ret = i40e_init_pf_dcb(pf);
10057 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
10058 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10059 /* Continue without DCB enabled */
10061 #endif /* CONFIG_I40E_DCB */
10062 /* do basic switch setup */
10063 if (!lock_acquired)
10065 ret = i40e_setup_pf_switch(pf, reinit);
10069 /* The driver only wants link up/down and module qualification
10070 * reports from firmware. Note the negative logic.
10072 ret = i40e_aq_set_phy_int_mask(&pf->hw,
10073 ~(I40E_AQ_EVENT_LINK_UPDOWN |
10074 I40E_AQ_EVENT_MEDIA_NA |
10075 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
10077 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10078 i40e_stat_str(&pf->hw, ret),
10079 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10081 /* make sure our flow control settings are restored */
10082 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
10084 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
10085 i40e_stat_str(&pf->hw, ret),
10086 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10088 /* Rebuild the VSIs and VEBs that existed before reset.
10089 * They are still in our local switch element arrays, so only
10090 * need to rebuild the switch model in the HW.
10092 * If there were VEBs but the reconstitution failed, we'll try
10093 * try to recover minimal use by getting the basic PF VSI working.
10095 if (vsi->uplink_seid != pf->mac_seid) {
10096 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
10097 /* find the one VEB connected to the MAC, and find orphans */
10098 for (v = 0; v < I40E_MAX_VEB; v++) {
10102 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
10103 pf->veb[v]->uplink_seid == 0) {
10104 ret = i40e_reconstitute_veb(pf->veb[v]);
10109 /* If Main VEB failed, we're in deep doodoo,
10110 * so give up rebuilding the switch and set up
10111 * for minimal rebuild of PF VSI.
10112 * If orphan failed, we'll report the error
10113 * but try to keep going.
10115 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
10116 dev_info(&pf->pdev->dev,
10117 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
10119 vsi->uplink_seid = pf->mac_seid;
10121 } else if (pf->veb[v]->uplink_seid == 0) {
10122 dev_info(&pf->pdev->dev,
10123 "rebuild of orphan VEB failed: %d\n",
10130 if (vsi->uplink_seid == pf->mac_seid) {
10131 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
10132 /* no VEB, so rebuild only the Main VSI */
10133 ret = i40e_add_vsi(vsi);
10135 dev_info(&pf->pdev->dev,
10136 "rebuild of Main VSI failed: %d\n", ret);
10141 if (vsi->mqprio_qopt.max_rate[0]) {
10142 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
10145 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
10146 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
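		/* i40e_set_bw_limit() takes the rate in Mbps; it is converted
		 * to 50 Mbps credits only for the debug message below.
		 */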
10150 credits = max_tx_rate;
10151 do_div(credits, I40E_BW_CREDIT_DIVISOR);
10152 dev_dbg(&vsi->back->pdev->dev,
10153 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10159 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
10163 /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs
10164 * for this main VSI if they exist
10166 ret = i40e_rebuild_channels(vsi);
10170 /* Reconfigure hardware for allowing smaller MSS in the case
10171 * of TSO, so that we avoid the MDD being fired and causing
10172 * a reset in the case of small MSS+TSO.
10174 #define I40E_REG_MSS 0x000E64DC
10175 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
10176 #define I40E_64BYTE_MSS 0x400000
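	/* The minimum-MSS field sits in bits 16-25 of this register, so
	 * I40E_64BYTE_MSS (64 << 16) programs a 64 byte floor.
	 */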
10177 val = rd32(hw, I40E_REG_MSS);
10178 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
10179 val &= ~I40E_REG_MSS_MIN_MASK;
10180 val |= I40E_64BYTE_MSS;
10181 wr32(hw, I40E_REG_MSS, val);
10184 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
10186 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10188 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10189 i40e_stat_str(&pf->hw, ret),
10190 i40e_aq_str(&pf->hw,
10191 pf->hw.aq.asq_last_status));
10193 /* reinit the misc interrupt */
10194 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10195 ret = i40e_setup_misc_vector(pf);
10197 /* Add a filter to drop all Flow control frames from any VSI from being
10198 * transmitted. By doing so we stop a malicious VF from sending out
10199 * PAUSE or PFC frames and potentially controlling traffic for other
10201 * The FW can still send Flow control frames if enabled.
10203 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
10204 pf->main_vsi_seid);
10206 /* restart the VSIs that were rebuilt and running before the reset */
10207 i40e_pf_unquiesce_all_vsi(pf);
10209 /* Release the RTNL lock before we start resetting VFs */
10210 if (!lock_acquired)
10213 /* Restore promiscuous settings */
10214 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
10216 dev_warn(&pf->pdev->dev,
10217 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
10218 pf->cur_promisc ? "on" : "off",
10219 i40e_stat_str(&pf->hw, ret),
10220 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10222 i40e_reset_all_vfs(pf, true);
10224 /* tell the firmware that we're starting */
10225 i40e_send_version(pf);
10227 /* We've already released the lock, so don't do it again */
10228 goto end_core_reset;
10231 if (!lock_acquired)
10234 clear_bit(__I40E_RESET_FAILED, pf->state);
10236 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10237 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
}

/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired)
{
	int ret;
	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_reset(pf);
	if (!ret)
		i40e_rebuild(pf, reinit, lock_acquired);
}

/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
	i40e_prep_for_reset(pf, lock_acquired);
	i40e_reset_and_rebuild(pf, false, lock_acquired);
}

10276 * i40e_handle_mdd_event
10277 * @pf: pointer to the PF structure
10279 * Called from the MDD irq handler to identify possibly malicious vfs
10281 static void i40e_handle_mdd_event(struct i40e_pf *pf)
10283 struct i40e_hw *hw = &pf->hw;
10284 bool mdd_detected = false;
10285 struct i40e_vf *vf;
10289 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
10292 /* find what triggered the MDD event */
10293 reg = rd32(hw, I40E_GL_MDET_TX);
10294 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
10295 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
10296 I40E_GL_MDET_TX_PF_NUM_SHIFT;
10297 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
10298 I40E_GL_MDET_TX_VF_NUM_SHIFT;
10299 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
10300 I40E_GL_MDET_TX_EVENT_SHIFT;
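		/* The queue number reported by the hardware is absolute;
		 * subtract this PF's base queue to get the local queue index.
		 */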
10301 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
10302 I40E_GL_MDET_TX_QUEUE_SHIFT) -
10303 pf->hw.func_caps.base_queue;
10304 if (netif_msg_tx_err(pf))
10305 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
10306 event, queue, pf_num, vf_num);
10307 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
10308 mdd_detected = true;
10310 reg = rd32(hw, I40E_GL_MDET_RX);
10311 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
10312 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
10313 I40E_GL_MDET_RX_FUNCTION_SHIFT;
10314 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
10315 I40E_GL_MDET_RX_EVENT_SHIFT;
10316 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
10317 I40E_GL_MDET_RX_QUEUE_SHIFT) -
10318 pf->hw.func_caps.base_queue;
10319 if (netif_msg_rx_err(pf))
10320 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
10321 event, queue, func);
10322 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
10323 mdd_detected = true;
10326 if (mdd_detected) {
10327 reg = rd32(hw, I40E_PF_MDET_TX);
10328 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
10329 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
10330 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
10332 reg = rd32(hw, I40E_PF_MDET_RX);
10333 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
10334 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
10335 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
10339 /* see if one of the VFs needs its hand slapped */
10340 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
10342 reg = rd32(hw, I40E_VP_MDET_TX(i));
10343 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
10344 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
10345 vf->num_mdd_events++;
10346 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
10348 dev_info(&pf->pdev->dev,
10349 "Use PF Control I/F to re-enable the VF\n");
10350 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10353 reg = rd32(hw, I40E_VP_MDET_RX(i));
10354 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
10355 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
10356 vf->num_mdd_events++;
10357 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
10359 dev_info(&pf->pdev->dev,
10360 "Use PF Control I/F to re-enable the VF\n");
10361 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10365 /* re-enable mdd interrupt cause */
10366 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
10367 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
10368 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}

static const char *i40e_tunnel_name(u8 type)
{
	switch (type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		return "vxlan";
	case UDP_TUNNEL_TYPE_GENEVE:
		return "geneve";
	default:
		return "unknown";
	}
}

/**
 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters(struct i40e_pf *pf)
{
	int i;

	/* loop through and set pending bit for all active UDP filters */
	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->udp_ports[i].port)
			pf->pending_udp_bitmap |= BIT_ULL(i);
	}

	set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
}
10403 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
10404 * @pf: board private structure
10406 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
10408 struct i40e_hw *hw = &pf->hw;
10409 u8 filter_index, type;
10413 if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
10416 /* acquire RTNL to maintain state of flags and port requests */
10419 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
10420 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
10421 struct i40e_udp_port_config *udp_port;
10422 i40e_status ret = 0;
10424 udp_port = &pf->udp_ports[i];
10425 pf->pending_udp_bitmap &= ~BIT_ULL(i);
10427 port = READ_ONCE(udp_port->port);
10428 type = READ_ONCE(udp_port->type);
10429 filter_index = READ_ONCE(udp_port->filter_index);
10431 /* release RTNL while we wait on AQ command */
10435 ret = i40e_aq_add_udp_tunnel(hw, port,
10439 else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
10440 ret = i40e_aq_del_udp_tunnel(hw, filter_index,
10443 /* reacquire RTNL so we can update filter_index */
10447 dev_info(&pf->pdev->dev,
10448 "%s %s port %d, index %d failed, err %s aq_err %s\n",
10449 i40e_tunnel_name(type),
10450 port ? "add" : "delete",
10453 i40e_stat_str(&pf->hw, ret),
10454 i40e_aq_str(&pf->hw,
10455 pf->hw.aq.asq_last_status));
10457 /* failed to add, just reset port,
10458 * drop pending bit for any deletion
10460 udp_port->port = 0;
10461 pf->pending_udp_bitmap &= ~BIT_ULL(i);
10464 /* record filter index on success */
10465 udp_port->filter_index = filter_index;
10474 * i40e_service_task - Run the driver's async subtasks
10475 * @work: pointer to work_struct containing our data
10477 static void i40e_service_task(struct work_struct *work)
10479 struct i40e_pf *pf = container_of(work,
10482 unsigned long start_time = jiffies;
10484 /* don't bother with service tasks if a reset is in progress */
10485 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
10486 test_bit(__I40E_SUSPENDED, pf->state))
10489 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
10492 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10493 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
10494 i40e_sync_filters_subtask(pf);
10495 i40e_reset_subtask(pf);
10496 i40e_handle_mdd_event(pf);
10497 i40e_vc_process_vflr_event(pf);
10498 i40e_watchdog_subtask(pf);
10499 i40e_fdir_reinit_subtask(pf);
10500 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
10501 /* Client subtask will reopen next time through. */
10502 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
10505 i40e_client_subtask(pf);
10506 if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
10508 i40e_notify_client_of_l2_param_changes(
10509 pf->vsi[pf->lan_vsi]);
		}
		i40e_sync_filters_subtask(pf);
		i40e_sync_udp_filters_subtask(pf);
	} else {
		i40e_reset_subtask(pf);
	}

	i40e_clean_adminq_subtask(pf);
10519 /* flush memory to make sure state is correct before next watchdog */
10520 smp_mb__before_atomic();
10521 clear_bit(__I40E_SERVICE_SCHED, pf->state);
10523 /* If the tasks have taken longer than one timer cycle or there
10524 * is more work to be done, reschedule the service task now
10525 * rather than wait for the timer to tick again.
10527 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
10528 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
10529 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
10530 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
10531 i40e_service_event_schedule(pf);
}

/**
 * i40e_service_timer - timer callback
 * @t: pointer to the timer_list embedded in our PF struct
 **/
static void i40e_service_timer(struct timer_list *t)
{
	struct i40e_pf *pf = from_timer(pf, t, service_timer);

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}

10548 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
10549 * @vsi: the VSI being configured
10551 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
10553 struct i40e_pf *pf = vsi->back;
10555 switch (vsi->type) {
10556 case I40E_VSI_MAIN:
10557 vsi->alloc_queue_pairs = pf->num_lan_qps;
10558 if (!vsi->num_tx_desc)
10559 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10560 I40E_REQ_DESCRIPTOR_MULTIPLE);
10561 if (!vsi->num_rx_desc)
10562 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10563 I40E_REQ_DESCRIPTOR_MULTIPLE);
10564 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10565 vsi->num_q_vectors = pf->num_lan_msix;
10567 vsi->num_q_vectors = 1;
10571 case I40E_VSI_FDIR:
10572 vsi->alloc_queue_pairs = 1;
10573 vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
10574 I40E_REQ_DESCRIPTOR_MULTIPLE);
10575 vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
10576 I40E_REQ_DESCRIPTOR_MULTIPLE);
10577 vsi->num_q_vectors = pf->num_fdsb_msix;
10580 case I40E_VSI_VMDQ2:
10581 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
10582 if (!vsi->num_tx_desc)
10583 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10584 I40E_REQ_DESCRIPTOR_MULTIPLE);
10585 if (!vsi->num_rx_desc)
10586 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10587 I40E_REQ_DESCRIPTOR_MULTIPLE);
10588 vsi->num_q_vectors = pf->num_vmdq_msix;
10591 case I40E_VSI_SRIOV:
10592 vsi->alloc_queue_pairs = pf->num_vf_qps;
10593 if (!vsi->num_tx_desc)
10594 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10595 I40E_REQ_DESCRIPTOR_MULTIPLE);
10596 if (!vsi->num_rx_desc)
10597 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10598 I40E_REQ_DESCRIPTOR_MULTIPLE);
10610 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
10611 * @vsi: VSI pointer
10612 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
10614 * On error: returns error code (negative)
10615 * On success: returns 0
10617 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
10619 struct i40e_ring **next_rings;
10623 /* allocate memory for both Tx, XDP Tx and Rx ring pointers */
10624 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
10625 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
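	/* One allocation backs all of the ring pointer arrays: Tx rings
	 * first, then the optional XDP Tx rings, then the Rx rings.
	 */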
10626 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
10627 if (!vsi->tx_rings)
10629 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
10630 if (i40e_enabled_xdp_vsi(vsi)) {
10631 vsi->xdp_rings = next_rings;
10632 next_rings += vsi->alloc_queue_pairs;
10634 vsi->rx_rings = next_rings;
10636 if (alloc_qvectors) {
10637 /* allocate memory for q_vector pointers */
10638 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
10639 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
10640 if (!vsi->q_vectors) {
10648 kfree(vsi->tx_rings);
10653 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
10654 * @pf: board private structure
10655 * @type: type of VSI
10657 * On error: returns error code (negative)
10658 * On success: returns vsi index in PF (positive)
10660 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10663 struct i40e_vsi *vsi;
10667 /* Need to protect the allocation of the VSIs at the PF level */
10668 mutex_lock(&pf->switch_mutex);
10670 /* VSI list may be fragmented if VSI creation/destruction has
10671 * been happening. We can afford to do a quick scan to look
10672 * for any free VSIs in the list.
10674 * find next empty vsi slot, looping back around if necessary
10677 while (i < pf->num_alloc_vsi && pf->vsi[i])
10679 if (i >= pf->num_alloc_vsi) {
10681 while (i < pf->next_vsi && pf->vsi[i])
10685 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
10686 vsi_idx = i; /* Found one! */
10689 goto unlock_pf; /* out of VSI slots! */
10691 pf->next_vsi = ++i;
10693 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
10700 set_bit(__I40E_VSI_DOWN, vsi->state);
10702 vsi->idx = vsi_idx;
10703 vsi->int_rate_limit = 0;
10704 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
10705 pf->rss_table_size : 64;
10706 vsi->netdev_registered = false;
10707 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
10708 hash_init(vsi->mac_filter_hash);
10709 vsi->irqs_ready = false;
10711 if (type == I40E_VSI_MAIN) {
10712 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
10713 if (!vsi->af_xdp_zc_qps)
10717 ret = i40e_set_num_rings_in_vsi(vsi);
10721 ret = i40e_vsi_alloc_arrays(vsi, true);
10725 /* Setup default MSIX irq handler for VSI */
10726 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
10728 /* Initialize VSI lock */
10729 spin_lock_init(&vsi->mac_filter_hash_lock);
10730 pf->vsi[vsi_idx] = vsi;
10735 bitmap_free(vsi->af_xdp_zc_qps);
10736 pf->next_vsi = i - 1;
10739 mutex_unlock(&pf->switch_mutex);
10744 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
10745 * @vsi: VSI pointer
10746 * @free_qvectors: a bool to specify if q_vectors need to be freed.
10751 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
10753 /* free the ring and vector containers */
10754 if (free_qvectors) {
10755 kfree(vsi->q_vectors);
10756 vsi->q_vectors = NULL;
10758 kfree(vsi->tx_rings);
10759 vsi->tx_rings = NULL;
10760 vsi->rx_rings = NULL;
10761 vsi->xdp_rings = NULL;
10765 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
10767 * @vsi: Pointer to VSI structure
10769 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
10774 kfree(vsi->rss_hkey_user);
10775 vsi->rss_hkey_user = NULL;
10777 kfree(vsi->rss_lut_user);
10778 vsi->rss_lut_user = NULL;
10782 * i40e_vsi_clear - Deallocate the VSI provided
10783 * @vsi: the VSI being un-configured
10785 static int i40e_vsi_clear(struct i40e_vsi *vsi)
10787 struct i40e_pf *pf;
10796 mutex_lock(&pf->switch_mutex);
10797 if (!pf->vsi[vsi->idx]) {
10798 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
10799 vsi->idx, vsi->idx, vsi->type);
10803 if (pf->vsi[vsi->idx] != vsi) {
10804 dev_err(&pf->pdev->dev,
10805 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
10806 pf->vsi[vsi->idx]->idx,
10807 pf->vsi[vsi->idx]->type,
10808 vsi->idx, vsi->type);
10812 /* updates the PF for this cleared vsi */
10813 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10814 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
10816 bitmap_free(vsi->af_xdp_zc_qps);
10817 i40e_vsi_free_arrays(vsi, true);
10818 i40e_clear_rss_config_user(vsi);
10820 pf->vsi[vsi->idx] = NULL;
10821 if (vsi->idx < pf->next_vsi)
10822 pf->next_vsi = vsi->idx;
10825 mutex_unlock(&pf->switch_mutex);
10833 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
10834 * @vsi: the VSI being cleaned
10836 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
10840 if (vsi->tx_rings && vsi->tx_rings[0]) {
10841 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10842 kfree_rcu(vsi->tx_rings[i], rcu);
10843 vsi->tx_rings[i] = NULL;
10844 vsi->rx_rings[i] = NULL;
10845 if (vsi->xdp_rings)
10846 vsi->xdp_rings[i] = NULL;
10852 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
10853 * @vsi: the VSI being configured
10855 static int i40e_alloc_rings(struct i40e_vsi *vsi)
10857 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
10858 struct i40e_pf *pf = vsi->back;
10859 struct i40e_ring *ring;
10861 /* Set basic values in the rings to be used later during open() */
10862 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10863 /* allocate space for both Tx and Rx in one shot */
10864 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
10868 ring->queue_index = i;
10869 ring->reg_idx = vsi->base_queue + i;
10870 ring->ring_active = false;
10872 ring->netdev = vsi->netdev;
10873 ring->dev = &pf->pdev->dev;
10874 ring->count = vsi->num_tx_desc;
10877 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10878 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10879 ring->itr_setting = pf->tx_itr_default;
10880 vsi->tx_rings[i] = ring++;
10882 if (!i40e_enabled_xdp_vsi(vsi))
10885 ring->queue_index = vsi->alloc_queue_pairs + i;
10886 ring->reg_idx = vsi->base_queue + ring->queue_index;
10887 ring->ring_active = false;
10889 ring->netdev = NULL;
10890 ring->dev = &pf->pdev->dev;
10891 ring->count = vsi->num_tx_desc;
10894 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10895 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10896 set_ring_xdp(ring);
10897 ring->itr_setting = pf->tx_itr_default;
10898 vsi->xdp_rings[i] = ring++;
10901 ring->queue_index = i;
10902 ring->reg_idx = vsi->base_queue + i;
10903 ring->ring_active = false;
10905 ring->netdev = vsi->netdev;
10906 ring->dev = &pf->pdev->dev;
10907 ring->count = vsi->num_rx_desc;
10910 ring->itr_setting = pf->rx_itr_default;
10911 vsi->rx_rings[i] = ring;
10917 i40e_vsi_clear_rings(vsi);
10922 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
10923 * @pf: board private structure
10924 * @vectors: the number of MSI-X vectors to request
10926 * Returns the number of vectors reserved, or error
10928 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
10930 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
10931 I40E_MIN_MSIX, vectors);
10933 dev_info(&pf->pdev->dev,
10934 "MSI-X vector reservation failed: %d\n", vectors);
10942 * i40e_init_msix - Setup the MSIX capability
10943 * @pf: board private structure
10945 * Work with the OS to set up the MSIX vectors needed.
10947 * Returns the number of vectors reserved or negative on failure
10949 static int i40e_init_msix(struct i40e_pf *pf)
10951 struct i40e_hw *hw = &pf->hw;
10952 int cpus, extra_vectors;
10956 int iwarp_requested = 0;
10958 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10961 /* The number of vectors we'll request will be comprised of:
10962 * - Add 1 for "other" cause for Admin Queue events, etc.
10963 * - The number of LAN queue pairs
10964 * - Queues being used for RSS.
10965 * We don't need as many as max_rss_size vectors;
10966 * use rss_size instead in the calculation since that
10967 * is governed by the number of CPUs in the system.
10968 * - assumes symmetric Tx/Rx pairing
10969 * - The number of VMDq pairs
10970 * - The CPU count within the NUMA node if iWARP is enabled
10971 * Once we count this up, try the request.
10973 * If we can't get what we want, we'll simplify to nearly nothing
10974 * and try again. If that still fails, we punt.
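*
* Illustrative example (hypothetical numbers): with 64 vectors reported in
* func_caps and 16 online CPUs, one vector is kept for the misc handler,
* the LAN VSI initially gets min(16, 63 / 2) = 16 vectors, FD-SB, iWARP and
* VMDq are then served from what remains, and any leftover vectors are
* added back to the LAN VSI up to the CPU count.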
10976 vectors_left = hw->func_caps.num_msix_vectors;
10979 /* reserve one vector for miscellaneous handler */
10980 if (vectors_left) {
10985 /* reserve some vectors for the main PF traffic queues. Initially we
10986 * only reserve at most 50% of the available vectors, in the case that
10987 * the number of online CPUs is large. This ensures that we can enable
10988 * extra features as well. Once we've enabled the other features, we
10989 * will use any remaining vectors to reach as close as we can to the
10990 * number of online CPUs.
10992 cpus = num_online_cpus();
10993 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
10994 vectors_left -= pf->num_lan_msix;
10996 /* reserve one vector for sideband flow director */
10997 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10998 if (vectors_left) {
10999 pf->num_fdsb_msix = 1;
11003 pf->num_fdsb_msix = 0;
11007 /* can we reserve enough for iWARP? */
11008 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11009 iwarp_requested = pf->num_iwarp_msix;
11012 pf->num_iwarp_msix = 0;
11013 else if (vectors_left < pf->num_iwarp_msix)
11014 pf->num_iwarp_msix = 1;
11015 v_budget += pf->num_iwarp_msix;
11016 vectors_left -= pf->num_iwarp_msix;
11019 /* any vectors left over go for VMDq support */
11020 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
11021 if (!vectors_left) {
11022 pf->num_vmdq_msix = 0;
11023 pf->num_vmdq_qps = 0;
11025 int vmdq_vecs_wanted =
11026 pf->num_vmdq_vsis * pf->num_vmdq_qps;
11028 min_t(int, vectors_left, vmdq_vecs_wanted);
11030 /* if we're short on vectors for what's desired, we limit
11031 * the queues per vmdq. If this is still more than are
11032 * available, the user will need to change the number of
11033 * queues/vectors used by the PF later with the ethtool channels command.
11036 if (vectors_left < vmdq_vecs_wanted) {
11037 pf->num_vmdq_qps = 1;
11038 vmdq_vecs_wanted = pf->num_vmdq_vsis;
11039 vmdq_vecs = min_t(int,
11043 pf->num_vmdq_msix = pf->num_vmdq_qps;
11045 v_budget += vmdq_vecs;
11046 vectors_left -= vmdq_vecs;
11050 /* On systems with a large number of SMP cores, we previously limited
11051 * the number of vectors for num_lan_msix to be at most 50% of the
11052 * available vectors, to allow for other features. Now, we add back
11053 * the remaining vectors. However, we ensure that the total
11054 * num_lan_msix will not exceed num_online_cpus(). To do this, we
11055 * calculate the number of vectors we can add without going over the
11056 * cap of CPUs. For systems with a small number of CPUs this will be zero.
11059 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
11060 pf->num_lan_msix += extra_vectors;
11061 vectors_left -= extra_vectors;
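/* Illustrative example (hypothetical numbers): with 16 CPUs, 12 LAN vectors
 * granted so far and 10 vectors still unclaimed, only min(16 - 12, 10) = 4
 * are added back, so num_lan_msix never exceeds the number of online CPUs.
 */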
11063 WARN(vectors_left < 0,
11064 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
11066 v_budget += pf->num_lan_msix;
11067 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
11069 if (!pf->msix_entries)
11072 for (i = 0; i < v_budget; i++)
11073 pf->msix_entries[i].entry = i;
11074 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
11076 if (v_actual < I40E_MIN_MSIX) {
11077 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
11078 kfree(pf->msix_entries);
11079 pf->msix_entries = NULL;
11080 pci_disable_msix(pf->pdev);
11083 } else if (v_actual == I40E_MIN_MSIX) {
11084 /* Adjust for minimal MSIX use */
11085 pf->num_vmdq_vsis = 0;
11086 pf->num_vmdq_qps = 0;
11087 pf->num_lan_qps = 1;
11088 pf->num_lan_msix = 1;
11090 } else if (v_actual != v_budget) {
11091 /* If we have limited resources, we will start with no vectors
11092 * for the special features and then allocate vectors to some
11093 * of these features based on the policy and at the end disable
11094 * the features that did not get any vectors.
11098 dev_info(&pf->pdev->dev,
11099 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
11100 v_actual, v_budget);
11101 /* reserve the misc vector */
11102 vec = v_actual - 1;
11104 /* Scale vector usage down */
11105 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
11106 pf->num_vmdq_vsis = 1;
11107 pf->num_vmdq_qps = 1;
11109 /* partition out the remaining vectors */
11112 pf->num_lan_msix = 1;
11115 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11116 pf->num_lan_msix = 1;
11117 pf->num_iwarp_msix = 1;
11119 pf->num_lan_msix = 2;
11123 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11124 pf->num_iwarp_msix = min_t(int, (vec / 3),
11126 pf->num_vmdq_vsis = min_t(int, (vec / 3),
11127 I40E_DEFAULT_NUM_VMDQ_VSI);
11129 pf->num_vmdq_vsis = min_t(int, (vec / 2),
11130 I40E_DEFAULT_NUM_VMDQ_VSI);
11132 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11133 pf->num_fdsb_msix = 1;
11136 pf->num_lan_msix = min_t(int,
11137 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
11139 pf->num_lan_qps = pf->num_lan_msix;
11144 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
11145 (pf->num_fdsb_msix == 0)) {
11146 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
11147 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11148 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11150 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
11151 (pf->num_vmdq_msix == 0)) {
11152 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
11153 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
11156 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
11157 (pf->num_iwarp_msix == 0)) {
11158 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
11159 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11161 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
11162 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
11164 pf->num_vmdq_msix * pf->num_vmdq_vsis,
11166 pf->num_iwarp_msix);
11172 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
11173 * @vsi: the VSI being configured
11174 * @v_idx: index of the vector in the vsi struct
11175 * @cpu: cpu to be used on affinity_mask
11177 * We allocate one q_vector. If allocation fails we return -ENOMEM.
11179 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
11181 struct i40e_q_vector *q_vector;
11183 /* allocate q_vector */
11184 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
11188 q_vector->vsi = vsi;
11189 q_vector->v_idx = v_idx;
11190 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
11193 netif_napi_add(vsi->netdev, &q_vector->napi,
11194 i40e_napi_poll, NAPI_POLL_WEIGHT);
11196 /* tie q_vector and vsi together */
11197 vsi->q_vectors[v_idx] = q_vector;
11203 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
11204 * @vsi: the VSI being configured
11206 * We allocate one q_vector per queue interrupt. If allocation fails we return -ENOMEM.
11209 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
11211 struct i40e_pf *pf = vsi->back;
11212 int err, v_idx, num_q_vectors, current_cpu;
11214 /* if not MSIX, give the one vector only to the LAN VSI */
11215 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11216 num_q_vectors = vsi->num_q_vectors;
11217 else if (vsi == pf->vsi[pf->lan_vsi])
11222 current_cpu = cpumask_first(cpu_online_mask);
11224 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
11225 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
11228 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
11229 if (unlikely(current_cpu >= nr_cpu_ids))
11230 current_cpu = cpumask_first(cpu_online_mask);
11237 i40e_free_q_vector(vsi, v_idx);
11243 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
11244 * @pf: board private structure to initialize
11246 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
11251 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11252 vectors = i40e_init_msix(pf);
11254 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
11255 I40E_FLAG_IWARP_ENABLED |
11256 I40E_FLAG_RSS_ENABLED |
11257 I40E_FLAG_DCB_CAPABLE |
11258 I40E_FLAG_DCB_ENABLED |
11259 I40E_FLAG_SRIOV_ENABLED |
11260 I40E_FLAG_FD_SB_ENABLED |
11261 I40E_FLAG_FD_ATR_ENABLED |
11262 I40E_FLAG_VMDQ_ENABLED);
11263 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11265 /* rework the queue expectations without MSIX */
11266 i40e_determine_queue_usage(pf);
11270 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11271 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
11272 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
11273 vectors = pci_enable_msi(pf->pdev);
11275 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
11277 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
11279 vectors = 1; /* one MSI or Legacy vector */
11282 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
11283 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
11285 /* set up vector assignment tracking */
11286 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
11287 pf->irq_pile = kzalloc(size, GFP_KERNEL);
11291 pf->irq_pile->num_entries = vectors;
11292 pf->irq_pile->search_hint = 0;
11294 /* track first vector for misc interrupts, ignore return */
11295 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
11301 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
11302 * @pf: private board data structure
11304 * Restore the interrupt scheme that was cleared when we suspended the
11305 * device. This should be called during resume to re-allocate the q_vectors
11306 * and reacquire IRQs.
11308 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
11312 /* We cleared the MSI and MSI-X flags when disabling the old interrupt
11313 * scheme. We need to re-enable them here in order to attempt to
11314 * re-acquire the MSI or MSI-X vectors
11316 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
11318 err = i40e_init_interrupt_scheme(pf);
11322 /* Now that we've re-acquired IRQs, we need to remap the vectors and
11323 * rings together again.
11325 for (i = 0; i < pf->num_alloc_vsi; i++) {
11327 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
11330 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
11334 err = i40e_setup_misc_vector(pf);
11338 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
11339 i40e_client_update_msix_info(pf);
11346 i40e_vsi_free_q_vectors(pf->vsi[i]);
11353 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
11354 * non-queue events in recovery mode
11355 * @pf: board private structure
11357 * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
11358 * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
11359 * This is handled differently from the normal (non-recovery) path since
11360 * no Tx/Rx resources are being allocated.
11362 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
11366 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11367 err = i40e_setup_misc_vector(pf);
11370 dev_info(&pf->pdev->dev,
11371 "MSI-X misc vector request failed, error %d\n",
11376 u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
11378 err = request_irq(pf->pdev->irq, i40e_intr, flags,
11382 dev_info(&pf->pdev->dev,
11383 "MSI/legacy misc vector request failed, error %d\n",
11387 i40e_enable_misc_int_causes(pf);
11388 i40e_irq_dynamic_enable_icr0(pf);
11395 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
11396 * @pf: board private structure
11398 * This sets up the handler for MSIX 0, which is used to manage the
11399 * non-queue interrupts, e.g. AdminQ and errors. This is not used
11400 * when in MSI or Legacy interrupt mode.
11402 static int i40e_setup_misc_vector(struct i40e_pf *pf)
11404 struct i40e_hw *hw = &pf->hw;
11407 /* Only request the IRQ once, the first time through. */
11408 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
11409 err = request_irq(pf->msix_entries[0].vector,
11410 i40e_intr, 0, pf->int_name, pf);
11412 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
11413 dev_info(&pf->pdev->dev,
11414 "request_irq for %s failed: %d\n",
11415 pf->int_name, err);
11420 i40e_enable_misc_int_causes(pf);
11422 /* associate no queues to the misc vector */
11423 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
11424 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
11428 i40e_irq_dynamic_enable_icr0(pf);
11434 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
11435 * @vsi: Pointer to vsi structure
11436 * @seed: Buffer to store the hash keys
11437 * @lut: Buffer to store the lookup table entries
11438 * @lut_size: Size of buffer to store the lookup table entries
11440 * Return 0 on success, negative on failure
11442 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
11443 u8 *lut, u16 lut_size)
11445 struct i40e_pf *pf = vsi->back;
11446 struct i40e_hw *hw = &pf->hw;
11450 ret = i40e_aq_get_rss_key(hw, vsi->id,
11451 (struct i40e_aqc_get_set_rss_key_data *)seed);
11453 dev_info(&pf->pdev->dev,
11454 "Cannot get RSS key, err %s aq_err %s\n",
11455 i40e_stat_str(&pf->hw, ret),
11456 i40e_aq_str(&pf->hw,
11457 pf->hw.aq.asq_last_status));
11463 bool pf_lut = vsi->type == I40E_VSI_MAIN;
11465 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
11467 dev_info(&pf->pdev->dev,
11468 "Cannot get RSS lut, err %s aq_err %s\n",
11469 i40e_stat_str(&pf->hw, ret),
11470 i40e_aq_str(&pf->hw,
11471 pf->hw.aq.asq_last_status));
11480 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
11481 * @vsi: Pointer to vsi structure
11482 * @seed: RSS hash seed
11483 * @lut: Lookup table
11484 * @lut_size: Lookup table size
11486 * Returns 0 on success, negative on failure
11488 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
11489 const u8 *lut, u16 lut_size)
11491 struct i40e_pf *pf = vsi->back;
11492 struct i40e_hw *hw = &pf->hw;
11493 u16 vf_id = vsi->vf_id;
11496 /* Fill out hash function seed */
11498 u32 *seed_dw = (u32 *)seed;
11500 if (vsi->type == I40E_VSI_MAIN) {
11501 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11502 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
11503 } else if (vsi->type == I40E_VSI_SRIOV) {
11504 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
11505 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
11507 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
11512 u32 *lut_dw = (u32 *)lut;
11514 if (vsi->type == I40E_VSI_MAIN) {
11515 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11517 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11518 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
11519 } else if (vsi->type == I40E_VSI_SRIOV) {
11520 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
11522 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11523 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
11525 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
11534 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
11535 * @vsi: Pointer to VSI structure
11536 * @seed: Buffer to store the keys
11537 * @lut: Buffer to store the lookup table entries
11538 * @lut_size: Size of buffer to store the lookup table entries
11540 * Returns 0 on success, negative on failure
11542 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
11543 u8 *lut, u16 lut_size)
11545 struct i40e_pf *pf = vsi->back;
11546 struct i40e_hw *hw = &pf->hw;
11550 u32 *seed_dw = (u32 *)seed;
11552 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11553 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
11556 u32 *lut_dw = (u32 *)lut;
11558 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11560 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11561 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
11568 * i40e_config_rss - Configure RSS keys and lut
11569 * @vsi: Pointer to VSI structure
11570 * @seed: RSS hash seed
11571 * @lut: Lookup table
11572 * @lut_size: Lookup table size
11574 * Returns 0 on success, negative on failure
11576 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11578 struct i40e_pf *pf = vsi->back;
11580 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11581 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
11583 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
11587 * i40e_get_rss - Get RSS keys and lut
11588 * @vsi: Pointer to VSI structure
11589 * @seed: Buffer to store the keys
11590 * @lut: Buffer to store the lookup table entries
11591 * @lut_size: Size of buffer to store the lookup table entries
11593 * Returns 0 on success, negative on failure
11595 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11597 struct i40e_pf *pf = vsi->back;
11599 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11600 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
11602 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
11606 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
11607 * @pf: Pointer to board private structure
11608 * @lut: Lookup table
11609 * @rss_table_size: Lookup table size
11610 * @rss_size: Range of queue number for hashing
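*
* Entries are assigned round-robin across the enabled queues; e.g.
* (illustrative) with rss_table_size = 8 and rss_size = 3 the default LUT
* becomes { 0, 1, 2, 0, 1, 2, 0, 1 }.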
11612 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
11613 u16 rss_table_size, u16 rss_size)
11617 for (i = 0; i < rss_table_size; i++)
11618 lut[i] = i % rss_size;
11622 * i40e_pf_config_rss - Prepare for RSS if used
11623 * @pf: board private structure
11625 static int i40e_pf_config_rss(struct i40e_pf *pf)
11627 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11628 u8 seed[I40E_HKEY_ARRAY_SIZE];
11630 struct i40e_hw *hw = &pf->hw;
11635 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
11636 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
11637 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
11638 hena |= i40e_pf_get_default_rss_hena(pf);
11640 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
11641 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
11643 /* Determine the RSS table size based on the hardware capabilities */
11644 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
11645 reg_val = (pf->rss_table_size == 512) ?
11646 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
11647 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
11648 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
11650 /* Determine the RSS size of the VSI */
11651 if (!vsi->rss_size) {
11653 /* If the firmware does something weird during VSI init, we
11654 * could end up with zero TCs. Check for that to avoid
11655 * divide-by-zero. It probably won't pass traffic, but it also won't panic.
11658 qcount = vsi->num_queue_pairs /
11659 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
11660 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
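/* Illustrative example: with 16 queue pairs split across 4 TCs, qcount is
 * 4, so rss_size is capped at 4 even if alloc_rss_size is larger.
 */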
11662 if (!vsi->rss_size)
11665 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
11669 /* Use user configured lut if there is one, otherwise use default */
11670 if (vsi->rss_lut_user)
11671 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
11673 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
11675 /* Use user configured hash key if there is one, otherwise use a default */
11678 if (vsi->rss_hkey_user)
11679 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
11681 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
11682 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
11689 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
11690 * @pf: board private structure
11691 * @queue_count: the requested queue count for rss.
11693 * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
11694 * count, which may differ from the requested queue count.
11695 * Note: expects to be called while under rtnl_lock()
11697 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
11699 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11702 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
11705 queue_count = min_t(int, queue_count, num_online_cpus());
11706 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
11708 if (queue_count != vsi->num_queue_pairs) {
11711 vsi->req_queue_pairs = queue_count;
11712 i40e_prep_for_reset(pf, true);
11714 pf->alloc_rss_size = new_rss_size;
11716 i40e_reset_and_rebuild(pf, true, true);
11718 /* Discard the user configured hash keys and lut, if fewer
11719 * queues are enabled.
11721 if (queue_count < vsi->rss_size) {
11722 i40e_clear_rss_config_user(vsi);
11723 dev_dbg(&pf->pdev->dev,
11724 "discard user configured hash keys and lut\n");
11727 /* Reset vsi->rss_size, as number of enabled queues changed */
11728 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
11729 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11731 i40e_pf_config_rss(pf);
11733 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
11734 vsi->req_queue_pairs, pf->rss_size_max);
11735 return pf->alloc_rss_size;
11739 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
11740 * @pf: board private structure
11742 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
11744 i40e_status status;
11745 bool min_valid, max_valid;
11746 u32 max_bw, min_bw;
11748 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
11749 &min_valid, &max_valid);
11753 pf->min_bw = min_bw;
11755 pf->max_bw = max_bw;
11762 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
11763 * @pf: board private structure
11765 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
11767 struct i40e_aqc_configure_partition_bw_data bw_data;
11768 i40e_status status;
11770 /* Set the valid bit for this PF */
11771 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
11772 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
11773 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
11775 /* Set the new bandwidths */
11776 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
11782 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
11783 * @pf: board private structure
11785 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
11787 /* Commit temporary BW setting to permanent NVM image */
11788 enum i40e_admin_queue_err last_aq_status;
11792 if (pf->hw.partition_id != 1) {
11793 dev_info(&pf->pdev->dev,
11794 "Commit BW only works on partition 1! This is partition %d",
11795 pf->hw.partition_id);
11796 ret = I40E_NOT_SUPPORTED;
11797 goto bw_commit_out;
11800 /* Acquire NVM for read access */
11801 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
11802 last_aq_status = pf->hw.aq.asq_last_status;
11804 dev_info(&pf->pdev->dev,
11805 "Cannot acquire NVM for read access, err %s aq_err %s\n",
11806 i40e_stat_str(&pf->hw, ret),
11807 i40e_aq_str(&pf->hw, last_aq_status));
11808 goto bw_commit_out;
11811 /* Read word 0x10 of NVM - SW compatibility word 1 */
11812 ret = i40e_aq_read_nvm(&pf->hw,
11813 I40E_SR_NVM_CONTROL_WORD,
11814 0x10, sizeof(nvm_word), &nvm_word,
11816 /* Save off last admin queue command status before releasing the NVM resource */
11819 last_aq_status = pf->hw.aq.asq_last_status;
11820 i40e_release_nvm(&pf->hw);
11822 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
11823 i40e_stat_str(&pf->hw, ret),
11824 i40e_aq_str(&pf->hw, last_aq_status));
11825 goto bw_commit_out;
11828 /* Wait a bit for NVM release to complete */
11831 /* Acquire NVM for write access */
11832 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
11833 last_aq_status = pf->hw.aq.asq_last_status;
11835 dev_info(&pf->pdev->dev,
11836 "Cannot acquire NVM for write access, err %s aq_err %s\n",
11837 i40e_stat_str(&pf->hw, ret),
11838 i40e_aq_str(&pf->hw, last_aq_status));
11839 goto bw_commit_out;
11841 /* Write it back out unchanged to initiate update NVM,
11842 * which will force a write of the shadow (alt) RAM to
11843 * the NVM - thus storing the bandwidth values permanently.
11845 ret = i40e_aq_update_nvm(&pf->hw,
11846 I40E_SR_NVM_CONTROL_WORD,
11847 0x10, sizeof(nvm_word),
11848 &nvm_word, true, 0, NULL);
11849 /* Save off last admin queue command status before releasing the NVM resource */
11852 last_aq_status = pf->hw.aq.asq_last_status;
11853 i40e_release_nvm(&pf->hw);
11855 dev_info(&pf->pdev->dev,
11856 "BW settings NOT SAVED, err %s aq_err %s\n",
11857 i40e_stat_str(&pf->hw, ret),
11858 i40e_aq_str(&pf->hw, last_aq_status));
11865 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
11866 * @pf: board private structure to initialize
11868 * i40e_sw_init initializes the Adapter private data structure.
11869 * Fields are initialized based on PCI device information and
11870 * OS network device settings (MTU size).
11872 static int i40e_sw_init(struct i40e_pf *pf)
11877 /* Set default capability flags */
11878 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
11879 I40E_FLAG_MSI_ENABLED |
11880 I40E_FLAG_MSIX_ENABLED;
11882 /* Set default ITR */
11883 pf->rx_itr_default = I40E_ITR_RX_DEF;
11884 pf->tx_itr_default = I40E_ITR_TX_DEF;
11886 /* Depending on PF configurations, it is possible that the RSS
11887 * maximum might end up larger than the available queues
11889 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
11890 pf->alloc_rss_size = 1;
11891 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
11892 pf->rss_size_max = min_t(int, pf->rss_size_max,
11893 pf->hw.func_caps.num_tx_qp);
11894 if (pf->hw.func_caps.rss) {
11895 pf->flags |= I40E_FLAG_RSS_ENABLED;
11896 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
11897 num_online_cpus());
11900 /* MFP mode enabled */
11901 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
11902 pf->flags |= I40E_FLAG_MFP_ENABLED;
11903 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
11904 if (i40e_get_partition_bw_setting(pf)) {
11905 dev_warn(&pf->pdev->dev,
11906 "Could not get partition bw settings\n");
11908 dev_info(&pf->pdev->dev,
11909 "Partition BW Min = %8.8x, Max = %8.8x\n",
11910 pf->min_bw, pf->max_bw);
11912 /* nudge the Tx scheduler */
11913 i40e_set_partition_bw_setting(pf);
11917 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
11918 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
11919 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
11920 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
11921 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
11922 pf->hw.num_partitions > 1)
11923 dev_info(&pf->pdev->dev,
11924 "Flow Director Sideband mode Disabled in MFP mode\n");
11926 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11927 pf->fdir_pf_filter_count =
11928 pf->hw.func_caps.fd_filters_guaranteed;
11929 pf->hw.fdir_shared_filter_count =
11930 pf->hw.func_caps.fd_filters_best_effort;
11933 if (pf->hw.mac.type == I40E_MAC_X722) {
11934 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
11935 I40E_HW_128_QP_RSS_CAPABLE |
11936 I40E_HW_ATR_EVICT_CAPABLE |
11937 I40E_HW_WB_ON_ITR_CAPABLE |
11938 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
11939 I40E_HW_NO_PCI_LINK_CHECK |
11940 I40E_HW_USE_SET_LLDP_MIB |
11941 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
11942 I40E_HW_PTP_L4_CAPABLE |
11943 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
11944 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
11946 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
11947 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
11948 I40E_FDEVICT_PCTYPE_DEFAULT) {
11949 dev_warn(&pf->pdev->dev,
11950 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
11951 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
11953 } else if ((pf->hw.aq.api_maj_ver > 1) ||
11954 ((pf->hw.aq.api_maj_ver == 1) &&
11955 (pf->hw.aq.api_min_ver > 4))) {
11956 /* Supported in FW API version higher than 1.4 */
11957 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
11960 /* Enable HW ATR eviction if possible */
11961 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
11962 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
11964 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11965 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
11966 (pf->hw.aq.fw_maj_ver < 4))) {
11967 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
11968 /* No DCB support for FW < v4.33 */
11969 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
11972 /* Disable FW LLDP if FW < v4.3 */
11973 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11974 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
11975 (pf->hw.aq.fw_maj_ver < 4)))
11976 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
11978 /* Use the FW Set LLDP MIB API if FW > v4.40 */
11979 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11980 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
11981 (pf->hw.aq.fw_maj_ver >= 5)))
11982 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
11984 /* Enable PTP L4 if FW > v6.0 */
11985 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11986 pf->hw.aq.fw_maj_ver >= 6)
11987 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
11989 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
11990 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
11991 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
11992 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
11995 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
11996 pf->flags |= I40E_FLAG_IWARP_ENABLED;
11997 /* IWARP needs one extra vector for CQP just like MISC. */
11998 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
12000 /* Stopping FW LLDP engine is supported on XL710 and X722
12001 * starting from FW versions determined in i40e_init_adminq.
12002 * Stopping the FW LLDP engine is not supported on XL710
12003 * if NPAR is functioning so unset this hw flag in this case.
12005 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12006 pf->hw.func_caps.npar_enable &&
12007 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
12008 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
12010 #ifdef CONFIG_PCI_IOV
12011 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
12012 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
12013 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
12014 pf->num_req_vfs = min_t(int,
12015 pf->hw.func_caps.num_vfs,
12016 I40E_MAX_VF_COUNT);
12018 #endif /* CONFIG_PCI_IOV */
12019 pf->eeprom_version = 0xDEAD;
12020 pf->lan_veb = I40E_NO_VEB;
12021 pf->lan_vsi = I40E_NO_VSI;
12023 /* By default FW has this off for performance reasons */
12024 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
12026 /* set up queue assignment tracking */
12027 size = sizeof(struct i40e_lump_tracking)
12028 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12029 pf->qp_pile = kzalloc(size, GFP_KERNEL);
12030 if (!pf->qp_pile) {
12034 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12035 pf->qp_pile->search_hint = 0;
12037 pf->tx_timeout_recovery_level = 1;
12039 mutex_init(&pf->switch_mutex);
12046 * i40e_set_ntuple - set the ntuple feature flag and take action
12047 * @pf: board private structure to initialize
12048 * @features: the feature set that the stack is suggesting
12050 * returns a bool to indicate if reset needs to happen
12052 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12054 bool need_reset = false;
12056 /* Check if Flow Director n-tuple support was enabled or disabled. If
12057 * the state changed, we need to reset.
12059 if (features & NETIF_F_NTUPLE) {
12060 /* Enable filters and mark for reset */
12061 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
12063 /* enable FD_SB only if there is an MSI-X vector and no cloud filters exist */
12066 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12067 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12068 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
12071 /* turn off filters, mark for reset and clear SW filter list */
12072 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
12074 i40e_fdir_filter_exit(pf);
12076 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
12077 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12078 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12080 /* reset fd counters */
12081 pf->fd_add_err = 0;
12082 pf->fd_atr_cnt = 0;
12083 /* if ATR was auto disabled it can be re-enabled. */
12084 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12085 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
12086 (I40E_DEBUG_FD & pf->hw.debug_mask))
12087 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12093 * i40e_clear_rss_lut - clear the rx hash lookup table
12094 * @vsi: the VSI being configured
12096 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12098 struct i40e_pf *pf = vsi->back;
12099 struct i40e_hw *hw = &pf->hw;
12100 u16 vf_id = vsi->vf_id;
12103 if (vsi->type == I40E_VSI_MAIN) {
12104 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12105 wr32(hw, I40E_PFQF_HLUT(i), 0);
12106 } else if (vsi->type == I40E_VSI_SRIOV) {
12107 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12108 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12110 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12115 * i40e_set_features - set the netdev feature flags
12116 * @netdev: ptr to the netdev being adjusted
12117 * @features: the feature set that the stack is suggesting
12118 * Note: expects to be called while under rtnl_lock()
12120 static int i40e_set_features(struct net_device *netdev,
12121 netdev_features_t features)
12123 struct i40e_netdev_priv *np = netdev_priv(netdev);
12124 struct i40e_vsi *vsi = np->vsi;
12125 struct i40e_pf *pf = vsi->back;
12128 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12129 i40e_pf_config_rss(pf);
12130 else if (!(features & NETIF_F_RXHASH) &&
12131 netdev->features & NETIF_F_RXHASH)
12132 i40e_clear_rss_lut(vsi);
12134 if (features & NETIF_F_HW_VLAN_CTAG_RX)
12135 i40e_vlan_stripping_enable(vsi);
12137 i40e_vlan_stripping_disable(vsi);
12139 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12140 dev_err(&pf->pdev->dev,
12141 "Offloaded tc filters active, can't turn hw_tc_offload off");
12145 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12146 i40e_del_all_macvlans(vsi);
12148 need_reset = i40e_set_ntuple(pf, features);
12151 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12157 * i40e_get_udp_port_idx - Look up the index of a possibly offloaded Rx UDP port
12158 * @pf: board private structure
12159 * @port: The UDP port to look up
12161 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
12163 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
12167 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
12168 /* Do not report ports with pending deletions as being available. */
12171 if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
12173 if (pf->udp_ports[i].port == port)
12181 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
12182 * @netdev: This physical port's netdev
12183 * @ti: Tunnel endpoint information
12185 static void i40e_udp_tunnel_add(struct net_device *netdev,
12186 struct udp_tunnel_info *ti)
12188 struct i40e_netdev_priv *np = netdev_priv(netdev);
12189 struct i40e_vsi *vsi = np->vsi;
12190 struct i40e_pf *pf = vsi->back;
12191 u16 port = ntohs(ti->port);
12195 idx = i40e_get_udp_port_idx(pf, port);
12197 /* Check if port already exists */
12198 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
12199 netdev_info(netdev, "port %d already offloaded\n", port);
12203 /* Now check if there is space to add the new port */
12204 next_idx = i40e_get_udp_port_idx(pf, 0);
12206 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
12207 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
12212 switch (ti->type) {
12213 case UDP_TUNNEL_TYPE_VXLAN:
12214 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
12216 case UDP_TUNNEL_TYPE_GENEVE:
12217 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
12219 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
12225 /* New port: add it and mark its index in the bitmap */
12226 pf->udp_ports[next_idx].port = port;
12227 pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
12228 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
12229 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
12233 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
12234 * @netdev: This physical port's netdev
12235 * @ti: Tunnel endpoint information
12237 static void i40e_udp_tunnel_del(struct net_device *netdev,
12238 struct udp_tunnel_info *ti)
12240 struct i40e_netdev_priv *np = netdev_priv(netdev);
12241 struct i40e_vsi *vsi = np->vsi;
12242 struct i40e_pf *pf = vsi->back;
12243 u16 port = ntohs(ti->port);
12246 idx = i40e_get_udp_port_idx(pf, port);
12248 /* Check if port already exists */
12249 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
12252 switch (ti->type) {
12253 case UDP_TUNNEL_TYPE_VXLAN:
12254 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
12257 case UDP_TUNNEL_TYPE_GENEVE:
12258 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
12265 /* if port exists, set it to 0 (mark for deletion)
12266 * and make it pending
12268 pf->udp_ports[idx].port = 0;
12270 /* Toggle pending bit instead of setting it. This way if we are
12271 * deleting a port that has yet to be added we just clear the pending
12272 * bit and don't have to worry about it.
12274 pf->pending_udp_bitmap ^= BIT_ULL(idx);
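/* Example (illustrative): if the port being deleted was still pending an
 * add, the XOR above clears its bit and no sync work is queued for it; if
 * the port was already committed to hardware, the XOR sets the bit so the
 * filter sync task removes the filter.
 */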
12275 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
12279 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
12283 static int i40e_get_phys_port_id(struct net_device *netdev,
12284 struct netdev_phys_item_id *ppid)
12286 struct i40e_netdev_priv *np = netdev_priv(netdev);
12287 struct i40e_pf *pf = np->vsi->back;
12288 struct i40e_hw *hw = &pf->hw;
12290 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
12291 return -EOPNOTSUPP;
12293 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
12294 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
12300 * i40e_ndo_fdb_add - add an entry to the hardware database
12301 * @ndm: the input from the stack
12302 * @tb: pointer to array of nladdr (unused)
12303 * @dev: the net device pointer
12304 * @addr: the MAC address entry being added
* @vid: VLAN ID
12306 * @flags: instructions from stack about fdb operation
12308 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
12309 struct net_device *dev,
12310 const unsigned char *addr, u16 vid,
12312 struct netlink_ext_ack *extack)
12314 struct i40e_netdev_priv *np = netdev_priv(dev);
12315 struct i40e_pf *pf = np->vsi->back;
12318 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
12319 return -EOPNOTSUPP;
12322 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
12326 /* Hardware does not support aging addresses so if a
12327 * ndm_state is given only allow permanent addresses
12329 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
12330 netdev_info(dev, "FDB only supports static addresses\n");
12334 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
12335 err = dev_uc_add_excl(dev, addr);
12336 else if (is_multicast_ether_addr(addr))
12337 err = dev_mc_add_excl(dev, addr);
12341 /* Only return duplicate errors if NLM_F_EXCL is set */
12342 if (err == -EEXIST && !(flags & NLM_F_EXCL))
12349 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
12350 * @dev: the netdev being configured
12351 * @nlh: RTNL message
12352 * @flags: bridge flags
12353 * @extack: netlink extended ack
12355 * Inserts a new hardware bridge if not already created and
12356 * enables the bridging mode requested (VEB or VEPA). If the
12357 * hardware bridge has already been inserted and the request
12358 * is to change the mode then that requires a PF reset to
12359 * allow rebuild of the components with required hardware
12360 * bridge mode enabled.
12362 * Note: expects to be called while under rtnl_lock()
12364 static int i40e_ndo_bridge_setlink(struct net_device *dev,
12365 struct nlmsghdr *nlh,
12367 struct netlink_ext_ack *extack)
12369 struct i40e_netdev_priv *np = netdev_priv(dev);
12370 struct i40e_vsi *vsi = np->vsi;
12371 struct i40e_pf *pf = vsi->back;
12372 struct i40e_veb *veb = NULL;
12373 struct nlattr *attr, *br_spec;
12376 /* Only for PF VSI for now */
12377 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12378 return -EOPNOTSUPP;
12380 /* Find the HW bridge for PF VSI */
12381 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12382 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12386 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12388 nla_for_each_nested(attr, br_spec, rem) {
12391 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12394 mode = nla_get_u16(attr);
12395 if ((mode != BRIDGE_MODE_VEPA) &&
12396 (mode != BRIDGE_MODE_VEB))
12399 /* Insert a new HW bridge */
12401 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12402 vsi->tc_config.enabled_tc);
12404 veb->bridge_mode = mode;
12405 i40e_config_bridge_mode(veb);
12407 /* No Bridge HW offload available */
12411 } else if (mode != veb->bridge_mode) {
12412 /* Existing HW bridge but different mode needs reset */
12413 veb->bridge_mode = mode;
12414 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
12415 if (mode == BRIDGE_MODE_VEB)
12416 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
12418 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12419 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12428 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
12431 * @seq: RTNL message seq #
12432 * @dev: the netdev being configured
12433 * @filter_mask: unused
12434 * @nlflags: netlink flags passed in
12436 * Return the mode (VEB or VEPA) in which the hardware bridge is operating
12439 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12440 struct net_device *dev,
12441 u32 __always_unused filter_mask,
12444 struct i40e_netdev_priv *np = netdev_priv(dev);
12445 struct i40e_vsi *vsi = np->vsi;
12446 struct i40e_pf *pf = vsi->back;
12447 struct i40e_veb *veb = NULL;
12450 /* Only for PF VSI for now */
12451 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12452 return -EOPNOTSUPP;
12454 /* Find the HW bridge for the PF VSI */
12455 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12456 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12463 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
12464 0, 0, nlflags, filter_mask, NULL);
12468 * i40e_features_check - Validate encapsulated packet conforms to limits
12470 * @dev: This physical port's netdev
12471 * @features: Offload features that the stack believes apply
12473 static netdev_features_t i40e_features_check(struct sk_buff *skb,
12474 struct net_device *dev,
12475 netdev_features_t features)
12479 /* No point in doing any of this if neither checksum nor GSO are
12480 * being requested for this frame. We can rule out both by just
12481 * checking for CHECKSUM_PARTIAL
12483 if (skb->ip_summed != CHECKSUM_PARTIAL)
12486 /* We cannot support GSO if the MSS is going to be less than
12487 * 64 bytes. If it is then we need to drop support for GSO.
12489 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
12490 features &= ~NETIF_F_GSO_MASK;
12492 /* MACLEN can support at most 63 words */
12493 len = skb_network_header(skb) - skb->data;
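/* Note (illustrative): the MACLEN descriptor field counts 2-byte words, so
 * the L2 header may be at most 63 * 2 = 126 bytes; the mask test below
 * rejects both longer and odd-length headers.
 */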
12494 if (len & ~(63 * 2))
12497 /* IPLEN and EIPLEN can support at most 127 dwords */
12498 len = skb_transport_header(skb) - skb_network_header(skb);
12499 if (len & ~(127 * 4))
12502 if (skb->encapsulation) {
12503 /* L4TUNLEN can support 127 words */
12504 len = skb_inner_network_header(skb) - skb_transport_header(skb);
12505 if (len & ~(127 * 2))
12508 /* IPLEN can support at most 127 dwords */
12509 len = skb_inner_transport_header(skb) -
12510 skb_inner_network_header(skb);
12511 if (len & ~(127 * 4))
12515 /* No need to validate L4LEN as TCP is the only protocol with a
12516 * flexible value and we support all possible values supported
12517 * by TCP, which is at most 15 dwords
12522 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
12526 * i40e_xdp_setup - add/remove an XDP program
12527 * @vsi: VSI to be changed
12528 * @prog: XDP program
12530 static int i40e_xdp_setup(struct i40e_vsi *vsi,
12531 struct bpf_prog *prog)
12533 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
12534 struct i40e_pf *pf = vsi->back;
12535 struct bpf_prog *old_prog;
12539 /* Don't allow frames that span over multiple buffers */
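/* Illustrative example (buffer size is configuration dependent): with a
 * 2048-byte Rx buffer a standard 1500-byte MTU frame fits in one buffer,
 * but a jumbo MTU would require multi-buffer Rx, which this XDP path does
 * not support, so such a configuration is rejected.
 */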
12540 if (frame_size > vsi->rx_buf_len)
12543 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
12546 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
12547 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
12550 i40e_prep_for_reset(pf, true);
12552 old_prog = xchg(&vsi->xdp_prog, prog);
12555 i40e_reset_and_rebuild(pf, true, true);
12557 for (i = 0; i < vsi->num_queue_pairs; i++)
12558 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
12561 bpf_prog_put(old_prog);
12563 /* Kick start the NAPI context if there is an AF_XDP socket open
12564 * on that queue id. This is so that receiving will start.
12566 if (need_reset && prog)
12567 for (i = 0; i < vsi->num_queue_pairs; i++)
12568 if (vsi->xdp_rings[i]->xsk_umem)
12569 (void)i40e_xsk_async_xmit(vsi->netdev, i);
12575 * i40e_enter_busy_conf - Enters busy config state
12578 * Returns 0 on success, <0 for failure.
12580 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
12582 struct i40e_pf *pf = vsi->back;
12585 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
12589 usleep_range(1000, 2000);
12596 * i40e_exit_busy_conf - Exits busy config state
12599 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
12601 struct i40e_pf *pf = vsi->back;
12603 clear_bit(__I40E_CONFIG_BUSY, pf->state);
12607 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
12609 * @queue_pair: queue pair
12611 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
12613 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
12614 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
12615 memset(&vsi->tx_rings[queue_pair]->stats, 0,
12616 sizeof(vsi->tx_rings[queue_pair]->stats));
12617 if (i40e_enabled_xdp_vsi(vsi)) {
12618 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
12619 sizeof(vsi->xdp_rings[queue_pair]->stats));
12624 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
12626 * @queue_pair: queue pair
12628 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
12630 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
12631 if (i40e_enabled_xdp_vsi(vsi)) {
12632 /* Make sure that in-progress ndo_xdp_xmit calls are completed. */
12636 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
12638 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
12642 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
12644 * @queue_pair: queue pair
12645 * @enable: true for enable, false for disable
12647 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
12650 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12651 struct i40e_q_vector *q_vector = rxr->q_vector;
12656 /* All rings in a qp belong to the same qvector. */
12657 if (q_vector->rx.ring || q_vector->tx.ring) {
12659 napi_enable(&q_vector->napi);
12661 napi_disable(&q_vector->napi);
12666 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
12668 * @queue_pair: queue pair
12669 * @enable: true for enable, false for disable
12671 * Returns 0 on success, <0 on failure.
12673 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
12676 struct i40e_pf *pf = vsi->back;
12679 pf_q = vsi->base_queue + queue_pair;
12680 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
12681 false /*is xdp*/, enable);
12683 dev_info(&pf->pdev->dev,
12684 "VSI seid %d Tx ring %d %sable timeout\n",
12685 vsi->seid, pf_q, (enable ? "en" : "dis"));
12689 i40e_control_rx_q(pf, pf_q, enable);
12690 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
12692 dev_info(&pf->pdev->dev,
12693 "VSI seid %d Rx ring %d %sable timeout\n",
12694 vsi->seid, pf_q, (enable ? "en" : "dis"));
12698 /* Due to HW errata, on Rx disable only, the register can
12699 * indicate done before it really is. Needs 50ms to be sure
12704 if (!i40e_enabled_xdp_vsi(vsi))
12707 ret = i40e_control_wait_tx_q(vsi->seid, pf,
12708 pf_q + vsi->alloc_queue_pairs,
12709 true /*is xdp*/, enable);
12711 dev_info(&pf->pdev->dev,
12712 "VSI seid %d XDP Tx ring %d %sable timeout\n",
12713 vsi->seid, pf_q, (enable ? "en" : "dis"));
12720 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
12722 * @queue_pair: queue_pair
12724 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
12726 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12727 struct i40e_pf *pf = vsi->back;
12728 struct i40e_hw *hw = &pf->hw;
12730 /* All rings in a qp belong to the same qvector. */
12731 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
12732 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
12734 i40e_irq_dynamic_enable_icr0(pf);
12740 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
12742 * @queue_pair: queue_pair
12744 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
12746 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12747 struct i40e_pf *pf = vsi->back;
12748 struct i40e_hw *hw = &pf->hw;
12750 /* For simplicity, instead of removing the qp interrupt causes
12751 * from the interrupt linked list, we simply disable the interrupt, and
12752 * leave the list intact.
12754 * All rings in a qp belong to the same qvector.
12756 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
12757 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
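/* Vector 0 (I40E_PFINT_DYN_CTL0) is the misc/other-causes vector, so queue
 * vectors map into the I40E_PFINT_DYN_CTLN register array at intpf - 1.
 */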
12759 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
12761 synchronize_irq(pf->msix_entries[intpf].vector);
12763 /* Legacy and MSI mode - this stops all interrupt handling */
12764 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
12765 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
12767 synchronize_irq(pf->pdev->irq);
12772 * i40e_queue_pair_disable - Disables a queue pair
12774 * @queue_pair: queue pair
12776 * Returns 0 on success, <0 on failure.
12778 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
12782 err = i40e_enter_busy_conf(vsi);
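/* Teardown order below: mask the queue pair's interrupt first, then stop
 * the Tx/Rx/XDP rings, quiesce NAPI, and only then clean the rings and
 * reset their statistics.
 */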
12786 i40e_queue_pair_disable_irq(vsi, queue_pair);
12787 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
12788 i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
12789 i40e_queue_pair_clean_rings(vsi, queue_pair);
12790 i40e_queue_pair_reset_stats(vsi, queue_pair);
12796 * i40e_queue_pair_enable - Enables a queue pair
12798 * @queue_pair: queue pair
12800 * Returns 0 on success, <0 on failure.
12802 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
12803 {
12804 int err;
12806 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
12807 if (err)
12808 return err;
12810 if (i40e_enabled_xdp_vsi(vsi)) {
12811 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
12812 if (err)
12813 return err;
12814 }
12816 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
12817 if (err)
12818 return err;
12820 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
12821 i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
12822 i40e_queue_pair_enable_irq(vsi, queue_pair);
12824 i40e_exit_busy_conf(vsi);
12826 return err;
12827 }
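/* Editor's note: a minimal sketch (not driver code) of how this
 * disable/enable pair is typically used. The AF_XDP path in i40e_xsk.c
 * quiesces one queue pair, swaps the UMEM, then brings the pair back up;
 * the names below follow that flow but are illustrative only:
 *
 *	bool if_running = netif_running(vsi->netdev) &&
 *			  i40e_enabled_xdp_vsi(vsi);
 *
 *	if (if_running) {
 *		err = i40e_queue_pair_disable(vsi, qid);
 *		if (err)
 *			return err;
 *	}
 *	// ... reprogram the rings for queue 'qid' here ...
 *	if (if_running)
 *		err = i40e_queue_pair_enable(vsi, qid);
 */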
12830 * i40e_xdp - implements ndo_bpf for i40e
12832 * @xdp: XDP command
12834 static int i40e_xdp(struct net_device *dev,
12835 struct netdev_bpf *xdp)
12837 struct i40e_netdev_priv *np = netdev_priv(dev);
12838 struct i40e_vsi *vsi = np->vsi;
12840 if (vsi->type != I40E_VSI_MAIN)
12841 return -EINVAL;
12843 switch (xdp->command) {
12844 case XDP_SETUP_PROG:
12845 return i40e_xdp_setup(vsi, xdp->prog);
12846 case XDP_QUERY_PROG:
12847 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
12848 return 0;
12849 case XDP_SETUP_XSK_UMEM:
12850 return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
12851 xdp->xsk.queue_id);
12852 default:
12853 return -EINVAL;
12854 }
12855 }
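/* Editor's note: for illustration only. The .ndo_bpf hook above is reached
 * from user space when an XDP program or AF_XDP socket is attached to the
 * interface, e.g. (assuming iproute2 and a compiled XDP object file):
 *
 *	# ip link set dev eth0 xdpdrv obj xdp_prog.o sec xdp
 *
 * which arrives here as an XDP_SETUP_PROG command, while binding an AF_XDP
 * socket with a UMEM to a queue produces XDP_SETUP_XSK_UMEM.
 */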
12857 static const struct net_device_ops i40e_netdev_ops = {
12858 .ndo_open = i40e_open,
12859 .ndo_stop = i40e_close,
12860 .ndo_start_xmit = i40e_lan_xmit_frame,
12861 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
12862 .ndo_set_rx_mode = i40e_set_rx_mode,
12863 .ndo_validate_addr = eth_validate_addr,
12864 .ndo_set_mac_address = i40e_set_mac,
12865 .ndo_change_mtu = i40e_change_mtu,
12866 .ndo_do_ioctl = i40e_ioctl,
12867 .ndo_tx_timeout = i40e_tx_timeout,
12868 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
12869 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
12870 #ifdef CONFIG_NET_POLL_CONTROLLER
12871 .ndo_poll_controller = i40e_netpoll,
12873 .ndo_setup_tc = __i40e_setup_tc,
12874 .ndo_set_features = i40e_set_features,
12875 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
12876 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
12877 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
12878 .ndo_get_vf_config = i40e_ndo_get_vf_config,
12879 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
12880 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
12881 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
12882 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
12883 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
12884 .ndo_get_phys_port_id = i40e_get_phys_port_id,
12885 .ndo_fdb_add = i40e_ndo_fdb_add,
12886 .ndo_features_check = i40e_features_check,
12887 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
12888 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
12889 .ndo_bpf = i40e_xdp,
12890 .ndo_xdp_xmit = i40e_xdp_xmit,
12891 .ndo_xsk_async_xmit = i40e_xsk_async_xmit,
12892 .ndo_dfwd_add_station = i40e_fwd_add,
12893 .ndo_dfwd_del_station = i40e_fwd_del,
12894 };
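/* Editor's note: illustrative sketch (not driver code) of how the core
 * networking stack reaches the callbacks in the table above; the real call
 * chains live in net/core and are simplified here:
 *
 *	const struct net_device_ops *ops = netdev->netdev_ops;
 *
 *	ops->ndo_open(netdev);             // dev_open()   -> i40e_open()
 *	ops->ndo_start_xmit(skb, netdev);  // xmit path    -> i40e_lan_xmit_frame()
 *	ops->ndo_bpf(netdev, &bpf);        // XDP setup    -> i40e_xdp()
 */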
12897 * i40e_config_netdev - Setup the netdev flags
12898 * @vsi: the VSI being configured
12900 * Returns 0 on success, negative value on failure
12902 static int i40e_config_netdev(struct i40e_vsi *vsi)
12904 struct i40e_pf *pf = vsi->back;
12905 struct i40e_hw *hw = &pf->hw;
12906 struct i40e_netdev_priv *np;
12907 struct net_device *netdev;
12908 u8 broadcast[ETH_ALEN];
12909 u8 mac_addr[ETH_ALEN];
12911 netdev_features_t hw_enc_features;
12912 netdev_features_t hw_features;
12914 etherdev_size = sizeof(struct i40e_netdev_priv);
12915 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
12919 vsi->netdev = netdev;
12920 np = netdev_priv(netdev);
12923 hw_enc_features = NETIF_F_SG |
12925 NETIF_F_IPV6_CSUM |
12927 NETIF_F_SOFT_FEATURES |
12932 NETIF_F_GSO_GRE_CSUM |
12933 NETIF_F_GSO_PARTIAL |
12934 NETIF_F_GSO_IPXIP4 |
12935 NETIF_F_GSO_IPXIP6 |
12936 NETIF_F_GSO_UDP_TUNNEL |
12937 NETIF_F_GSO_UDP_TUNNEL_CSUM |
12943 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
12944 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
12946 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
12948 netdev->hw_enc_features |= hw_enc_features;
12950 /* record features VLANs can make use of */
12951 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
12953 /* enable macvlan offloads */
12954 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
12956 hw_features = hw_enc_features |
12957 NETIF_F_HW_VLAN_CTAG_TX |
12958 NETIF_F_HW_VLAN_CTAG_RX;
12960 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
12961 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
12963 netdev->hw_features |= hw_features;
12965 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
12966 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
12968 if (vsi->type == I40E_VSI_MAIN) {
12969 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
12970 ether_addr_copy(mac_addr, hw->mac.perm_addr);
12971 /* The following steps are necessary for two reasons. First,
12972 * some older NVM configurations load a default MAC-VLAN
12973 * filter that will accept any tagged packet, and we want to
12974 * replace this with a normal filter. Additionally, it is
12975 * possible our MAC address was provided by the platform using
12976 * Open Firmware or similar.
12978 * Thus, we need to remove the default filter and install one
12979 * specific to the MAC address.
12981 i40e_rm_default_mac_filter(vsi, mac_addr);
12982 spin_lock_bh(&vsi->mac_filter_hash_lock);
12983 i40e_add_mac_filter(vsi, mac_addr);
12984 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12985 } else {
12986 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
12987 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
12988 * the end, which is 4 bytes long, so force truncation of the
12989 * original name by IFNAMSIZ - 4
12990 */
12991 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
12992 IFNAMSIZ - 4,
12993 pf->vsi[pf->lan_vsi]->netdev->name);
12994 eth_random_addr(mac_addr);
12996 spin_lock_bh(&vsi->mac_filter_hash_lock);
12997 i40e_add_mac_filter(vsi, mac_addr);
12998 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12999 }
13001 /* Add the broadcast filter so that we initially will receive
13002 * broadcast packets. Note that when a new VLAN is first added the
13003 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
13004 * specific filters as part of transitioning into "vlan" operation.
13005 * When more VLANs are added, the driver will copy each existing MAC
13006 * filter and add it for the new VLAN.
13008 * Broadcast filters are handled specially by
13009 * i40e_sync_filters_subtask, as the driver must set the broadcast
13010 * promiscuous bit instead of adding this directly as a MAC/VLAN
13011 * filter. The subtask will update the correct broadcast promiscuous
13012 * bits as VLANs become active or inactive.
13014 eth_broadcast_addr(broadcast);
13015 spin_lock_bh(&vsi->mac_filter_hash_lock);
13016 i40e_add_mac_filter(vsi, broadcast);
13017 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13019 ether_addr_copy(netdev->dev_addr, mac_addr);
13020 ether_addr_copy(netdev->perm_addr, mac_addr);
13022 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
13023 netdev->neigh_priv_len = sizeof(u32) * 4;
13025 netdev->priv_flags |= IFF_UNICAST_FLT;
13026 netdev->priv_flags |= IFF_SUPP_NOFCS;
13027 /* Setup netdev TC information */
13028 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
13030 netdev->netdev_ops = &i40e_netdev_ops;
13031 netdev->watchdog_timeo = 5 * HZ;
13032 i40e_set_ethtool_ops(netdev);
13034 /* MTU range: 68 - 9706 */
13035 netdev->min_mtu = ETH_MIN_MTU;
13036 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
13038 return 0;
13039 }
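/* Editor's note: illustrative only. Bits placed in netdev->hw_features
 * above are user-toggleable, e.g. (assuming the interface is named eth0):
 *
 *	# ethtool -K eth0 ntuple on      // flips NETIF_F_NTUPLE
 *	# ethtool -K eth0 rxvlan off     // flips NETIF_F_HW_VLAN_CTAG_RX
 *
 * Such a request lands in .ndo_set_features (i40e_set_features), whereas
 * bits present only in netdev->features (e.g. the VLAN CTAG filter) are
 * not toggleable from user space.
 */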
13042 * i40e_vsi_delete - Delete a VSI from the switch
13043 * @vsi: the VSI being removed
13045 * There is no return value; the VSI is removed via an admin queue command.
13047 static void i40e_vsi_delete(struct i40e_vsi *vsi)
13049 /* removing the default VSI is not allowed */
13050 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
13051 return;
13053 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
13054 }
13057 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
13058 * @vsi: the VSI being queried
13060 * Returns 1 if the HW bridge mode is VEB and 0 in case of VEPA mode
13062 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
13064 struct i40e_veb *veb;
13065 struct i40e_pf *pf = vsi->back;
13067 /* Uplink is not a bridge so default to VEB */
13068 if (vsi->veb_idx >= I40E_MAX_VEB)
13069 return 1;
13071 veb = pf->veb[vsi->veb_idx];
13072 if (!veb) {
13073 dev_info(&pf->pdev->dev,
13074 "There is no veb associated with the bridge\n");
13075 return -ENOENT;
13076 }
13078 /* Uplink is a bridge in VEPA mode */
13079 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
13080 return 0;
13081 } else {
13082 /* Uplink is a bridge in VEB mode */
13083 return 1;
13084 }
13086 /* VEPA is now default bridge, so return 0 */
13087 return 0;
13088 }
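/* Editor's note: illustrative only. The VEB/VEPA bridge mode queried above
 * is normally switched from user space through .ndo_bridge_setlink
 * (i40e_ndo_bridge_setlink), e.g. with iproute2:
 *
 *	# bridge link set dev eth0 hwmode vepa
 *	# bridge link set dev eth0 hwmode veb
 *
 * In VEB mode the adapter switches traffic locally between VSIs on the
 * same uplink; in VEPA mode all traffic goes out to the external switch.
 */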
13091 * i40e_add_vsi - Add a VSI to the switch
13092 * @vsi: the VSI being configured
13094 * This initializes a VSI context depending on the VSI type to be added and
13095 * passes it down to the add_vsi aq command.
13097 static int i40e_add_vsi(struct i40e_vsi *vsi)
13100 struct i40e_pf *pf = vsi->back;
13101 struct i40e_hw *hw = &pf->hw;
13102 struct i40e_vsi_context ctxt;
13103 struct i40e_mac_filter *f;
13104 struct hlist_node *h;
13107 u8 enabled_tc = 0x1; /* TC0 enabled */
13110 memset(&ctxt, 0, sizeof(ctxt));
13111 switch (vsi->type) {
13112 case I40E_VSI_MAIN:
13113 /* The PF's main VSI is already setup as part of the
13114 * device initialization, so we'll not bother with
13115 * the add_vsi call, but we will retrieve the current
13118 ctxt.seid = pf->main_vsi_seid;
13119 ctxt.pf_num = pf->hw.pf_id;
13121 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13122 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13124 dev_info(&pf->pdev->dev,
13125 "couldn't get PF vsi config, err %s aq_err %s\n",
13126 i40e_stat_str(&pf->hw, ret),
13127 i40e_aq_str(&pf->hw,
13128 pf->hw.aq.asq_last_status));
13131 vsi->info = ctxt.info;
13132 vsi->info.valid_sections = 0;
13134 vsi->seid = ctxt.seid;
13135 vsi->id = ctxt.vsi_number;
13137 enabled_tc = i40e_pf_get_tc_map(pf);
13139 /* Source pruning is enabled by default, so the flag is
13140 * negative logic - if it's set, we need to fiddle with
13141 * the VSI to disable source pruning.
13143 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
13144 memset(&ctxt, 0, sizeof(ctxt));
13145 ctxt.seid = pf->main_vsi_seid;
13146 ctxt.pf_num = pf->hw.pf_id;
13148 ctxt.info.valid_sections |=
13149 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13150 ctxt.info.switch_id =
13151 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13152 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13154 dev_info(&pf->pdev->dev,
13155 "update vsi failed, err %s aq_err %s\n",
13156 i40e_stat_str(&pf->hw, ret),
13157 i40e_aq_str(&pf->hw,
13158 pf->hw.aq.asq_last_status));
13164 /* MFP mode setup queue map and update VSI */
13165 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
13166 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
13167 memset(&ctxt, 0, sizeof(ctxt));
13168 ctxt.seid = pf->main_vsi_seid;
13169 ctxt.pf_num = pf->hw.pf_id;
13171 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13172 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13174 dev_info(&pf->pdev->dev,
13175 "update vsi failed, err %s aq_err %s\n",
13176 i40e_stat_str(&pf->hw, ret),
13177 i40e_aq_str(&pf->hw,
13178 pf->hw.aq.asq_last_status));
13182 /* update the local VSI info queue map */
13183 i40e_vsi_update_queue_map(vsi, &ctxt);
13184 vsi->info.valid_sections = 0;
13186 /* Default/Main VSI is only enabled for TC0
13187 * reconfigure it to enable all TCs that are
13188 * available on the port in SFP mode.
13189 * For MFP case the iSCSI PF would use this
13190 * flow to enable LAN+iSCSI TC.
13192 ret = i40e_vsi_config_tc(vsi, enabled_tc);
13194 /* Single TC condition is not fatal,
13195 * message and continue
13197 dev_info(&pf->pdev->dev,
13198 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
13200 i40e_stat_str(&pf->hw, ret),
13201 i40e_aq_str(&pf->hw,
13202 pf->hw.aq.asq_last_status));
13207 case I40E_VSI_FDIR:
13208 ctxt.pf_num = hw->pf_id;
13210 ctxt.uplink_seid = vsi->uplink_seid;
13211 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13212 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13213 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
13214 (i40e_is_vsi_uplink_mode_veb(vsi))) {
13215 ctxt.info.valid_sections |=
13216 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13217 ctxt.info.switch_id =
13218 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13220 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13223 case I40E_VSI_VMDQ2:
13224 ctxt.pf_num = hw->pf_id;
13226 ctxt.uplink_seid = vsi->uplink_seid;
13227 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13228 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
13230 /* This VSI is connected to VEB so the switch_id
13231 * should be set to zero by default.
13233 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13234 ctxt.info.valid_sections |=
13235 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13236 ctxt.info.switch_id =
13237 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13240 /* Setup the VSI tx/rx queue map for TC0 only for now */
13241 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13244 case I40E_VSI_SRIOV:
13245 ctxt.pf_num = hw->pf_id;
13246 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
13247 ctxt.uplink_seid = vsi->uplink_seid;
13248 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13249 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
13251 /* This VSI is connected to VEB so the switch_id
13252 * should be set to zero by default.
13254 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13255 ctxt.info.valid_sections |=
13256 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13257 ctxt.info.switch_id =
13258 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13261 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
13262 ctxt.info.valid_sections |=
13263 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
13264 ctxt.info.queueing_opt_flags |=
13265 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
13266 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
13269 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
13270 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
13271 if (pf->vf[vsi->vf_id].spoofchk) {
13272 ctxt.info.valid_sections |=
13273 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
13274 ctxt.info.sec_flags |=
13275 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
13276 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
13278 /* Setup the VSI tx/rx queue map for TC0 only for now */
13279 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13282 case I40E_VSI_IWARP:
13283 /* send down message to iWARP */
13290 if (vsi->type != I40E_VSI_MAIN) {
13291 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
13293 dev_info(&vsi->back->pdev->dev,
13294 "add vsi failed, err %s aq_err %s\n",
13295 i40e_stat_str(&pf->hw, ret),
13296 i40e_aq_str(&pf->hw,
13297 pf->hw.aq.asq_last_status));
13301 vsi->info = ctxt.info;
13302 vsi->info.valid_sections = 0;
13303 vsi->seid = ctxt.seid;
13304 vsi->id = ctxt.vsi_number;
13307 vsi->active_filters = 0;
13308 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
13309 spin_lock_bh(&vsi->mac_filter_hash_lock);
13310 /* If macvlan filters already exist, force them to get loaded */
13311 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
13312 f->state = I40E_FILTER_NEW;
13315 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13318 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
13319 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
13322 /* Update VSI BW information */
13323 ret = i40e_vsi_get_bw_info(vsi);
13325 dev_info(&pf->pdev->dev,
13326 "couldn't get vsi bw info, err %s aq_err %s\n",
13327 i40e_stat_str(&pf->hw, ret),
13328 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13329 /* VSI is already added so not tearing that up */
13338 * i40e_vsi_release - Delete a VSI and free its resources
13339 * @vsi: the VSI being removed
13341 * Returns 0 on success or < 0 on error
13343 int i40e_vsi_release(struct i40e_vsi *vsi)
13345 struct i40e_mac_filter *f;
13346 struct hlist_node *h;
13347 struct i40e_veb *veb = NULL;
13348 struct i40e_pf *pf;
13354 /* release of a VEB-owner or last VSI is not allowed */
13355 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
13356 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
13357 vsi->seid, vsi->uplink_seid);
13360 if (vsi == pf->vsi[pf->lan_vsi] &&
13361 !test_bit(__I40E_DOWN, pf->state)) {
13362 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
13366 uplink_seid = vsi->uplink_seid;
13367 if (vsi->type != I40E_VSI_SRIOV) {
13368 if (vsi->netdev_registered) {
13369 vsi->netdev_registered = false;
13370 if (vsi->netdev) {
13371 /* results in a call to i40e_close() */
13372 unregister_netdev(vsi->netdev);
13373 }
13374 } else {
13375 i40e_vsi_close(vsi);
13376 }
13377 i40e_vsi_disable_irq(vsi);
13378 }
13380 spin_lock_bh(&vsi->mac_filter_hash_lock);
13382 /* clear the sync flag on all filters */
13384 __dev_uc_unsync(vsi->netdev, NULL);
13385 __dev_mc_unsync(vsi->netdev, NULL);
13388 /* make sure any remaining filters are marked for deletion */
13389 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
13390 __i40e_del_filter(vsi, f);
13392 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13394 i40e_sync_vsi_filters(vsi);
13396 i40e_vsi_delete(vsi);
13397 i40e_vsi_free_q_vectors(vsi);
13399 free_netdev(vsi->netdev);
13400 vsi->netdev = NULL;
13402 i40e_vsi_clear_rings(vsi);
13403 i40e_vsi_clear(vsi);
13405 /* If this was the last thing on the VEB, except for the
13406 * controlling VSI, remove the VEB, which puts the controlling
13407 * VSI onto the next level down in the switch.
13409 * Well, okay, there's one more exception here: don't remove
13410 * the orphan VEBs yet. We'll wait for an explicit remove request
13411 * from up the network stack.
13413 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
13415 pf->vsi[i]->uplink_seid == uplink_seid &&
13416 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13417 n++; /* count the VSIs */
13420 for (i = 0; i < I40E_MAX_VEB; i++) {
13423 if (pf->veb[i]->uplink_seid == uplink_seid)
13424 n++; /* count the VEBs */
13425 if (pf->veb[i]->seid == uplink_seid)
13428 if (n == 0 && veb && veb->uplink_seid != 0)
13429 i40e_veb_release(veb);
13435 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
13436 * @vsi: ptr to the VSI
13438 * This should only be called after i40e_vsi_mem_alloc() which allocates the
13439 * corresponding SW VSI structure and initializes num_queue_pairs for the
13440 * newly allocated VSI.
13442 * Returns 0 on success or negative on failure
13444 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
13447 struct i40e_pf *pf = vsi->back;
13449 if (vsi->q_vectors[0]) {
13450 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
13455 if (vsi->base_vector) {
13456 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
13457 vsi->seid, vsi->base_vector);
13461 ret = i40e_vsi_alloc_q_vectors(vsi);
13463 dev_info(&pf->pdev->dev,
13464 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
13465 vsi->num_q_vectors, vsi->seid, ret);
13466 vsi->num_q_vectors = 0;
13467 goto vector_setup_out;
13470 /* In Legacy mode, we do not have to get any other vector since we
13471 * piggyback on the misc/ICR0 for queue interrupts.
13472 */
13473 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
13474 return ret;
13475 if (vsi->num_q_vectors)
13476 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
13477 vsi->num_q_vectors, vsi->idx);
13478 if (vsi->base_vector < 0) {
13479 dev_info(&pf->pdev->dev,
13480 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
13481 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
13482 i40e_vsi_free_q_vectors(vsi);
13484 goto vector_setup_out;
13492 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
13493 * @vsi: pointer to the vsi.
13495 * This re-allocates a vsi's queue resources.
13497 * Returns pointer to the successfully allocated and configured VSI sw struct
13498 * on success, otherwise returns NULL on failure.
13500 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
13502 u16 alloc_queue_pairs;
13503 struct i40e_pf *pf;
13512 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
13513 i40e_vsi_clear_rings(vsi);
13515 i40e_vsi_free_arrays(vsi, false);
13516 i40e_set_num_rings_in_vsi(vsi);
13517 ret = i40e_vsi_alloc_arrays(vsi, false);
13521 alloc_queue_pairs = vsi->alloc_queue_pairs *
13522 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
13524 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
13526 dev_info(&pf->pdev->dev,
13527 "failed to get tracking for %d queues for VSI %d err %d\n",
13528 alloc_queue_pairs, vsi->seid, ret);
13531 vsi->base_queue = ret;
13533 /* Update the FW view of the VSI. Force a reset of TC and queue
13534 * layout configurations.
13536 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
13537 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
13538 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
13539 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
13540 if (vsi->type == I40E_VSI_MAIN)
13541 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
13543 /* assign it some queues */
13544 ret = i40e_alloc_rings(vsi);
13548 /* map all of the rings to the q_vectors */
13549 i40e_vsi_map_rings_to_vectors(vsi);
13553 i40e_vsi_free_q_vectors(vsi);
13554 if (vsi->netdev_registered) {
13555 vsi->netdev_registered = false;
13556 unregister_netdev(vsi->netdev);
13557 free_netdev(vsi->netdev);
13558 vsi->netdev = NULL;
13560 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13562 i40e_vsi_clear(vsi);
13567 * i40e_vsi_setup - Set up a VSI by a given type
13568 * @pf: board private structure
13570 * @uplink_seid: the switch element to link to
13571 * @param1: usage depends upon VSI type. For VF types, indicates VF id
13573 * This allocates the sw VSI structure and its queue resources, then adds a VSI
13574 * to the identified VEB.
13576 * Returns pointer to the successfully allocated and configured VSI sw struct on
13577 * success, otherwise returns NULL on failure.
13579 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
13580 u16 uplink_seid, u32 param1)
13582 struct i40e_vsi *vsi = NULL;
13583 struct i40e_veb *veb = NULL;
13584 u16 alloc_queue_pairs;
13588 /* The requested uplink_seid must be either
13589 * - the PF's port seid
13590 * no VEB is needed because this is the PF
13591 * or this is a Flow Director special case VSI
13592 * - seid of an existing VEB
13593 * - seid of a VSI that owns an existing VEB
13594 * - seid of a VSI that doesn't own a VEB
13595 * a new VEB is created and the VSI becomes the owner
13596 * - seid of the PF VSI, which is what creates the first VEB
13597 * this is a special case of the previous
13599 * Find which uplink_seid we were given and create a new VEB if needed
13601 for (i = 0; i < I40E_MAX_VEB; i++) {
13602 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
13608 if (!veb && uplink_seid != pf->mac_seid) {
13610 for (i = 0; i < pf->num_alloc_vsi; i++) {
13611 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
13617 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
13622 if (vsi->uplink_seid == pf->mac_seid)
13623 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
13624 vsi->tc_config.enabled_tc);
13625 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
13626 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
13627 vsi->tc_config.enabled_tc);
13629 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
13630 dev_info(&vsi->back->pdev->dev,
13631 "New VSI creation error, uplink seid of LAN VSI expected.\n");
13634 /* We come up by default in VEPA mode if SRIOV is not
13635 * already enabled, in which case we can't force VEPA
13638 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
13639 veb->bridge_mode = BRIDGE_MODE_VEPA;
13640 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
13642 i40e_config_bridge_mode(veb);
13644 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
13645 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
13649 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
13653 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
13654 uplink_seid = veb->seid;
13657 /* get vsi sw struct */
13658 v_idx = i40e_vsi_mem_alloc(pf, type);
13661 vsi = pf->vsi[v_idx];
13665 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
13667 if (type == I40E_VSI_MAIN)
13668 pf->lan_vsi = v_idx;
13669 else if (type == I40E_VSI_SRIOV)
13670 vsi->vf_id = param1;
13671 /* assign it some queues */
13672 alloc_queue_pairs = vsi->alloc_queue_pairs *
13673 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
13675 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
13677 dev_info(&pf->pdev->dev,
13678 "failed to get tracking for %d queues for VSI %d err=%d\n",
13679 alloc_queue_pairs, vsi->seid, ret);
13682 vsi->base_queue = ret;
13684 /* get a VSI from the hardware */
13685 vsi->uplink_seid = uplink_seid;
13686 ret = i40e_add_vsi(vsi);
13690 switch (vsi->type) {
13691 /* setup the netdev if needed */
13692 case I40E_VSI_MAIN:
13693 case I40E_VSI_VMDQ2:
13694 ret = i40e_config_netdev(vsi);
13697 ret = register_netdev(vsi->netdev);
13700 vsi->netdev_registered = true;
13701 netif_carrier_off(vsi->netdev);
13702 #ifdef CONFIG_I40E_DCB
13703 /* Setup DCB netlink interface */
13704 i40e_dcbnl_setup(vsi);
13705 #endif /* CONFIG_I40E_DCB */
13708 case I40E_VSI_FDIR:
13709 /* set up vectors and rings if needed */
13710 ret = i40e_vsi_setup_vectors(vsi);
13714 ret = i40e_alloc_rings(vsi);
13718 /* map all of the rings to the q_vectors */
13719 i40e_vsi_map_rings_to_vectors(vsi);
13721 i40e_vsi_reset_stats(vsi);
13725 /* no netdev or rings for the other VSI types */
13729 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
13730 (vsi->type == I40E_VSI_VMDQ2)) {
13731 ret = i40e_vsi_config_rss(vsi);
13736 i40e_vsi_free_q_vectors(vsi);
13738 if (vsi->netdev_registered) {
13739 vsi->netdev_registered = false;
13740 unregister_netdev(vsi->netdev);
13741 free_netdev(vsi->netdev);
13742 vsi->netdev = NULL;
13745 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13747 i40e_vsi_clear(vsi);
13753 * i40e_veb_get_bw_info - Query VEB BW information
13754 * @veb: the veb to query
13756 * Query the Tx scheduler BW configuration data for given VEB
13758 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
13760 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
13761 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
13762 struct i40e_pf *pf = veb->pf;
13763 struct i40e_hw *hw = &pf->hw;
13768 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
13771 dev_info(&pf->pdev->dev,
13772 "query veb bw config failed, err %s aq_err %s\n",
13773 i40e_stat_str(&pf->hw, ret),
13774 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
13778 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
13781 dev_info(&pf->pdev->dev,
13782 "query veb bw ets config failed, err %s aq_err %s\n",
13783 i40e_stat_str(&pf->hw, ret),
13784 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
13788 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
13789 veb->bw_max_quanta = ets_data.tc_bw_max;
13790 veb->is_abs_credits = bw_data.absolute_credits_enable;
13791 veb->enabled_tc = ets_data.tc_valid_bits;
13792 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
13793 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
13794 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
13795 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
13796 veb->bw_tc_limit_credits[i] =
13797 le16_to_cpu(bw_data.tc_bw_limits[i]);
13798 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
13799 }
13801 out:
13802 return ret;
13803 }
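/* Editor's note: worked example with illustrative values. tc_bw_max packs
 * one 3-bit "max quanta" field per TC, 4 bits apart, across the two LE
 * 16-bit words combined above. With tc_bw_max == 0x00325476:
 *
 *	TC0: (0x00325476 >> 0)  & 0x7 = 6
 *	TC1: (0x00325476 >> 4)  & 0x7 = 7
 *	TC2: (0x00325476 >> 8)  & 0x7 = 4
 *	TC3: (0x00325476 >> 12) & 0x7 = 5
 *	and so on for the remaining TCs.
 */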
13806 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
13807 * @pf: board private structure
13809 * On error: returns error code (negative)
13810 * On success: returns VEB index in PF (positive)
13812 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
13815 struct i40e_veb *veb;
13818 /* Need to protect the allocation of switch elements at the PF level */
13819 mutex_lock(&pf->switch_mutex);
13821 /* VEB list may be fragmented if VEB creation/destruction has
13822 * been happening. We can afford to do a quick scan to look
13823 * for any free slots in the list.
13825 * find next empty veb slot, looping back around if necessary
13828 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
13830 if (i >= I40E_MAX_VEB) {
13832 goto err_alloc_veb; /* out of VEB slots! */
13835 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
13838 goto err_alloc_veb;
13842 veb->enabled_tc = 1;
13847 mutex_unlock(&pf->switch_mutex);
13852 * i40e_switch_branch_release - Delete a branch of the switch tree
13853 * @branch: where to start deleting
13855 * This uses recursion to find the tips of the branch to be
13856 * removed, deleting until we get back to and can delete this VEB.
13858 static void i40e_switch_branch_release(struct i40e_veb *branch)
13860 struct i40e_pf *pf = branch->pf;
13861 u16 branch_seid = branch->seid;
13862 u16 veb_idx = branch->idx;
13865 /* release any VEBs on this VEB - RECURSION */
13866 for (i = 0; i < I40E_MAX_VEB; i++) {
13869 if (pf->veb[i]->uplink_seid == branch->seid)
13870 i40e_switch_branch_release(pf->veb[i]);
13873 /* Release the VSIs on this VEB, but not the owner VSI.
13875 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
13876 * the VEB itself, so don't use (*branch) after this loop.
13878 for (i = 0; i < pf->num_alloc_vsi; i++) {
13881 if (pf->vsi[i]->uplink_seid == branch_seid &&
13882 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13883 i40e_vsi_release(pf->vsi[i]);
13887 /* There's one corner case where the VEB might not have been
13888 * removed, so double check it here and remove it if needed.
13889 * This case happens if the veb was created from the debugfs
13890 * commands and no VSIs were added to it.
13892 if (pf->veb[veb_idx])
13893 i40e_veb_release(pf->veb[veb_idx]);
13897 * i40e_veb_clear - remove veb struct
13898 * @veb: the veb to remove
13900 static void i40e_veb_clear(struct i40e_veb *veb)
13906 struct i40e_pf *pf = veb->pf;
13908 mutex_lock(&pf->switch_mutex);
13909 if (pf->veb[veb->idx] == veb)
13910 pf->veb[veb->idx] = NULL;
13911 mutex_unlock(&pf->switch_mutex);
13918 * i40e_veb_release - Delete a VEB and free its resources
13919 * @veb: the VEB being removed
13921 void i40e_veb_release(struct i40e_veb *veb)
13923 struct i40e_vsi *vsi = NULL;
13924 struct i40e_pf *pf;
13929 /* find the remaining VSI and check for extras */
13930 for (i = 0; i < pf->num_alloc_vsi; i++) {
13931 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
13937 dev_info(&pf->pdev->dev,
13938 "can't remove VEB %d with %d VSIs left\n",
13943 /* move the remaining VSI to uplink veb */
13944 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
13945 if (veb->uplink_seid) {
13946 vsi->uplink_seid = veb->uplink_seid;
13947 if (veb->uplink_seid == pf->mac_seid)
13948 vsi->veb_idx = I40E_NO_VEB;
13950 vsi->veb_idx = veb->veb_idx;
13953 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
13954 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
13957 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13958 i40e_veb_clear(veb);
13962 * i40e_add_veb - create the VEB in the switch
13963 * @veb: the VEB to be instantiated
13964 * @vsi: the controlling VSI
13966 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
13968 struct i40e_pf *pf = veb->pf;
13969 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
13972 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
13973 veb->enabled_tc, false,
13974 &veb->seid, enable_stats, NULL);
13976 /* get a VEB from the hardware */
13978 dev_info(&pf->pdev->dev,
13979 "couldn't add VEB, err %s aq_err %s\n",
13980 i40e_stat_str(&pf->hw, ret),
13981 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13985 /* get statistics counter */
13986 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
13987 &veb->stats_idx, NULL, NULL, NULL);
13989 dev_info(&pf->pdev->dev,
13990 "couldn't get VEB statistics idx, err %s aq_err %s\n",
13991 i40e_stat_str(&pf->hw, ret),
13992 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13995 ret = i40e_veb_get_bw_info(veb);
13997 dev_info(&pf->pdev->dev,
13998 "couldn't get VEB bw info, err %s aq_err %s\n",
13999 i40e_stat_str(&pf->hw, ret),
14000 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14001 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14005 vsi->uplink_seid = veb->seid;
14006 vsi->veb_idx = veb->idx;
14007 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14013 * i40e_veb_setup - Set up a VEB
14014 * @pf: board private structure
14015 * @flags: VEB setup flags
14016 * @uplink_seid: the switch element to link to
14017 * @vsi_seid: the initial VSI seid
14018 * @enabled_tc: Enabled TC bit-map
14020 * This allocates the sw VEB structure and links it into the switch
14021 * It is possible and legal for this to be a duplicate of an already
14022 * existing VEB. It is also possible for both uplink and vsi seids
14023 * to be zero, in order to create a floating VEB.
14025 * Returns pointer to the successfully allocated VEB sw struct on
14026 * success, otherwise returns NULL on failure.
14028 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
14029 u16 uplink_seid, u16 vsi_seid,
14032 struct i40e_veb *veb, *uplink_veb = NULL;
14033 int vsi_idx, veb_idx;
14036 /* if one seid is 0, the other must be 0 to create a floating relay */
14037 if ((uplink_seid == 0 || vsi_seid == 0) &&
14038 (uplink_seid + vsi_seid != 0)) {
14039 dev_info(&pf->pdev->dev,
14040 "one, not both seid's are 0: uplink=%d vsi=%d\n",
14041 uplink_seid, vsi_seid);
14045 /* make sure there is such a vsi and uplink */
14046 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
14047 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
14049 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
14050 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
14055 if (uplink_seid && uplink_seid != pf->mac_seid) {
14056 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
14057 if (pf->veb[veb_idx] &&
14058 pf->veb[veb_idx]->seid == uplink_seid) {
14059 uplink_veb = pf->veb[veb_idx];
14064 dev_info(&pf->pdev->dev,
14065 "uplink seid %d not found\n", uplink_seid);
14070 /* get veb sw struct */
14071 veb_idx = i40e_veb_mem_alloc(pf);
14074 veb = pf->veb[veb_idx];
14075 veb->flags = flags;
14076 veb->uplink_seid = uplink_seid;
14077 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
14078 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
14080 /* create the VEB in the switch */
14081 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
14084 if (vsi_idx == pf->lan_vsi)
14085 pf->lan_veb = veb->idx;
14090 i40e_veb_clear(veb);
14096 * i40e_setup_pf_switch_element - set PF vars based on switch type
14097 * @pf: board private structure
14098 * @ele: element we are building info from
14099 * @num_reported: total number of elements
14100 * @printconfig: should we print the contents
14102 * helper function to assist in extracting a few useful SEID values.
14104 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
14105 struct i40e_aqc_switch_config_element_resp *ele,
14106 u16 num_reported, bool printconfig)
14108 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
14109 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
14110 u8 element_type = ele->element_type;
14111 u16 seid = le16_to_cpu(ele->seid);
14114 dev_info(&pf->pdev->dev,
14115 "type=%d seid=%d uplink=%d downlink=%d\n",
14116 element_type, seid, uplink_seid, downlink_seid);
14118 switch (element_type) {
14119 case I40E_SWITCH_ELEMENT_TYPE_MAC:
14120 pf->mac_seid = seid;
14122 case I40E_SWITCH_ELEMENT_TYPE_VEB:
14124 if (uplink_seid != pf->mac_seid)
14126 if (pf->lan_veb >= I40E_MAX_VEB) {
14129 /* find existing or else empty VEB */
14130 for (v = 0; v < I40E_MAX_VEB; v++) {
14131 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
14136 if (pf->lan_veb >= I40E_MAX_VEB) {
14137 v = i40e_veb_mem_alloc(pf);
14143 if (pf->lan_veb >= I40E_MAX_VEB)
14146 pf->veb[pf->lan_veb]->seid = seid;
14147 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
14148 pf->veb[pf->lan_veb]->pf = pf;
14149 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
14151 case I40E_SWITCH_ELEMENT_TYPE_VSI:
14152 if (num_reported != 1)
14153 break;
14154 /* This is immediately after a reset so we can assume this is
14155 * the PF's VSI
14156 */
14157 pf->mac_seid = uplink_seid;
14158 pf->pf_seid = downlink_seid;
14159 pf->main_vsi_seid = seid;
14161 dev_info(&pf->pdev->dev,
14162 "pf_seid=%d main_vsi_seid=%d\n",
14163 pf->pf_seid, pf->main_vsi_seid);
14165 case I40E_SWITCH_ELEMENT_TYPE_PF:
14166 case I40E_SWITCH_ELEMENT_TYPE_VF:
14167 case I40E_SWITCH_ELEMENT_TYPE_EMP:
14168 case I40E_SWITCH_ELEMENT_TYPE_BMC:
14169 case I40E_SWITCH_ELEMENT_TYPE_PE:
14170 case I40E_SWITCH_ELEMENT_TYPE_PA:
14171 /* ignore these for now */
14174 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
14175 element_type, seid);
14181 * i40e_fetch_switch_configuration - Get switch config from firmware
14182 * @pf: board private structure
14183 * @printconfig: should we print the contents
14185 * Get the current switch configuration from the device and
14186 * extract a few useful SEID values.
14188 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
14190 struct i40e_aqc_get_switch_config_resp *sw_config;
14196 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
14200 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
14202 u16 num_reported, num_total;
14204 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
14208 dev_info(&pf->pdev->dev,
14209 "get switch config failed err %s aq_err %s\n",
14210 i40e_stat_str(&pf->hw, ret),
14211 i40e_aq_str(&pf->hw,
14212 pf->hw.aq.asq_last_status));
14217 num_reported = le16_to_cpu(sw_config->header.num_reported);
14218 num_total = le16_to_cpu(sw_config->header.num_total);
14221 dev_info(&pf->pdev->dev,
14222 "header: %d reported %d total\n",
14223 num_reported, num_total);
14225 for (i = 0; i < num_reported; i++) {
14226 struct i40e_aqc_switch_config_element_resp *ele =
14227 &sw_config->element[i];
14229 i40e_setup_pf_switch_element(pf, ele, num_reported,
14232 } while (next_seid != 0);
14239 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
14240 * @pf: board private structure
14241 * @reinit: if the Main VSI needs to re-initialized.
14243 * Returns 0 on success, negative value on failure
14245 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
14250 /* find out what's out there already */
14251 ret = i40e_fetch_switch_configuration(pf, false);
14253 dev_info(&pf->pdev->dev,
14254 "couldn't fetch switch config, err %s aq_err %s\n",
14255 i40e_stat_str(&pf->hw, ret),
14256 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14259 i40e_pf_reset_stats(pf);
14261 /* set the switch config bit for the whole device to
14262 * support limited promisc or true promisc
14263 * when user requests promisc. The default is limited
14267 if ((pf->hw.pf_id == 0) &&
14268 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
14269 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14270 pf->last_sw_conf_flags = flags;
14273 if (pf->hw.pf_id == 0) {
14276 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14277 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
14279 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
14280 dev_info(&pf->pdev->dev,
14281 "couldn't set switch config bits, err %s aq_err %s\n",
14282 i40e_stat_str(&pf->hw, ret),
14283 i40e_aq_str(&pf->hw,
14284 pf->hw.aq.asq_last_status));
14285 /* not a fatal problem, just keep going */
14287 pf->last_sw_conf_valid_flags = valid_flags;
14290 /* first time setup */
14291 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
14292 struct i40e_vsi *vsi = NULL;
14295 /* Set up the PF VSI associated with the PF's main VSI
14296 * that is already in the HW switch
14298 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
14299 uplink_seid = pf->veb[pf->lan_veb]->seid;
14301 uplink_seid = pf->mac_seid;
14302 if (pf->lan_vsi == I40E_NO_VSI)
14303 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
14305 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
14307 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
14308 i40e_cloud_filter_exit(pf);
14309 i40e_fdir_teardown(pf);
14313 /* force a reset of TC and queue layout configurations */
14314 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14316 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14317 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14318 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14320 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
14322 i40e_fdir_sb_setup(pf);
14324 /* Setup static PF queue filter control settings */
14325 ret = i40e_setup_pf_filter_control(pf);
14327 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
14329 /* Failure here should not stop continuing other steps */
14332 /* enable RSS in the HW, even for only one queue, as the stack can use
14335 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
14336 i40e_pf_config_rss(pf);
14338 /* fill in link information and enable LSE reporting */
14339 i40e_link_event(pf);
14341 /* Initialize user-specific link properties */
14342 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
14343 I40E_AQ_AN_COMPLETED) ? true : false);
14347 /* repopulate tunnel port filters */
14348 i40e_sync_udp_filters(pf);
14354 * i40e_determine_queue_usage - Work out queue distribution
14355 * @pf: board private structure
14357 static void i40e_determine_queue_usage(struct i40e_pf *pf)
14362 pf->num_lan_qps = 0;
14364 /* Find the max queues to be put into basic use. We'll always be
14365 * using TC0, whether or not DCB is running, and TC0 will get the
14366 * bulk of the queue pairs.
14367 */
14368 queues_left = pf->hw.func_caps.num_tx_qp;
14370 if ((queues_left == 1) ||
14371 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
14372 /* one qp for PF, no queues for anything else */
14374 pf->alloc_rss_size = pf->num_lan_qps = 1;
14376 /* make sure all the fancies are disabled */
14377 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
14378 I40E_FLAG_IWARP_ENABLED |
14379 I40E_FLAG_FD_SB_ENABLED |
14380 I40E_FLAG_FD_ATR_ENABLED |
14381 I40E_FLAG_DCB_CAPABLE |
14382 I40E_FLAG_DCB_ENABLED |
14383 I40E_FLAG_SRIOV_ENABLED |
14384 I40E_FLAG_VMDQ_ENABLED);
14385 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14386 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
14387 I40E_FLAG_FD_SB_ENABLED |
14388 I40E_FLAG_FD_ATR_ENABLED |
14389 I40E_FLAG_DCB_CAPABLE))) {
14390 /* one qp for PF */
14391 pf->alloc_rss_size = pf->num_lan_qps = 1;
14392 queues_left -= pf->num_lan_qps;
14394 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
14395 I40E_FLAG_IWARP_ENABLED |
14396 I40E_FLAG_FD_SB_ENABLED |
14397 I40E_FLAG_FD_ATR_ENABLED |
14398 I40E_FLAG_DCB_ENABLED |
14399 I40E_FLAG_VMDQ_ENABLED);
14400 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14402 /* Not enough queues for all TCs */
14403 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
14404 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
14405 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
14406 I40E_FLAG_DCB_ENABLED);
14407 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
14410 /* limit lan qps to the smaller of qps, cpus or msix */
14411 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
14412 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
14413 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
14414 pf->num_lan_qps = q_max;
14416 queues_left -= pf->num_lan_qps;
14419 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
14420 if (queues_left > 1) {
14421 queues_left -= 1; /* save 1 queue for FD */
14423 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
14424 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14425 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
14429 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
14430 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
14431 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
14432 (queues_left / pf->num_vf_qps));
14433 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
14436 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
14437 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
14438 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
14439 (queues_left / pf->num_vmdq_qps));
14440 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
14443 pf->queues_left = queues_left;
14444 dev_dbg(&pf->pdev->dev,
14445 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
14446 pf->hw.func_caps.num_tx_qp,
14447 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
14448 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
14449 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
14450 queues_left);
14451 }
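/* Editor's note: worked example with made-up capability numbers. Suppose
 * num_tx_qp = 128, 16 online CPUs, rss_size_max = 64, 129 MSI-X vectors,
 * FD_SB enabled, and SR-IOV requesting 8 VFs with 4 qps per VF:
 *
 *	q_max       = max(64, 16) = 64, then min(64, 128, 129) = 64
 *	num_lan_qps = 64               -> queues_left = 128 - 64 = 64
 *	FD sideband = 1 queue          -> queues_left = 63
 *	VFs         = 8 * 4 = 32       -> queues_left = 31
 *	whatever remains can be handed to VMDq VSIs
 */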
14454 * i40e_setup_pf_filter_control - Setup PF static filter control
14455 * @pf: PF to be setup
14457 * i40e_setup_pf_filter_control sets up a PF's initial filter control
14458 * settings. If PE/FCoE are enabled then it will also set the per PF
14459 * based filter sizes required for them. It also enables Flow Director,
14460 * ethertype and macvlan type filter settings for the PF.
14462 * Returns 0 on success, negative on failure
14464 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
14466 struct i40e_filter_control_settings *settings = &pf->filter_settings;
14468 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
14470 /* Flow Director is enabled */
14471 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
14472 settings->enable_fdir = true;
14474 /* Ethtype and MACVLAN filters enabled for PF */
14475 settings->enable_ethtype = true;
14476 settings->enable_macvlan = true;
14478 if (i40e_set_filter_control(&pf->hw, settings))
14479 return -ENOENT;
14481 return 0;
14482 }
14484 #define INFO_STRING_LEN 255
14485 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
14486 static void i40e_print_features(struct i40e_pf *pf)
14488 struct i40e_hw *hw = &pf->hw;
14492 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
14496 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
14497 #ifdef CONFIG_PCI_IOV
14498 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
14500 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
14501 pf->hw.func_caps.num_vsis,
14502 pf->vsi[pf->lan_vsi]->num_queue_pairs);
14503 if (pf->flags & I40E_FLAG_RSS_ENABLED)
14504 i += snprintf(&buf[i], REMAIN(i), " RSS");
14505 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
14506 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
14507 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
14508 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
14509 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
14511 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
14512 i += snprintf(&buf[i], REMAIN(i), " DCB");
14513 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
14514 i += snprintf(&buf[i], REMAIN(i), " Geneve");
14515 if (pf->flags & I40E_FLAG_PTP)
14516 i += snprintf(&buf[i], REMAIN(i), " PTP");
14517 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
14518 i += snprintf(&buf[i], REMAIN(i), " VEB");
14520 i += snprintf(&buf[i], REMAIN(i), " VEPA");
14522 dev_info(&pf->pdev->dev, "%s\n", buf);
14524 WARN_ON(i > INFO_STRING_LEN);
14525 kfree(buf);
14526 }
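/* Editor's note: illustrative sketch of the append idiom used above. Each
 * snprintf() returns the number of characters it wanted to write, so the
 * running index 'i' plus REMAIN(i) bounds every append within the single
 * INFO_STRING_LEN buffer:
 *
 *	i  = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
 *	i += snprintf(&buf[i], REMAIN(i), " RSS");
 *	i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
 *
 * The final WARN_ON(i > INFO_STRING_LEN) flags the unexpected case where
 * the assembled string no longer fit.
 */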
14528 * i40e_get_platform_mac_addr - get platform-specific MAC address
14529 * @pdev: PCI device information struct
14530 * @pf: board private structure
14532 * Look up the MAC address for the device. First we'll try
14533 * eth_platform_get_mac_address, which will check Open Firmware, or arch
14534 * specific fallback. Otherwise, we'll default to the stored value in
14535 * firmware.
14536 **/
14537 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
14539 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
14540 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
14541 }
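/* Editor's note: illustrative only. On platforms that describe the NIC in
 * device tree, eth_platform_get_mac_address() can pick the address up from
 * firmware-provided properties, e.g. a node along the lines of:
 *
 *	ethernet@0 {
 *		local-mac-address = [02 00 00 ab cd ef];
 *	};
 *
 * The exact lookup order (DT property, ACPI, arch fallback) depends on the
 * kernel's eth_platform_get_mac_address() implementation.
 */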
14544 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
14545 * @fec_cfg: FEC option to set in flags
14546 * @flags: ptr to flags in which we set FEC option
14548 void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
14550 if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
14551 *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
14552 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
14553 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
14554 *flags |= I40E_FLAG_RS_FEC;
14555 *flags &= ~I40E_FLAG_BASE_R_FEC;
14556 }
14557 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
14558 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
14559 *flags |= I40E_FLAG_BASE_R_FEC;
14560 *flags &= ~I40E_FLAG_RS_FEC;
14561 }
14562 if (fec_cfg == 0)
14563 *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
14564 }
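/* Editor's note: worked example of the mapping above (illustrative):
 *
 *	fec_cfg = I40E_AQ_SET_FEC_AUTO        -> RS_FEC and BASE_R_FEC both set
 *	fec_cfg = I40E_AQ_SET_FEC_REQUEST_RS  -> RS_FEC set, BASE_R_FEC cleared
 *	fec_cfg = I40E_AQ_SET_FEC_REQUEST_KR  -> BASE_R_FEC set, RS_FEC cleared
 *	fec_cfg = 0                           -> both flags cleared
 */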
14567 * i40e_check_recovery_mode - check if we are running transition firmware
14568 * @pf: board private structure
14570 * Check registers indicating the firmware runs in recovery mode. Sets the
14571 * appropriate driver state.
14573 * Returns true if the recovery mode was detected, false otherwise
14575 static bool i40e_check_recovery_mode(struct i40e_pf *pf)
14577 u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
14579 if (val & I40E_GL_FWSTS_FWS1B_MASK) {
14580 dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
14581 dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
14582 set_bit(__I40E_RECOVERY_MODE, pf->state);
14584 return true;
14585 }
14586 if (test_and_clear_bit(__I40E_RECOVERY_MODE, pf->state))
14587 dev_info(&pf->pdev->dev, "Reinitializing in normal mode with full functionality.\n");
14589 return false;
14590 }
14593 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
14594 * @pf: board private structure
14595 * @hw: ptr to the hardware info
14597 * This function does a minimal setup of all subsystems needed for running
14600 * Returns 0 on success, negative on failure
14602 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
14604 struct i40e_vsi *vsi;
14608 pci_save_state(pf->pdev);
14610 /* set up periodic task facility */
14611 timer_setup(&pf->service_timer, i40e_service_timer, 0);
14612 pf->service_timer_period = HZ;
14614 INIT_WORK(&pf->service_task, i40e_service_task);
14615 clear_bit(__I40E_SERVICE_SCHED, pf->state);
14617 err = i40e_init_interrupt_scheme(pf);
14619 goto err_switch_setup;
14621 /* The number of VSIs reported by the FW is the minimum guaranteed
14622 * to us; HW supports far more and we share the remaining pool with
14623 * the other PFs. We allocate space for more than the guarantee with
14624 * the understanding that we might not get them all later.
14626 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
14627 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
14629 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
14631 /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
14632 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
14636 goto err_switch_setup;
14639 /* We allocate one VSI which is needed as absolute minimum
14640 * in order to register the netdev
14642 v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
14644 goto err_switch_setup;
14645 pf->lan_vsi = v_idx;
14646 vsi = pf->vsi[v_idx];
14648 goto err_switch_setup;
14649 vsi->alloc_queue_pairs = 1;
14650 err = i40e_config_netdev(vsi);
14652 goto err_switch_setup;
14653 err = register_netdev(vsi->netdev);
14655 goto err_switch_setup;
14656 vsi->netdev_registered = true;
14657 i40e_dbg_pf_init(pf);
14659 err = i40e_setup_misc_vector_for_recovery_mode(pf);
14661 goto err_switch_setup;
14663 /* tell the firmware that we're starting */
14664 i40e_send_version(pf);
14666 /* since everything's happy, start the service_task timer */
14667 mod_timer(&pf->service_timer,
14668 round_jiffies(jiffies + pf->service_timer_period));
14673 i40e_reset_interrupt_capability(pf);
14674 del_timer_sync(&pf->service_timer);
14675 i40e_shutdown_adminq(hw);
14676 iounmap(hw->hw_addr);
14677 pci_disable_pcie_error_reporting(pf->pdev);
14678 pci_release_mem_regions(pf->pdev);
14679 pci_disable_device(pf->pdev);
14686 * i40e_probe - Device initialization routine
14687 * @pdev: PCI device information struct
14688 * @ent: entry in i40e_pci_tbl
14690 * i40e_probe initializes a PF identified by a pci_dev structure.
14691 * The OS initialization, configuring of the PF private structure,
14692 * and a hardware reset occur.
14694 * Returns 0 on success, negative on failure
14696 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
14698 struct i40e_aq_get_phy_abilities_resp abilities;
14699 struct i40e_pf *pf;
14700 struct i40e_hw *hw;
14701 static u16 pfs_found;
14709 err = pci_enable_device_mem(pdev);
14713 /* set up for high or low dma */
14714 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
14716 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
14718 dev_err(&pdev->dev,
14719 "DMA configuration failed: 0x%x\n", err);
14724 /* set up pci connections */
14725 err = pci_request_mem_regions(pdev, i40e_driver_name);
14727 dev_info(&pdev->dev,
14728 "pci_request_selected_regions failed %d\n", err);
14732 pci_enable_pcie_error_reporting(pdev);
14733 pci_set_master(pdev);
14735 /* Now that we have a PCI connection, we need to do the
14736 * low level device setup. This is primarily setting up
14737 * the Admin Queue structures and then querying for the
14738 * device's current profile information.
14740 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
14747 set_bit(__I40E_DOWN, pf->state);
14752 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
14753 I40E_MAX_CSR_SPACE);
14754 /* We believe that the highest register to read is
14755 * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
14756 * is not less than that before mapping to prevent a
14757 * kernel panic.
14758 */
14759 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
14760 dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
14765 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
14766 if (!hw->hw_addr) {
14768 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
14769 (unsigned int)pci_resource_start(pdev, 0),
14770 pf->ioremap_len, err);
14773 hw->vendor_id = pdev->vendor;
14774 hw->device_id = pdev->device;
14775 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
14776 hw->subsystem_vendor_id = pdev->subsystem_vendor;
14777 hw->subsystem_device_id = pdev->subsystem_device;
14778 hw->bus.device = PCI_SLOT(pdev->devfn);
14779 hw->bus.func = PCI_FUNC(pdev->devfn);
14780 hw->bus.bus_id = pdev->bus->number;
14781 pf->instance = pfs_found;
14783 /* Select something other than the 802.1ad ethertype for the
14784 * switch to use internally and drop on ingress.
14786 hw->switch_tag = 0xffff;
14787 hw->first_tag = ETH_P_8021AD;
14788 hw->second_tag = ETH_P_8021Q;
14790 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
14791 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
14792 INIT_LIST_HEAD(&pf->ddp_old_prof);
14794 /* set up the locks for the AQ, do this only once in probe
14795 * and destroy them only once in remove
14797 mutex_init(&hw->aq.asq_mutex);
14798 mutex_init(&hw->aq.arq_mutex);
14800 pf->msg_enable = netif_msg_init(debug,
14805 pf->hw.debug_mask = debug;
14807 /* do a special CORER for clearing PXE mode once at init */
14808 if (hw->revision_id == 0 &&
14809 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
14810 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
14815 i40e_clear_pxe_mode(hw);
14818 /* Reset here to make sure all is clean and to define PF 'n' */
14820 if (!i40e_check_recovery_mode(pf)) {
14821 err = i40e_pf_reset(hw);
14823 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
14828 hw->aq.num_arq_entries = I40E_AQ_LEN;
14829 hw->aq.num_asq_entries = I40E_AQ_LEN;
14830 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
14831 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
14832 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
14834 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
14836 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
14838 err = i40e_init_shared_code(hw);
14840 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
14845 /* set up a default setting for link flow control */
14846 pf->hw.fc.requested_mode = I40E_FC_NONE;
14848 err = i40e_init_adminq(hw);
14850 if (err == I40E_ERR_FIRMWARE_API_VERSION)
14851 dev_info(&pdev->dev,
14852 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
14853 hw->aq.api_maj_ver,
14854 hw->aq.api_min_ver,
14855 I40E_FW_API_VERSION_MAJOR,
14856 I40E_FW_MINOR_VERSION(hw));
14858 dev_info(&pdev->dev,
14859 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
14863 i40e_get_oem_version(hw);
14865 /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
14866 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
14867 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
14868 hw->aq.api_maj_ver, hw->aq.api_min_ver,
14869 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
14870 hw->subsystem_vendor_id, hw->subsystem_device_id);
14872 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
14873 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
14874 dev_info(&pdev->dev,
14875 "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
14876 hw->aq.api_maj_ver,
14877 hw->aq.api_min_ver,
14878 I40E_FW_API_VERSION_MAJOR,
14879 I40E_FW_MINOR_VERSION(hw));
14880 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
14881 dev_info(&pdev->dev,
14882 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
14883 hw->aq.api_maj_ver,
14884 hw->aq.api_min_ver,
14885 I40E_FW_API_VERSION_MAJOR,
14886 I40E_FW_MINOR_VERSION(hw));
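/* Summary of the checks above: a newer-than-expected AQ API minor version
 * only warns that the driver should be updated, while an API older than
 * 1.4 warns that the NVM image should be updated; neither case aborts
 * the probe.
 */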
14888 i40e_verify_eeprom(pf);
14890 /* Rev 0 hardware was never productized */
14891 if (hw->revision_id < 1)
14892 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
14894 i40e_clear_pxe_mode(hw);
14896 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
14897 if (err)
14898 goto err_adminq_setup;
14900 err = i40e_sw_init(pf);
14901 if (err) {
14902 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
14903 goto err_sw_init;
14904 }
14906 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
14907 return i40e_init_recovery_mode(pf, hw);
14909 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
14910 hw->func_caps.num_rx_qp, 0, 0);
14911 if (err) {
14912 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
14913 goto err_init_lan_hmc;
14914 }
14916 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
14917 if (err) {
14918 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
14919 err = -ENOENT;
14920 goto err_configure_lan_hmc;
14921 }
14923 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
14924 * Ignore error return codes because if it was already disabled via
14925 * hardware settings this will fail
14926 */
14927 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
14928 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
14929 i40e_aq_stop_lldp(hw, true, false, NULL);
14930 }
14932 /* allow a platform config to override the HW addr */
14933 i40e_get_platform_mac_addr(pdev, pf);
14935 if (!is_valid_ether_addr(hw->mac.addr)) {
14936 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
14937 err = -EIO;
14938 goto err_mac_addr;
14939 }
14940 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
14941 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
14942 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
14943 if (is_valid_ether_addr(hw->mac.port_addr))
14944 pf->hw_features |= I40E_HW_PORT_ID_VALID;
14946 pci_set_drvdata(pdev, pf);
14947 pci_save_state(pdev);
14949 dev_info(&pdev->dev,
14950 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
14951 "FW LLDP is disabled\n" :
14952 "FW LLDP is enabled\n");
14954 /* Enable FW to write default DCB config on link-up */
14955 i40e_aq_set_dcb_parameters(hw, true, NULL);
14957 #ifdef CONFIG_I40E_DCB
14958 err = i40e_init_pf_dcb(pf);
14959 if (err) {
14960 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
14961 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
14962 /* Continue without DCB enabled */
14963 }
14964 #endif /* CONFIG_I40E_DCB */
14966 /* set up periodic task facility */
14967 timer_setup(&pf->service_timer, i40e_service_timer, 0);
14968 pf->service_timer_period = HZ;
14970 INIT_WORK(&pf->service_task, i40e_service_task);
14971 clear_bit(__I40E_SERVICE_SCHED, pf->state);
14973 /* NVM bit on means WoL disabled for the port */
14974 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
14975 if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
14976 pf->wol_en = false;
14977 else
14978 pf->wol_en = true;
14979 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
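/* pf->wol_en now reflects both the per-port Wake-on-LAN disable bit read
 * from the NVM and the partition check above; device_set_wakeup_enable()
 * mirrors that decision to the PM core so userspace sees the same wakeup
 * capability.
 */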
14981 /* set up the main switch operations */
14982 i40e_determine_queue_usage(pf);
14983 err = i40e_init_interrupt_scheme(pf);
14984 if (err)
14985 goto err_switch_setup;
14987 /* The number of VSIs reported by the FW is the minimum guaranteed
14988 * to us; HW supports far more and we share the remaining pool with
14989 * the other PFs. We allocate space for more than the guarantee with
14990 * the understanding that we might not get them all later.
14991 */
14992 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
14993 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
14994 else
14995 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
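/* Example: if func_caps.num_vsis came back smaller than I40E_MIN_VSI_ALLOC,
 * pf->vsi[] is still sized for I40E_MIN_VSI_ALLOC entries; the slots beyond
 * the firmware guarantee may simply never be filled later on.
 */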
14997 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
14998 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
14999 GFP_KERNEL);
15000 if (!pf->vsi) {
15001 err = -ENOMEM;
15002 goto err_switch_setup;
15003 }
15005 #ifdef CONFIG_PCI_IOV
15006 /* prep for VF support */
15007 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15008 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15009 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15010 if (pci_num_vf(pdev))
15011 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
15012 }
15013 #endif /* CONFIG_PCI_IOV */
15014 err = i40e_setup_pf_switch(pf, false);
15015 if (err) {
15016 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
15017 goto err_vsis;
15018 }
15019 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
15021 /* Make sure flow control is set according to current settings */
15022 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
15023 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
15024 dev_dbg(&pf->pdev->dev,
15025 "Set fc with err %s aq_err %s on get_phy_cap\n",
15026 i40e_stat_str(hw, err),
15027 i40e_aq_str(hw, hw->aq.asq_last_status));
15028 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
15029 dev_dbg(&pf->pdev->dev,
15030 "Set fc with err %s aq_err %s on set_phy_config\n",
15031 i40e_stat_str(hw, err),
15032 i40e_aq_str(hw, hw->aq.asq_last_status));
15033 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
15034 dev_dbg(&pf->pdev->dev,
15035 "Set fc with err %s aq_err %s on get_link_info\n",
15036 i40e_stat_str(hw, err),
15037 i40e_aq_str(hw, hw->aq.asq_last_status));
15039 /* if FDIR VSI was set up, start it now */
15040 for (i = 0; i < pf->num_alloc_vsi; i++) {
15041 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
15042 i40e_vsi_open(pf->vsi[i]);
15043 break;
15044 }
15045 }
15047 /* The driver only wants link up/down and module qualification
15048 * reports from firmware. Note the negative logic.
15049 */
15050 err = i40e_aq_set_phy_int_mask(&pf->hw,
15051 ~(I40E_AQ_EVENT_LINK_UPDOWN |
15052 I40E_AQ_EVENT_MEDIA_NA |
15053 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
15054 if (err)
15055 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
15056 i40e_stat_str(&pf->hw, err),
15057 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15059 /* Reconfigure hardware for allowing smaller MSS in the case
15060 * of TSO, so that we avoid the MDD being fired and causing
15061 * a reset in the case of small MSS+TSO.
15062 */
15063 val = rd32(hw, I40E_REG_MSS);
15064 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
15065 val &= ~I40E_REG_MSS_MIN_MASK;
15066 val |= I40E_64BYTE_MSS;
15067 wr32(hw, I40E_REG_MSS, val);
15068 }
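/* After this clamp the device accepts a minimum segment size of 64 bytes
 * (I40E_64BYTE_MSS) rather than the larger value it came up with, so small
 * MSS+TSO requests no longer trip the MDD reset described in the comment
 * above.
 */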
15070 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
15071 msleep(75);
15072 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
15073 if (err)
15074 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
15075 i40e_stat_str(&pf->hw, err),
15076 i40e_aq_str(&pf->hw,
15077 pf->hw.aq.asq_last_status));
15078 }
15079 /* The main driver is (mostly) up and happy. We need to set this state
15080 * before setting up the misc vector or we get a race and the vector
15081 * ends up disabled forever.
15082 */
15083 clear_bit(__I40E_DOWN, pf->state);
15085 /* In case of MSIX we are going to setup the misc vector right here
15086 * to handle admin queue events etc. In case of legacy and MSI
15087 * the misc functionality and queue processing is combined in
15088 * the same vector and that gets setup at open.
15089 */
15090 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
15091 err = i40e_setup_misc_vector(pf);
15092 if (err) {
15093 dev_info(&pdev->dev,
15094 "setup of misc vector failed: %d\n", err);
15095 goto err_vsis;
15096 }
15097 }
15099 #ifdef CONFIG_PCI_IOV
15100 /* prep for VF support */
15101 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15102 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15103 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15104 /* disable link interrupts for VFs */
15105 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
15106 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
15107 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
15108 i40e_flush(hw);
15110 if (pci_num_vf(pdev)) {
15111 dev_info(&pdev->dev,
15112 "Active VFs found, allocating resources.\n");
15113 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
15114 if (err)
15115 dev_info(&pdev->dev,
15116 "Error %d allocating resources for existing VFs\n",
15117 err);
15118 }
15119 }
15120 #endif /* CONFIG_PCI_IOV */
15122 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15123 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
15124 pf->num_iwarp_msix,
15125 I40E_IWARP_IRQ_PILE_ID);
15126 if (pf->iwarp_base_vector < 0) {
15127 dev_info(&pdev->dev,
15128 "failed to get tracking for %d vectors for IWARP err=%d\n",
15129 pf->num_iwarp_msix, pf->iwarp_base_vector);
15130 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
15131 }
15132 }
15134 i40e_dbg_pf_init(pf);
15136 /* tell the firmware that we're starting */
15137 i40e_send_version(pf);
15139 /* since everything's happy, start the service_task timer */
15140 mod_timer(&pf->service_timer,
15141 round_jiffies(jiffies + pf->service_timer_period));
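/* From here on the service task re-arms itself roughly once per second
 * (pf->service_timer_period was set to HZ above), handling things like
 * admin queue events and periodic housekeeping in the background.
 */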
15143 /* add this PF to client device list and launch a client service task */
15144 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15145 err = i40e_lan_add_device(pf);
15146 if (err)
15147 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
15148 err);
15149 }
15151 #define PCI_SPEED_SIZE 8
15152 #define PCI_WIDTH_SIZE 8
15153 /* Devices on the IOSF bus do not have this information
15154 * and will report PCI Gen 1 x 1 by default so don't bother
15155 * checking them.
15156 */
15157 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
15158 char speed[PCI_SPEED_SIZE] = "Unknown";
15159 char width[PCI_WIDTH_SIZE] = "Unknown";
15161 /* Get the negotiated link width and speed from PCI config
15162 * space
15163 */
15164 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
15165 &link_status);
15167 i40e_set_pci_config_data(hw, link_status);
15169 switch (hw->bus.speed) {
15170 case i40e_bus_speed_8000:
15171 strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
15172 case i40e_bus_speed_5000:
15173 strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
15174 case i40e_bus_speed_2500:
15175 strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
15176 default:
15177 break;
15178 }
15179 switch (hw->bus.width) {
15180 case i40e_bus_width_pcie_x8:
15181 strlcpy(width, "8", PCI_WIDTH_SIZE); break;
15182 case i40e_bus_width_pcie_x4:
15183 strlcpy(width, "4", PCI_WIDTH_SIZE); break;
15184 case i40e_bus_width_pcie_x2:
15185 strlcpy(width, "2", PCI_WIDTH_SIZE); break;
15186 case i40e_bus_width_pcie_x1:
15187 strlcpy(width, "1", PCI_WIDTH_SIZE); break;
15188 default:
15189 break;
15190 }
15192 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
15193 speed, width);
15195 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
15196 hw->bus.speed < i40e_bus_speed_8000) {
15197 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
15198 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
15199 }
15200 }
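/* In practice the warnings fire for anything narrower than x8 or slower
 * than 8.0 GT/s (PCIe Gen 3); a x8 Gen 3 link or better is what this
 * check considers sufficient for optimal performance.
 */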
15202 /* get the requested speeds from the fw */
15203 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
15204 if (err)
15205 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
15206 i40e_stat_str(&pf->hw, err),
15207 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15208 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
15210 /* set the FEC config due to the board capabilities */
15211 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
15213 /* get the supported phy types from the fw */
15214 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
15215 if (err)
15216 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
15217 i40e_stat_str(&pf->hw, err),
15218 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15220 /* Add a filter to drop all Flow control frames from any VSI from being
15221 * transmitted. By doing so we stop a malicious VF from sending out
15222 * PAUSE or PFC frames and potentially controlling traffic for other
15223 * PF/VF VSIs.
15224 * The FW can still send Flow control frames if enabled.
15225 */
15226 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
15227 pf->main_vsi_seid);
15229 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
15230 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
15231 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
15232 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
15233 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
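/* These last two quirks are keyed purely off the PCI device ID: the
 * 10GBASE-T parts get I40E_HW_PHY_CONTROLS_LEDS (port LEDs are handled
 * through the PHY), and the SFP X722 variant is marked as having a CRT
 * retimer.
 */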
15234 /* print a string summarizing features */
15235 i40e_print_features(pf);
15237 return 0;
15239 /* Unwind what we've done if something failed in the setup */
15240 err_vsis:
15241 set_bit(__I40E_DOWN, pf->state);
15242 i40e_clear_interrupt_scheme(pf);
15243 kfree(pf->vsi);
15244 err_switch_setup:
15245 i40e_reset_interrupt_capability(pf);
15246 del_timer_sync(&pf->service_timer);
15247 err_mac_addr:
15248 err_configure_lan_hmc:
15249 (void)i40e_shutdown_lan_hmc(hw);
15250 err_init_lan_hmc:
15251 kfree(pf->qp_pile);
15252 err_sw_init:
15253 err_adminq_setup:
15254 err_pf_reset:
15255 iounmap(hw->hw_addr);
15256 err_ioremap:
15257 kfree(pf);
15258 err_pf_alloc:
15259 pci_disable_pcie_error_reporting(pdev);
15260 pci_release_mem_regions(pdev);
15261 err_pci_reg:
15262 err_dma:
15263 pci_disable_device(pdev);
15264 return err;
15265 }
15267 /**
15268 * i40e_remove - Device removal routine
15269 * @pdev: PCI device information struct
15271 * i40e_remove is called by the PCI subsystem to alert the driver
15272 * that it should release a PCI device. This could be caused by a
15273 * Hot-Plug event, or because the driver is going to be removed from
15274 * memory.
15275 **/
15276 static void i40e_remove(struct pci_dev *pdev)
15277 {
15278 struct i40e_pf *pf = pci_get_drvdata(pdev);
15279 struct i40e_hw *hw = &pf->hw;
15280 i40e_status ret_code;
15281 int i;
15283 i40e_dbg_pf_exit(pf);
15285 i40e_ptp_stop(pf);
15287 /* Disable RSS in hw */
15288 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
15289 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
15291 /* no more scheduling of any task */
15292 set_bit(__I40E_SUSPENDED, pf->state);
15293 set_bit(__I40E_DOWN, pf->state);
15294 if (pf->service_timer.function)
15295 del_timer_sync(&pf->service_timer);
15296 if (pf->service_task.func)
15297 cancel_work_sync(&pf->service_task);
15299 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
15300 struct i40e_vsi *vsi = pf->vsi[0];
15302 /* We know that we have allocated only one vsi for this PF,
15303 * it was just for registering netdevice, so the interface
15304 * could be visible in the 'ifconfig' output
15305 */
15306 unregister_netdev(vsi->netdev);
15307 free_netdev(vsi->netdev);
15309 goto unmap;
15310 }
15312 /* Client close must be called explicitly here because the timer
15313 * has been stopped.
15314 */
15315 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
15317 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
15318 i40e_free_vfs(pf);
15319 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
15320 }
15322 i40e_fdir_teardown(pf);
15324 /* If there is a switch structure or any orphans, remove them.
15325 * This will leave only the PF's VSI remaining.
15326 */
15327 for (i = 0; i < I40E_MAX_VEB; i++) {
15328 if (!pf->veb[i])
15329 continue;
15331 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
15332 pf->veb[i]->uplink_seid == 0)
15333 i40e_switch_branch_release(pf->veb[i]);
15334 }
15336 /* Now we can shutdown the PF's VSI, just before we kill
15337 * adminq and hmc.
15338 */
15339 if (pf->vsi[pf->lan_vsi])
15340 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
15342 i40e_cloud_filter_exit(pf);
15344 /* remove attached clients */
15345 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15346 ret_code = i40e_lan_del_device(pf);
15347 if (ret_code)
15348 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
15349 ret_code);
15350 }
15352 /* shutdown and destroy the HMC */
15353 if (hw->hmc.hmc_obj) {
15354 ret_code = i40e_shutdown_lan_hmc(hw);
15355 if (ret_code)
15356 dev_warn(&pdev->dev,
15357 "Failed to destroy the HMC resources: %d\n",
15358 ret_code);
15359 }
15361 unmap:
15362 /* Free MSI/legacy interrupt 0 when in recovery mode. */
15363 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
15364 !(pf->flags & I40E_FLAG_MSIX_ENABLED))
15365 free_irq(pf->pdev->irq, pf);
15367 /* shutdown the adminq */
15368 i40e_shutdown_adminq(hw);
15370 /* destroy the locks only once, here */
15371 mutex_destroy(&hw->aq.arq_mutex);
15372 mutex_destroy(&hw->aq.asq_mutex);
15374 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
15375 rtnl_lock();
15376 i40e_clear_interrupt_scheme(pf);
15377 for (i = 0; i < pf->num_alloc_vsi; i++) {
15378 if (pf->vsi[i]) {
15379 if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
15380 i40e_vsi_clear_rings(pf->vsi[i]);
15381 i40e_vsi_clear(pf->vsi[i]);
15382 pf->vsi[i] = NULL;
15383 }
15384 }
15385 rtnl_unlock();
15387 for (i = 0; i < I40E_MAX_VEB; i++) {
15388 kfree(pf->veb[i]);
15389 pf->veb[i] = NULL;
15390 }
15392 kfree(pf->qp_pile);
15393 kfree(pf->vsi);
15395 iounmap(hw->hw_addr);
15396 kfree(pf);
15397 pci_release_mem_regions(pdev);
15399 pci_disable_pcie_error_reporting(pdev);
15400 pci_disable_device(pdev);
15401 }
15403 /**
15404 * i40e_pci_error_detected - warning that something funky happened in PCI land
15405 * @pdev: PCI device information struct
15406 * @error: the type of PCI error
15408 * Called to warn that something happened and the error handling steps
15409 * are in progress. Allows the driver to quiesce things, be ready for
15410 * remediation.
15411 **/
15412 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
15413 enum pci_channel_state error)
15414 {
15415 struct i40e_pf *pf = pci_get_drvdata(pdev);
15417 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
15419 if (!pf) {
15420 dev_info(&pdev->dev,
15421 "Cannot recover - error happened during device probe\n");
15422 return PCI_ERS_RESULT_DISCONNECT;
15423 }
15425 /* shutdown all operations */
15426 if (!test_bit(__I40E_SUSPENDED, pf->state))
15427 i40e_prep_for_reset(pf, false);
15429 /* Request a slot reset */
15430 return PCI_ERS_RESULT_NEED_RESET;
15431 }
15433 /**
15434 * i40e_pci_error_slot_reset - a PCI slot reset just happened
15435 * @pdev: PCI device information struct
15437 * Called to find if the driver can work with the device now that
15438 * the pci slot has been reset. If a basic connection seems good
15439 * (registers are readable and have sane content) then return a
15440 * happy little PCI_ERS_RESULT_xxx.
15441 **/
15442 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
15443 {
15444 struct i40e_pf *pf = pci_get_drvdata(pdev);
15445 pci_ers_result_t result;
15446 u32 reg;
15448 dev_dbg(&pdev->dev, "%s\n", __func__);
15449 if (pci_enable_device_mem(pdev)) {
15450 dev_info(&pdev->dev,
15451 "Cannot re-enable PCI device after reset.\n");
15452 result = PCI_ERS_RESULT_DISCONNECT;
15453 } else {
15454 pci_set_master(pdev);
15455 pci_restore_state(pdev);
15456 pci_save_state(pdev);
15457 pci_wake_from_d3(pdev, false);
15459 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
15460 if (reg == 0)
15461 result = PCI_ERS_RESULT_RECOVERED;
15462 else
15463 result = PCI_ERS_RESULT_DISCONNECT;
15464 }
15466 return result;
15467 }
15469 /**
15470 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
15471 * @pdev: PCI device information struct
15472 **/
15473 static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
15474 {
15475 struct i40e_pf *pf = pci_get_drvdata(pdev);
15477 i40e_prep_for_reset(pf, false);
15478 }
15480 /**
15481 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
15482 * @pdev: PCI device information struct
15483 **/
15484 static void i40e_pci_error_reset_done(struct pci_dev *pdev)
15485 {
15486 struct i40e_pf *pf = pci_get_drvdata(pdev);
15488 i40e_reset_and_rebuild(pf, false, false);
15489 }
15491 /**
15492 * i40e_pci_error_resume - restart operations after PCI error recovery
15493 * @pdev: PCI device information struct
15495 * Called to allow the driver to bring things back up after PCI error
15496 * and/or reset recovery has finished.
15497 **/
15498 static void i40e_pci_error_resume(struct pci_dev *pdev)
15499 {
15500 struct i40e_pf *pf = pci_get_drvdata(pdev);
15502 dev_dbg(&pdev->dev, "%s\n", __func__);
15503 if (test_bit(__I40E_SUSPENDED, pf->state))
15504 return;
15506 i40e_handle_reset_warning(pf, false);
15507 }
15509 /**
15510 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
15511 * using the mac_address_write admin q function
15512 * @pf: pointer to i40e_pf struct
15513 **/
15514 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
15515 {
15516 struct i40e_hw *hw = &pf->hw;
15517 i40e_status ret;
15518 u8 mac_addr[6];
15519 u16 flags = 0;
15521 /* Get current MAC address in case it's an LAA */
15522 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
15523 ether_addr_copy(mac_addr,
15524 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
15525 } else {
15526 dev_err(&pf->pdev->dev,
15527 "Failed to retrieve MAC address; using default\n");
15528 ether_addr_copy(mac_addr, hw->mac.addr);
15529 }
15531 /* The FW expects the mac address write cmd to first be called with
15532 * one of these flags before calling it again with the multicast
15533 * enable flags.
15534 */
15535 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
15537 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
15538 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
15540 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
15541 if (ret) {
15542 dev_err(&pf->pdev->dev,
15543 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
15544 return;
15545 }
15547 flags = I40E_AQC_MC_MAG_EN
15548 | I40E_AQC_WOL_PRESERVE_ON_PFR
15549 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
15550 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
15551 if (ret)
15552 dev_err(&pf->pdev->dev,
15553 "Failed to enable Multicast Magic Packet wake up\n");
15554 }
15556 /**
15557 * i40e_shutdown - PCI callback for shutting down
15558 * @pdev: PCI device information struct
15559 **/
15560 static void i40e_shutdown(struct pci_dev *pdev)
15561 {
15562 struct i40e_pf *pf = pci_get_drvdata(pdev);
15563 struct i40e_hw *hw = &pf->hw;
15565 set_bit(__I40E_SUSPENDED, pf->state);
15566 set_bit(__I40E_DOWN, pf->state);
15568 del_timer_sync(&pf->service_timer);
15569 cancel_work_sync(&pf->service_task);
15570 i40e_cloud_filter_exit(pf);
15571 i40e_fdir_teardown(pf);
15573 /* Client close must be called explicitly here because the timer
15574 * has been stopped.
15575 */
15576 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
15578 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
15579 i40e_enable_mc_magic_wake(pf);
15581 i40e_prep_for_reset(pf, false);
15583 wr32(hw, I40E_PFPM_APM,
15584 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
15585 wr32(hw, I40E_PFPM_WUFC,
15586 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
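/* The APM/WUFC writes above arm (or explicitly disarm) APM wake and
 * magic-packet wake for this port according to pf->wol_en before the
 * device is powered down.
 */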
15588 /* Free MSI/legacy interrupt 0 when in recovery mode. */
15589 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
15590 !(pf->flags & I40E_FLAG_MSIX_ENABLED))
15591 free_irq(pf->pdev->irq, pf);
15593 /* Since we're going to destroy queues during the
15594 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
15595 * flow to prevent race conditions.
15596 */
15597 rtnl_lock();
15598 i40e_clear_interrupt_scheme(pf);
15599 rtnl_unlock();
15601 if (system_state == SYSTEM_POWER_OFF) {
15602 pci_wake_from_d3(pdev, pf->wol_en);
15603 pci_set_power_state(pdev, PCI_D3hot);
15604 }
15605 }
15607 /**
15608 * i40e_suspend - PM callback for moving to D3
15609 * @dev: generic device information structure
15610 **/
15611 static int __maybe_unused i40e_suspend(struct device *dev)
15612 {
15613 struct pci_dev *pdev = to_pci_dev(dev);
15614 struct i40e_pf *pf = pci_get_drvdata(pdev);
15615 struct i40e_hw *hw = &pf->hw;
15617 /* If we're already suspended, then there is nothing to do */
15618 if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
15619 return 0;
15621 set_bit(__I40E_DOWN, pf->state);
15623 /* Ensure service task will not be running */
15624 del_timer_sync(&pf->service_timer);
15625 cancel_work_sync(&pf->service_task);
15627 /* Client close must be called explicitly here because the timer
15628 * has been stopped.
15629 */
15630 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
15632 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
15633 i40e_enable_mc_magic_wake(pf);
15635 /* Since we're going to destroy queues during the
15636 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
15637 * flow to prevent race conditions.
15638 */
15639 rtnl_lock();
15641 i40e_prep_for_reset(pf, true);
15643 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
15644 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
15646 /* Clear the interrupt scheme and release our IRQs so that the system
15647 * can safely hibernate even when there are a large number of CPUs.
15648 * Otherwise hibernation might fail when mapping all the vectors back
15649 * to CPU0.
15650 */
15651 i40e_clear_interrupt_scheme(pf);
15653 rtnl_unlock();
15655 return 0;
15656 }
15658 /**
15659 * i40e_resume - PM callback for waking up from D3
15660 * @dev: generic device information structure
15661 **/
15662 static int __maybe_unused i40e_resume(struct device *dev)
15663 {
15664 struct pci_dev *pdev = to_pci_dev(dev);
15665 struct i40e_pf *pf = pci_get_drvdata(pdev);
15666 int err;
15668 /* If we're not suspended, then there is nothing to do */
15669 if (!test_bit(__I40E_SUSPENDED, pf->state))
15670 return 0;
15672 /* We need to hold the RTNL lock prior to restoring interrupt schemes,
15673 * since we're going to be restoring queues
15674 */
15675 rtnl_lock();
15677 /* We cleared the interrupt scheme when we suspended, so we need to
15678 * restore it now to resume device functionality.
15679 */
15680 err = i40e_restore_interrupt_scheme(pf);
15681 if (err) {
15682 dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
15683 err);
15684 }
15686 clear_bit(__I40E_DOWN, pf->state);
15687 i40e_reset_and_rebuild(pf, false, true);
15689 rtnl_unlock();
15691 /* Clear suspended state last after everything is recovered */
15692 clear_bit(__I40E_SUSPENDED, pf->state);
15694 /* Restart the service task */
15695 mod_timer(&pf->service_timer,
15696 round_jiffies(jiffies + pf->service_timer_period));
15698 return 0;
15699 }
15701 static const struct pci_error_handlers i40e_err_handler = {
15702 .error_detected = i40e_pci_error_detected,
15703 .slot_reset = i40e_pci_error_slot_reset,
15704 .reset_prepare = i40e_pci_error_reset_prepare,
15705 .reset_done = i40e_pci_error_reset_done,
15706 .resume = i40e_pci_error_resume,
15707 };
15709 static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
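/* SIMPLE_DEV_PM_OPS only fills in the system sleep (suspend/hibernate)
 * callbacks with i40e_suspend/i40e_resume; no runtime-PM handlers are
 * installed here.
 */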
15711 static struct pci_driver i40e_driver = {
15712 .name = i40e_driver_name,
15713 .id_table = i40e_pci_tbl,
15714 .probe = i40e_probe,
15715 .remove = i40e_remove,
15716 .driver = {
15717 .pm = &i40e_pm_ops,
15718 },
15719 .shutdown = i40e_shutdown,
15720 .err_handler = &i40e_err_handler,
15721 .sriov_configure = i40e_pci_sriov_configure,
15722 };
15724 /**
15725 * i40e_init_module - Driver registration routine
15727 * i40e_init_module is the first routine called when the driver is
15728 * loaded. All it does is register with the PCI subsystem.
15729 **/
15730 static int __init i40e_init_module(void)
15731 {
15732 pr_info("%s: %s - version %s\n", i40e_driver_name,
15733 i40e_driver_string, i40e_driver_version_str);
15734 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
15736 /* There is no need to throttle the number of active tasks because
15737 * each device limits its own task using a state bit for scheduling
15738 * the service task, and the device tasks do not interfere with each
15739 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
15740 * since we need to be able to guarantee forward progress even under
15741 * memory pressure.
15742 */
15743 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
15744 if (!i40e_wq) {
15745 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
15746 return -ENOMEM;
15747 }
15749 i40e_dbg_init();
15750 return pci_register_driver(&i40e_driver);
15751 }
15752 module_init(i40e_init_module);
15754 /**
15755 * i40e_exit_module - Driver exit cleanup routine
15757 * i40e_exit_module is called just before the driver is removed
15758 * from memory.
15759 **/
15760 static void __exit i40e_exit_module(void)
15761 {
15762 pci_unregister_driver(&i40e_driver);
15763 destroy_workqueue(i40e_wq);
15764 i40e_dbg_exit();
15765 }
15766 module_exit(i40e_exit_module);