// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#define ICE_PF_RESET_WAIT_COUNT	200
#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
	     ((ICE_RX_OPC_MDID << \
	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))

#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
	wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
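
/* Illustrative note (editorial, not part of the driver): ICE_PROG_FLEX_ENTRY
 * programs one flex word of an Rx descriptor builder profile; e.g.
 *
 *	ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
 *
 * expands to a single wr32() of GLFLXP_RXDID_FLX_WRD_0(prof_id) with the MDID
 * opcode and protocol MDID shifted and masked into place. ICE_PROG_FLG_ENTRY
 * likewise packs four flag sources into one GLFLXP_RXDID_FLAGS register.
 */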
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	hw->mac_type = ICE_MAC_GENERIC;
	return 0;
}
/**
 * ice_dev_onetime_setup - Temporary HW/FW workarounds
 * @hw: pointer to the HW structure
 *
 * This function provides temporary workarounds for certain issues
 * that are expected to be fixed in the HW/FW.
 */
void ice_dev_onetime_setup(struct ice_hw *hw)
{
#define MBX_PF_VT_PFALLOC	0x00231E80
/* set VFs per PF */
	wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
}
/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer. Please interpret the user
 * specified buffer as a "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_aq_discover_caps is expected to be called before this
 * function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
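
/* Usage sketch (illustrative only): callers size the response buffer for the
 * two possible (LAN and WoL) entries before issuing the read, e.g.
 *
 *	struct ice_aqc_manage_mac_read_resp buf[2];
 *
 *	status = ice_aq_manage_mac_read(hw, buf, sizeof(buf), NULL);
 *
 * On success the LAN address has been copied into hw->port_info->mac; this is
 * the pattern ice_init_hw() below follows with a devm_kcalloc()'d buffer.
 */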
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
	}

	return status;
}
/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
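
/* Usage sketch (illustrative only): pi->phy.get_link_info is set whenever a
 * link event makes the cached status stale, so callers typically refresh with
 *
 *	if (pi->phy.get_link_info) {
 *		status = ice_aq_get_link_info(pi, false, NULL, NULL);
 *		if (status)
 *			return status;
 *	}
 *
 * after which pi->phy.link_info holds the current link state.
 */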
/**
 * ice_init_flex_flags
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize Rx flex flags
 */
static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	u8 idx = 0;

	/* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
	 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
	 * flexiflags1[3:0] - Not used for flag programming
	 * flexiflags2[7:0] - Tunnel and VLAN types
	 * 2 invalid fields in last index
	 */
	switch (prof_id) {
	/* Rx flex flags are currently programmed for the NIC profiles only.
	 * Different flag bit programming configurations can be added per
	 * profile as needed.
	 */
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
				   ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
				   ICE_FLG_FIN, idx++);
		/* flex flag 1 is not used for flexi-flag programming, skipping
		 * these four FLG64 bits.
		 */
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
				   ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
				   ICE_FLG_EVLAN_x9100, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
				   ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
				   ICE_FLG_TNL0, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Flag programming for profile ID %d not supported\n",
			  prof_id);
	}
}
/**
 * ice_init_flex_flds
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize flex descriptors
 */
static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	enum ice_flex_rx_mdid mdid;

	switch (prof_id) {
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);

		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;

		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);

		ice_init_flex_flags(hw, prof_id);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Field init for profile ID %d not supported\n",
			  prof_id);
	}
}
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}
/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}
#define ICE_FW_LOG_DESC_SIZE(n)	(sizeof(struct ice_aqc_fw_logging_data) + \
	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
#define ICE_FW_LOG_DESC_SIZE_MAX	\
	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
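
/* Worked example (illustrative; assumes ICE_AQC_FW_LOG_ID_MAX were 16): the
 * variable-length buffer holds the fixed header plus (n - 1) additional
 * entries, so ICE_FW_LOG_DESC_SIZE(16) ==
 * sizeof(struct ice_aqc_fw_logging_data) + 15 * sizeof(entry), which is the
 * upper bound ICE_FW_LOG_DESC_SIZE_MAX allocates below.
 */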
/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aqc_fw_logging_data *config;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 size;

	size = ICE_FW_LOG_DESC_SIZE_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return ICE_ERR_NO_MEMORY;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}
/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * the device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging_data *data = NULL;
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kzalloc(ice_hw_to_dev(hw),
						    ICE_FW_LOG_DESC_SIZE_MAX,
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data->entry[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = ICE_FW_LOG_DESC_SIZE(chgs);
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}
/**
 * ice_get_itr_intrl_gran - determine int/intrl granularity
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/intrl granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}
/**
 * ice_get_nvm_version - get cached NVM version data
 * @hw: pointer to the hardware structure
 * @oem_ver: 8 bit NVM version
 * @oem_build: 16 bit NVM build number
 * @oem_patch: 8 bit NVM patch number
 * @ver_hi: high 16 bits of the NVM version
 * @ver_lo: low 16 bits of the NVM version
 */
void
ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
		    u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
{
	struct ice_nvm_info *nvm = &hw->nvm;

	*oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
	*oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
	*oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
			   ICE_OEM_VER_BUILD_SHIFT);
	*ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
	*ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
}
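
/* Usage sketch (illustrative only; exact format varies by driver version):
 * callers combine the outputs into a human-readable NVM version string, e.g.
 *
 *	ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch,
 *			    &ver_hi, &ver_lo);
 *	snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d", ver_hi, ver_lo,
 *		 oem_build, oem_ver, oem_patch);
 */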
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	ice_dev_onetime_setup(hw);

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
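
/* Timing note (illustrative): GLGEN_RSTCTL_GRSTDEL is in 100 ms units, so a
 * raw delay field of, say, 30 yields grst_delay = 30 + 10 = 40 iterations of
 * mdelay(100) above, i.e. up to 3 s of reset delay plus the extra 1 s budgeted
 * for outstanding AQ commands.
 */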
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}
/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};
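
/* Packing note (illustrative): each ICE_CTX_STORE() entry gives the bit width
 * and LSB of one field in the dense context image that ice_set_ctx() builds.
 * For example, per the table above, rlan_ctx->qlen (13 bits at LSB 89) lands
 * at bits 89-101 of the buffer, i.e. spanning bytes 11-12.
 */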
/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};
/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @mask: debug mask
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
void
ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
	     u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (!(mask & hw->debug_mask))
		return;
#endif

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}
/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	enum ice_status status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List and Release Resource
	 * (with resource ID set to Global Config Lock) AdminQ commands are
	 * allowed; all others must block until the package download completes
	 * and the Global Config Lock is released. See also
	 * ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		/* fall-through */
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}
/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}
/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}
/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS -        acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}
/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * Release a common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
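
/* Usage sketch (illustrative only): the acquire/release pair brackets access
 * to a shared HW resource such as the NVM, e.g.
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, timeout);
 *	if (status)
 *		return status;
 *	... access the resource ...
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 */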
/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M	0xFF
	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
			 ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}
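
/* Worked example (illustrative; assumes ICE_MAX_VSI is 768): a
 * valid_functions bitmap of 0x0F means four PFs, so each PF is guaranteed
 * 768 / 4 = 192 VSIs. This is how guar_num_vsi is derived in
 * ice_parse_caps() below.
 */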
/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	char const *prefix;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
		prefix = "dev cap";
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
		prefix = "func cap";
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			caps->valid_functions = number;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: valid_functions (bitmap) = %d\n", prefix,
				  caps->valid_functions);
			break;
		case ICE_AQC_CAPS_SRIOV:
			caps->sr_iov_1_1 = (number == 1);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: sr_iov_1_1 = %d\n", prefix,
				  caps->sr_iov_1_1);
			break;
		case ICE_AQC_CAPS_VF:
			if (dev_p) {
				dev_p->num_vfs_exposed = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_vfs_exposed = %d\n", prefix,
					  dev_p->num_vfs_exposed);
			} else if (func_p) {
				func_p->num_allocd_vfs = number;
				func_p->vf_base_id = logical_id;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_allocd_vfs = %d\n", prefix,
					  func_p->num_allocd_vfs);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: vf_base_id = %d\n", prefix,
					  func_p->vf_base_id);
			}
			break;
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_vsi_allocd_to_host = %d\n",
					  prefix,
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guar_num_vsi =
					ice_get_num_per_func(hw, ICE_MAX_VSI);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: guar_num_vsi (fw) = %d\n",
					  prefix, number);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: guar_num_vsi = %d\n",
					  prefix, func_p->guar_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_DCB:
			caps->dcb = (number == 1);
			caps->active_tc_bitmap = logical_id;
			caps->maxtc = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: dcb = %d\n", prefix, caps->dcb);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: active_tc_bitmap = %d\n", prefix,
				  caps->active_tc_bitmap);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: maxtc = %d\n", prefix, caps->maxtc);
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rss_table_size = %d\n", prefix,
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rss_table_entry_width = %d\n", prefix,
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_rxq = %d\n", prefix,
				  caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rxq_first_id = %d\n", prefix,
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_txq = %d\n", prefix,
				  caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: txq_first_id = %d\n", prefix,
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_msix_vectors = %d\n", prefix,
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: msix_vector_first_id = %d\n", prefix,
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
				  prefix, caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: unknown capability[%d]: 0x%x\n", prefix,
				  i, cap);
			break;
		}
	}
}
/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: Size of the virtual buffer
 * @cap_count: cap count needed if AQ err==ENOMEM
 * @opc: capabilities type to discover - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Get the function(0x000a)/device(0x000b) capabilities description from
 * the firmware.
 */
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
		*cap_count = le32_to_cpu(cmd->count);
	return status;
}
/**
 * ice_discover_caps - get info about the HW
 * @hw: pointer to the hardware structure
 * @opc: capabilities type to discover - pass in the command opcode
 */
static enum ice_status
ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
{
	enum ice_status status;
	u32 cap_count;
	u16 cbuf_len;
	u8 retries;

	/* The driver doesn't know how many capabilities the device will return
	 * so the buffer size required isn't known ahead of time. The driver
	 * starts with cbuf_len and if this turns out to be insufficient, the
	 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
	 * The driver then allocates the buffer based on the count and retries
	 * the operation. So it follows that the retry count is 2.
	 */
#define ICE_GET_CAP_BUF_COUNT	40
#define ICE_GET_CAP_RETRY_COUNT	2

	cap_count = ICE_GET_CAP_BUF_COUNT;
	retries = ICE_GET_CAP_RETRY_COUNT;

	do {
		void *cbuf;

		cbuf_len = (u16)(cap_count *
				 sizeof(struct ice_aqc_list_caps_elem));
		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
		if (!cbuf)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
					      opc, NULL);
		devm_kfree(ice_hw_to_dev(hw), cbuf);

		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
			break;

		/* If ENOMEM is returned, try again with bigger buffer */
	} while (--retries);

	return status;
}
/**
 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
 * @hw: pointer to the hardware structure
 */
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
	struct ice_hw_func_caps *func_caps = &hw->func_caps;
	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
	u32 valid_func, rxq_first_id, txq_first_id;
	u32 msix_vector_first_id, max_mtu;
	u32 num_func = 0;
	u8 i;

	/* cache some func_caps values that should be restored after memset */
	valid_func = func_caps->common_cap.valid_functions;
	txq_first_id = func_caps->common_cap.txq_first_id;
	rxq_first_id = func_caps->common_cap.rxq_first_id;
	msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
	max_mtu = func_caps->common_cap.max_mtu;

	/* unset func capabilities */
	memset(func_caps, 0, sizeof(*func_caps));

	/* restore cached values */
	func_caps->common_cap.valid_functions = valid_func;
	func_caps->common_cap.txq_first_id = txq_first_id;
	func_caps->common_cap.rxq_first_id = rxq_first_id;
	func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
	func_caps->common_cap.max_mtu = max_mtu;

	/* one Tx and one Rx queue in safe mode */
	func_caps->common_cap.num_rxq = 1;
	func_caps->common_cap.num_txq = 1;

	/* two MSIX vectors, one for traffic and one for misc causes */
	func_caps->common_cap.num_msix_vectors = 2;
	func_caps->guar_num_vsi = 1;

	/* cache some dev_caps values that should be restored after memset */
	valid_func = dev_caps->common_cap.valid_functions;
	txq_first_id = dev_caps->common_cap.txq_first_id;
	rxq_first_id = dev_caps->common_cap.rxq_first_id;
	msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
	max_mtu = dev_caps->common_cap.max_mtu;

	/* unset dev capabilities */
	memset(dev_caps, 0, sizeof(*dev_caps));

	/* restore cached values */
	dev_caps->common_cap.valid_functions = valid_func;
	dev_caps->common_cap.txq_first_id = txq_first_id;
	dev_caps->common_cap.rxq_first_id = rxq_first_id;
	dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
	dev_caps->common_cap.max_mtu = max_mtu;

	/* valid_func is a bitmap. get number of functions */
#define ICE_MAX_FUNCS 8
	for (i = 0; i < ICE_MAX_FUNCS; i++)
		if (valid_func & BIT(i))
			num_func++;

	/* one Tx and one Rx queue per function in safe mode */
	dev_caps->common_cap.num_rxq = num_func;
	dev_caps->common_cap.num_txq = num_func;

	/* two MSIX vectors per function */
	dev_caps->common_cap.num_msix_vectors = 2 * num_func;
}
/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
	if (!status)
		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);

	return status;
}
1947 * ice_aq_manage_mac_write - manage MAC address write command
1948 * @hw: pointer to the HW struct
1949 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
1950 * @flags: flags to control write behavior
1951 * @cd: pointer to command details structure or NULL
1953 * This function is used to write MAC address to the NVM (0x0108).
1956 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
1957 struct ice_sq_cd *cd)
1959 struct ice_aqc_manage_mac_write *cmd;
1960 struct ice_aq_desc desc;
1962 cmd = &desc.params.mac_write;
1963 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
1965 cmd->flags = flags;
1967 /* Prep values for flags, sah, sal */
1968 cmd->sah = htons(*((const u16 *)mac_addr));
1969 cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));
1971 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1975 * ice_aq_clear_pxe_mode
1976 * @hw: pointer to the HW struct
1978 * Tell the firmware that the driver is taking over from PXE (0x0110).
1980 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
1982 struct ice_aq_desc desc;
1984 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
1985 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
1987 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1991 * ice_clear_pxe_mode - clear PXE operations mode
1992 * @hw: pointer to the HW struct
1994 * Make sure all PXE mode settings are cleared, including things
1995 * like descriptor fetch/write-back mode.
1997 void ice_clear_pxe_mode(struct ice_hw *hw)
1999 if (ice_check_sq_alive(hw, &hw->adminq))
2000 ice_aq_clear_pxe_mode(hw);
2004 * ice_get_link_speed_based_on_phy_type - returns link speed
2005 * @phy_type_low: lower part of phy_type
2006 * @phy_type_high: higher part of phy_type
2008 * This helper function will convert an entry in PHY type structure
2009 * [phy_type_low, phy_type_high] to its corresponding link speed.
2010 * Note: In the structure of [phy_type_low, phy_type_high], there should
2011 * be one bit set, as this function will convert one PHY type to its
2012 * speed.
2013 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned.
2014 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned.
2015 */
2016 static u16
2017 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2019 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2020 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2022 switch (phy_type_low) {
2023 case ICE_PHY_TYPE_LOW_100BASE_TX:
2024 case ICE_PHY_TYPE_LOW_100M_SGMII:
2025 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2026 break;
2027 case ICE_PHY_TYPE_LOW_1000BASE_T:
2028 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2029 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2030 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2031 case ICE_PHY_TYPE_LOW_1G_SGMII:
2032 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2033 break;
2034 case ICE_PHY_TYPE_LOW_2500BASE_T:
2035 case ICE_PHY_TYPE_LOW_2500BASE_X:
2036 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2037 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2038 break;
2039 case ICE_PHY_TYPE_LOW_5GBASE_T:
2040 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2041 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2042 break;
2043 case ICE_PHY_TYPE_LOW_10GBASE_T:
2044 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2045 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2046 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2047 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2048 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2049 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2050 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2051 break;
2052 case ICE_PHY_TYPE_LOW_25GBASE_T:
2053 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2054 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2055 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2056 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2057 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2058 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2059 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2060 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2061 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2062 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2063 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2064 break;
2065 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2066 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2067 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2068 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2069 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2070 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2071 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2072 break;
2073 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2074 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2075 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2076 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2077 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2078 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2079 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2080 case ICE_PHY_TYPE_LOW_50G_AUI2:
2081 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2082 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2083 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2084 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2085 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2086 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2087 case ICE_PHY_TYPE_LOW_50G_AUI1:
2088 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2089 break;
2090 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2091 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2092 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2093 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2094 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2095 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2096 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2097 case ICE_PHY_TYPE_LOW_100G_AUI4:
2098 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2099 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2100 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2101 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2102 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2103 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2104 break;
2105 default:
2106 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2107 break;
2108 }
2110 switch (phy_type_high) {
2111 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2112 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2113 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2114 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2115 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2116 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2117 break;
2118 default:
2119 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2120 break;
2121 }
2123 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2124 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2125 return ICE_AQ_LINK_SPEED_UNKNOWN;
2126 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2127 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2128 return ICE_AQ_LINK_SPEED_UNKNOWN;
2129 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2130 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2131 return speed_phy_type_low;
2133 return speed_phy_type_high;
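/* Editor's example (not part of the driver) of the one-bit contract
 * described above; the PHY type macros are BIT_ULL() masks:
 *
 *	ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_10GBASE_T, 0)
 *		returns ICE_AQ_LINK_SPEED_10GB;
 *	ice_get_link_speed_based_on_phy_type(0, 0)
 *		returns ICE_AQ_LINK_SPEED_UNKNOWN (no bit set);
 *	ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_10GBASE_T |
 *					     ICE_PHY_TYPE_LOW_25GBASE_T, 0)
 *		returns ICE_AQ_LINK_SPEED_UNKNOWN (more than one bit set).
 */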
2137 * ice_update_phy_type
2138 * @phy_type_low: pointer to the lower part of phy_type
2139 * @phy_type_high: pointer to the higher part of phy_type
2140 * @link_speeds_bitmap: targeted link speeds bitmap
2142 * Note: For the link_speeds_bitmap structure, you can check it at
2143 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2144 * link_speeds_bitmap that includes multiple speeds.
2146 * Each entry in this [phy_type_low, phy_type_high] structure represents
2147 * a certain link speed. This helper function will turn on bits in
2148 * [phy_type_low, phy_type_high] structure based on the value of
2149 * link_speeds_bitmap input parameter.
2150 */
2151 void
2152 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2153 u16 link_speeds_bitmap)
2155 u64 pt_high;
2156 u64 pt_low;
2157 int index;
2158 u16 speed;
2160 /* We first check with low part of phy_type */
2161 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2162 pt_low = BIT_ULL(index);
2163 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2165 if (link_speeds_bitmap & speed)
2166 *phy_type_low |= BIT_ULL(index);
2169 /* We then check with high part of phy_type */
2170 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2171 pt_high = BIT_ULL(index);
2172 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2174 if (link_speeds_bitmap & speed)
2175 *phy_type_high |= BIT_ULL(index);
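/* Editor's sketch of a hypothetical caller: collect every PHY type that
 * links at 10G or 25G into a pair of masks for a Set PHY config request:
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *
 * phy_low/phy_high can then be written into ice_aqc_set_phy_cfg_data.
 */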
2180 * ice_aq_set_phy_cfg
2181 * @hw: pointer to the HW struct
2182 * @lport: logical port number
2183 * @cfg: structure with PHY configuration data to be set
2184 * @cd: pointer to command details structure or NULL
2186 * Set the various PHY configuration parameters supported on the Port.
2187 * One or more of the Set PHY config parameters may be ignored in an MFP
2188 * mode as the PF may not have the privilege to set some of the PHY Config
2189 * parameters. This status will be indicated by the command response (0x0601).
2190 */
2191 enum ice_status
2192 ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
2193 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2195 struct ice_aq_desc desc;
2197 if (!cfg)
2198 return ICE_ERR_PARAM;
2200 /* Ensure that only valid bits of cfg->caps can be turned on. */
2201 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2202 ice_debug(hw, ICE_DBG_PHY,
2203 "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2206 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2209 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2210 desc.params.set_phy.lport_num = lport;
2211 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2213 ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
2214 (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2215 ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
2216 (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2217 ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
2218 ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
2219 cfg->low_power_ctrl);
2220 ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
2221 ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
2222 ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
2224 return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2228 * ice_update_link_info - update status of the HW network link
2229 * @pi: port info structure of the interested logical port
2231 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2233 struct ice_link_status *li;
2234 enum ice_status status;
2236 if (!pi)
2237 return ICE_ERR_PARAM;
2239 li = &pi->phy.link_info;
2241 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2242 if (status)
2243 return status;
2245 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2246 struct ice_aqc_get_phy_caps_data *pcaps;
2247 struct ice_hw *hw;
2249 hw = pi->hw;
2250 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2251 GFP_KERNEL);
2252 if (!pcaps)
2253 return ICE_ERR_NO_MEMORY;
2255 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2256 pcaps, NULL);
2257 if (!status)
2258 memcpy(li->module_type, &pcaps->module_type,
2259 sizeof(li->module_type));
2261 devm_kfree(ice_hw_to_dev(hw), pcaps);
2262 }
2264 return status;
2268 * ice_set_fc
2269 * @pi: port information structure
2270 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2271 * @ena_auto_link_update: enable automatic link update
2273 * Set the requested flow control mode.
2274 */
2275 enum ice_status
2276 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2278 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2279 struct ice_aqc_get_phy_caps_data *pcaps;
2280 enum ice_status status;
2281 u8 pause_mask = 0x0;
2282 struct ice_hw *hw;
2284 if (!pi || !aq_failures)
2285 return ICE_ERR_PARAM;
2287 *aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
2288 hw = pi->hw;
2289 switch (pi->fc.req_mode) {
2290 case ICE_FC_FULL:
2291 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2292 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2293 break;
2294 case ICE_FC_RX_PAUSE:
2295 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2296 break;
2297 case ICE_FC_TX_PAUSE:
2298 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2299 break;
2300 default:
2301 break;
2302 }
2304 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
2305 if (!pcaps)
2306 return ICE_ERR_NO_MEMORY;
2308 /* Get the current PHY config */
2309 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2310 NULL);
2311 if (status) {
2312 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2313 goto out;
2314 }
2316 /* clear the old pause settings */
2317 cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2318 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2320 /* set the new capabilities */
2321 cfg.caps |= pause_mask;
2323 /* If the capabilities have changed, then set the new config */
2324 if (cfg.caps != pcaps->caps) {
2325 int retry_count, retry_max = 10;
2327 /* Auto restart link so settings take effect */
2328 if (ena_auto_link_update)
2329 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2330 /* Copy over all the old settings */
2331 cfg.phy_type_high = pcaps->phy_type_high;
2332 cfg.phy_type_low = pcaps->phy_type_low;
2333 cfg.low_power_ctrl = pcaps->low_power_ctrl;
2334 cfg.eee_cap = pcaps->eee_cap;
2335 cfg.eeer_value = pcaps->eeer_value;
2336 cfg.link_fec_opt = pcaps->link_fec_options;
2338 status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
2339 if (status) {
2340 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2341 goto out;
2342 }
2344 /* Update the link info
2345 * It sometimes takes a really long time for link to
2346 * come back from the atomic reset. Thus, we wait a
2347 * little bit.
2348 */
2349 for (retry_count = 0; retry_count < retry_max; retry_count++) {
2350 status = ice_update_link_info(pi);
2352 if (!status)
2353 break;
2355 mdelay(100);
2356 }
2358 if (status)
2359 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2360 }
2362 out:
2363 devm_kfree(ice_hw_to_dev(hw), pcaps);
2364 return status;
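/* Editor's sketch of a typical ice_set_fc() caller (locals and the dev
 * pointer are hypothetical): request symmetric pause and auto restart:
 *
 *	u8 aq_fail;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	if (ice_set_fc(pi, &aq_fail, true))
 *		dev_dbg(dev, "setting flow control failed at stage %u\n",
 *			aq_fail);
 */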
2368 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2369 * @caps: PHY ability structure to copy data from
2370 * @cfg: PHY configuration structure to copy data to
2372 * Helper function to copy AQC PHY get ability data to PHY set configuration
2373 * data structure.
2374 */
2375 void
2376 ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
2377 struct ice_aqc_set_phy_cfg_data *cfg)
2379 if (!caps || !cfg)
2380 return;
2382 cfg->phy_type_low = caps->phy_type_low;
2383 cfg->phy_type_high = caps->phy_type_high;
2384 cfg->caps = caps->caps;
2385 cfg->low_power_ctrl = caps->low_power_ctrl;
2386 cfg->eee_cap = caps->eee_cap;
2387 cfg->eeer_value = caps->eeer_value;
2388 cfg->link_fec_opt = caps->link_fec_options;
2392 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2393 * @cfg: PHY configuration data to set FEC mode
2394 * @fec: FEC mode to configure
2396 * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC
2397 * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps
2398 * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling.
2399 */
2400 void
2401 ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
2403 switch (fec) {
2404 case ICE_FEC_BASER:
2405 /* Clear RS bits, and AND BASE-R ability
2406 * bits and OR request bits.
2407 */
2408 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2409 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
2410 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2411 ICE_AQC_PHY_FEC_25G_KR_REQ;
2412 break;
2413 case ICE_FEC_RS:
2414 /* Clear BASE-R bits, and AND RS ability
2415 * bits and OR request bits.
2416 */
2417 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
2418 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2419 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
2420 break;
2421 case ICE_FEC_NONE:
2422 /* Clear all FEC option bits. */
2423 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
2424 break;
2425 case ICE_FEC_AUTO:
2426 /* AND auto FEC bit, and all caps bits. */
2427 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
2428 break;
2429 }
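/* Editor's sketch of the caller contract documented above (error handling
 * elided, locals hypothetical): read abilities, copy them into cfg, keep
 * the auto-FEC bit, then request RS-FEC:
 *
 *	ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
 *	ice_copy_phy_caps_to_cfg(pcaps, &cfg);
 *	cfg.caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
 *	ice_cfg_phy_fec(&cfg, ICE_FEC_RS);
 *	ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
 */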
2433 * ice_get_link_status - get status of the HW network link
2434 * @pi: port information structure
2435 * @link_up: pointer to bool (true/false = linkup/linkdown)
2437 * Variable link_up is true if link is up, false if link is down.
2438 * The variable link_up is invalid if status is non-zero. As a
2439 * result of this call, link status reporting becomes enabled.
2440 */
2441 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
2443 struct ice_phy_info *phy_info;
2444 enum ice_status status = 0;
2446 if (!pi || !link_up)
2447 return ICE_ERR_PARAM;
2449 phy_info = &pi->phy;
2451 if (phy_info->get_link_info) {
2452 status = ice_update_link_info(pi);
2454 if (status)
2455 ice_debug(pi->hw, ICE_DBG_LINK,
2456 "get link status error, status = %d\n",
2457 status);
2458 }
2460 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
2462 return status;
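/* Editor's sketch of a hypothetical caller: poll the link once and update
 * the carrier state (netdev is assumed, not from this file):
 *
 *	bool link_up;
 *
 *	if (!ice_get_link_status(pi, &link_up) && link_up)
 *		netif_carrier_on(netdev);
 */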
2466 * ice_aq_set_link_restart_an
2467 * @pi: pointer to the port information structure
2468 * @ena_link: if true: enable link, if false: disable link
2469 * @cd: pointer to command details structure or NULL
2471 * Sets up the link and restarts the Auto-Negotiation over the link.
2472 */
2473 enum ice_status
2474 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
2475 struct ice_sq_cd *cd)
2477 struct ice_aqc_restart_an *cmd;
2478 struct ice_aq_desc desc;
2480 cmd = &desc.params.restart_an;
2482 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
2484 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2485 cmd->lport_num = pi->lport;
2486 if (ena_link)
2487 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2488 else
2489 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2491 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2495 * ice_aq_set_event_mask
2496 * @hw: pointer to the HW struct
2497 * @port_num: port number of the physical function
2498 * @mask: event mask to be set
2499 * @cd: pointer to command details structure or NULL
2501 * Set event mask (0x0613)
2502 */
2503 enum ice_status
2504 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
2505 struct ice_sq_cd *cd)
2507 struct ice_aqc_set_event_mask *cmd;
2508 struct ice_aq_desc desc;
2510 cmd = &desc.params.set_event_mask;
2512 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
2514 cmd->lport_num = port_num;
2516 cmd->event_mask = cpu_to_le16(mask);
2517 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2521 * ice_aq_set_mac_loopback
2522 * @hw: pointer to the HW struct
2523 * @ena_lpbk: Enable or Disable loopback
2524 * @cd: pointer to command details structure or NULL
2526 * Enable/disable loopback on a given port.
2527 */
2528 enum ice_status
2529 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
2531 struct ice_aqc_set_mac_lb *cmd;
2532 struct ice_aq_desc desc;
2534 cmd = &desc.params.set_mac_lb;
2536 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
2537 if (ena_lpbk)
2538 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
2540 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2544 * ice_aq_set_port_id_led
2545 * @pi: pointer to the port information
2546 * @is_orig_mode: is this LED set to original mode (by the net-list)
2547 * @cd: pointer to command details structure or NULL
2549 * Set LED value for the given port (0x06e9)
2550 */
2551 enum ice_status
2552 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
2553 struct ice_sq_cd *cd)
2555 struct ice_aqc_set_port_id_led *cmd;
2556 struct ice_hw *hw = pi->hw;
2557 struct ice_aq_desc desc;
2559 cmd = &desc.params.set_port_id_led;
2561 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
2563 if (is_orig_mode)
2564 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
2565 else
2566 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
2568 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
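/* Editor's sketch: an ethtool set_phys_id style caller (hypothetical)
 * blinks the port LED for identification, then restores the net-list
 * default:
 *
 *	ice_aq_set_port_id_led(pi, false, NULL);	(start blinking)
 *	...
 *	ice_aq_set_port_id_led(pi, true, NULL);		(original mode)
 */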
2572 * __ice_aq_get_set_rss_lut
2573 * @hw: pointer to the hardware structure
2574 * @vsi_id: VSI FW index
2575 * @lut_type: LUT table type
2576 * @lut: pointer to the LUT buffer provided by the caller
2577 * @lut_size: size of the LUT buffer
2578 * @glob_lut_idx: global LUT index
2579 * @set: set true to set the table, false to get the table
2581 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
2583 static enum ice_status
2584 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
2585 u16 lut_size, u8 glob_lut_idx, bool set)
2587 struct ice_aqc_get_set_rss_lut *cmd_resp;
2588 struct ice_aq_desc desc;
2589 enum ice_status status;
2592 cmd_resp = &desc.params.get_set_rss_lut;
2594 if (set) {
2595 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
2596 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2597 } else {
2598 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
2599 }
2601 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2602 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
2603 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
2604 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
2606 switch (lut_type) {
2607 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
2608 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
2609 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
2610 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
2611 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
2612 break;
2613 default:
2614 status = ICE_ERR_PARAM;
2615 goto ice_aq_get_set_rss_lut_exit;
2618 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
2619 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
2620 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
2622 if (!set)
2623 goto ice_aq_get_set_rss_lut_send;
2624 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2625 if (!set)
2626 goto ice_aq_get_set_rss_lut_send;
2627 } else {
2628 goto ice_aq_get_set_rss_lut_send;
2629 }
2631 /* LUT size is only valid for Global and PF table types */
2632 switch (lut_size) {
2633 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
2634 break;
2635 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
2636 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
2637 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2638 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2639 break;
2640 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
2641 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2642 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
2643 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2644 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2645 break;
2646 }
2647 /* fall-through */
2648 default:
2649 status = ICE_ERR_PARAM;
2650 goto ice_aq_get_set_rss_lut_exit;
2651 }
2653 ice_aq_get_set_rss_lut_send:
2654 cmd_resp->flags = cpu_to_le16(flags);
2655 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
2657 ice_aq_get_set_rss_lut_exit:
2658 return status;
2662 * ice_aq_get_rss_lut
2663 * @hw: pointer to the hardware structure
2664 * @vsi_handle: software VSI handle
2665 * @lut_type: LUT table type
2666 * @lut: pointer to the LUT buffer provided by the caller
2667 * @lut_size: size of the LUT buffer
2669 * get the RSS lookup table, PF or VSI type
2670 */
2671 enum ice_status
2672 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2673 u8 *lut, u16 lut_size)
2675 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2676 return ICE_ERR_PARAM;
2678 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2679 lut_type, lut, lut_size, 0, false);
2683 * ice_aq_set_rss_lut
2684 * @hw: pointer to the hardware structure
2685 * @vsi_handle: software VSI handle
2686 * @lut_type: LUT table type
2687 * @lut: pointer to the LUT buffer provided by the caller
2688 * @lut_size: size of the LUT buffer
2690 * set the RSS lookup table, PF or VSI type
2691 */
2692 enum ice_status
2693 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2694 u8 *lut, u16 lut_size)
2696 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2697 return ICE_ERR_PARAM;
2699 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2700 lut_type, lut, lut_size, 0, true);
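/* Editor's sketch (hypothetical values): program a 512-entry PF LUT that
 * spreads hash results across four queues:
 *
 *	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
 *	u16 i;
 *
 *	for (i = 0; i < sizeof(lut); i++)
 *		lut[i] = i % 4;
 *	ice_aq_set_rss_lut(hw, vsi_handle,
 *			   ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
 *			   lut, sizeof(lut));
 */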
2704 * __ice_aq_get_set_rss_key
2705 * @hw: pointer to the HW struct
2706 * @vsi_id: VSI FW index
2707 * @key: pointer to key info struct
2708 * @set: set true to set the key, false to get the key
2710 * get (0x0B04) or set (0x0B02) the RSS key per VSI
2711 */
2712 static enum
2713 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
2714 struct ice_aqc_get_set_rss_keys *key,
2715 bool set)
2717 struct ice_aqc_get_set_rss_key *cmd_resp;
2718 u16 key_size = sizeof(*key);
2719 struct ice_aq_desc desc;
2721 cmd_resp = &desc.params.get_set_rss_key;
2723 if (set) {
2724 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
2725 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2726 } else {
2727 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
2728 }
2730 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2731 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
2732 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
2733 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
2735 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
2739 * ice_aq_get_rss_key
2740 * @hw: pointer to the HW struct
2741 * @vsi_handle: software VSI handle
2742 * @key: pointer to key info struct
2744 * get the RSS key per VSI
2745 */
2746 enum ice_status
2747 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
2748 struct ice_aqc_get_set_rss_keys *key)
2750 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
2751 return ICE_ERR_PARAM;
2753 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2754 key, false);
2758 * ice_aq_set_rss_key
2759 * @hw: pointer to the HW struct
2760 * @vsi_handle: software VSI handle
2761 * @keys: pointer to key info struct
2763 * set the RSS key per VSI
2764 */
2765 enum ice_status
2766 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
2767 struct ice_aqc_get_set_rss_keys *keys)
2769 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
2770 return ICE_ERR_PARAM;
2772 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2773 keys, true);
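/* Editor's sketch of a hypothetical caller: fill the standard RSS key
 * with random bytes before programming it for the VSI:
 *
 *	struct ice_aqc_get_set_rss_keys keys = { 0 };
 *
 *	netdev_rss_key_fill(keys.standard_rss_key,
 *			    sizeof(keys.standard_rss_key));
 *	ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */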
2777 * ice_aq_add_lan_txq
2778 * @hw: pointer to the hardware structure
2779 * @num_qgrps: Number of added queue groups
2780 * @qg_list: list of queue groups to be added
2781 * @buf_size: size of buffer for indirect command
2782 * @cd: pointer to command details structure or NULL
2784 * Add Tx LAN queue (0x0C30)
2787 * Prior to calling add Tx LAN queue:
2788 * Initialize the following as part of the Tx queue context:
2789 * Completion queue ID if the queue uses Completion queue, Quanta profile,
2790 * Cache profile and Packet shaper profile.
2792 * After add Tx LAN queue AQ command is completed:
2793 * Interrupts should be associated with specific queues,
2794 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
2795 * AQ command.
2796 */
2797 static enum ice_status
2798 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2799 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
2800 struct ice_sq_cd *cd)
2802 u16 i, sum_header_size, sum_q_size = 0;
2803 struct ice_aqc_add_tx_qgrp *list;
2804 struct ice_aqc_add_txqs *cmd;
2805 struct ice_aq_desc desc;
2807 cmd = &desc.params.add_txqs;
2809 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
2811 if (!qg_list)
2812 return ICE_ERR_PARAM;
2814 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2815 return ICE_ERR_PARAM;
2817 sum_header_size = num_qgrps *
2818 (sizeof(*qg_list) - sizeof(*qg_list->txqs));
2820 list = qg_list;
2821 for (i = 0; i < num_qgrps; i++) {
2822 struct ice_aqc_add_txqs_perq *q = list->txqs;
2824 sum_q_size += list->num_txqs * sizeof(*q);
2825 list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
2828 if (buf_size != (sum_header_size + sum_q_size))
2829 return ICE_ERR_PARAM;
2831 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2833 cmd->num_qgrps = num_qgrps;
2835 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
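/* Editor's note on the buf_size check above, as a worked example: for one
 * queue group carrying a single queue, the expected size is the group
 * header (sizeof(*qg_list) minus the one txqs[] element it embeds) plus
 * one per-queue entry, i.e. exactly sizeof(struct ice_aqc_add_tx_qgrp):
 *
 *	buf_size = 1 * (sizeof(*qg_list) - sizeof(*qg_list->txqs)) +
 *		   1 * sizeof(struct ice_aqc_add_txqs_perq);
 */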
2839 * ice_aq_dis_lan_txq
2840 * @hw: pointer to the hardware structure
2841 * @num_qgrps: number of groups in the list
2842 * @qg_list: the list of groups to disable
2843 * @buf_size: the total size of the qg_list buffer in bytes
2844 * @rst_src: if called due to reset, specifies the reset source
2845 * @vmvf_num: the relative VM or VF number that is undergoing the reset
2846 * @cd: pointer to command details structure or NULL
2848 * Disable LAN Tx queue (0x0C31)
2850 static enum ice_status
2851 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2852 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
2853 enum ice_disq_rst_src rst_src, u16 vmvf_num,
2854 struct ice_sq_cd *cd)
2856 struct ice_aqc_dis_txqs *cmd;
2857 struct ice_aq_desc desc;
2858 enum ice_status status;
2859 u16 i, sz = 0;
2861 cmd = &desc.params.dis_txqs;
2862 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
2864 /* qg_list can be NULL only in VM/VF reset flow */
2865 if (!qg_list && !rst_src)
2866 return ICE_ERR_PARAM;
2868 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2869 return ICE_ERR_PARAM;
2871 cmd->num_entries = num_qgrps;
2873 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
2874 ICE_AQC_Q_DIS_TIMEOUT_M);
2876 switch (rst_src) {
2877 case ICE_VM_RESET:
2878 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
2879 cmd->vmvf_and_timeout |=
2880 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
2881 break;
2882 case ICE_VF_RESET:
2883 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
2884 /* In this case, FW expects vmvf_num to be absolute VF ID */
2885 cmd->vmvf_and_timeout |=
2886 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
2887 ICE_AQC_Q_DIS_VMVF_NUM_M);
2888 break;
2889 case ICE_NO_RESET:
2890 default:
2891 break;
2892 }
2894 /* flush pipe on time out */
2895 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
2896 /* If no queue group info, we are in a reset flow. Issue the AQ */
2897 if (!qg_list)
2898 goto do_aq;
2900 /* set RD bit to indicate that command buffer is provided by the driver
2901 * and it needs to be read by the firmware
2903 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2905 for (i = 0; i < num_qgrps; ++i) {
2906 /* Calculate the size taken up by the queue IDs in this group */
2907 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
2909 /* Add the size of the group header */
2910 sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
2912 /* If the num of queues is even, add 2 bytes of padding */
2913 if ((qg_list[i].num_qs % 2) == 0)
2914 sz += 2;
2915 }
2917 if (buf_size != sz)
2918 return ICE_ERR_PARAM;
2920 do_aq:
2921 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2922 if (status) {
2923 if (!qg_list)
2924 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
2925 vmvf_num, hw->adminq.sq_last_status);
2926 else
2927 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
2928 le16_to_cpu(qg_list[0].q_id[0]),
2929 hw->adminq.sq_last_status);
2930 }
2931 return status;
2934 /* End of FW Admin Queue command wrappers */
2937 * ice_write_byte - write a byte to a packed context structure
2938 * @src_ctx: the context structure to read from
2939 * @dest_ctx: the context to be written to
2940 * @ce_info: a description of the struct to be filled
2941 */
2942 static void
2943 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2945 u8 src_byte, dest_byte, mask;
2946 u8 *from, *dest;
2947 u16 shift_width;
2949 /* copy from the next struct field */
2950 from = src_ctx + ce_info->offset;
2952 /* prepare the bits and mask */
2953 shift_width = ce_info->lsb % 8;
2954 mask = (u8)(BIT(ce_info->width) - 1);
2956 src_byte = *from;
2957 src_byte &= mask;
2959 /* shift to correct alignment */
2960 mask <<= shift_width;
2961 src_byte <<= shift_width;
2963 /* get the current bits from the target bit string */
2964 dest = dest_ctx + (ce_info->lsb / 8);
2966 memcpy(&dest_byte, dest, sizeof(dest_byte));
2968 dest_byte &= ~mask; /* get the bits not changing */
2969 dest_byte |= src_byte; /* add in the new bits */
2971 /* put it all back */
2972 memcpy(dest, &dest_byte, sizeof(dest_byte));
2976 * ice_write_word - write a word to a packed context structure
2977 * @src_ctx: the context structure to read from
2978 * @dest_ctx: the context to be written to
2979 * @ce_info: a description of the struct to be filled
2980 */
2981 static void
2982 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2984 u16 src_word, mask;
2985 __le16 dest_word;
2986 u8 *from, *dest;
2987 u16 shift_width;
2989 /* copy from the next struct field */
2990 from = src_ctx + ce_info->offset;
2992 /* prepare the bits and mask */
2993 shift_width = ce_info->lsb % 8;
2994 mask = BIT(ce_info->width) - 1;
2996 /* don't swizzle the bits until after the mask because the mask bits
2997 * will be in a different bit position on big endian machines
2998 */
2999 src_word = *(u16 *)from;
3000 src_word &= mask;
3002 /* shift to correct alignment */
3003 mask <<= shift_width;
3004 src_word <<= shift_width;
3006 /* get the current bits from the target bit string */
3007 dest = dest_ctx + (ce_info->lsb / 8);
3009 memcpy(&dest_word, dest, sizeof(dest_word));
3011 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
3012 dest_word |= cpu_to_le16(src_word); /* add in the new bits */
3014 /* put it all back */
3015 memcpy(dest, &dest_word, sizeof(dest_word));
3019 * ice_write_dword - write a dword to a packed context structure
3020 * @src_ctx: the context structure to read from
3021 * @dest_ctx: the context to be written to
3022 * @ce_info: a description of the struct to be filled
3023 */
3024 static void
3025 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3027 u32 src_dword, mask;
3028 __le32 dest_dword;
3029 u8 *from, *dest;
3030 u16 shift_width;
3032 /* copy from the next struct field */
3033 from = src_ctx + ce_info->offset;
3035 /* prepare the bits and mask */
3036 shift_width = ce_info->lsb % 8;
3038 /* if the field width is exactly 32 on an x86 machine, then the shift
3039 * operation will not work because the SHL instructions count is masked
3040 * to 5 bits so the shift will do nothing
3042 if (ce_info->width < 32)
3043 mask = BIT(ce_info->width) - 1;
3044 else
3045 mask = (u32)~0;
3047 /* don't swizzle the bits until after the mask because the mask bits
3048 * will be in a different bit position on big endian machines
3050 src_dword = *(u32 *)from;
3051 src_dword &= mask;
3053 /* shift to correct alignment */
3054 mask <<= shift_width;
3055 src_dword <<= shift_width;
3057 /* get the current bits from the target bit string */
3058 dest = dest_ctx + (ce_info->lsb / 8);
3060 memcpy(&dest_dword, dest, sizeof(dest_dword));
3062 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
3063 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
3065 /* put it all back */
3066 memcpy(dest, &dest_dword, sizeof(dest_dword));
3070 * ice_write_qword - write a qword to a packed context structure
3071 * @src_ctx: the context structure to read from
3072 * @dest_ctx: the context to be written to
3073 * @ce_info: a description of the struct to be filled
3074 */
3075 static void
3076 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3078 u64 src_qword, mask;
3079 __le64 dest_qword;
3080 u8 *from, *dest;
3081 u16 shift_width;
3083 /* copy from the next struct field */
3084 from = src_ctx + ce_info->offset;
3086 /* prepare the bits and mask */
3087 shift_width = ce_info->lsb % 8;
3089 /* if the field width is exactly 64 on an x86 machine, then the shift
3090 * operation will not work because the SHL instructions count is masked
3091 * to 6 bits so the shift will do nothing
3093 if (ce_info->width < 64)
3094 mask = BIT_ULL(ce_info->width) - 1;
3095 else
3096 mask = (u64)~0;
3098 /* don't swizzle the bits until after the mask because the mask bits
3099 * will be in a different bit position on big endian machines
3101 src_qword = *(u64 *)from;
3102 src_qword &= mask;
3104 /* shift to correct alignment */
3105 mask <<= shift_width;
3106 src_qword <<= shift_width;
3108 /* get the current bits from the target bit string */
3109 dest = dest_ctx + (ce_info->lsb / 8);
3111 memcpy(&dest_qword, dest, sizeof(dest_qword));
3113 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
3114 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
3116 /* put it all back */
3117 memcpy(dest, &dest_qword, sizeof(dest_qword));
3121 * ice_set_ctx - set context bits in packed structure
3122 * @src_ctx: pointer to a generic non-packed context structure
3123 * @dest_ctx: pointer to memory for the packed structure
3124 * @ce_info: a description of the structure to be transformed
3125 */
3126 enum ice_status
3127 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3129 int f;
3131 for (f = 0; ce_info[f].width; f++) {
3132 /* We have to deal with each element of the FW response
3133 * using the correct size so that we are correct regardless
3134 * of the endianness of the machine.
3135 */
3136 switch (ce_info[f].size_of) {
3137 case sizeof(u8):
3138 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3139 break;
3140 case sizeof(u16):
3141 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3142 break;
3143 case sizeof(u32):
3144 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3145 break;
3146 case sizeof(u64):
3147 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3148 break;
3149 default:
3150 return ICE_ERR_INVAL_SIZE;
3151 }
3152 }
3154 return 0;
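/* Editor's sketch of how a context descriptor table drives ice_set_ctx();
 * the entries below are hypothetical, the driver's real tables (such as
 * ice_tlan_ctx_info) use the same ICE_CTX_STORE(struct, field, width, lsb)
 * helper to record each field's size, bit width and packed position:
 *
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		ICE_CTX_STORE(my_ctx, base, 57, 0),
 *		ICE_CTX_STORE(my_ctx, qlen, 13, 57),
 *		{ 0 }
 *	};
 *
 *	ice_set_ctx((u8 *)&my_unpacked, packed_buf, my_ctx_info);
 */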
3158 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3159 * @hw: pointer to the HW struct
3160 * @vsi_handle: software VSI handle
3161 * @tc: TC number
3162 * @q_handle: software queue handle
3163 */
3164 static struct ice_q_ctx *
3165 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3167 struct ice_vsi_ctx *vsi;
3168 struct ice_q_ctx *q_ctx;
3170 vsi = ice_get_vsi_ctx(hw, vsi_handle);
3171 if (!vsi)
3172 return NULL;
3173 if (q_handle >= vsi->num_lan_q_entries[tc])
3174 return NULL;
3175 if (!vsi->lan_q_ctx[tc])
3176 return NULL;
3177 q_ctx = vsi->lan_q_ctx[tc];
3178 return &q_ctx[q_handle];
3182 * ice_ena_vsi_txq
3183 * @pi: port information structure
3184 * @vsi_handle: software VSI handle
3185 * @tc: TC number
3186 * @q_handle: software queue handle
3187 * @num_qgrps: Number of added queue groups
3188 * @buf: list of queue groups to be added
3189 * @buf_size: size of buffer for indirect command
3190 * @cd: pointer to command details structure or NULL
3192 * This function adds one LAN queue
3193 */
3194 enum ice_status
3195 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3196 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
3197 struct ice_sq_cd *cd)
3199 struct ice_aqc_txsched_elem_data node = { 0 };
3200 struct ice_sched_node *parent;
3201 struct ice_q_ctx *q_ctx;
3202 enum ice_status status;
3203 struct ice_hw *hw;
3205 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3206 return ICE_ERR_CFG;
3208 if (num_qgrps > 1 || buf->num_txqs > 1)
3209 return ICE_ERR_MAX_LIMIT;
3211 hw = pi->hw;
3213 if (!ice_is_vsi_valid(hw, vsi_handle))
3214 return ICE_ERR_PARAM;
3216 mutex_lock(&pi->sched_lock);
3218 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
3219 if (!q_ctx) {
3220 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
3221 q_handle);
3222 status = ICE_ERR_PARAM;
3223 goto ena_txq_exit;
3224 }
3226 /* find a parent node */
3227 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
3228 ICE_SCHED_NODE_OWNER_LAN);
3229 if (!parent) {
3230 status = ICE_ERR_PARAM;
3231 goto ena_txq_exit;
3232 }
3234 buf->parent_teid = parent->info.node_teid;
3235 node.parent_teid = parent->info.node_teid;
3236 /* Mark that the values in the "generic" section as valid. The default
3237 * value in the "generic" section is zero. This means that :
3238 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
3239 * - 0 priority among siblings, indicated by Bit 1-3.
3240 * - WFQ, indicated by Bit 4.
3241 * - 0 Adjustment value is used in PSM credit update flow, indicated by
3242 * Bit 5-6.
3243 * - Bit 7 is reserved.
3244 * Without setting the generic section as valid in valid_sections, the
3245 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
3247 buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
3249 /* add the LAN queue */
3250 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
3251 if (status) {
3252 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
3253 le16_to_cpu(buf->txqs[0].txq_id),
3254 hw->adminq.sq_last_status);
3255 goto ena_txq_exit;
3256 }
3258 node.node_teid = buf->txqs[0].q_teid;
3259 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3260 q_ctx->q_handle = q_handle;
3262 /* add a leaf node into scheduler tree queue layer */
3263 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
3265 ena_txq_exit:
3266 mutex_unlock(&pi->sched_lock);
3267 return status;
3271 * ice_dis_vsi_txq
3272 * @pi: port information structure
3273 * @vsi_handle: software VSI handle
3274 * @tc: TC number
3275 * @num_queues: number of queues
3276 * @q_handles: pointer to software queue handle array
3277 * @q_ids: pointer to the q_id array
3278 * @q_teids: pointer to queue node teids
3279 * @rst_src: if called due to reset, specifies the reset source
3280 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3281 * @cd: pointer to command details structure or NULL
3283 * This function removes queues and their corresponding nodes in SW DB
3284 */
3285 enum ice_status
3286 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
3287 u16 *q_handles, u16 *q_ids, u32 *q_teids,
3288 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3289 struct ice_sq_cd *cd)
3291 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
3292 struct ice_aqc_dis_txq_item qg_list;
3293 struct ice_q_ctx *q_ctx;
3294 u16 i;
3296 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3297 return ICE_ERR_CFG;
3299 if (!num_queues) {
3300 /* if queue is disabled already yet the disable queue command
3301 * has to be sent to complete the VF reset, then call
3302 * ice_aq_dis_lan_txq without any queue information
3303 */
3304 if (rst_src)
3305 return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
3306 vmvf_num, NULL);
3307 return ICE_ERR_CFG;
3308 }
3310 mutex_lock(&pi->sched_lock);
3312 for (i = 0; i < num_queues; i++) {
3313 struct ice_sched_node *node;
3315 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
3316 if (!node)
3317 continue;
3318 q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
3319 if (!q_ctx) {
3320 ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
3321 q_handles[i]);
3322 continue;
3323 }
3324 if (q_ctx->q_handle != q_handles[i]) {
3325 ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
3326 q_ctx->q_handle, q_handles[i]);
3327 continue;
3328 }
3329 qg_list.parent_teid = node->info.parent_teid;
3330 qg_list.num_qs = 1;
3331 qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
3332 status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
3333 sizeof(qg_list), rst_src, vmvf_num,
3334 cd);
3335 if (status)
3336 break;
3338 ice_free_sched_node(pi, node);
3339 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
3341 mutex_unlock(&pi->sched_lock);
3342 return status;
3346 * ice_cfg_vsi_qs - configure the new/existing VSI queues
3347 * @pi: port information structure
3348 * @vsi_handle: software VSI handle
3349 * @tc_bitmap: TC bitmap
3350 * @maxqs: max queues array per TC
3351 * @owner: LAN or RDMA
3353 * This function adds/updates the VSI queues per TC.
3355 static enum ice_status
3356 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3357 u16 *maxqs, u8 owner)
3359 enum ice_status status = 0;
3360 u8 i;
3362 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3363 return ICE_ERR_CFG;
3365 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3366 return ICE_ERR_PARAM;
3368 mutex_lock(&pi->sched_lock);
3370 ice_for_each_traffic_class(i) {
3371 /* configuration is possible only if TC node is present */
3372 if (!ice_sched_get_tc_node(pi, i))
3373 continue;
3375 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
3376 ice_is_tc_ena(tc_bitmap, i));
3377 if (status)
3378 break;
3379 }
3381 mutex_unlock(&pi->sched_lock);
3382 return status;
3386 * ice_cfg_vsi_lan - configure VSI LAN queues
3387 * @pi: port information structure
3388 * @vsi_handle: software VSI handle
3389 * @tc_bitmap: TC bitmap
3390 * @max_lanqs: max LAN queues array per TC
3392 * This function adds/updates the VSI LAN queues per TC.
3393 */
3394 enum ice_status
3395 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3396 u16 *max_lanqs)
3398 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
3399 ICE_SCHED_NODE_OWNER_LAN);
3403 * ice_replay_pre_init - replay pre initialization
3404 * @hw: pointer to the HW struct
3406 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
3408 static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
3410 struct ice_switch_info *sw = hw->switch_info;
3411 u8 i;
3413 /* Delete old entries from replay filter list head if there is any */
3414 ice_rm_all_sw_replay_rule_info(hw);
3415 /* In start of replay, move entries into replay_rules list, it
3416 * will allow adding rules entries back to filt_rules list,
3417 * which is operational list.
3419 for (i = 0; i < ICE_SW_LKUP_LAST; i++)
3420 list_replace_init(&sw->recp_list[i].filt_rules,
3421 &sw->recp_list[i].filt_replay_rules);
3423 return 0;
3427 * ice_replay_vsi - replay VSI configuration
3428 * @hw: pointer to the HW struct
3429 * @vsi_handle: driver VSI handle
3431 * Restore all VSI configuration after reset. It is required to call this
3432 * function with main VSI first.
3434 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
3436 enum ice_status status;
3438 if (!ice_is_vsi_valid(hw, vsi_handle))
3439 return ICE_ERR_PARAM;
3441 /* Replay pre-initialization if there is any */
3442 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
3443 status = ice_replay_pre_init(hw);
3444 if (status)
3445 return status;
3446 }
3448 /* Replay per VSI all filters */
3449 status = ice_replay_vsi_all_fltr(hw, vsi_handle);
3450 return status;
3454 * ice_replay_post - post replay configuration cleanup
3455 * @hw: pointer to the HW struct
3457 * Post replay cleanup.
3459 void ice_replay_post(struct ice_hw *hw)
3461 /* Delete old entries from replay filter list head */
3462 ice_rm_all_sw_replay_rule_info(hw);
3466 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
3467 * @hw: ptr to the hardware info
3468 * @reg: offset of 64 bit HW register to read from
3469 * @prev_stat_loaded: bool to specify if previous stats are loaded
3470 * @prev_stat: ptr to previous loaded stat value
3471 * @cur_stat: ptr to current stat value
3472 */
3473 void
3474 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3475 u64 *prev_stat, u64 *cur_stat)
3477 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
3479 /* device stats are not reset at PFR, they likely will not be zeroed
3480 * when the driver starts. Thus, save the value from the first read
3481 * without adding to the statistic value so that we report stats which
3482 * count up from zero.
3484 if (!prev_stat_loaded) {
3485 *prev_stat = new_data;
3486 return;
3487 }
3489 /* Calculate the difference between the new and old values, and then
3490 * add it to the software stat value.
3492 if (new_data >= *prev_stat)
3493 *cur_stat += new_data - *prev_stat;
3494 else
3495 /* to manage the potential roll-over */
3496 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
3498 /* Update the previously stored value to prepare for next read */
3499 *prev_stat = new_data;
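/* Editor's worked example of the 40-bit roll-over branch above: with
 * *prev_stat == 0xFFFFFFFFF0 and a new register read of 0x10, new_data is
 * less than *prev_stat, so the counter wrapped; the true delta is
 * (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20, not a huge negative value.
 */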
3503 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
3504 * @hw: ptr to the hardware info
3505 * @reg: offset of HW register to read from
3506 * @prev_stat_loaded: bool to specify if previous stats are loaded
3507 * @prev_stat: ptr to previous loaded stat value
3508 * @cur_stat: ptr to current stat value
3509 */
3510 void
3511 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3512 u64 *prev_stat, u64 *cur_stat)
3516 new_data = rd32(hw, reg);
3518 /* device stats are not reset at PFR, they likely will not be zeroed
3519 * when the driver starts. Thus, save the value from the first read
3520 * without adding to the statistic value so that we report stats which
3521 * count up from zero.
3523 if (!prev_stat_loaded) {
3524 *prev_stat = new_data;
3525 return;
3526 }
3528 /* Calculate the difference between the new and old values, and then
3529 * add it to the software stat value.
3531 if (new_data >= *prev_stat)
3532 *cur_stat += new_data - *prev_stat;
3533 else
3534 /* to manage the potential roll-over */
3535 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
3537 /* Update the previously stored value to prepare for next read */
3538 *prev_stat = new_data;
3542 * ice_sched_query_elem - query element information from HW
3543 * @hw: pointer to the HW struct
3544 * @node_teid: node TEID to be queried
3545 * @buf: buffer to element information
3547 * This function queries HW element information
3548 */
3549 enum ice_status
3550 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
3551 struct ice_aqc_get_elem *buf)
3553 u16 buf_size, num_elem_ret = 0;
3554 enum ice_status status;
3556 buf_size = sizeof(*buf);
3557 memset(buf, 0, buf_size);
3558 buf->generic[0].node_teid = cpu_to_le32(node_teid);
3559 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
3560 NULL);
3561 if (status || num_elem_ret != 1)
3562 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
3563 return status;