// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20

#define HCLGE_ETHER_MAX_RATE	100000

/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = ------------------------- * CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
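 *
 * Worked example (illustrative numbers, derived only from the formula
 * above): at port level Tick = 6 * 8 = 48, so ir_b = 126, ir_u = 0,
 * ir_s = 0 encodes IR = 126 * 8 / 48 * 1000 = 21000 Mbps; higher rates
 * are reached by increasing IR_u, lower rates by increasing IR_s, which
 * is exactly the search performed below.
 */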
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
#define DIVISOR_CLK		(1000 * 8)
#define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)

	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0;
	u8 ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
	    ir > HCLGE_ETHER_MAX_RATE)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
		}

		*ir_b = (ir * tick * (1 << ir_s_calc) + (DIVISOR_CLK >> 1)) /
			DIVISOR_CLK;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}
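
/* Read the per-TC PFC packet counters: the firmware returns
 * HCLGE_TM_PFC_NUM_GET_PER_CMD counters in each of the
 * HCLGE_TM_PFC_PKT_GET_CMD_NUM chained descriptors.
 */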
static int hclge_pfc_stats_get(struct hclge_dev *hdev,
			       enum hclge_opcode_type opcode, u64 *stats)
{
	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
	int ret, i, j;

	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
		return -EINVAL;

	/* All descriptors of one query are chained with the NEXT flag,
	 * except the last one.
	 */
	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	hclge_cmd_setup_basic_desc(&desc[i], opcode, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		struct hclge_pfc_stats_cmd *pfc_stats =
				(struct hclge_pfc_stats_cmd *)desc[i].data;

		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
			u32 index = i * HCLGE_TM_PFC_NUM_GET_PER_CMD + j;

			if (index < HCLGE_MAX_TC_NUM)
				stats[index] =
					le64_to_cpu(pfc_stats->pkt_num[j]);
		}
	}

	return 0;
}

int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}
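
/* Enable or disable MAC pause (flow control) separately for tx and rx. */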
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
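
/* Refresh the MAC address used in pause frames: read back the current
 * transition gap/time, then rewrite them together with the new address.
 */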
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/* the register for priority has four bytes, the first byte includes
	 * priority0 and priority1, the higher 4 bits stand for priority1
	 * while the lower 4 bits stand for priority0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
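	 *
	 * e.g. pri_id = 5 with tc = 3 sets the high nibble of the third
	 * byte: pri[5 >> 1] |= 3 << ((5 & 1) * 4).
	 */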
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
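
/* The TM hierarchy is queue -> qset -> priority -> priority group -> port;
 * the helpers below each program one link or weight level of that tree.
 */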
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
				      u8 bs_b, u8 bs_s)
{
	u32 shapping_para = 0;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;
}
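
/* A single 32-bit word carries the IR_B/IR_U/IR_S rate fields and the two
 * bucket-size fields; the PG and priority shapers below write it to either
 * the C or the P bucket, selected by opcode.
 */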
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u32 shapping_para)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		 HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	u8 ir_b, ir_u, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
				     HCLGE_SHAPER_LVL_PORT,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
						   HCLGE_SHAPER_BS_U_DEF,
						   HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u32 shapping_para)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
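
/* Derive the per-vport TC and queue layout: the PF (vport 0) may own every
 * enabled TC, while a VF is limited to one TC; rss_size is bounded by the
 * user request, the hardware maximum and the available MSI vectors.
 */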
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size;
	u8 i;

	/* TC configuration is shared by PF/VF in one port, only allow
	 * one tc for VF for simplicity. VF's vport_id is non zero.
	 */
	kinfo->num_tc = vport->vport_id ? 1 :
			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
	vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
				(vport->vport_id ? (vport->vport_id - 1) : 0);

	max_rss_size = min_t(u16, hdev->rss_size_max,
			     vport->alloc_tqps / kinfo->num_tc);

	/* Set to user value, no larger than max_rss_size. */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* if the user has not set rss, rss_size should be compared
		 * with the valid msi numbers to ensure a one to one map
		 * between tqp and irq as default.
		 */
		if (!kinfo->req_rss_size)
			max_rss_size = min_t(u16, max_rss_size,
					     (hdev->num_nic_msi - 1) /
					     kinfo->num_tc);

		/* Set to the maximum specification value (max_rss_size). */
		kinfo->rss_size = max_rss_size;
	}

	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
	vport->dwrr = 100;	/* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	/* DCB is enabled if we have more than 1 TC or pfc_en is
	 * non-zero.
	 */
	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT	100

	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
	}
}

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disabled, but the last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_b, ir_u, ir_s;
	u32 shaper_para;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
					hdev->tm_info.pg_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PG,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       shaper_para);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       shaper_para);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}
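
/* Program the queue -> qset and qset -> priority mappings for every vport,
 * according to the active tx scheduling mode (TC-based or vnet-based).
 */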
static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hnae3_knic_private_info *kinfo =
				&vport[k].nic.kinfo;

			for (i = 0; i < kinfo->num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
		}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_b, ir_u, ir_s;
	u32 shaper_para;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PRI,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
						shaper_para);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
						shaper_para);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_b, ir_u, ir_s;
	u32 shaper_para;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id, shaper_para);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id, shaper_para);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 ir_b, ir_u, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_QSET,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_WEIGHT	1
#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;

		if (!hnae3_dev_dcb_supported(hdev))
			return 0;

		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
		if (ret == -EOPNOTSUPP) {
			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",
				 hdev->fw_version);
			ret = 0;
		}

		return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	if (vport->vport_id >= HNAE3_MAX_TC)
		return -EINVAL;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.pfc_en);
}

/* Each TC can back-pressure up to 1024 queue sets; they are divided into
 * 32 groups of 32 queue sets each, so one group can be represented by a
 * u32 bitmap.
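 *
 * e.g. qs_id = 1000 falls in group 1000 / 32 = 31, at bit 1000 % 32 = 8
 * of that group's qs_bit_map.
 */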
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	int i;

	for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
		u32 qs_bitmap = 0;
		int k, ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
					      HCLGE_BP_GRP_ID_S);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
	int ret = 0;
	int i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return ret;
}

int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* GE MAC does not support PFC, when driver is initializing and MAC
	 * is in GE Mode, ignore the error here, otherwise initialization
	 * will fail.
	 */
	ret = hclge_pfc_setup_hw(hdev);
	if (init && ret == -EOPNOTSUPP)
		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
	else if (ret) {
		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
			ret);
		return ret;
	}

	return hclge_tm_bp_setup(hdev);
}

void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->prio_tc[i] = prio_tc[i];
		}
	}
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 bit_map = 0;
	u8 i;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}

void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
	/* DCB is enabled if we have more than 1 TC or pfc_en is
	 * non-zero.
	 */
	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	hclge_pfc_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -EINVAL;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, init);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
	    hdev->tm_info.num_pg != 1)
		return -EINVAL;

	hclge_tm_schd_info_init(hdev);

	return hclge_tm_init_hw(hdev, true);
}

int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
		return 0;

	return hclge_tm_bp_setup(hdev);
}