// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"
enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};
#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20

#define HCLGE_ETHER_MAX_RATE	100000
/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: Rate to be config, its unit is Mbps
 * @shaper_level: the shaper level. eg: port, pg, priority, queueset
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = ------------------------- * CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculate successful, negative: fail
 */
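
/* Worked example (illustrative values, not taken from the datasheet):
 * at the priority level Tick = 6 * 256 = 1536, so ir_b = 126, ir_u = 0
 * and ir_s = 0 give 126 * 8 / 1536 * 1000 ~= 656 Mbps. The calculation
 * below starts from this baseline and scales it up via IR_U or down
 * via IR_S toward the requested rate.
 */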
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
#define DIVISOR_CLK		(1000 * 8)
#define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)

	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0;
	u8 ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
	    ir > HCLGE_ETHER_MAX_RATE)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/**
	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
		}

		if (ir_calc == ir)
			*ir_b = 126;
		else
			*ir_b = (ir * tick * (1 << ir_s_calc) +
				 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			u32 denominator = (DIVISOR_CLK * (1 << --ir_u_calc));
			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}
static int hclge_pfc_stats_get(struct hclge_dev *hdev,
			       enum hclge_opcode_type opcode, u64 *stats)
{
	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
	int ret, i, j;

	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
		return -EINVAL;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	hclge_cmd_setup_basic_desc(&desc[i], opcode, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		struct hclge_pfc_stats_cmd *pfc_stats =
				(struct hclge_pfc_stats_cmd *)desc[i].data;

		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
			/* each descriptor carries NUM_GET_PER_CMD counters */
			u32 index = i * HCLGE_TM_PFC_NUM_GET_PER_CMD + j;

			if (index < HCLGE_MAX_TC_NUM)
				stats[index] =
					le64_to_cpu(pfc_stats->pkt_num[j]);
		}
	}

	return 0;
}
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];
	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/* the register for priority has four bytes, the first byte includes
	 * priority0 and priority1, the higher 4bit stands for priority1
	 * while the lower 4bit stands for priority0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}
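
/* Worked example for hclge_fill_pri_array() (hypothetical mapping): with
 * pri_id = 5 mapped to tc = 3, pri_id >> 1 selects byte 2 and
 * (pri_id & 1) * 4 selects the high nibble, i.e. pri[2] |= 3 << 4.
 */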
static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
				      u8 bs_b, u8 bs_s)
{
	u32 shapping_para = 0;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u32 shapping_para)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
			  HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para = 0;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
				     HCLGE_SHAPER_LVL_PORT,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
						   HCLGE_SHAPER_BS_U_DEF,
						   HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u32 shapping_para)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
			  HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size;
	u8 i;

	/* TC configuration is shared by PF/VF in one port, only allow
	 * one tc for VF for simplicity. VF's vport_id is non zero.
	 */
	kinfo->num_tc = vport->vport_id ? 1 :
			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
	vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
				(vport->vport_id ? (vport->vport_id - 1) : 0);
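
	/* Illustrative qset layout (assuming num_tc = 4): the PF
	 * (vport_id 0) owns qsets 0..3, one per TC, while each VF gets
	 * a single qset at num_tc + (vport_id - 1): VF1 uses qset 4,
	 * VF2 qset 5, and so on.
	 */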
	max_rss_size = min_t(u16, hdev->rss_size_max,
			     vport->alloc_tqps / kinfo->num_tc);

	/* Set to user value, no larger than max_rss_size. */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* if user not set rss, the rss_size should compare with the
		 * valid msi numbers to ensure one to one map between tqp and
		 * irq as default.
		 */
		if (!kinfo->req_rss_size)
			max_rss_size = min_t(u16, max_rss_size,
					     (hdev->num_nic_msi - 1) /
					     kinfo->num_tc);

		/* Set to the maximum specification value (max_rss_size). */
		kinfo->rss_size = max_rss_size;
	}

	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
	vport->dwrr = 100;	/* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if the TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	/* DCB is enabled if we have more than 1 TC or pfc_en is
	 * non-zero.
	 */
	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
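
/* Default prio_tc example (assuming num_tc = 4): priorities 0..3 map to
 * TC0..TC3 one to one, while priorities 4..7 fall back to TC0.
 */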
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT	100

	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
	}
}

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disabled, but the last fc_mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}
static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tm_info.num_pg != 1))
		return -EINVAL;

	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);

	return 0;
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	u32 shaper_para;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
					hdev->tm_info.pg_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PG,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       shaper_para);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       shaper_para);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hnae3_knic_private_info *kinfo =
				&vport[k].nic.kinfo;

			for (i = 0; i < kinfo->num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
		}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
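
/* Mapping example (assumed values: 4 TCs, 2 vports): in TC-based mode
 * qset (qs_offset + i) links to priority i, so the TC0 qsets of all
 * vports share priority 0; in vnet-based mode every qset of vport k
 * links to priority k instead.
 */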
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	u32 shaper_para;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PRI,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
						shaper_para);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
						shaper_para);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	u32 shaper_para;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id, shaper_para);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id, shaper_para);
	if (ret)
		return ret;

	return 0;
}
static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_QSET,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_WEIGHT	1
#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;

		if (!hnae3_dev_dcb_supported(hdev))
			return 0;

		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
		if (ret == -EOPNOTSUPP) {
			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",
				 hdev->fw_version);
			ret = 0;
		}

		return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}
static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	if (vport->vport_id >= HNAE3_MAX_TC)
		return -EINVAL;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}
static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}
static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.pfc_en);
}
/* Each TC has 1024 queue sets to back-pressure; they are divided into
 * 32 groups, and each group contains 32 queue sets, which can be
 * represented by a u32 bitmap.
 */
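
/* Illustrative decomposition (values assumed): for qs_id = 100,
 * grp = 100 / 32 = 3 and sub_grp = 100 % 32 = 4, so bit 4 is set in
 * the bitmap sent for group 3.
 */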
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	int i;

	for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
		u32 qs_bitmap = 0;
		int k, ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
					      HCLGE_BP_GRP_ID_S);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			/* only set the bit if the qset falls in group i */
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}
int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* GE MAC does not support PFC, when driver is initializing and MAC
	 * is in GE Mode, ignore the error here, otherwise initialization
	 * will fail.
	 */
	ret = hclge_pfc_setup_hw(hdev);
	if (init && ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
			ret);
		return ret;
	}

	return hclge_tm_bp_setup(hdev);
}
void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->prio_tc[i] = prio_tc[i];
		}
	}
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 bit_map = 0;
	u8 i;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}
void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
	/* DCB is enabled if we have more than 1 TC or pfc_en is
	 * non-zero.
	 */
	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	hclge_pfc_info_init(hdev);
}
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, init);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	int ret;

	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	ret = hclge_tm_schd_info_init(hdev);
	if (ret)
		return ret;

	return hclge_tm_init_hw(hdev, true);
}

int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
		return 0;

	return hclge_tm_bp_setup(hdev);
}