Linux-libre 5.3.12-gnu
[librecmc/linux-libre.git] / drivers / net / ethernet / hisilicon / hns3 / hns3pf / hclge_tm.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/etherdevice.h>
5
6 #include "hclge_cmd.h"
7 #include "hclge_main.h"
8 #include "hclge_tm.h"
9
/* Shaper hierarchy levels. PRI/PG/PORT/QSET index the per-level tick
 * table in hclge_shaper_para_calc(). Note VF/PF deliberately alias the
 * 0/1 slots; they are used for per-vport shaping in vnet-base
 * scheduling mode (see hclge_tm_pri_vnet_base_shaper_pri_cfg()).
 */
enum hclge_shaper_level {
        HCLGE_SHAPER_LVL_PRI    = 0,
        HCLGE_SHAPER_LVL_PG     = 1,
        HCLGE_SHAPER_LVL_PORT   = 2,
        HCLGE_SHAPER_LVL_QSET   = 3,
        HCLGE_SHAPER_LVL_CNT    = 4,
        HCLGE_SHAPER_LVL_VF     = 0,
        HCLGE_SHAPER_LVL_PF     = 1,
};
19
20 #define HCLGE_TM_PFC_PKT_GET_CMD_NUM    3
21 #define HCLGE_TM_PFC_NUM_GET_PER_CMD    3
22
23 #define HCLGE_SHAPER_BS_U_DEF   5
24 #define HCLGE_SHAPER_BS_S_DEF   20
25
26 #define HCLGE_ETHER_MAX_RATE    100000
27
/* hclge_shaper_para_calc: calculate ir parameters for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *              IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *              Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
                                  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
#define DIVISOR_CLK             (1000 * 8)
#define DIVISOR_IR_B_126        (126 * DIVISOR_CLK)

        /* hardware tick per shaper level */
        const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
                6 * 256,        /* Priority level */
                6 * 32,         /* Priority group level */
                6 * 8,          /* Port level */
                6 * 256         /* Qset level */
        };
        u8 shift_u = 0;
        u8 shift_s = 0;
        u32 rate;
        u32 tick;

        /* reject unknown levels and rates above the NIC line rate */
        if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
            ir > HCLGE_ETHER_MAX_RATE)
                return -EINVAL;

        tick = tick_array[shaper_level];

        /* Rate reached with the reference parameters
         * ir_b = 126, ir_u = 0, ir_s = 0:
         *              126 * 1 * 8
         * rate    = ---------------- * 1000   (rounded)
         *              tick * 1
         */
        rate = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

        if (rate == ir) {
                /* requested rate matches the reference parameters exactly */
                *ir_b = 126;
                *ir_u = 0;
                *ir_s = 0;
                return 0;
        }

        if (rate > ir) {
                /* too fast: grow the denominator by raising ir_s */
                do {
                        shift_s++;
                        rate = DIVISOR_IR_B_126 / (tick * (1 << shift_s));
                } while (rate > ir);

                if (rate == ir)
                        *ir_b = 126;
                else
                        *ir_b = (ir * tick * (1 << shift_s) +
                                 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
        } else {
                /* too slow: grow the numerator by raising ir_u */
                u32 num;

                do {
                        shift_u++;
                        num = DIVISOR_IR_B_126 * (1 << shift_u);
                        rate = (num + (tick >> 1)) / tick;
                } while (rate < ir);

                if (rate == ir) {
                        *ir_b = 126;
                } else {
                        /* overshot: step ir_u back and refit ir_b */
                        u32 den = DIVISOR_CLK * (1 << --shift_u);

                        *ir_b = (ir * tick + (den >> 1)) / den;
                }
        }

        *ir_u = shift_u;
        *ir_s = shift_s;

        return 0;
}
117
118 static int hclge_pfc_stats_get(struct hclge_dev *hdev,
119                                enum hclge_opcode_type opcode, u64 *stats)
120 {
121         struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
122         int ret, i, j;
123
124         if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
125               opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
126                 return -EINVAL;
127
128         for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
129                 hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
130                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
131         }
132
133         hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
134
135         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
136         if (ret)
137                 return ret;
138
139         for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
140                 struct hclge_pfc_stats_cmd *pfc_stats =
141                                 (struct hclge_pfc_stats_cmd *)desc[i].data;
142
143                 for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
144                         u32 index = i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j;
145
146                         if (index < HCLGE_MAX_TC_NUM)
147                                 stats[index] =
148                                         le64_to_cpu(pfc_stats->pkt_num[j]);
149                 }
150         }
151         return 0;
152 }
153
/* query per-priority PFC RX packet counters into @stats */
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
        return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}
158
/* query per-priority PFC TX packet counters into @stats */
int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
        return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}
163
164 int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
165 {
166         struct hclge_desc desc;
167
168         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);
169
170         desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
171                 (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));
172
173         return hclge_cmd_send(&hdev->hw, &desc, 1);
174 }
175
176 static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
177                                   u8 pfc_bitmap)
178 {
179         struct hclge_desc desc;
180         struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
181
182         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
183
184         pfc->tx_rx_en_bitmap = tx_rx_bitmap;
185         pfc->pri_en_bitmap = pfc_bitmap;
186
187         return hclge_cmd_send(&hdev->hw, &desc, 1);
188 }
189
190 static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
191                                  u8 pause_trans_gap, u16 pause_trans_time)
192 {
193         struct hclge_cfg_pause_param_cmd *pause_param;
194         struct hclge_desc desc;
195
196         pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
197
198         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
199
200         ether_addr_copy(pause_param->mac_addr, addr);
201         ether_addr_copy(pause_param->mac_addr_extra, addr);
202         pause_param->pause_trans_gap = pause_trans_gap;
203         pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
204
205         return hclge_cmd_send(&hdev->hw, &desc, 1);
206 }
207
/* re-program the pause parameters with a new MAC address @mac_addr,
 * preserving the gap/time values currently configured in firmware
 */
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
        struct hclge_cfg_pause_param_cmd *pause_param;
        struct hclge_desc desc;
        u16 trans_time;
        u8 trans_gap;
        int ret;

        pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

        /* read back the current pause parameters */
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                return ret;

        /* keep existing timing; only the MAC address changes */
        trans_gap = pause_param->pause_trans_gap;
        trans_time = le16_to_cpu(pause_param->pause_trans_time);

        return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}
229
230 static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
231 {
232         u8 tc;
233
234         tc = hdev->tm_info.prio_tc[pri_id];
235
236         if (tc >= hdev->tm_info.num_tc)
237                 return -EINVAL;
238
239         /**
240          * the register for priority has four bytes, the first bytes includes
241          *  priority0 and priority1, the higher 4bit stands for priority1
242          *  while the lower 4bit stands for priority0, as below:
243          * first byte:  | pri_1 | pri_0 |
244          * second byte: | pri_3 | pri_2 |
245          * third byte:  | pri_5 | pri_4 |
246          * fourth byte: | pri_7 | pri_6 |
247          */
248         pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
249
250         return 0;
251 }
252
/* download the full user-priority -> TC mapping table to hardware */
static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
        struct hclge_desc desc;
        u8 *pri = (u8 *)desc.data;
        u8 pri_id;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

        /* pack one nibble per priority into the command payload */
        for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
                ret = hclge_fill_pri_array(hdev, pri, pri_id);
                if (ret)
                        return ret;
        }

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}
270
/* link the priorities in @pri_bit_map to priority group @pg_id */
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
                                      u8 pg_id, u8 pri_bit_map)
{
        struct hclge_pg_to_pri_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

        map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

        map->pg_id = pg_id;
        map->pri_bit_map = pri_bit_map;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}
286
/* map qset @qs_id to priority @pri and mark the link valid */
static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
                                      u16 qs_id, u8 pri)
{
        struct hclge_qs_to_pri_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

        map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

        map->qs_id = cpu_to_le16(qs_id);
        map->priority = pri;
        map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}
303
/* map NIC queue @q_id to qset @qs_id; the valid bit is OR-ed into the
 * qset id field, so qs_id must not collide with the mask
 */
static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
                                    u16 q_id, u16 qs_id)
{
        struct hclge_nq_to_qs_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

        map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

        map->nq_id = cpu_to_le16(q_id);
        map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}
319
/* set the DWRR weight @dwrr for priority group @pg_id */
static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
                                  u8 dwrr)
{
        struct hclge_pg_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

        weight = (struct hclge_pg_weight_cmd *)desc.data;

        weight->pg_id = pg_id;
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}
335
/* set the DWRR weight @dwrr for priority @pri_id */
static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
                                   u8 dwrr)
{
        struct hclge_priority_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

        weight = (struct hclge_priority_weight_cmd *)desc.data;

        weight->pri_id = pri_id;
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}
351
/* set the DWRR weight @dwrr for qset @qs_id */
static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
                                  u8 dwrr)
{
        struct hclge_qs_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

        weight = (struct hclge_qs_weight_cmd *)desc.data;

        weight->qs_id = cpu_to_le16(qs_id);
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}
367
/* pack the IR (rate) and BS (bucket size) parameters into the 32bit
 * shaping register layout via the hclge_tm_set_field() field macros
 */
static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
                                      u8 bs_b, u8 bs_s)
{
        u32 shapping_para = 0;

        hclge_tm_set_field(shapping_para, IR_B, ir_b);
        hclge_tm_set_field(shapping_para, IR_U, ir_u);
        hclge_tm_set_field(shapping_para, IR_S, ir_s);
        hclge_tm_set_field(shapping_para, BS_B, bs_b);
        hclge_tm_set_field(shapping_para, BS_S, bs_s);

        return shapping_para;
}
381
/* write @shapping_para to PG @pg_id's shaper; @bucket selects the P
 * (peak) vs C (committed) shaping opcode
 */
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
                                    enum hclge_shap_bucket bucket, u8 pg_id,
                                    u32 shapping_para)
{
        struct hclge_pg_shapping_cmd *shap_cfg_cmd;
        enum hclge_opcode_type opcode;
        struct hclge_desc desc;

        opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
                 HCLGE_OPC_TM_PG_C_SHAPPING;
        hclge_cmd_setup_basic_desc(&desc, opcode, false);

        shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

        shap_cfg_cmd->pg_id = pg_id;

        shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}
402
403 static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
404 {
405         struct hclge_port_shapping_cmd *shap_cfg_cmd;
406         struct hclge_desc desc;
407         u32 shapping_para = 0;
408         u8 ir_u, ir_b, ir_s;
409         int ret;
410
411         ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
412                                      HCLGE_SHAPER_LVL_PORT,
413                                      &ir_b, &ir_u, &ir_s);
414         if (ret)
415                 return ret;
416
417         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
418         shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
419
420         shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
421                                                    HCLGE_SHAPER_BS_U_DEF,
422                                                    HCLGE_SHAPER_BS_S_DEF);
423
424         shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
425
426         return hclge_cmd_send(&hdev->hw, &desc, 1);
427 }
428
/* write @shapping_para to priority @pri_id's shaper; @bucket selects
 * the P (peak) vs C (committed) shaping opcode
 */
static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
                                     enum hclge_shap_bucket bucket, u8 pri_id,
                                     u32 shapping_para)
{
        struct hclge_pri_shapping_cmd *shap_cfg_cmd;
        enum hclge_opcode_type opcode;
        struct hclge_desc desc;

        opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
                 HCLGE_OPC_TM_PRI_C_SHAPPING;

        hclge_cmd_setup_basic_desc(&desc, opcode, false);

        shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

        shap_cfg_cmd->pri_id = pri_id;

        shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}
450
451 static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
452 {
453         struct hclge_desc desc;
454
455         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);
456
457         if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
458                 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
459         else
460                 desc.data[1] = 0;
461
462         desc.data[0] = cpu_to_le32(pg_id);
463
464         return hclge_cmd_send(&hdev->hw, &desc, 1);
465 }
466
467 static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
468 {
469         struct hclge_desc desc;
470
471         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);
472
473         if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
474                 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
475         else
476                 desc.data[1] = 0;
477
478         desc.data[0] = cpu_to_le32(pri_id);
479
480         return hclge_cmd_send(&hdev->hw, &desc, 1);
481 }
482
483 static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
484 {
485         struct hclge_desc desc;
486
487         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);
488
489         if (mode == HCLGE_SCH_MODE_DWRR)
490                 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
491         else
492                 desc.data[1] = 0;
493
494         desc.data[0] = cpu_to_le32(qs_id);
495
496         return hclge_cmd_send(&hdev->hw, &desc, 1);
497 }
498
/* configure back-pressure: qsets selected by @bit_map within qset group
 * @grp_id respond to pause state of TC @tc
 */
static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
                              u32 bit_map)
{
        struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
                                   false);

        bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

        bp_to_qs_map_cmd->tc_id = tc;
        bp_to_qs_map_cmd->qs_group_id = grp_id;
        bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}
516
/* recompute one vport's TC layout: TC count, qset offset, rss_size and
 * the per-TC tqp slices, honouring a user-requested rss size if valid
 */
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        u16 max_rss_size;
        u8 i;

        /* TC configuration is shared by PF/VF in one port, only allow
         * one tc for VF for simplicity. VF's vport_id is non zero.
         */
        kinfo->num_tc = vport->vport_id ? 1 :
                        min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
        /* PF qsets occupy [0, num_tc); VF n's single qset sits behind
         * them at num_tc + (n - 1)
         */
        vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
                                (vport->vport_id ? (vport->vport_id - 1) : 0);

        max_rss_size = min_t(u16, hdev->rss_size_max,
                             vport->alloc_tqps / kinfo->num_tc);

        /* Set to user value, no larger than max_rss_size. */
        if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
            kinfo->req_rss_size <= max_rss_size) {
                dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
                         kinfo->rss_size, kinfo->req_rss_size);
                kinfo->rss_size = kinfo->req_rss_size;
        } else if (kinfo->rss_size > max_rss_size ||
                   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
                /* if user not set rss, the rss_size should compare with the
                 * valid msi numbers to ensure one to one map between tqp and
                 * irq as default.
                 */
                if (!kinfo->req_rss_size)
                        max_rss_size = min_t(u16, max_rss_size,
                                             (hdev->num_nic_msi - 1) /
                                             kinfo->num_tc);

                /* Set to the maximum specification value (max_rss_size). */
                kinfo->rss_size = max_rss_size;
        }

        kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
        vport->dwrr = 100;  /* 100 percent as init */
        vport->alloc_rss_size = kinfo->rss_size;
        vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

        /* carve rss_size consecutive tqps per enabled TC; disabled TCs
         * fall back to a single default queue at offset 0
         */
        for (i = 0; i < HNAE3_MAX_TC; i++) {
                if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
                        kinfo->tc_info[i].enable = true;
                        kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
                        kinfo->tc_info[i].tqp_count = kinfo->rss_size;
                        kinfo->tc_info[i].tc = i;
                } else {
                        /* Set to default queue if TC is disable */
                        kinfo->tc_info[i].enable = false;
                        kinfo->tc_info[i].tqp_offset = 0;
                        kinfo->tc_info[i].tqp_count = 1;
                        kinfo->tc_info[i].tc = 0;
                }
        }

        memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
               FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}
579
580 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
581 {
582         struct hclge_vport *vport = hdev->vport;
583         u32 i;
584
585         for (i = 0; i < hdev->num_alloc_vport; i++) {
586                 hclge_tm_vport_tc_info_update(vport);
587
588                 vport++;
589         }
590 }
591
592 static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
593 {
594         u8 i;
595
596         for (i = 0; i < hdev->tm_info.num_tc; i++) {
597                 hdev->tm_info.tc_info[i].tc_id = i;
598                 hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
599                 hdev->tm_info.tc_info[i].pgid = 0;
600                 hdev->tm_info.tc_info[i].bw_limit =
601                         hdev->tm_info.pg_info[0].bw_limit;
602         }
603
604         for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
605                 hdev->tm_info.prio_tc[i] =
606                         (i >= hdev->tm_info.num_tc) ? 0 : i;
607
608         /* DCB is enabled if we have more than 1 TC or pfc_en is
609          * non-zero.
610          */
611         if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
612                 hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
613         else
614                 hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
615 }
616
617 static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
618 {
619 #define BW_PERCENT      100
620
621         u8 i;
622
623         for (i = 0; i < hdev->tm_info.num_pg; i++) {
624                 int k;
625
626                 hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
627
628                 hdev->tm_info.pg_info[i].pg_id = i;
629                 hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
630
631                 hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;
632
633                 if (i != 0)
634                         continue;
635
636                 hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
637                 for (k = 0; k < hdev->tm_info.num_tc; k++)
638                         hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
639         }
640 }
641
/* choose the flow-control mode for the current DCB state: restore the
 * pre-DCB mode when DCB is off, force PFC (remembering the old mode)
 * when DCB is on
 */
static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
        if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
                if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
                        dev_warn(&hdev->pdev->dev,
                                 "DCB is disable, but last mode is FC_PFC\n");

                hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
        } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
                /* fc_mode_last_time record the last fc_mode when
                 * DCB is enabled, so that fc_mode can be set to
                 * the correct value when DCB is disabled.
                 */
                hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
                hdev->tm_info.fc_mode = HCLGE_FC_PFC;
        }
}
659
660 static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
661 {
662         if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
663             (hdev->tm_info.num_pg != 1))
664                 return -EINVAL;
665
666         hclge_tm_pg_info_init(hdev);
667
668         hclge_tm_tc_info_init(hdev);
669
670         hclge_tm_vport_info_update(hdev);
671
672         hclge_pfc_info_init(hdev);
673
674         return 0;
675 }
676
677 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
678 {
679         int ret;
680         u32 i;
681
682         if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
683                 return 0;
684
685         for (i = 0; i < hdev->tm_info.num_pg; i++) {
686                 /* Cfg mapping */
687                 ret = hclge_tm_pg_to_pri_map_cfg(
688                         hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
689                 if (ret)
690                         return ret;
691         }
692
693         return 0;
694 }
695
696 static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
697 {
698         u8 ir_u, ir_b, ir_s;
699         u32 shaper_para;
700         int ret;
701         u32 i;
702
703         /* Cfg pg schd */
704         if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
705                 return 0;
706
707         /* Pg to pri */
708         for (i = 0; i < hdev->tm_info.num_pg; i++) {
709                 /* Calc shaper para */
710                 ret = hclge_shaper_para_calc(
711                                         hdev->tm_info.pg_info[i].bw_limit,
712                                         HCLGE_SHAPER_LVL_PG,
713                                         &ir_b, &ir_u, &ir_s);
714                 if (ret)
715                         return ret;
716
717                 shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
718                                                          HCLGE_SHAPER_BS_U_DEF,
719                                                          HCLGE_SHAPER_BS_S_DEF);
720                 ret = hclge_tm_pg_shapping_cfg(hdev,
721                                                HCLGE_TM_SHAP_C_BUCKET, i,
722                                                shaper_para);
723                 if (ret)
724                         return ret;
725
726                 shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
727                                                          HCLGE_SHAPER_BS_U_DEF,
728                                                          HCLGE_SHAPER_BS_S_DEF);
729                 ret = hclge_tm_pg_shapping_cfg(hdev,
730                                                HCLGE_TM_SHAP_P_BUCKET, i,
731                                                shaper_para);
732                 if (ret)
733                         return ret;
734         }
735
736         return 0;
737 }
738
739 static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
740 {
741         int ret;
742         u32 i;
743
744         /* cfg pg schd */
745         if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
746                 return 0;
747
748         /* pg to prio */
749         for (i = 0; i < hdev->tm_info.num_pg; i++) {
750                 /* Cfg dwrr */
751                 ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
752                 if (ret)
753                         return ret;
754         }
755
756         return 0;
757 }
758
/* map every tqp of @vport to its TC's qset (vport->qs_offset + tc) */
static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
                                   struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hnae3_queue **tqp = kinfo->tqp;
        struct hnae3_tc_info *v_tc_info;
        u32 i, j;
        int ret;

        for (i = 0; i < kinfo->num_tc; i++) {
                v_tc_info = &kinfo->tc_info[i];
                for (j = 0; j < v_tc_info->tqp_count; j++) {
                        struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

                        ret = hclge_tm_q_to_qs_map_cfg(hdev,
                                                       hclge_get_queue_id(q),
                                                       vport->qs_offset + i);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
783
/* configure the qset -> priority links for the active scheduling mode
 * (per-TC in TC-base mode, per-vport in vnet-base mode), then map each
 * vport's queues onto its qsets
 */
static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i, k;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                /* Cfg qs -> pri mapping, one by one mapping */
                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        struct hnae3_knic_private_info *kinfo =
                                &vport[k].nic.kinfo;

                        for (i = 0; i < kinfo->num_tc; i++) {
                                ret = hclge_tm_qs_to_pri_map_cfg(
                                        hdev, vport[k].qs_offset + i, i);
                                if (ret)
                                        return ret;
                        }
                }
        } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
                /* Cfg qs -> pri mapping,  qs = tc, pri = vf, 8 qs -> 1 pri */
                for (k = 0; k < hdev->num_alloc_vport; k++)
                        for (i = 0; i < HNAE3_MAX_TC; i++) {
                                ret = hclge_tm_qs_to_pri_map_cfg(
                                        hdev, vport[k].qs_offset + i, k);
                                if (ret)
                                        return ret;
                        }
        } else {
                return -EINVAL;
        }

        /* Cfg q -> qs mapping */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_vport_q_to_qs_map(hdev, vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}
827
/* in TC-base mode, program the C and P bucket shapers of every TC's
 * priority from the TC's bandwidth limit
 */
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
        u8 ir_u, ir_b, ir_s;
        u32 shaper_para;
        int ret;
        u32 i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                ret = hclge_shaper_para_calc(
                                        hdev->tm_info.tc_info[i].bw_limit,
                                        HCLGE_SHAPER_LVL_PRI,
                                        &ir_b, &ir_u, &ir_s);
                if (ret)
                        return ret;

                /* C bucket: ir fields stay zero */
                shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
                                                shaper_para);
                if (ret)
                        return ret;

                /* P bucket carries the calculated rate */
                shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
                                                shaper_para);
                if (ret)
                        return ret;
        }

        return 0;
}
862
863 static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
864 {
865         struct hclge_dev *hdev = vport->back;
866         u8 ir_u, ir_b, ir_s;
867         u32 shaper_para;
868         int ret;
869
870         ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
871                                      &ir_b, &ir_u, &ir_s);
872         if (ret)
873                 return ret;
874
875         shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
876                                                  HCLGE_SHAPER_BS_U_DEF,
877                                                  HCLGE_SHAPER_BS_S_DEF);
878         ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
879                                         vport->vport_id, shaper_para);
880         if (ret)
881                 return ret;
882
883         shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
884                                                  HCLGE_SHAPER_BS_U_DEF,
885                                                  HCLGE_SHAPER_BS_S_DEF);
886         ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
887                                         vport->vport_id, shaper_para);
888         if (ret)
889                 return ret;
890
891         return 0;
892 }
893
894 static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
895 {
896         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
897         struct hclge_dev *hdev = vport->back;
898         u8 ir_u, ir_b, ir_s;
899         u32 i;
900         int ret;
901
902         for (i = 0; i < kinfo->num_tc; i++) {
903                 ret = hclge_shaper_para_calc(
904                                         hdev->tm_info.tc_info[i].bw_limit,
905                                         HCLGE_SHAPER_LVL_QSET,
906                                         &ir_b, &ir_u, &ir_s);
907                 if (ret)
908                         return ret;
909         }
910
911         return 0;
912 }
913
914 static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
915 {
916         struct hclge_vport *vport = hdev->vport;
917         int ret;
918         u32 i;
919
920         /* Need config vport shaper */
921         for (i = 0; i < hdev->num_alloc_vport; i++) {
922                 ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
923                 if (ret)
924                         return ret;
925
926                 ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
927                 if (ret)
928                         return ret;
929
930                 vport++;
931         }
932
933         return 0;
934 }
935
936 static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
937 {
938         int ret;
939
940         if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
941                 ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
942                 if (ret)
943                         return ret;
944         } else {
945                 ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
946                 if (ret)
947                         return ret;
948         }
949
950         return 0;
951 }
952
953 static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
954 {
955         struct hclge_vport *vport = hdev->vport;
956         struct hclge_pg_info *pg_info;
957         u8 dwrr;
958         int ret;
959         u32 i, k;
960
961         for (i = 0; i < hdev->tm_info.num_tc; i++) {
962                 pg_info =
963                         &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
964                 dwrr = pg_info->tc_dwrr[i];
965
966                 ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
967                 if (ret)
968                         return ret;
969
970                 for (k = 0; k < hdev->num_alloc_vport; k++) {
971                         ret = hclge_tm_qs_weight_cfg(
972                                 hdev, vport[k].qs_offset + i,
973                                 vport[k].dwrr);
974                         if (ret)
975                                 return ret;
976                 }
977         }
978
979         return 0;
980 }
981
982 static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
983 {
984 #define DEFAULT_TC_WEIGHT       1
985 #define DEFAULT_TC_OFFSET       14
986
987         struct hclge_ets_tc_weight_cmd *ets_weight;
988         struct hclge_desc desc;
989         unsigned int i;
990
991         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
992         ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
993
994         for (i = 0; i < HNAE3_MAX_TC; i++) {
995                 struct hclge_pg_info *pg_info;
996
997                 ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
998
999                 if (!(hdev->hw_tc_map & BIT(i)))
1000                         continue;
1001
1002                 pg_info =
1003                         &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
1004                 ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
1005         }
1006
1007         ets_weight->weight_offset = DEFAULT_TC_OFFSET;
1008
1009         return hclge_cmd_send(&hdev->hw, &desc, 1);
1010 }
1011
1012 static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
1013 {
1014         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1015         struct hclge_dev *hdev = vport->back;
1016         int ret;
1017         u8 i;
1018
1019         /* Vf dwrr */
1020         ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
1021         if (ret)
1022                 return ret;
1023
1024         /* Qset dwrr */
1025         for (i = 0; i < kinfo->num_tc; i++) {
1026                 ret = hclge_tm_qs_weight_cfg(
1027                         hdev, vport->qs_offset + i,
1028                         hdev->tm_info.pg_info[0].tc_dwrr[i]);
1029                 if (ret)
1030                         return ret;
1031         }
1032
1033         return 0;
1034 }
1035
1036 static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
1037 {
1038         struct hclge_vport *vport = hdev->vport;
1039         int ret;
1040         u32 i;
1041
1042         for (i = 0; i < hdev->num_alloc_vport; i++) {
1043                 ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
1044                 if (ret)
1045                         return ret;
1046
1047                 vport++;
1048         }
1049
1050         return 0;
1051 }
1052
1053 static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
1054 {
1055         int ret;
1056
1057         if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1058                 ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
1059                 if (ret)
1060                         return ret;
1061
1062                 if (!hnae3_dev_dcb_supported(hdev))
1063                         return 0;
1064
1065                 ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
1066                 if (ret == -EOPNOTSUPP) {
1067                         dev_warn(&hdev->pdev->dev,
1068                                  "fw %08x does't support ets tc weight cmd\n",
1069                                  hdev->fw_version);
1070                         ret = 0;
1071                 }
1072
1073                 return ret;
1074         } else {
1075                 ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
1076                 if (ret)
1077                         return ret;
1078         }
1079
1080         return 0;
1081 }
1082
/* Configure the TM mapping chain: user prio -> TC, PG -> priority,
 * then priority/queue -> qset.
 */
static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}
1097
/* Configure shapers top-down: port, then PG, then priority level. */
static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}
1112
/* Configure DWRR weights: PG level first, then priority/qset level. */
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret = hclge_tm_pg_dwrr_cfg(hdev);

	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}
1123
1124 static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
1125 {
1126         int ret;
1127         u8 i;
1128
1129         /* Only being config on TC-Based scheduler mode */
1130         if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
1131                 return 0;
1132
1133         for (i = 0; i < hdev->tm_info.num_pg; i++) {
1134                 ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
1135                 if (ret)
1136                         return ret;
1137         }
1138
1139         return 0;
1140 }
1141
1142 static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
1143 {
1144         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1145         struct hclge_dev *hdev = vport->back;
1146         int ret;
1147         u8 i;
1148
1149         if (vport->vport_id >= HNAE3_MAX_TC)
1150                 return -EINVAL;
1151
1152         ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
1153         if (ret)
1154                 return ret;
1155
1156         for (i = 0; i < kinfo->num_tc; i++) {
1157                 u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
1158
1159                 ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
1160                                                 sch_mode);
1161                 if (ret)
1162                         return ret;
1163         }
1164
1165         return 0;
1166 }
1167
1168 static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
1169 {
1170         struct hclge_vport *vport = hdev->vport;
1171         int ret;
1172         u8 i, k;
1173
1174         if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1175                 for (i = 0; i < hdev->tm_info.num_tc; i++) {
1176                         ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
1177                         if (ret)
1178                                 return ret;
1179
1180                         for (k = 0; k < hdev->num_alloc_vport; k++) {
1181                                 ret = hclge_tm_qs_schd_mode_cfg(
1182                                         hdev, vport[k].qs_offset + i,
1183                                         HCLGE_SCH_MODE_DWRR);
1184                                 if (ret)
1185                                         return ret;
1186                         }
1187                 }
1188         } else {
1189                 for (i = 0; i < hdev->num_alloc_vport; i++) {
1190                         ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
1191                         if (ret)
1192                                 return ret;
1193
1194                         vport++;
1195                 }
1196         }
1197
1198         return 0;
1199 }
1200
/* Program scheduling modes for all levels: PG first, then pri/qset. */
static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret = hclge_tm_lvl2_schd_mode_cfg(hdev);

	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}
1211
/* Full TM scheduler hardware setup, in dependency order:
 * mapping -> shapers -> DWRR weights -> per-level scheduling mode.
 */
int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_schd_mode_hw(hdev);
}
1234
1235 static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
1236 {
1237         struct hclge_mac *mac = &hdev->hw.mac;
1238
1239         return hclge_pause_param_cfg(hdev, mac->mac_addr,
1240                                      HCLGE_DEFAULT_PAUSE_TRANS_GAP,
1241                                      HCLGE_DEFAULT_PAUSE_TRANS_TIME);
1242 }
1243
1244 static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
1245 {
1246         u8 enable_bitmap = 0;
1247
1248         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
1249                 enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
1250                                 HCLGE_RX_MAC_PAUSE_EN_MSK;
1251
1252         return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
1253                                       hdev->tm_info.pfc_en);
1254 }
1255
1256 /* Each Tc has a 1024 queue sets to backpress, it divides to
1257  * 32 group, each group contains 32 queue sets, which can be
1258  * represented by u32 bitmap.
1259  */
1260 static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
1261 {
1262         int i;
1263
1264         for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
1265                 u32 qs_bitmap = 0;
1266                 int k, ret;
1267
1268                 for (k = 0; k < hdev->num_alloc_vport; k++) {
1269                         struct hclge_vport *vport = &hdev->vport[k];
1270                         u16 qs_id = vport->qs_offset + tc;
1271                         u8 grp, sub_grp;
1272
1273                         grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
1274                                               HCLGE_BP_GRP_ID_S);
1275                         sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
1276                                                   HCLGE_BP_SUB_GRP_ID_S);
1277                         if (i == grp)
1278                                 qs_bitmap |= (1 << sub_grp);
1279                 }
1280
1281                 ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
1282                 if (ret)
1283                         return ret;
1284         }
1285
1286         return 0;
1287 }
1288
1289 static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
1290 {
1291         bool tx_en, rx_en;
1292
1293         switch (hdev->tm_info.fc_mode) {
1294         case HCLGE_FC_NONE:
1295                 tx_en = false;
1296                 rx_en = false;
1297                 break;
1298         case HCLGE_FC_RX_PAUSE:
1299                 tx_en = false;
1300                 rx_en = true;
1301                 break;
1302         case HCLGE_FC_TX_PAUSE:
1303                 tx_en = true;
1304                 rx_en = false;
1305                 break;
1306         case HCLGE_FC_FULL:
1307                 tx_en = true;
1308                 rx_en = true;
1309                 break;
1310         case HCLGE_FC_PFC:
1311                 tx_en = false;
1312                 rx_en = false;
1313                 break;
1314         default:
1315                 tx_en = true;
1316                 rx_en = true;
1317         }
1318
1319         return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
1320 }
1321
1322 static int hclge_tm_bp_setup(struct hclge_dev *hdev)
1323 {
1324         int ret = 0;
1325         int i;
1326
1327         for (i = 0; i < hdev->tm_info.num_tc; i++) {
1328                 ret = hclge_bp_setup_hw(hdev, i);
1329                 if (ret)
1330                         return ret;
1331         }
1332
1333         return ret;
1334 }
1335
1336 int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
1337 {
1338         int ret;
1339
1340         ret = hclge_pause_param_setup_hw(hdev);
1341         if (ret)
1342                 return ret;
1343
1344         ret = hclge_mac_pause_setup_hw(hdev);
1345         if (ret)
1346                 return ret;
1347
1348         /* Only DCB-supported dev supports qset back pressure and pfc cmd */
1349         if (!hnae3_dev_dcb_supported(hdev))
1350                 return 0;
1351
1352         /* GE MAC does not support PFC, when driver is initializing and MAC
1353          * is in GE Mode, ignore the error here, otherwise initialization
1354          * will fail.
1355          */
1356         ret = hclge_pfc_setup_hw(hdev);
1357         if (init && ret == -EOPNOTSUPP)
1358                 dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
1359         else if (ret) {
1360                 dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
1361                         ret);
1362                 return ret;
1363         }
1364
1365         return hclge_tm_bp_setup(hdev);
1366 }
1367
1368 void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
1369 {
1370         struct hclge_vport *vport = hdev->vport;
1371         struct hnae3_knic_private_info *kinfo;
1372         u32 i, k;
1373
1374         for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
1375                 hdev->tm_info.prio_tc[i] = prio_tc[i];
1376
1377                 for (k = 0;  k < hdev->num_alloc_vport; k++) {
1378                         kinfo = &vport[k].nic.kinfo;
1379                         kinfo->prio_tc[i] = prio_tc[i];
1380                 }
1381         }
1382 }
1383
1384 void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
1385 {
1386         u8 bit_map = 0;
1387         u8 i;
1388
1389         hdev->tm_info.num_tc = num_tc;
1390
1391         for (i = 0; i < hdev->tm_info.num_tc; i++)
1392                 bit_map |= BIT(i);
1393
1394         if (!bit_map) {
1395                 bit_map = 1;
1396                 hdev->tm_info.num_tc = 1;
1397         }
1398
1399         hdev->hw_tc_map = bit_map;
1400
1401         hclge_tm_schd_info_init(hdev);
1402 }
1403
1404 void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
1405 {
1406         /* DCB is enabled if we have more than 1 TC or pfc_en is
1407          * non-zero.
1408          */
1409         if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
1410                 hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
1411         else
1412                 hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
1413
1414         hclge_pfc_info_init(hdev);
1415 }
1416
1417 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
1418 {
1419         int ret;
1420
1421         if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
1422             (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
1423                 return -ENOTSUPP;
1424
1425         ret = hclge_tm_schd_setup_hw(hdev);
1426         if (ret)
1427                 return ret;
1428
1429         ret = hclge_pause_setup_hw(hdev, init);
1430         if (ret)
1431                 return ret;
1432
1433         return 0;
1434 }
1435
1436 int hclge_tm_schd_init(struct hclge_dev *hdev)
1437 {
1438         int ret;
1439
1440         /* fc_mode is HCLGE_FC_FULL on reset */
1441         hdev->tm_info.fc_mode = HCLGE_FC_FULL;
1442         hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
1443
1444         ret = hclge_tm_schd_info_init(hdev);
1445         if (ret)
1446                 return ret;
1447
1448         return hclge_tm_init_hw(hdev, true);
1449 }
1450
1451 int hclge_tm_vport_map_update(struct hclge_dev *hdev)
1452 {
1453         struct hclge_vport *vport = hdev->vport;
1454         int ret;
1455
1456         hclge_tm_vport_tc_info_update(vport);
1457
1458         ret = hclge_vport_q_to_qs_map(hdev, vport);
1459         if (ret)
1460                 return ret;
1461
1462         if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
1463                 return 0;
1464
1465         return hclge_tm_bp_setup(hdev);
1466 }