drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c (Linux-libre 5.4.47-gnu)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
 */

#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
#include "cudbg_zlib.h"

static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
	{ CUDBG_EDC0, cudbg_collect_edc0_meminfo },
	{ CUDBG_EDC1, cudbg_collect_edc1_meminfo },
	{ CUDBG_MC0, cudbg_collect_mc0_meminfo },
	{ CUDBG_MC1, cudbg_collect_mc1_meminfo },
	{ CUDBG_HMA, cudbg_collect_hma_meminfo },
};

static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
	{ CUDBG_MBOX_LOG, cudbg_collect_mbox_log },
	{ CUDBG_QDESC, cudbg_collect_qdesc },
	{ CUDBG_DEV_LOG, cudbg_collect_fw_devlog },
	{ CUDBG_REG_DUMP, cudbg_collect_reg_dump },
	{ CUDBG_CIM_LA, cudbg_collect_cim_la },
	{ CUDBG_CIM_MA_LA, cudbg_collect_cim_ma_la },
	{ CUDBG_CIM_QCFG, cudbg_collect_cim_qcfg },
	{ CUDBG_CIM_IBQ_TP0, cudbg_collect_cim_ibq_tp0 },
	{ CUDBG_CIM_IBQ_TP1, cudbg_collect_cim_ibq_tp1 },
	{ CUDBG_CIM_IBQ_ULP, cudbg_collect_cim_ibq_ulp },
	{ CUDBG_CIM_IBQ_SGE0, cudbg_collect_cim_ibq_sge0 },
	{ CUDBG_CIM_IBQ_SGE1, cudbg_collect_cim_ibq_sge1 },
	{ CUDBG_CIM_IBQ_NCSI, cudbg_collect_cim_ibq_ncsi },
	{ CUDBG_CIM_OBQ_ULP0, cudbg_collect_cim_obq_ulp0 },
	{ CUDBG_CIM_OBQ_ULP1, cudbg_collect_cim_obq_ulp1 },
	{ CUDBG_CIM_OBQ_ULP2, cudbg_collect_cim_obq_ulp2 },
	{ CUDBG_CIM_OBQ_ULP3, cudbg_collect_cim_obq_ulp3 },
	{ CUDBG_CIM_OBQ_SGE, cudbg_collect_cim_obq_sge },
	{ CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi },
	{ CUDBG_RSS, cudbg_collect_rss },
	{ CUDBG_RSS_VF_CONF, cudbg_collect_rss_vf_config },
	{ CUDBG_PATH_MTU, cudbg_collect_path_mtu },
	{ CUDBG_PM_STATS, cudbg_collect_pm_stats },
	{ CUDBG_HW_SCHED, cudbg_collect_hw_sched },
	{ CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect },
	{ CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
	{ CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
	{ CUDBG_TP_LA, cudbg_collect_tp_la },
	{ CUDBG_MEMINFO, cudbg_collect_meminfo },
	{ CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
	{ CUDBG_CLK, cudbg_collect_clk_info },
	{ CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
	{ CUDBG_CIM_OBQ_RXQ1, cudbg_collect_obq_sge_rx_q1 },
	{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
	{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
	{ CUDBG_TID_INFO, cudbg_collect_tid },
	{ CUDBG_PCIE_CONFIG, cudbg_collect_pcie_config },
	{ CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
	{ CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
	{ CUDBG_VPD_DATA, cudbg_collect_vpd_data },
	{ CUDBG_LE_TCAM, cudbg_collect_le_tcam },
	{ CUDBG_CCTRL, cudbg_collect_cctrl },
	{ CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect },
	{ CUDBG_ULPTX_LA, cudbg_collect_ulptx_la },
	{ CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect },
	{ CUDBG_PBT_TABLE, cudbg_collect_pbt_tables },
	{ CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect },
};

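/* Return the number of bytes needed to hold the dump of a single debug
 * entity, based on the adapter's chip type and current configuration.
 */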
static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
{
	struct cudbg_tcam tcam_region = { 0 };
	u32 value, n = 0, len = 0;

	switch (entity) {
	case CUDBG_REG_DUMP:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T4:
			len = T4_REGMAP_SIZE;
			break;
		case CHELSIO_T5:
		case CHELSIO_T6:
			len = T5_REGMAP_SIZE;
			break;
		default:
			break;
		}
		break;
	case CUDBG_DEV_LOG:
		len = adap->params.devlog.size;
		break;
	case CUDBG_CIM_LA:
		if (is_t6(adap->params.chip)) {
			len = adap->params.cim_la_size / 10 + 1;
			len *= 10 * sizeof(u32);
		} else {
			len = adap->params.cim_la_size / 8;
			len *= 8 * sizeof(u32);
		}
		len += sizeof(u32); /* for reading CIM LA configuration */
		break;
	case CUDBG_CIM_MA_LA:
		len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
		break;
	case CUDBG_CIM_QCFG:
		len = sizeof(struct cudbg_cim_qcfg);
		break;
	case CUDBG_CIM_IBQ_TP0:
	case CUDBG_CIM_IBQ_TP1:
	case CUDBG_CIM_IBQ_ULP:
	case CUDBG_CIM_IBQ_SGE0:
	case CUDBG_CIM_IBQ_SGE1:
	case CUDBG_CIM_IBQ_NCSI:
		len = CIM_IBQ_SIZE * 4 * sizeof(u32);
		break;
	case CUDBG_CIM_OBQ_ULP0:
		len = cudbg_cim_obq_size(adap, 0);
		break;
	case CUDBG_CIM_OBQ_ULP1:
		len = cudbg_cim_obq_size(adap, 1);
		break;
	case CUDBG_CIM_OBQ_ULP2:
		len = cudbg_cim_obq_size(adap, 2);
		break;
	case CUDBG_CIM_OBQ_ULP3:
		len = cudbg_cim_obq_size(adap, 3);
		break;
	case CUDBG_CIM_OBQ_SGE:
		len = cudbg_cim_obq_size(adap, 4);
		break;
	case CUDBG_CIM_OBQ_NCSI:
		len = cudbg_cim_obq_size(adap, 5);
		break;
	case CUDBG_CIM_OBQ_RXQ0:
		len = cudbg_cim_obq_size(adap, 6);
		break;
	case CUDBG_CIM_OBQ_RXQ1:
		len = cudbg_cim_obq_size(adap, 7);
		break;
	case CUDBG_EDC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
			len = EDRAM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_EDC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
			len = EDRAM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
			len = EXT_MEM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_RSS:
		len = t4_chip_rss_size(adap) * sizeof(u16);
		break;
	case CUDBG_RSS_VF_CONF:
		len = adap->params.arch.vfcount *
		      sizeof(struct cudbg_rss_vf_conf);
		break;
	case CUDBG_PATH_MTU:
		len = NMTUS * sizeof(u16);
		break;
	case CUDBG_PM_STATS:
		len = sizeof(struct cudbg_pm_stats);
		break;
	case CUDBG_HW_SCHED:
		len = sizeof(struct cudbg_hw_sched);
		break;
	case CUDBG_TP_INDIRECT:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T5:
			n = sizeof(t5_tp_pio_array) +
			    sizeof(t5_tp_tm_pio_array) +
			    sizeof(t5_tp_mib_index_array);
			break;
		case CHELSIO_T6:
			n = sizeof(t6_tp_pio_array) +
			    sizeof(t6_tp_tm_pio_array) +
			    sizeof(t6_tp_mib_index_array);
			break;
		default:
			break;
		}
		n = n / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_SGE_INDIRECT:
		len = sizeof(struct ireg_buf) * 2 +
		      sizeof(struct sge_qbase_reg_field);
		break;
	case CUDBG_ULPRX_LA:
		len = sizeof(struct cudbg_ulprx_la);
		break;
	case CUDBG_TP_LA:
		len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
		break;
	case CUDBG_MEMINFO:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_meminfo);
		break;
	case CUDBG_CIM_PIF_LA:
		len = sizeof(struct cudbg_cim_pif_la);
		len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
		break;
	case CUDBG_CLK:
		len = sizeof(struct cudbg_clk_info);
		break;
	case CUDBG_PCIE_INDIRECT:
		n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_PM_INDIRECT:
		n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_TID_INFO:
		len = sizeof(struct cudbg_tid_info_region_rev1);
		break;
	case CUDBG_PCIE_CONFIG:
		len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
		break;
	case CUDBG_DUMP_CONTEXT:
		len = cudbg_dump_context_size(adap);
		break;
	case CUDBG_MPS_TCAM:
		len = sizeof(struct cudbg_mps_tcam) *
		      adap->params.arch.mps_tcam_size;
		break;
	case CUDBG_VPD_DATA:
		len = sizeof(struct cudbg_vpd_data);
		break;
	case CUDBG_LE_TCAM:
		cudbg_fill_le_tcam_info(adap, &tcam_region);
		len = sizeof(struct cudbg_tcam) +
		      sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
		break;
	case CUDBG_CCTRL:
		len = sizeof(u16) * NMTUS * NCCTRL_WIN;
		break;
	case CUDBG_MA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_ma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n * 2;
		}
		break;
	case CUDBG_ULPTX_LA:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_ulptx_la);
		break;
	case CUDBG_UP_CIM_INDIRECT:
		n = 0;
		if (is_t5(adap->params.chip))
			n = sizeof(t5_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		else if (is_t6(adap->params.chip))
			n = sizeof(t6_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_PBT_TABLE:
		len = sizeof(struct cudbg_pbt_tables);
		break;
	case CUDBG_MBOX_LOG:
		len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
		break;
	case CUDBG_HMA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_hma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n;
		}
		break;
	case CUDBG_HMA:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & HMA_MUX_F) {
			/* In T6, there's no MC1.  So, HMA shares MC1
			 * address space.
			 */
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_QDESC:
		cudbg_fill_qdesc_num_and_size(adap, NULL, &len);
		break;
	default:
		break;
	}

	return len;
}

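/* Compute the total dump size for the entity groups selected by @flag.
 * If zlib compression is available, a smaller destination buffer is
 * sufficient, so the length is capped at CUDBG_DUMP_BUFF_SIZE.
 */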
u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
{
	u32 i, entity;
	u32 len = 0;
	u32 wsize;

	if (flag & CXGB4_ETH_DUMP_HW) {
		for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) {
			entity = cxgb4_collect_hw_dump[i].entity;
			len += cxgb4_get_entity_length(adap, entity);
		}
	}

	if (flag & CXGB4_ETH_DUMP_MEM) {
		for (i = 0; i < ARRAY_SIZE(cxgb4_collect_mem_dump); i++) {
			entity = cxgb4_collect_mem_dump[i].entity;
			len += cxgb4_get_entity_length(adap, entity);
		}
	}

	/* If compression is enabled, a smaller destination buffer is enough */
	wsize = cudbg_get_workspace_size();
	if (wsize && len > CUDBG_DUMP_BUFF_SIZE)
		len = CUDBG_DUMP_BUFF_SIZE;

	return len;
}

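/* Run every collector in @e_arr, filling in the per-entity header in @buf.
 * A failed collector gets a zero-sized entry and its error is recorded in
 * that header; collection then continues with the next entity.  The bytes
 * collected by all entities are added to *tot_size.
 */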
static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
				       struct cudbg_buffer *dbg_buff,
				       const struct cxgb4_collect_entity *e_arr,
				       u32 arr_size, void *buf, u32 *tot_size)
{
	struct cudbg_error cudbg_err = { 0 };
	struct cudbg_entity_hdr *entity_hdr;
	u32 i, total_size = 0;
	int ret;

	for (i = 0; i < arr_size; i++) {
		const struct cxgb4_collect_entity *e = &e_arr[i];

		entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
		entity_hdr->entity_type = e->entity;
		entity_hdr->start_offset = dbg_buff->offset;
		memset(&cudbg_err, 0, sizeof(struct cudbg_error));
		ret = e->collect_cb(pdbg_init, dbg_buff, &cudbg_err);
		if (ret) {
			entity_hdr->size = 0;
			dbg_buff->offset = entity_hdr->start_offset;
		} else {
			cudbg_align_debug_buffer(dbg_buff, entity_hdr);
		}

		/* Log error and continue with next entity */
		if (cudbg_err.sys_err)
			ret = CUDBG_SYSTEM_ERROR;

		entity_hdr->hdr_flags = ret;
		entity_hdr->sys_err = cudbg_err.sys_err;
		entity_hdr->sys_warn = cudbg_err.sys_warn;
		total_size += entity_hdr->size;
	}

	*tot_size += total_size;
}

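/* Allocate a single buffer that serves both as the compression destination
 * and as the zlib workspace for the dump.
 */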
static int cudbg_alloc_compress_buff(struct cudbg_init *pdbg_init)
{
	u32 workspace_size;

	workspace_size = cudbg_get_workspace_size();
	pdbg_init->compress_buff = vzalloc(CUDBG_COMPRESS_BUFF_SIZE +
					   workspace_size);
	if (!pdbg_init->compress_buff)
		return -ENOMEM;

	pdbg_init->compress_buff_size = CUDBG_COMPRESS_BUFF_SIZE;
	pdbg_init->workspace = (u8 *)pdbg_init->compress_buff +
			       CUDBG_COMPRESS_BUFF_SIZE - workspace_size;
	return 0;
}

static void cudbg_free_compress_buff(struct cudbg_init *pdbg_init)
{
	if (pdbg_init->compress_buff)
		vfree(pdbg_init->compress_buff);
}

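/* Collect a debug dump into @buf: a cudbg header, one header per entity,
 * then the collected entity data, compressed with zlib when available.
 * On return, *buf_size holds the dump length, or the full buffer size if
 * compression was used.
 */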
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
			u32 flag)
{
	struct cudbg_buffer dbg_buff = { 0 };
	u32 size, min_size, total_size = 0;
	struct cudbg_init cudbg_init;
	struct cudbg_hdr *cudbg_hdr;
	int rc;

	size = *buf_size;

	memset(&cudbg_init, 0, sizeof(struct cudbg_init));
	cudbg_init.adap = adap;
	cudbg_init.outbuf = buf;
	cudbg_init.outbuf_size = size;

	dbg_buff.data = buf;
	dbg_buff.size = size;
	dbg_buff.offset = 0;

	cudbg_hdr = (struct cudbg_hdr *)buf;
	cudbg_hdr->signature = CUDBG_SIGNATURE;
	cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
	cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
	cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
	cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
	cudbg_hdr->chip_ver = adap->params.chip;
	cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;

	min_size = sizeof(struct cudbg_hdr) +
		   sizeof(struct cudbg_entity_hdr) *
		   cudbg_hdr->max_entities;
	if (size < min_size)
		return -ENOMEM;

	rc = cudbg_get_workspace_size();
	if (rc) {
		/* Zlib available.  So, use zlib deflate */
		cudbg_init.compress_type = CUDBG_COMPRESSION_ZLIB;
		rc = cudbg_alloc_compress_buff(&cudbg_init);
		if (rc) {
			/* Ignore error and continue without compression. */
			dev_warn(adap->pdev_dev,
				 "Fail allocating compression buffer ret: %d.  Continuing without compression.\n",
				 rc);
			cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
			rc = 0;
		}
	} else {
		cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
	}

	cudbg_hdr->compress_type = cudbg_init.compress_type;
	dbg_buff.offset += min_size;
	total_size = dbg_buff.offset;

	if (flag & CXGB4_ETH_DUMP_HW)
		cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
					   cxgb4_collect_hw_dump,
					   ARRAY_SIZE(cxgb4_collect_hw_dump),
					   buf,
					   &total_size);

	if (flag & CXGB4_ETH_DUMP_MEM)
		cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
					   cxgb4_collect_mem_dump,
					   ARRAY_SIZE(cxgb4_collect_mem_dump),
					   buf,
					   &total_size);

	cudbg_free_compress_buff(&cudbg_init);
	cudbg_hdr->data_len = total_size;
	if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE)
		*buf_size = size;
	else
		*buf_size = total_size;
	return 0;
}

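/* Initialize the ethtool dump state: no entities selected, zero length,
 * and the dump version taken from the running firmware version.
 */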
void cxgb4_init_ethtool_dump(struct adapter *adapter)
{
	adapter->eth_dump.flag = CXGB4_ETH_DUMP_NONE;
	adapter->eth_dump.version = adapter->params.fw_vers;
	adapter->eth_dump.len = 0;
}

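/* vmcoredd callback: collect a full hardware and memory dump into the
 * buffer provided by the vmcore device dump infrastructure.
 */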
static int cxgb4_cudbg_vmcoredd_collect(struct vmcoredd_data *data, void *buf)
{
	struct adapter *adap = container_of(data, struct adapter, vmcoredd);
	u32 len = data->size;

	return cxgb4_cudbg_collect(adap, buf, &len, CXGB4_ETH_DUMP_ALL);
}

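/* Register this adapter's debug dump with the vmcore device dump framework
 * so it is captured alongside the crash dump.
 */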
int cxgb4_cudbg_vmcore_add_dump(struct adapter *adap)
{
	struct vmcoredd_data *data = &adap->vmcoredd;
	u32 len;

	len = sizeof(struct cudbg_hdr) +
	      sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
	len += CUDBG_DUMP_BUFF_SIZE;

	data->size = len;
	snprintf(data->dump_name, sizeof(data->dump_name), "%s_%s",
		 cxgb4_driver_name, adap->name);
	data->vmcoredd_callback = cxgb4_cudbg_vmcoredd_collect;

	return vmcore_add_device_dump(data);
}