/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_STATS_H__
#define __MLX5_EN_STATS_H__

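/*
 * Counter read helpers: each macro dereferences a single counter located
 * dsc[i].offset bytes into the stats structure pointed to by ptr, where dsc
 * is an array of struct counter_desc (defined below). The _CPU variants read
 * native-endian software counters; the _BE variants byte-swap big-endian
 * values returned by firmware/hardware queries.
 */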
#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
	(*(u64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
	be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
	(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))

#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XSKRQ_STAT(type, fld) "rx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XSKSQ_STAT(type, fld) "tx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)

struct counter_desc {
	char format[ETH_GSTRING_LEN];
	size_t offset; /* Byte offset */
};

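/*
 * Illustrative sketch (not part of this header): the stats code typically
 * pairs an array of counter descriptors built with the MLX5E_DECLARE_*_STAT()
 * macros with the MLX5E_READ_CTR* helpers above. The array and loop below are
 * a hypothetical example of that pattern; the names are illustrative only:
 *
 *	static const struct counter_desc example_sw_stats_desc[] = {
 *		{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
 *		{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
 *	};
 *
 *	for (i = 0; i < ARRAY_SIZE(example_sw_stats_desc); i++)
 *		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
 *						   example_sw_stats_desc, i);
 */
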
struct mlx5e_sw_stats {
	u64 tx_tso_inner_packets;
	u64 tx_tso_inner_bytes;
	u64 tx_added_vlan_packets;
	u64 rx_removed_vlan_packets;
	u64 rx_csum_unnecessary;
	u64 rx_csum_complete_tail;
	u64 rx_csum_complete_tail_slow;
	u64 rx_csum_unnecessary_inner;
	u64 tx_csum_partial_inner;
	u64 rx_mpwqe_filler_cqes;
	u64 rx_mpwqe_filler_strides;
	u64 rx_oversize_pkts_sw_drop;
	u64 rx_buff_alloc_err;
	u64 rx_cqe_compress_blks;
	u64 rx_cqe_compress_pkts;
#ifdef CONFIG_MLX5_EN_TLS
	u64 tx_tls_encrypted_packets;
	u64 tx_tls_encrypted_bytes;
	u64 tx_tls_resync_bytes;
	u64 tx_tls_drop_no_sync_data;
	u64 tx_tls_drop_bypass_req;
	u64 tx_tls_dump_packets;
	u64 tx_tls_dump_bytes;
#endif
	u64 rx_xsk_csum_complete;
	u64 rx_xsk_csum_unnecessary;
	u64 rx_xsk_csum_unnecessary_inner;
	u64 rx_xsk_csum_none;
	u64 rx_xsk_removed_vlan_packets;
	u64 rx_xsk_xdp_redirect;
	u64 rx_xsk_mpwqe_filler_cqes;
	u64 rx_xsk_mpwqe_filler_strides;
	u64 rx_xsk_oversize_pkts_sw_drop;
	u64 rx_xsk_buff_alloc_err;
	u64 rx_xsk_cqe_compress_blks;
	u64 rx_xsk_cqe_compress_pkts;
	u64 rx_xsk_congst_umr;
};

struct mlx5e_qcounter_stats {
	u32 rx_out_of_buffer;
	u32 rx_if_down_packets;
};

struct mlx5e_vnic_env_stats {
	__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};

#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
						vstats->query_vport_out, c)

struct mlx5e_vport_stats {
	__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
};

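/*
 * The vnic_env and vport structures above hold the raw output mailboxes of
 * the corresponding firmware queries; VPORT_COUNTER_GET() (and the PPORT/PCIE
 * getters below) extract individual fields from those buffers with the
 * MLX5_GET()/MLX5_GET64() accessors.
 */
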
#define PPORT_802_3_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
		   counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
		   counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
		   counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
		   counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
	MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
		   counter_set.eth_per_prio_grp_data_layout.c##_high)
#define NUM_PPORT_PRIO 8
#define PPORT_ETH_EXT_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \
		   counter_set.eth_extended_cntrs_grp_data_layout.c##_high)

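/*
 * The PPCNT counter sets expose each 64-bit counter as a pair of adjacent
 * 32-bit <name>_high/<name>_low fields; the c##_high token pasting above lets
 * MLX5_GET64() read the combined 64-bit value starting at the _high word.
 */
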
struct mlx5e_pport_stats {
	__be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
};

#define PCIE_PERF_GET(pcie_stats, c) \
	MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		 counter_set.pcie_perf_cntrs_grp_data_layout.c)

#define PCIE_PERF_GET64(pcie_stats, c) \
	MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		   counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)

struct mlx5e_pcie_stats {
	__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};

struct mlx5e_rq_stats {
	u64 csum_complete_tail;
	u64 csum_complete_tail_slow;
	u64 csum_unnecessary;
	u64 csum_unnecessary_inner;
	u64 removed_vlan_packets;
	u64 mpwqe_filler_cqes;
	u64 mpwqe_filler_strides;
	u64 oversize_pkts_sw_drop;
	u64 cqe_compress_blks;
	u64 cqe_compress_pkts;
};

struct mlx5e_sq_stats {
	/* commonly accessed in data path */
	u64 tso_inner_packets;
	u64 csum_partial_inner;
	u64 added_vlan_packets;
#ifdef CONFIG_MLX5_EN_TLS
	u64 tls_encrypted_packets;
	u64 tls_encrypted_bytes;
	u64 tls_resync_bytes;
	u64 tls_drop_no_sync_data;
	u64 tls_drop_bypass_req;
	u64 tls_dump_packets;
#endif
	/* less likely accessed in data path */
	/* dirtied @completion */
	u64 cqes ____cacheline_aligned_in_smp;
};

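/*
 * In mlx5e_sq_stats above and mlx5e_xdpsq_stats below, counters updated on
 * the completion path follow the "dirtied @completion" marker and start a new
 * cacheline (____cacheline_aligned_in_smp) so that completion-side writes do
 * not false-share with the hot transmit-side counters.
 */
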
struct mlx5e_xdpsq_stats {
	/* dirtied @completion */
	u64 cqes ____cacheline_aligned_in_smp;
};

struct mlx5e_ch_stats {
	u64 events;
};

struct mlx5e_stats {
	struct mlx5e_sw_stats sw;
	struct mlx5e_qcounter_stats qcnt;
	struct mlx5e_vnic_env_stats vnic;
	struct mlx5e_vport_stats vport;
	struct mlx5e_pport_stats pport;
	struct rtnl_link_stats64 vf_vport;
	struct mlx5e_pcie_stats pcie;
};

enum {
	MLX5E_NDO_UPDATE_STATS = BIT(0x1),
};

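/*
 * A stats group's update_stats_mask is tested against flags such as
 * MLX5E_NDO_UPDATE_STATS so that only the relevant groups are refreshed on a
 * given path (e.g. the ndo_get_stats64 callback) instead of re-querying every
 * counter set.
 */
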
struct mlx5e_priv;

struct mlx5e_stats_grp {
	u16 update_stats_mask;
	int (*get_num_stats)(struct mlx5e_priv *priv);
	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
	void (*update_stats)(struct mlx5e_priv *priv);
};

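/*
 * Hypothetical sketch of a stats group instance (real groups live in the
 * stats implementation file); the names below are illustrative only:
 *
 *	static const struct mlx5e_stats_grp example_stats_grp = {
 *		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
 *		.get_num_stats = example_get_num_stats,
 *		.fill_strings  = example_fill_strings,
 *		.fill_stats    = example_fill_stats,
 *		.update_stats  = example_update_stats,
 *	};
 *
 * ethtool code walks mlx5e_stats_grps[0..mlx5e_num_stats_grps) calling
 * get_num_stats()/fill_strings()/fill_stats(), while update_stats() is
 * invoked according to update_stats_mask.
 */
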
extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
extern const int mlx5e_num_stats_grps;

void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv);

#endif /* __MLX5_EN_STATS_H__ */