ath79/mikrotik: use routerbootpart partitions
[oweals/openwrt.git] / target / linux / layerscape / patches-5.4 / 701-net-0337-enetc-add-support-tsn-capabilities-qbv-qci-qbu-cbs.patch
1 From bf3f81f3773cc9f6b273d769aca96512780c6189 Mon Sep 17 00:00:00 2001
2 From: Po Liu <Po.Liu@nxp.com>
3 Date: Tue, 3 Dec 2019 16:52:57 +0800
4 Subject: [PATCH] enetc: add support tsn capabilities qbv/qci/qbu/cbs
5
6 Support Qbv/Qci/Qbu/Credit Based Shaper etc.
7 This patch uses the generic netlink adaptation layer driver net/tsn/*
8 and the include/net/tsn.h interface, loaded from user space. User space
9 refers to include/uapi/linux/tsn.h.
10
11 Signed-off-by: Po Liu <Po.Liu@nxp.com>
12 ---
13  drivers/net/ethernet/freescale/enetc/Kconfig       |   10 +
14  drivers/net/ethernet/freescale/enetc/Makefile      |    1 +
15  drivers/net/ethernet/freescale/enetc/enetc.c       |   13 +-
16  drivers/net/ethernet/freescale/enetc/enetc.h       |   38 +
17  .../net/ethernet/freescale/enetc/enetc_ethtool.c   |   59 +
18  drivers/net/ethernet/freescale/enetc/enetc_hw.h    |  438 ++++-
19  drivers/net/ethernet/freescale/enetc/enetc_pf.c    |   15 +-
20  drivers/net/ethernet/freescale/enetc/enetc_tsn.c   | 2049 ++++++++++++++++++++
21  8 files changed, 2614 insertions(+), 9 deletions(-)
22  create mode 100644 drivers/net/ethernet/freescale/enetc/enetc_tsn.c
23
24 --- a/drivers/net/ethernet/freescale/enetc/Kconfig
25 +++ b/drivers/net/ethernet/freescale/enetc/Kconfig
26 @@ -60,3 +60,13 @@ config FSL_ENETC_QOS
27           enable/disable from user space via Qos commands(tc). In the kernel
28           side, it can be loaded by Qos driver. Currently, it is only support
29           taprio(802.1Qbv) and Credit Based Shaper(802.1Qbu).
30 +
31 +config ENETC_TSN
32 +       bool "TSN Support for NXP ENETC driver"
33 +       default n
34 +       depends on TSN && FSL_ENETC
35 +       help
36 +         This driver supports TSN on the Freescale ENETC driver. It provides
37 +         an interface to configure the TSN capabilities of ENETC. The interface
38 +         links to net/tsn/* and include/net/tsn.h. User space refers to
39 +         include/uapi/linux/tsn.h.
40 --- a/drivers/net/ethernet/freescale/enetc/Makefile
41 +++ b/drivers/net/ethernet/freescale/enetc/Makefile
42 @@ -6,6 +6,7 @@ obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
43  fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs)
44  fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
45  fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
46 +fsl-enetc-$(CONFIG_ENETC_TSN) += enetc_tsn.o
47  
48  obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
49  fsl-enetc-vf-y := enetc_vf.o $(common-objs)
50 --- a/drivers/net/ethernet/freescale/enetc/enetc.c
51 +++ b/drivers/net/ethernet/freescale/enetc/enetc.c
52 @@ -145,7 +145,8 @@ static int enetc_map_tx_buffs(struct ene
53         do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
54                     (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
55         tx_swbd->do_tstamp = do_tstamp;
56 -       tx_swbd->check_wb = tx_swbd->do_tstamp;
57 +       tx_swbd->qbv_en = !!(active_offloads & ENETC_F_QBV);
58 +       tx_swbd->check_wb = tx_swbd->do_tstamp || tx_swbd->qbv_en;
59  
60         if (do_vlan || do_tstamp)
61                 flags |= ENETC_TXBD_FLAGS_EX;
62 @@ -342,7 +343,7 @@ static void enetc_tstamp_tx(struct sk_bu
63  static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
64  {
65         struct net_device *ndev = tx_ring->ndev;
66 -       int tx_frm_cnt = 0, tx_byte_cnt = 0;
67 +       int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0;
68         struct enetc_tx_swbd *tx_swbd;
69         int i, bds_to_clean;
70         bool do_tstamp;
71 @@ -372,6 +373,10 @@ static bool enetc_clean_tx_ring(struct e
72                                                     &tstamp);
73                                 do_tstamp = true;
74                         }
75 +
76 +                       if (tx_swbd->qbv_en &&
77 +                           txbd->wb.status & ENETC_TXBD_STATS_WIN)
78 +                               tx_win_drop++;
79                 }
80  
81                 if (likely(tx_swbd->dma))
82 @@ -415,6 +420,7 @@ static bool enetc_clean_tx_ring(struct e
83         tx_ring->next_to_clean = i;
84         tx_ring->stats.packets += tx_frm_cnt;
85         tx_ring->stats.bytes += tx_byte_cnt;
86 +       tx_ring->stats.win_drop += tx_win_drop;
87  
88         if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
89                      __netif_subqueue_stopped(ndev, tx_ring->index) &&
90 @@ -778,6 +784,9 @@ void enetc_get_si_caps(struct enetc_si *
91  
92         if (val & ENETC_SIPCAPR0_QBV)
93                 si->hw_features |= ENETC_SI_F_QBV;
94 +
95 +       if (val & ENETC_SIPCAPR0_QBU)
96 +               si->hw_features |= ENETC_SI_F_QBU;
97  }
98  
99  static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
100 --- a/drivers/net/ethernet/freescale/enetc/enetc.h
101 +++ b/drivers/net/ethernet/freescale/enetc/enetc.h
102 @@ -10,6 +10,7 @@
103  #include <linux/ethtool.h>
104  #include <linux/if_vlan.h>
105  #include <linux/phy.h>
106 +#include <net/tsn.h>
107  
108  #include "enetc_hw.h"
109  
110 @@ -24,6 +25,7 @@ struct enetc_tx_swbd {
111         u8 is_dma_page:1;
112         u8 check_wb:1;
113         u8 do_tstamp:1;
114 +       u8 qbv_en:1;
115  };
116  
117  #define ENETC_RX_MAXFRM_SIZE   ENETC_MAC_MAXFRM_SIZE
118 @@ -42,6 +44,7 @@ struct enetc_ring_stats {
119         unsigned int packets;
120         unsigned int bytes;
121         unsigned int rx_alloc_errs;
122 +       unsigned int win_drop;
123  };
124  
125  #define ENETC_BDR_DEFAULT_SIZE 1024
126 @@ -111,6 +114,28 @@ struct enetc_msg_swbd {
127         int size;
128  };
129  
130 +#ifdef CONFIG_ENETC_TSN
131 +/* Credit-Based Shaper parameters */
132 +struct cbs {
133 +       u8 tc;
134 +       bool enable;
135 +       u8 bw;
136 +       u32 hi_credit;
137 +       u32 lo_credit;
138 +       u32 idle_slope;
139 +       u32 send_slope;
140 +       u32 tc_max_sized_frame;
141 +       u32 max_interfrence_size;
142 +};
143 +
144 +struct enetc_cbs {
145 +       u32 port_transmit_rate;
146 +       u32 port_max_size_frame;
147 +       u8 tc_nums;
148 +       struct cbs cbs[0];
149 +};
150 +#endif
151 +
152  #define ENETC_REV1     0x1
153  enum enetc_errata {
154         ENETC_ERR_TXCSUM        = BIT(0),
155 @@ -119,6 +144,7 @@ enum enetc_errata {
156  };
157  
158  #define ENETC_SI_F_QBV BIT(0)
159 +#define ENETC_SI_F_QBU BIT(1)
160  
161  /* PCI IEP device data */
162  struct enetc_si {
163 @@ -136,6 +162,10 @@ struct enetc_si {
164         int num_rss; /* number of RSS buckets */
165         unsigned short pad;
166         int hw_features;
167 +#ifdef CONFIG_ENETC_TSN
168 +       struct enetc_cbs *ecbs;
169 +#endif
170 +
171  };
172  
173  #define ENETC_SI_ALIGN 32
174 @@ -177,6 +207,7 @@ enum enetc_active_offloads {
175         ENETC_F_RX_TSTAMP       = BIT(0),
176         ENETC_F_TX_TSTAMP       = BIT(1),
177         ENETC_F_QBV             = BIT(2),
178 +       ENETC_F_QBU             = BIT(3),
179  };
180  
181  struct enetc_ndev_priv {
182 @@ -261,3 +292,10 @@ int enetc_setup_tc_cbs(struct net_device
183  #define enetc_sched_speed_set(ndev) (void)0
184  #define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
185  #endif
186 +#ifdef CONFIG_ENETC_TSN
187 +void enetc_tsn_pf_init(struct net_device *netdev, struct pci_dev *pdev);
188 +void enetc_tsn_pf_deinit(struct net_device *netdev);
189 +#else
190 +#define enetc_tsn_pf_init(netdev, pdev) (void)0
191 +#define enetc_tsn_pf_deinit(netdev) (void)0
192 +#endif
193 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
194 +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
195 @@ -183,6 +183,21 @@ static const struct {
196         { ENETC_PICDR(3),   "ICM DR3 discarded frames" },
197  };
198  
199 +static const struct {
200 +       int reg;
201 +       char name[ETH_GSTRING_LEN];
202 +} enetc_pmac_counters[] = {
203 +       { ENETC_PM1_RFRM,   "PMAC rx frames" },
204 +       { ENETC_PM1_RPKT,   "PMAC rx packets" },
205 +       { ENETC_PM1_RDRP,   "PMAC rx dropped packets" },
206 +       { ENETC_PM1_RFRG,   "PMAC rx fragment packets" },
207 +       { ENETC_PM1_TFRM,   "PMAC tx frames" },
208 +       { ENETC_PM1_TERR,   "PMAC tx error frames" },
209 +       { ENETC_PM1_TPKT,   "PMAC tx packets" },
210 +       { ENETC_MAC_MERGE_MMFCRXR,   "MAC merge fragment rx counter" },
211 +       { ENETC_MAC_MERGE_MMFCTXR,   "MAC merge fragment tx counter"},
212 +};
213 +
214  static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
215         "Rx ring %2d frames",
216         "Rx ring %2d alloc errors",
217 @@ -192,6 +207,10 @@ static const char tx_ring_stats[][ETH_GS
218         "Tx ring %2d frames",
219  };
220  
221 +static const char tx_windrop_stats[][ETH_GSTRING_LEN] = {
222 +       "Tx window drop %2d frames",
223 +};
224 +
225  static int enetc_get_sset_count(struct net_device *ndev, int sset)
226  {
227         struct enetc_ndev_priv *priv = netdev_priv(ndev);
228 @@ -209,6 +228,12 @@ static int enetc_get_sset_count(struct n
229  
230         len += ARRAY_SIZE(enetc_port_counters);
231  
232 +       if (priv->active_offloads & ENETC_F_QBU)
233 +               len += ARRAY_SIZE(enetc_pmac_counters);
234 +
235 +       if (priv->active_offloads & ENETC_F_QBV)
236 +               len += ARRAY_SIZE(tx_windrop_stats) * priv->num_tx_rings;
237 +
238         return len;
239  }
240  
241 @@ -247,6 +272,28 @@ static void enetc_get_strings(struct net
242                                 ETH_GSTRING_LEN);
243                         p += ETH_GSTRING_LEN;
244                 }
245 +
246 +               if (!(priv->active_offloads & ENETC_F_QBU))
247 +                       break;
248 +
249 +               for (i = 0; i < ARRAY_SIZE(enetc_pmac_counters); i++) {
250 +                       strlcpy(p, enetc_pmac_counters[i].name,
251 +                               ETH_GSTRING_LEN);
252 +                       p += ETH_GSTRING_LEN;
253 +               }
254 +
255 +               if (!((priv->active_offloads & ENETC_F_QBV)))
256 +                       break;
257 +
258 +               for (i = 0; i < priv->num_tx_rings; i++) {
259 +                       for (j = 0; j < ARRAY_SIZE(tx_windrop_stats); j++) {
260 +                               snprintf(p, ETH_GSTRING_LEN,
261 +                                        tx_windrop_stats[j],
262 +                                        i);
263 +                               p += ETH_GSTRING_LEN;
264 +                       }
265 +               }
266 +
267                 break;
268         }
269  }
270 @@ -274,6 +321,18 @@ static void enetc_get_ethtool_stats(stru
271  
272         for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
273                 data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
274 +
275 +       if (!(priv->active_offloads & ENETC_F_QBU))
276 +               return;
277 +
278 +       for (i = 0; i < ARRAY_SIZE(enetc_pmac_counters); i++)
279 +               data[o++] = enetc_port_rd(hw, enetc_pmac_counters[i].reg);
280 +
281 +       if (!((priv->active_offloads & ENETC_F_QBV)))
282 +               return;
283 +
284 +       for (i = 0; i < priv->num_tx_rings; i++)
285 +               data[o++] = priv->tx_ring[i]->stats.win_drop;
286  }
287  
288  #define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
289 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
290 +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
291 @@ -19,6 +19,7 @@
292  #define ENETC_SICTR1   0x1c
293  #define ENETC_SIPCAPR0 0x20
294  #define ENETC_SIPCAPR0_QBV     BIT(4)
295 +#define ENETC_SIPCAPR0_QBU     BIT(3)
296  #define ENETC_SIPCAPR0_RSS     BIT(8)
297  #define ENETC_SIPCAPR1 0x24
298  #define ENETC_SITGTGR  0x30
299 @@ -241,10 +242,20 @@ enum enetc_bdr_type {TX, RX};
300  #define ENETC_PCS_IF_MODE_SGMII_AN     0x0003
301  
302  #define ENETC_PM0_IF_MODE      0x8300
303 +#define ENETC_PM1_IF_MODE       0x9300
304  #define ENETC_PMO_IFM_RG       BIT(2)
305  #define ENETC_PM0_IFM_RLP      (BIT(5) | BIT(11))
306  #define ENETC_PM0_IFM_RGAUTO   (BIT(15) | ENETC_PMO_IFM_RG | BIT(1))
307  #define ENETC_PM0_IFM_XGMII    BIT(12)
308 +#define ENETC_PSIDCAPR         0x1b08
309 +#define ENETC_PSIDCAPR_MSK     GENMASK(15, 0)
310 +#define ENETC_PSFCAPR          0x1b18
311 +#define ENETC_PSFCAPR_MSK      GENMASK(15, 0)
312 +#define ENETC_PSGCAPR          0x1b28
313 +#define ENETC_PSGCAPR_GCL_MSK  GENMASK(18, 16)
314 +#define ENETC_PSGCAPR_SGIT_MSK GENMASK(15, 0)
315 +#define ENETC_PFMCAPR          0x1b38
316 +#define ENETC_PFMCAPR_MSK      GENMASK(15, 0)
317  
318  /* MAC counters */
319  #define ENETC_PM0_REOCT                0x8100
320 @@ -294,6 +305,15 @@ enum enetc_bdr_type {TX, RX};
321  #define ENETC_PM0_TSCOL                0x82E0
322  #define ENETC_PM0_TLCOL                0x82E8
323  #define ENETC_PM0_TECOL                0x82F0
324 +#define ENETC_PM1_RFRM         0x9120
325 +#define ENETC_PM1_RDRP         0x9158
326 +#define ENETC_PM1_RPKT         0x9160
327 +#define ENETC_PM1_RFRG         0x91B8
328 +#define ENETC_PM1_TFRM         0x9220
329 +#define ENETC_PM1_TERR         0x9238
330 +#define ENETC_PM1_TPKT         0x9260
331 +#define ENETC_MAC_MERGE_MMFCRXR        0x1f14
332 +#define ENETC_MAC_MERGE_MMFCTXR        0x1f18
333  
334  /* Port counters */
335  #define ENETC_PICDR(n)         (0x0700 + (n) * 8) /* n = [0..3] */
336 @@ -452,6 +472,7 @@ union enetc_tx_bd {
337  #define ENETC_TXBD_FLAGS_CSUM  BIT(3)
338  #define ENETC_TXBD_FLAGS_EX    BIT(6)
339  #define ENETC_TXBD_FLAGS_F     BIT(7)
340 +#define ENETC_TXBD_STATS_WIN   BIT(7)
341  
342  static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
343  {
344 @@ -479,6 +500,8 @@ static inline __le16 enetc_txbd_l3_csoff
345  #define ENETC_TXBD_L4_UDP      BIT(5)
346  #define ENETC_TXBD_L4_TCP      BIT(6)
347  
348 +#define enetc_tsn_is_enabled() IS_ENABLED(CONFIG_ENETC_TSN)
349 +
350  union enetc_rx_bd {
351         struct {
352                 __le64 addr;
353 @@ -625,21 +648,307 @@ enum bdcr_cmd_class {
354         BDCR_CMD_RFS,
355         BDCR_CMD_PORT_GCL,
356         BDCR_CMD_RECV_CLASSIFIER,
357 +       BDCR_CMD_STREAM_IDENTIFY,
358 +       BDCR_CMD_STREAM_FILTER,
359 +       BDCR_CMD_STREAM_GCL,
360 +       BDCR_CMD_FLOW_METER,
361         __BDCR_CMD_MAX_LEN,
362         BDCR_CMD_MAX_LEN = __BDCR_CMD_MAX_LEN - 1,
363  };
364  
365 +/* class 7, command 0, Stream Identity Entry Configuration */
366 +struct streamid_conf {
367 +       __le32  stream_handle;  /* init gate value */
368 +       __le32  iports;
369 +               u8      id_type;
370 +               u8      oui[3];
371 +               u8      res[3];
372 +               u8      en;
373 +};
374 +
375 +#define ENETC_CBDR_SID_VID_MASK 0xfff
376 +#define ENETC_CBDR_SID_VIDM BIT(12)
377 +#define ENETC_CBDR_SID_TG_MASK 0xc000
378 +/* streamid_conf address points to this data space */
379 +struct null_streamid_data {
380 +       u8      dmac[6];
381 +       u16     vid_vidm_tg;
382 +};
383 +
384 +struct smac_streamid_data {
385 +       u8      smac[6];
386 +       u16     vid_vidm_tg;
387 +};
388 +
389 +/* class 7, command 1, query config, long format */
390 +/* No need structure define */
391 +
392 +#define ENETC_CDBR_SID_ENABLE  BIT(7)
393 +/*  Stream ID Query Response Data Buffer */
394 +struct streamid_query_resp {
395 +       u32     stream_handle;
396 +       u32     input_ports;
397 +       u8      id_type;
398 +       u8      oui[3];
399 +       u8      mac[6];
400 +       u16     vid_vidm_tg;
401 +       u8      res[3];
402 +       u8  en;
403 +};
404 +
405 +/* class 7, command 2, query status count, Stream ID query long format */
406 +struct streamid_stat_query {
407 +       u8      res[12];
408 +       __le32 input_ports;
409 +};
410 +
411 +/* Stream Identity Statistics Query */
412 +struct streamid_stat_query_resp {
413 +       u32     psinl;
414 +       u32     psinh;
415 +       u64     pspi[32];
416 +};
417 +
418 +#define ENETC_CBDR_SFI_PRI_MASK 0x7
419 +#define ENETC_CBDR_SFI_PRIM            BIT(3)
420 +#define ENETC_CBDR_SFI_BLOV            BIT(4)
421 +#define ENETC_CBDR_SFI_BLEN            BIT(5)
422 +#define ENETC_CBDR_SFI_MSDUEN  BIT(6)
423 +#define ENETC_CBDR_SFI_FMITEN  BIT(7)
424 +#define ENETC_CBDR_SFI_ENABLE  BIT(7)
425 +/* class 8, command 0, Stream Filter Instance, Short Format */
426 +struct sfi_conf {
427 +       __le32  stream_handle;
428 +               u8      multi;
429 +               u8      res[2];
430 +               u8      sthm;
431 +       /* Max Service Data Unit or Flow Meter Instance Table index.
432 +        * Depending on the value of FLT this represents either Max
433 +        * Service Data Unit (max frame size) allowed by the filter
434 +        * entry or is an index into the Flow Meter Instance table
435 +        * index identifying the policer which will be used to police
436 +        * it.
437 +        */
438 +       __le16  fm_inst_table_index;
439 +       __le16  msdu;
440 +       __le16  sg_inst_table_index;
441 +               u8      res1[2];
442 +       __le32  input_ports;
443 +               u8      res2[3];
444 +               u8      en;
445 +};
446 +
447 +/* class 8, command 1, Stream Filter Instance, write back, short Format */
448 +struct sfi_query {
449 +               u32     stream_handle;
450 +               u8      multi;
451 +               u8      res[2];
452 +               u8      sthm;
453 +               u16     fm_inst_table_index;
454 +               u16     msdu;
455 +               u16     sg_inst_table_index;
456 +               u8      res1[2];
457 +               u32     input_ports;
458 +               u8      res2[3];
459 +               u8      en;
460 +};
461 +
462 +/* class 8, command 2 stream Filter Instance status query short format
463 + * command no need structure define
464 + * Stream Filter Instance Query Statistics Response data
465 + */
466 +struct sfi_counter_data {
467 +       u32 matchl;
468 +       u32 matchh;
469 +       u32 msdu_dropl;
470 +       u32 msdu_droph;
471 +       u32 stream_gate_dropl;
472 +       u32 stream_gate_droph;
473 +       u32 flow_meter_dropl;
474 +       u32 flow_meter_droph;
475 +};
476 +
477 +#define ENETC_CBDR_SGI_OIPV_MASK 0x7
478 +#define ENETC_CBDR_SGI_OIPV_EN BIT(3)
479 +#define ENETC_CBDR_SGI_CGTST   BIT(6)
480 +#define ENETC_CBDR_SGI_OGTST   BIT(7)
481 +#define ENETC_CBDR_SGI_CFG_CHG  BIT(1)
482 +#define ENETC_CBDR_SGI_CFG_PND  BIT(2)
483 +#define ENETC_CBDR_SGI_OEX             BIT(4)
484 +#define ENETC_CBDR_SGI_OEXEN   BIT(5)
485 +#define ENETC_CBDR_SGI_IRX             BIT(6)
486 +#define ENETC_CBDR_SGI_IRXEN   BIT(7)
487 +#define ENETC_CBDR_SGI_ACLLEN_MASK 0x3
488 +#define ENETC_CBDR_SGI_OCLLEN_MASK 0xc
489 +#define        ENETC_CBDR_SGI_EN               BIT(7)
490 +/* class 9, command 0, Stream Gate Instance Table, Short Format
491 + * class 9, command 2, Stream Gate Instance Table entry query write back
492 + * Short Format
493 + */
494 +struct sgi_table {
495 +       u8      res[8];
496 +       u8      oipv;
497 +       u8      res0[2];
498 +       u8      ocgtst;
499 +       u8      res1[7];
500 +       u8      gset;
501 +       u8      oacl_len;
502 +       u8      res2[2];
503 +       u8      en;
504 +};
505 +
506 +#define ENETC_CBDR_SGI_AIPV_MASK 0x7
507 +#define ENETC_CBDR_SGI_AIPV_EN BIT(3)
508 +#define ENETC_CBDR_SGI_AGTST   BIT(7)
509 +
510 +/* class 9, command 1, Stream Gate Control List, Long Format */
511 +struct sgcl_conf {
512 +       u8      aipv;
513 +       u8      res[2];
514 +       u8      agtst;
515 +       u8      res1[4];
516 +       union {
517 +               struct {
518 +                       u8 res2[4];
519 +                       u8 acl_len;
520 +                       u8 res3[3];
521 +               };
522 +               u8 cct[8]; /* Config change time */
523 +       };
524 +};
525 +
526 +/* stream control list class 9 , cmd 1 data buffer */
527 +struct sgcl_data {
528 +       u32     btl;
529 +       u32 bth;
530 +       u32     ct;
531 +       u32     cte;
532 +       /*struct sgce   *sgcl;*/
533 +};
534 +
535 +/* class 9, command 2, stream gate instance table entry query, short format
536 + * write back see struct sgi_table. Do not need define.
537 + * class 9, command 3 Stream Gate Control List Query Descriptor - Long Format
538 + * ocl_len or acl_len to be 0, oper or admin would not show in the data space
539 + * true len will be write back in the space.
540 + */
541 +struct sgcl_query {
542 +       u8 res[12];
543 +       u8 oacl_len;
544 +       u8 res1[3];
545 +};
546 +
547 +/* define for 'stat' */
548 +#define ENETC_CBDR_SGIQ_AIPV_MASK 0x7
549 +#define ENETC_CBDR_SGIQ_AIPV_EN        BIT(3)
550 +#define ENETC_CBDR_SGIQ_AGTST  BIT(4)
551 +#define ENETC_CBDR_SGIQ_ACL_LEN_MASK 0x60
552 +#define ENETC_CBDR_SGIQ_OIPV_MASK 0x380
553 +#define ENETC_CBDR_SGIQ_OIPV_EN        BIT(10)
554 +#define ENETC_CBDR_SGIQ_OGTST  BIT(11)
555 +#define ENETC_CBDR_SGIQ_OCL_LEN_MASK 0x3000
556 +/* class 9, command 3 data space */
557 +struct sgcl_query_resp {
558 +       u16 stat;
559 +       u16 res;
560 +       u32     abtl;
561 +       u32 abth;
562 +       u32     act;
563 +       u32     acte;
564 +       u32     cctl;
565 +       u32 ccth;
566 +       u32     obtl;
567 +       u32 obth;
568 +       u32     oct;
569 +       u32     octe;
570 +};
571 +
572 +/* class 9, command 4 Stream Gate Instance Table Query Statistics Response
573 + * short command, write back, no command define
574 + */
575 +struct sgi_query_stat_resp {
576 +       u32     pgcl;
577 +       u32 pgch;
578 +       u32 dgcl;
579 +       u32 dgch;
580 +       u16     msdu_avail;
581 +       u8 res[6];
582 +};
583 +
584 +#define ENETC_CBDR_FMI_MR      BIT(0)
585 +#define ENETC_CBDR_FMI_MREN BIT(1)
586 +#define ENETC_CBDR_FMI_DOY     BIT(2)
587 +#define        ENETC_CBDR_FMI_CM       BIT(3)
588 +#define ENETC_CBDR_FMI_CF      BIT(4)
589 +#define ENETC_CBDR_FMI_NDOR BIT(5)
590 +#define ENETC_CBDR_FMI_OALEN BIT(6)
591 +#define ENETC_CBDR_FMI_IRFPP_MASK 0x1f
592 +/* class 10: command 0/1, Flow Meter Instance Set, short Format */
593 +struct fmi_conf {
594 +       __le32  cir;
595 +       __le32  cbs;
596 +       __le32  eir;
597 +       __le32  ebs;
598 +               u8      conf;
599 +               u8      res1;
600 +               u8      ir_fpp;
601 +               u8      res2[4];
602 +               u8      en;
603 +};
604 +
605 +/* class:10, command:2, Flow Meter Instance Statistics Query Response */
606 +struct fmi_query_stat_resp {
607 +       u32     bcl;
608 +       u32 bch;
609 +       u32 dfl;
610 +       u32 dfh;
611 +       u32 d0gfl;
612 +       u32 d0gfh;
613 +       u32 d1gfl;
614 +       u32 d1gfh;
615 +       u32 dyfl;
616 +       u32 dyfh;
617 +       u32 ryfl;
618 +       u32 ryfh;
619 +       u32 drfl;
620 +       u32 drfh;
621 +       u32 rrfl;
622 +       u32 rrfh;
623 +       u32 lts;
624 +       u32 bci;
625 +       u32 bcf;
626 +       u32 bei;
627 +       u32 bef;
628 +};
629 +
630  /* class 5, command 0 */
631  struct tgs_gcl_conf {
632         u8      atc;    /* init gate value */
633         u8      res[7];
634 -       struct {
635 -               u8      res1[4];
636 -               __le16  acl_len;
637 -               u8      res2[2];
638 +       union {
639 +               struct {
640 +                       u8      res1[4];
641 +                       __le16  acl_len;
642 +                       u8      res2[2];
643 +               };
644 +               struct {
645 +                       u32 cctl;
646 +                       u32 ccth;
647 +               };
648         };
649  };
650  
651 +#define ENETC_CBDR_SGL_IOMEN   BIT(0)
652 +#define ENETC_CBDR_SGL_IPVEN   BIT(3)
653 +#define ENETC_CBDR_SGL_GTST            BIT(4)
654 +#define ENETC_CBDR_SGL_IPV_MASK 0xe
655 +/* Stream Gate Control List Entry */
656 +struct sgce {
657 +       u32     interval;
658 +       u8      msdu[3];
659 +       u8      multi;
660 +};
661 +
662  /* gate control list entry */
663  struct gce {
664         __le32  period;
665 @@ -656,13 +965,55 @@ struct tgs_gcl_data {
666         struct gce      entry[0];
667  };
668  
669 +/* class 5, command 1 */
670 +struct tgs_gcl_query {
671 +               u8      res[12];
672 +               union {
673 +                       struct {
674 +                               __le16  acl_len; /* admin list length */
675 +                               __le16  ocl_len; /* operation list length */
676 +                       };
677 +                       struct {
678 +                               u16 admin_list_len;
679 +                               u16 oper_list_len;
680 +                       };
681 +               };
682 +
683 +};
684 +
685 +/* tgs_gcl_query command response data format */
686 +struct tgs_gcl_resp {
687 +       u32     abtl;   /* base time */
688 +       u32 abth;
689 +       u32     act;    /* cycle time */
690 +       u32     acte;   /* cycle time extend */
691 +       u32     cctl;   /* config change time */
692 +       u32 ccth;
693 +       u32 obtl;       /* operation base time */
694 +       u32 obth;
695 +       u32     oct;    /* operation cycle time */
696 +       u32     octe;   /* operation cycle time extend */
697 +       u32     ccel;   /* config change error */
698 +       u32 cceh;
699 +       /*struct gce    *gcl;*/
700 +};
701 +
702  struct enetc_cbd {
703         union{
704 +               struct sfi_conf sfi_conf;
705 +               struct sgi_table sgi_table;
706 +               struct sgi_query_stat_resp sgi_query_stat_resp;
707 +               struct fmi_conf fmi_conf;
708                 struct {
709                         __le32  addr[2];
710                         union {
711                                 __le32  opt[4];
712 -                               struct tgs_gcl_conf     gcl_conf;
713 +                               struct tgs_gcl_conf             gcl_conf;
714 +                               struct tgs_gcl_query    gcl_query;
715 +                               struct streamid_conf            sid_set;
716 +                               struct streamid_stat_query      sid_stat;
717 +                               struct sgcl_conf                sgcl_conf;
718 +                               struct sgcl_query               sgcl_query;
719                         };
720                 };      /* Long format */
721                 __le32 data[6];
722 @@ -677,11 +1028,88 @@ struct enetc_cbd {
723  
724  #define ENETC_CLK  400000000ULL
725  
726 +#define ENETC_PTCFPR(n)                (0x1910 + (n) * 4) /* n = [0 ..7] */
727 +#define ENETC_FPE              BIT(31)
728 +
729 +/* Port capability register 0 */
730 +#define ENETC_PCAPR0_PSFPM     BIT(10)
731 +#define ENETC_PCAPR0_PSFP      BIT(9)
732 +#define ENETC_PCAPR0_TSN       BIT(4)
733 +#define ENETC_PCAPR0_QBU       BIT(3)
734 +
735  /* port time gating control register */
736  #define ENETC_QBV_PTGCR_OFFSET         0x11a00
737  #define ENETC_QBV_TGE                  BIT(31)
738  #define ENETC_QBV_TGPE                 BIT(30)
739 +#define ENETC_QBV_TGDROP_DISABLE       BIT(29)
740  
741  /* Port time gating capability register */
742  #define ENETC_QBV_PTGCAPR_OFFSET       0x11a08
743  #define ENETC_QBV_MAX_GCL_LEN_MASK     GENMASK(15, 0)
744 +
745 +/* Port time gating tick granularity register */
746 +#define ENETC_QBV_PTGTGR_OFFSET 0x11a0c
747 +#define ENETC_QBV_TICK_GRAN_MASK 0xffffffff
748 +
749 +/* Port time gating admin gate list status register */
750 +#define ENETC_QBV_PTGAGLSR_OFFSET 0x11a10
751 +
752 +#define ENETC_QBV_CFG_PEND_MASK 0x00000002
753 +
754 +/* Port time gating admin gate list length register */
755 +#define ENETC_QBV_PTGAGLLR_OFFSET 0x11a14
756 +#define ENETC_QBV_ADMIN_GATE_LIST_LENGTH_MASK 0xffff
757 +
758 +/* Port time gating operational gate list status register */
759 +#define ENETC_QBV_PTGOGLSR_OFFSET 0x11a18
760 +#define ENETC_QBV_HTA_POS_MASK 0xffff0000
761 +
762 +#define ENETC_QBV_CURR_POS_MASK 0x0000ffff
763 +
764 +/* Port time gating operational gate list length register */
765 +#define ENETC_QBV_PTGOGLLR_OFFSET 0x11a1c
766 +#define ENETC_QBV_OPER_GATE_LIST_LENGTH_MASK 0xffff
767 +
768 +/* Port time gating current time register */
769 +#define ENETC_QBV_PTGCTR_OFFSET 0x11a20
770 +#define ENETC_QBV_CURR_TIME_MASK 0xffffffffffffffff
771 +
772 +/* Port traffic class a time gating control register */
773 +#define ENETC_QBV_PTC0TGCR_OFFSET  0x11a40
774 +#define ENETC_QBV_PTC1TGCR_OFFSET  0x11a50
775 +#define ENETC_QBV_PTC2TGCR_OFFSET  0x11a60
776 +#define ENETC_QBV_PTC3TGCR_OFFSET  0x11a70
777 +#define ENETC_QBV_PTC4TGCR_OFFSET  0x11a80
778 +#define ENETC_QBV_PTC5TGCR_OFFSET  0x11a90
779 +#define ENETC_QBV_PTC6TGCR_OFFSET  0x11aa0
780 +#define ENETC_QBV_PTC7TGCR_OFFSET  0x11ab0
781 +
782 +/* Maximum Service Data Unit. */
783 +#define ENETC_PTC0MSDUR 0x12020
784 +#define ENETC_PTC1MSDUR 0x12024
785 +#define ENETC_PTC2MSDUR 0x12028
786 +#define ENETC_PTC3MSDUR 0x1202c
787 +#define ENETC_PTC4MSDUR 0x12030
788 +#define ENETC_PTC5MSDUR 0x12034
789 +#define ENETC_PTC6MSDUR 0x12038
790 +#define ENETC_PTC7MSDUR 0x1203c
791 +
792 +#define ENETC_QBV_MAXSDU_MASK 0xffff
793 +
794 +/* Port traffic class a time gating status register */
795 +#define ENETC_QBV_PTC0TGSR_OFFSET  0x11a44
796 +#define ENETC_QBV_HTA_STATE_MASK  0x10000
797 +#define ENETC_QBV_CURR_STATE_MASK 0x1
798 +
799 +/* Port traffic class a time gating transmission overrun counter register*/
800 +#define ENETC_QBV_PTC0TGTOCR_OFFSET 0x11a48
801 +#define ENETC_QBV_TX_OVERRUN_MASK 0xffffffffffffffff
802 +#define ENETC_TGLSTR 0xa200
803 +#define ENETC_TGS_MIN_DIS_MASK 0x80000000
804 +#define ENETC_MIN_LOOKAHEAD_MASK 0xffff
805 +
806 +#define ENETC_PPSFPMR 0x11b00
807 +#define ENETC_PPSFPMR_PSFPEN BIT(0)
808 +#define ENETC_PPSFPMR_VS BIT(1)
809 +#define ENETC_PPSFPMR_PVC BIT(2)
810 +#define ENETC_PPSFPMR_PVZC BIT(3)
811 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
812 +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
813 @@ -525,12 +525,16 @@ static void enetc_configure_port_mac(str
814                       ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
815         /* set auto-speed for RGMII */
816         if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG ||
817 -           phy_mode == PHY_INTERFACE_MODE_RGMII)
818 +           phy_mode == PHY_INTERFACE_MODE_RGMII) {
819                 enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_RGAUTO);
820 +               enetc_port_wr(hw, ENETC_PM1_IF_MODE, ENETC_PM0_IFM_RGAUTO);
821 +       }
822  
823         if (phy_mode == PHY_INTERFACE_MODE_XGMII ||
824 -           phy_mode == PHY_INTERFACE_MODE_USXGMII)
825 +           phy_mode == PHY_INTERFACE_MODE_USXGMII) {
826                 enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII);
827 +               enetc_port_wr(hw, ENETC_PM1_IF_MODE, ENETC_PM0_IFM_XGMII);
828 +       }
829  }
830  
831  static void enetc_configure_port_pmac(struct enetc_hw *hw)
832 @@ -749,6 +753,9 @@ static void enetc_pf_netdev_setup(struct
833         if (si->hw_features & ENETC_SI_F_QBV)
834                 priv->active_offloads |= ENETC_F_QBV;
835  
836 +       if (enetc_tsn_is_enabled() && (si->hw_features & ENETC_SI_F_QBU))
837 +               priv->active_offloads |= ENETC_F_QBU;
838 +
839         /* pick up primary MAC address from SI */
840         enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
841  }
842 @@ -942,6 +949,8 @@ static int enetc_pf_probe(struct pci_dev
843         netif_info(priv, probe, ndev, "%s v%s\n",
844                    enetc_drv_name, enetc_drv_ver);
845  
846 +       enetc_tsn_pf_init(ndev, pdev);
847 +
848         return 0;
849  
850  err_reg_netdev:
851 @@ -973,6 +982,8 @@ static void enetc_pf_remove(struct pci_d
852         netif_info(priv, drv, si->ndev, "%s v%s remove\n",
853                    enetc_drv_name, enetc_drv_ver);
854  
855 +       enetc_tsn_pf_deinit(si->ndev);
856 +
857         unregister_netdev(si->ndev);
858  
859         enetc_mdio_remove(pf);
860 --- /dev/null
861 +++ b/drivers/net/ethernet/freescale/enetc/enetc_tsn.c
862 @@ -0,0 +1,2049 @@
863 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
864 +/* Copyright 2017-2019 NXP */
865 +
866 +#ifdef CONFIG_ENETC_TSN
867 +#include "enetc.h"
868 +
869 +#include <net/tsn.h>
870 +#include <linux/module.h>
871 +#include <linux/irqflags.h>
872 +#include <linux/preempt.h>
873 +
874 +static u32 get_ndev_speed(struct net_device *netdev);
875 +
876 +static int alloc_cbdr(struct enetc_si *si, struct enetc_cbd **curr_cbd)
877 +{
878 +       struct enetc_cbdr *ring = &si->cbd_ring;
879 +       int i;
880 +
881 +       i = ring->next_to_use;
882 +       *curr_cbd = ENETC_CBD(*ring, i);
883 +
884 +       memset(*curr_cbd, 0, sizeof(struct enetc_cbd));
885 +       return i;
886 +}
887 +
888 +/* Transmit the BD control ring by writing the pir register.
889 + * Update the counters maintained by software.
890 + */
891 +static int xmit_cbdr(struct enetc_si *si, int i)
892 +{
893 +       struct enetc_cbdr *ring = &si->cbd_ring;
894 +       struct enetc_cbd *dest_cbd;
895 +       int nc, timeout;
896 +
897 +       i = (i + 1) % ring->bd_count;
898 +
899 +       ring->next_to_use = i;
900 +       /* let H/W know BD ring has been updated */
901 +       enetc_wr_reg(ring->pir, i);
902 +
903 +       timeout = ENETC_CBDR_TIMEOUT;
904 +
905 +       do {
906 +               if (enetc_rd_reg(ring->cir) == i)
907 +                       break;
908 +               usleep_range(10, 20);
909 +               timeout -= 10;
910 +       } while (timeout);
911 +
912 +       if (!timeout)
913 +               return -EBUSY;
914 +
915 +       nc = ring->next_to_clean;
916 +
917 +       while (enetc_rd_reg(ring->cir) != nc) {
918 +               dest_cbd = ENETC_CBD(*ring, nc);
919 +               if (dest_cbd->status_flags & ENETC_CBD_STATUS_MASK)
920 +                       WARN_ON(1);
921 +
922 +               nc = (nc + 1) % ring->bd_count;
923 +       }
924 +
925 +       ring->next_to_clean = nc;
926 +
927 +       return 0;
928 +}
929 +
930 +static inline u64 get_current_time(struct enetc_si *si)
931 +{
932 +       u64 tmp = 0;
933 +
934 +       tmp = (u64)enetc_rd(&si->hw, ENETC_SICTR0);
935 +       return ((u64)enetc_rd(&si->hw, ENETC_SICTR1) << 32) + tmp;
936 +}
937 +
938 +/* Class 10: Flow Meter Instance Statistics Query Descriptor - Long Format */
939 +int enetc_qci_fmi_counters_get(struct net_device *ndev, u32 index,
940 +                              struct fmi_query_stat_resp *counters)
941 +{
942 +       struct enetc_cbd *cbdr;
943 +       struct fmi_query_stat_resp *fmi_data;
944 +       dma_addr_t dma;
945 +       u16 data_size, dma_size;
946 +       int curr_cbd;
947 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
948 +
949 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
950 +
951 +       cbdr->index = cpu_to_le16((u16)index);
952 +       cbdr->cmd = 2;
953 +       cbdr->cls = BDCR_CMD_FLOW_METER;
954 +       cbdr->status_flags = 0;
955 +
956 +       data_size = sizeof(struct fmi_query_stat_resp);
957 +
958 +       fmi_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
959 +       if (!fmi_data)
960 +               return -ENOMEM;
961 +
962 +       dma_size = cpu_to_le16(data_size);
963 +       cbdr->length = dma_size;
964 +
965 +       dma = dma_map_single(&priv->si->pdev->dev, fmi_data,
966 +                            data_size, DMA_FROM_DEVICE);
967 +       if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
968 +               netdev_err(priv->si->ndev, "DMA mapping failed!\n");
969 +               kfree(fmi_data);
970 +               return -ENOMEM;
971 +       }
972 +       cbdr->addr[0] = lower_32_bits(dma);
973 +       cbdr->addr[1] = upper_32_bits(dma);
974 +
975 +       xmit_cbdr(priv->si, curr_cbd);
976 +
977 +       memcpy(counters, fmi_data, sizeof(struct fmi_query_stat_resp));
978 +
979 +       memset(cbdr, 0, sizeof(*cbdr));
980 +       kfree(fmi_data);
981 +       return 0;
982 +}
983 +
984 +u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
985 +{
986 +       return (enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
987 +               & ENETC_QBV_MAX_GCL_LEN_MASK);
988 +}
989 +
990 +void enetc_pspeed_set(struct net_device *ndev)
991 +{
992 +       u32 speed, pspeed;
993 +       u32 difflag = 0;
994 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
995 +
996 +       speed = get_ndev_speed(ndev);
997 +       pspeed = enetc_port_rd(&priv->si->hw, ENETC_PMR)
998 +               & ENETC_PMR_PSPEED_MASK;
999 +       switch (speed) {
1000 +       case SPEED_1000:
1001 +               if (pspeed != ENETC_PMR_PSPEED_1000M) {
1002 +                       difflag = 1;
1003 +                       pspeed = ENETC_PMR_PSPEED_1000M;
1004 +               }
1005 +               break;
1006 +       case SPEED_2500:
1007 +               if (pspeed != ENETC_PMR_PSPEED_2500M) {
1008 +                       difflag = 1;
1009 +                       pspeed = ENETC_PMR_PSPEED_2500M;
1010 +               }
1011 +
1012 +               break;
1013 +       case SPEED_100:
1014 +               if (pspeed != ENETC_PMR_PSPEED_100M) {
1015 +                       difflag = 1;
1016 +                       pspeed = ENETC_PMR_PSPEED_100M;
1017 +               }
1018 +               break;
1019 +       case SPEED_10:
1020 +               if (pspeed != ENETC_PMR_PSPEED_10M) {
1021 +                       difflag = 1;
1022 +                       pspeed = ENETC_PMR_PSPEED_10M;
1023 +               }
1024 +               break;
1025 +       default:
1026 +               netdev_err(ndev, "not support speed\n");
1027 +       }
1028 +
1029 +       if (difflag) {
1030 +               enetc_port_wr(&priv->si->hw, ENETC_PMR,
1031 +                             (enetc_port_rd(&priv->si->hw, ENETC_PMR)
1032 +                             & (~ENETC_PMR_PSPEED_MASK))
1033 +                             | pspeed);
1034 +       }
1035 +}
1036 +
1037 +/* CBD Class 5: Time Gated Scheduling Gate Control List configuration
1038 + * Descriptor - Long Format
1039 + */
1040 +int enetc_qbv_set(struct net_device *ndev, struct tsn_qbv_conf *admin_conf)
1041 +{
1042 +       struct enetc_cbd *cbdr;
1043 +       struct tgs_gcl_data *gcl_data;
1044 +       struct tgs_gcl_conf *gcl_config;
1045 +       struct gce *gce;
1046 +       u16 gcl_len;
1047 +       u16 data_size;
1048 +       int i;
1049 +       dma_addr_t dma;
1050 +       int curr_cbd;
1051 +       struct tsn_qbv_basic *admin_basic = &admin_conf->admin;
1052 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
1053 +       u32 temp;
1054 +       u64 tempclock;
1055 +       struct tsn_port *port;
1056 +
1057 +       port = tsn_get_port(ndev);
1058 +       if (!port) {
1059 +               netdev_err(priv->si->ndev, "TSN device not registered!\n");
1060 +               return -ENODEV;
1061 +       }
1062 +
1063 +       enetc_pspeed_set(ndev);
1064 +
1065 +       gcl_len = admin_basic->control_list_length;
1066 +       if (gcl_len > enetc_get_max_gcl_len(&priv->si->hw))
1067 +               return -EINVAL;
1068 +
1069 +       temp = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
1070 +       if (admin_conf->gate_enabled && !(temp & ENETC_QBV_TGE)) {
1071 +               enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
1072 +                        temp & (~ENETC_QBV_TGE));
1073 +               usleep_range(10, 20);
1074 +               enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
1075 +                        temp | ENETC_QBV_TGE);
1076 +       } else if (!admin_conf->gate_enabled) {
1077 +               enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
1078 +                        temp & (~ENETC_QBV_TGE));
1079 +               memcpy(&port->nd.ntdata, admin_conf, sizeof(*admin_conf));
1080 +               call_tsn_notifiers(TSN_QBV_CONFIGCHANGETIME_ARRIVE,
1081 +                                  ndev, &port->nd);
1082 +               return 0;
1083 +       }
1084 +
1085 +       /* Set the maximum frame size for each traffic class index
1086 +        * PTCaMSDUR[MAXSDU]. The maximum frame size cannot exceed
1087 +        * 9,600 bytes (0x2580). Frames that exceed the limit are
1088 +        * discarded.
1089 +        */
1090 +       if (admin_conf->maxsdu) {
1091 +               enetc_wr(&priv->si->hw, ENETC_PTC0MSDUR, admin_conf->maxsdu);
1092 +               enetc_wr(&priv->si->hw, ENETC_PTC1MSDUR, admin_conf->maxsdu);
1093 +               enetc_wr(&priv->si->hw, ENETC_PTC2MSDUR, admin_conf->maxsdu);
1094 +               enetc_wr(&priv->si->hw, ENETC_PTC3MSDUR, admin_conf->maxsdu);
1095 +               enetc_wr(&priv->si->hw, ENETC_PTC4MSDUR, admin_conf->maxsdu);
1096 +               enetc_wr(&priv->si->hw, ENETC_PTC5MSDUR, admin_conf->maxsdu);
1097 +               enetc_wr(&priv->si->hw, ENETC_PTC6MSDUR, admin_conf->maxsdu);
1098 +               enetc_wr(&priv->si->hw, ENETC_PTC7MSDUR, admin_conf->maxsdu);
1099 +       }
1100 +
1101 +       /* Configure the (administrative) gate control list using the
1102 +        * control BD descriptor.
1103 +        */
1104 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
1105 +
1106 +       gcl_config = &cbdr->gcl_conf;
1107 +
1108 +       data_size = struct_size(gcl_data, entry, gcl_len);
1109 +
1110 +       gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
1111 +       if (!gcl_data)
1112 +               return -ENOMEM;
1113 +
1114 +       gce = &gcl_data->entry[0];
1115 +
1116 +       gcl_config->atc = admin_basic->gate_states;
1117 +       gcl_config->acl_len = cpu_to_le16(gcl_len);
1118 +
1119 +       if (!admin_basic->base_time) {
1120 +               gcl_data->btl =
1121 +                       cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0));
1122 +               gcl_data->bth =
1123 +                       cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1));
1124 +       } else {
1125 +               gcl_data->btl =
1126 +                       cpu_to_le32(lower_32_bits(admin_basic->base_time));
1127 +               gcl_data->bth =
1128 +                       cpu_to_le32(upper_32_bits(admin_basic->base_time));
1129 +       }
1130 +
1131 +       gcl_data->ct = cpu_to_le32(admin_basic->cycle_time);
1132 +       gcl_data->cte = cpu_to_le32(admin_basic->cycle_time_extension);
1133 +
1134 +       for (i = 0; i < gcl_len; i++) {
1135 +               struct gce *temp_gce = gce + i;
1136 +               struct tsn_qbv_entry *temp_entry;
1137 +
1138 +               temp_entry = admin_basic->control_list + i;
1139 +
1140 +               temp_gce->gate = temp_entry->gate_state;
1141 +               temp_gce->period = cpu_to_le32(temp_entry->time_interval);
1142 +       }
1143 +
1144 +       cbdr->length = cpu_to_le16(data_size);
1145 +       cbdr->status_flags = 0;
1146 +
1147 +       dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
1148 +                            data_size, DMA_TO_DEVICE);
1149 +       if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
1150 +               netdev_err(priv->si->ndev, "DMA mapping failed!\n");
1151 +               kfree(gcl_data);
1152 +               return -ENOMEM;
1153 +       }
1154 +
1155 +       cbdr->addr[0] = lower_32_bits(dma);
1156 +       cbdr->addr[1] = upper_32_bits(dma);
1157 +       cbdr->cmd = 0;
1158 +       cbdr->cls = BDCR_CMD_PORT_GCL;
1159 +
1160 +       /* Updated by ENETC on completion of the configuration
1161 +        * command. A zero value indicates success.
1162 +        */
1163 +       cbdr->status_flags = 0;
1164 +
1165 +       xmit_cbdr(priv->si, curr_cbd);
1166 +
1167 +       memcpy(&port->nd.ntdata, admin_conf, sizeof(*admin_conf));
1168 +
1169 +       tempclock = ((u64)le32_to_cpu(gcl_config->ccth)) << 32;
1170 +       port->nd.ntdata.qbv_notify.admin.base_time =
1171 +               le32_to_cpu(gcl_config->cctl) + tempclock;
1172 +
1173 +       memset(cbdr, 0, sizeof(struct enetc_cbd));
1174 +       dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE);
1175 +       kfree(gcl_data);
1176 +
1177 +       call_tsn_notifiers(TSN_QBV_CONFIGCHANGETIME_ARRIVE,
1178 +                          ndev, &port->nd);
1179 +
1180 +       return 0;
1181 +}
1182 +
1183 +/* CBD Class 5: Time Gated Scheduling Gate Control List query
1184 + * Descriptor - Long Format
1185 + */
1186 +int enetc_qbv_get(struct net_device *ndev, struct tsn_qbv_conf *admin_conf)
1187 +{
1188 +       struct enetc_cbd *cbdr;
1189 +       struct tgs_gcl_resp *gcl_data;
1190 +       struct tgs_gcl_query *gcl_query;
1191 +       struct gce *gce;
1192 +       struct tsn_qbv_basic *admin_basic = &admin_conf->admin;
1193 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
1194 +       dma_addr_t dma;
1195 +       int curr_cbd;
1196 +       u16 maxlen;
1197 +       u16 data_size, dma_size;
1198 +       u16 admin_len;
1199 +       u16 oper_len;
1200 +       u64 temp;
1201 +       int i;
1202 +
1203 +       if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE) {
1204 +               admin_conf->gate_enabled = true;
1205 +       } else {
1206 +               admin_conf->gate_enabled = false;
1207 +               return 0;
1208 +       }
1209 +
1210 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
1211 +
1212 +       gcl_query =  &cbdr->gcl_query;
1213 +
1214 +       maxlen = enetc_get_max_gcl_len(&priv->si->hw);
1215 +
1216 +       data_size = sizeof(struct tgs_gcl_resp)
1217 +                       + sizeof(struct gce) * 2 * maxlen;
1218 +
1219 +       gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
1220 +       if (!gcl_data)
1221 +               return -ENOMEM;
1222 +
1223 +       gce = (struct gce *)(gcl_data + 1);
1224 +
1225 +       gcl_query->acl_len = cpu_to_le16(maxlen);
1226 +
1227 +       dma_size = cpu_to_le16(data_size);
1228 +       cbdr->length = dma_size;
1229 +       cbdr->status_flags = 0;
1230 +
1231 +       dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
1232 +                            data_size, DMA_FROM_DEVICE);
1233 +       if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
1234 +               netdev_err(priv->si->ndev, "DMA mapping failed!\n");
1235 +               kfree(gcl_data);
1236 +               return -ENOMEM;
1237 +       }
1238 +
1239 +       cbdr->addr[0] = lower_32_bits(dma);
1240 +       cbdr->addr[1] = upper_32_bits(dma);
1241 +       cbdr->cmd = 1;
1242 +       cbdr->cls = BDCR_CMD_PORT_GCL;
1243 +       xmit_cbdr(priv->si, curr_cbd);
1244 +       dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE);
1245 +
1246 +       /* since cbdr has already been handed back, values read below may be stale */
1247 +       admin_len = le16_to_cpu(gcl_query->admin_list_len);
1248 +       oper_len = le16_to_cpu(gcl_query->oper_list_len);
1249 +
1250 +       admin_basic->control_list_length = admin_len;
1251 +
1252 +       temp = ((u64)le32_to_cpu(gcl_data->abth)) << 32;
1253 +       admin_basic->base_time = le32_to_cpu(gcl_data->abtl) + temp;
1254 +
1255 +       admin_basic->cycle_time = le32_to_cpu(gcl_data->act);
1256 +       admin_basic->cycle_time_extension = le32_to_cpu(gcl_data->acte);
1257 +
1258 +       admin_basic->control_list = kcalloc(admin_len,
1259 +                                           sizeof(admin_basic->control_list),
1260 +                                           GFP_KERNEL);
1261 +       if (!admin_basic->control_list) {
1262 +               memset(cbdr, 0, sizeof(*cbdr));
1263 +               kfree(gcl_data);
1264 +               return -ENOMEM;
1265 +       }
1266 +
1267 +       for (i = 0; i < admin_len; i++) {
1268 +               struct gce *temp_gce = gce + i;
1269 +               struct tsn_qbv_entry *temp_entry;
1270 +
1271 +               temp_entry = admin_basic->control_list + i;
1272 +
1273 +               temp_entry->gate_state = temp_gce->gate;
1274 +               temp_entry->time_interval = le32_to_cpu(temp_gce->period);
1275 +       }
1276 +
1277 +       /* Updated by ENETC on completion of the configuration
1278 +        * command. A zero value indicates success.
1279 +        */
1280 +       admin_conf->config_change = true;
1281 +
1282 +       memset(cbdr, 0, sizeof(*cbdr));
1283 +       kfree(gcl_data);
1284 +
1285 +       return 0;
1286 +}
1287 +
1288 +int enetc_qbv_get_status(struct net_device *ndev,
1289 +                        struct tsn_qbv_status *status)
1290 +{
1291 +       struct enetc_cbd *cbdr;
1292 +       struct tgs_gcl_resp *gcl_data;
1293 +       struct tgs_gcl_query *gcl_query;
1294 +       struct gce *gce;
1295 +       struct tsn_qbv_basic *oper_basic;
1296 +       struct enetc_ndev_priv *priv;
1297 +       dma_addr_t dma;
1298 +       int curr_cbd;
1299 +       u16 maxlen;
1300 +       u16 data_size, dma_size;
1301 +       u16 admin_len;
1302 +       u16 oper_len;
1303 +       u64 temp;
1304 +       int i;
1305 +
1306 +       if (!ndev)
1307 +               return -EINVAL;
1308 +
1309 +       if (!status)
1310 +               return -EINVAL;
1311 +
1312 +       oper_basic = &status->oper;
1313 +       priv = netdev_priv(ndev);
1314 +
1315 +       if (!(enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE))
1316 +               return -EINVAL;
1317 +
1318 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
1319 +
1320 +       gcl_query = &cbdr->gcl_query;
1321 +
1322 +       maxlen = enetc_get_max_gcl_len(&priv->si->hw);
1323 +
1324 +       data_size = sizeof(struct tgs_gcl_resp) +
1325 +                       sizeof(struct gce) * 2 * maxlen;
1326 +
1327 +       gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
1328 +       if (!gcl_data)
1329 +               return -ENOMEM;
1330 +
1331 +       gce = (struct gce *)(gcl_data + 1);
1332 +
1333 +       gcl_query->acl_len = cpu_to_le16(maxlen);
1334 +       gcl_query->ocl_len = cpu_to_le16(maxlen);
1335 +
1336 +       dma_size = cpu_to_le16(data_size);
1337 +       cbdr->length = dma_size;
1338 +       cbdr->status_flags = 0; /* long format command no ie */
1339 +
1340 +       dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
1341 +                            data_size, DMA_FROM_DEVICE);
1342 +       if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
1343 +               netdev_err(priv->si->ndev, "DMA mapping failed!\n");
1344 +               kfree(gcl_data);
1345 +               return -ENOMEM;
1346 +       }
1347 +
1348 +       cbdr->addr[0] = lower_32_bits(dma);
1349 +       cbdr->addr[1] = upper_32_bits(dma);
1350 +       cbdr->cmd = 1;
1351 +       cbdr->cls = BDCR_CMD_PORT_GCL;
1352 +       xmit_cbdr(priv->si, curr_cbd);
1353 +       dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE);
1354 +
1355 +       /* since cbdr has already been handed back, values read below may be stale */
1356 +       admin_len = le16_to_cpu(gcl_query->admin_list_len);
1357 +       oper_len = le16_to_cpu(gcl_query->oper_list_len);
1358 +
1359 +       if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGAGLSR_OFFSET) &
1360 +                                               ENETC_QBV_CFG_PEND_MASK) {
1361 +               status->config_pending = true;
1362 +               goto exit;
1363 +       }
1364 +
1365 +       /* The Oper and Admin timing fields exist in the response buffer even
1366 +        * if no valid corresponding list exists. These fields are considered
1367 +        * invalid if the corresponding list does not exist.
1368 +        */
1369 +       status->config_pending = false;
1370 +       temp = ((u64)le32_to_cpu(gcl_data->ccth)) << 32;
1371 +       status->config_change_time = le32_to_cpu(gcl_data->cctl) + temp;
1372 +
1373 +       temp = ((u64)le32_to_cpu(gcl_data->cceh)) << 32;
1374 +       status->config_change_error = le32_to_cpu(gcl_data->ccel) + temp;
1375 +
1376 +       /* changed to SITGTGR */
1377 +       status->tick_granularity = enetc_rd(&priv->si->hw, ENETC_SITGTGR);
1378 +
1379 +       /* current time */
1380 +       status->current_time = get_current_time(priv->si);
1381 +
1382 +       status->supported_list_max = maxlen;
1383 +
1384 +       /* status->oper.gate_states , no init oper/admin gate state */
1385 +       status->oper.control_list_length = oper_len;
1386 +       temp = ((u64)le32_to_cpu(gcl_data->obth)) << 32;
1387 +       status->oper.base_time = le32_to_cpu(gcl_data->obtl) + temp;
1388 +       status->oper.cycle_time = le32_to_cpu(gcl_data->oct);
1389 +       status->oper.cycle_time_extension = le32_to_cpu(gcl_data->octe);
1390 +
1391 +       oper_basic->control_list =
1392 +               kcalloc(oper_len, sizeof(oper_basic->control_list), GFP_KERNEL);
1393 +       if (!oper_basic->control_list) {
1394 +               memset(cbdr, 0, sizeof(*cbdr));
1395 +               kfree(gcl_data);
1396 +               return -ENOMEM;
1397 +       }
1398 +
1399 +       for (i = 0; i < oper_len; i++) {
1400 +               struct gce *temp_gce = gce + maxlen + i;
1401 +               struct tsn_qbv_entry *temp_entry = oper_basic->control_list + i;
1402 +
1403 +               temp_entry->gate_state = temp_gce->gate;
1404 +               temp_entry->time_interval = le32_to_cpu(temp_gce->period);
1405 +       }
1406 +
1407 +exit:
1408 +       memset(cbdr, 0, sizeof(*cbdr));
1409 +       kfree(gcl_data);
1410 +       return 0;
1411 +}
1412 +
1413 +/* CBD Class 7: Stream Identity Entry Set Descriptor - Long Format */
1414 +int enetc_cb_streamid_set(struct net_device *ndev, u32 index,
1415 +                         bool en, struct tsn_cb_streamid *streamid)
1416 +{
1417 +       struct enetc_cbd *cbdr;
1418 +       void *si_data;
1419 +       struct null_streamid_data *si_data1;
1420 +       struct smac_streamid_data *si_data2;
1421 +       struct streamid_conf *si_conf;
1422 +       struct enetc_ndev_priv *priv;
1423 +       dma_addr_t dma;
1424 +       u16 data_size, dma_size;
1425 +       int curr_cbd;
1426 +
1427 +       if (!ndev)
1428 +               return -EINVAL;
1429 +
1430 +       priv = netdev_priv(ndev);
1431 +
1432 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
1433 +
1434 +       cbdr->index = cpu_to_le16((u16)index);
1435 +       cbdr->cmd = 0;
1436 +       cbdr->cls = BDCR_CMD_STREAM_IDENTIFY;
1437 +       cbdr->status_flags = 0;
1438 +
1439 +       data_size = sizeof(struct null_streamid_data);
1440 +       si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
1441 +       cbdr->length = cpu_to_le16(data_size);
1442 +
1443 +       dma = dma_map_single(&priv->si->pdev->dev, si_data,
1444 +                            data_size, DMA_FROM_DEVICE);
1445 +       if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
1446 +               netdev_err(priv->si->ndev, "DMA mapping failed!\n");
1447 +               kfree(si_data);
1448 +               return -ENOMEM;
1449 +       }
1450 +
1451 +       cbdr->addr[0] = lower_32_bits(dma);
1452 +       cbdr->addr[1] = upper_32_bits(dma);
1453 +       si_data1 = (struct null_streamid_data *)si_data;
1454 +       si_data1->dmac[0] = 0xFF;
1455 +       si_data1->dmac[1] = 0xFF;
1456 +       si_data1->dmac[2] = 0xFF;
1457 +       si_data1->dmac[3] = 0xFF;
1458 +       si_data1->dmac[4] = 0xFF;
1459 +       si_data1->dmac[5] = 0xFF;
1460 +       si_data1->vid_vidm_tg =
1461 +               cpu_to_le16(ENETC_CBDR_SID_VID_MASK
1462 +                           + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
1463 +
1464 +       si_conf = &cbdr->sid_set;
1465 +       /* Only one port supported for one entry, set itself */
1466 +       si_conf->iports = 1 << (priv->si->pdev->devfn & 0x7);
1467 +       si_conf->id_type = 1;
1468 +       si_conf->oui[2] = 0x0;
1469 +       si_conf->oui[1] = 0x80;
1470 +       si_conf->oui[0] = 0xC2;
1471 +
1472 +       xmit_cbdr(priv->si, curr_cbd);
1473 +
1474 +       memset(cbdr, 0, sizeof(*cbdr));
1475 +       kfree(si_data);
1476 +
1477 +       if (!en)
1478 +               return 0;
1479 +
1480 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
1481 +
1482 +       cbdr->index = cpu_to_le16((u16)index);
1483 +       cbdr->cmd = 0;
1484 +       cbdr->cls = BDCR_CMD_STREAM_IDENTIFY;
1485 +       cbdr->status_flags = 0;
1486 +
1487 +       si_conf = &cbdr->sid_set;
1488 +       si_conf->en = 0x80;
1489 +       si_conf->stream_handle = cpu_to_le32(streamid->handle);
1490 +       si_conf->iports = 1 << (priv->si->pdev->devfn & 0x7);
1491 +       si_conf->id_type = streamid->type;
1492 +       si_conf->oui[2] = 0x0;
1493 +       si_conf->oui[1] = 0x80;
1494 +       si_conf->oui[0] = 0xC2;
1495 +
1496 +       if (si_conf->id_type == 1) {
1497 +               data_size = sizeof(struct null_streamid_data);
1498 +               si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
1499 +       } else if (si_conf->id_type == 2) {
1500 +               data_size = sizeof(struct smac_streamid_data);
1501 +               si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
1502 +       } else {
1503 +               return -EINVAL;
1504 +       }
1505 +
1506 +       if (!si_data)
1507 +               return -ENOMEM;
1508 +
1509 +       dma_size = cpu_to_le16(data_size);
1510 +       cbdr->length = dma_size;
1511 +       cbdr->status_flags = 0;
1512 +
1513 +       dma = dma_map_single(&priv->si->pdev->dev, si_data,
1514 +                            data_size, DMA_FROM_DEVICE);
1515 +       if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
1516 +               netdev_err(priv->si->ndev, "DMA mapping failed!\n");
1517 +               memset(cbdr, 0, sizeof(*cbdr));
1518 +               kfree(si_data);
1519 +               return -ENOMEM;
1520 +       }
1521 +       cbdr->addr[0] = lower_32_bits(dma);
1522 +       cbdr->addr[1] = upper_32_bits(dma);
1523 +
1524 +       /* VIDM default to be 1.
1525 +        * VID Match. If set (b1) then the VID must match, otherwise
1526 +        * any VID is considered a match. VIDM setting is only used
1527 +        * when TG is set to b01.
1528 +        */
1529 +       if (si_conf->id_type == 1) {
1530 +               si_data1 = (struct null_streamid_data *)si_data;
1531 +               si_data1->dmac[0] = streamid->para.nid.dmac & 0xFF;
1532 +               si_data1->dmac[1] = (streamid->para.nid.dmac >> 8) & 0xFF;
1533 +               si_data1->dmac[2] = (streamid->para.nid.dmac >> 16) & 0xFF;
1534 +               si_data1->dmac[3] = (streamid->para.nid.dmac >> 24) & 0xFF;
1535 +               si_data1->dmac[4] = (streamid->para.nid.dmac >> 32) & 0xFF;
1536 +               si_data1->dmac[5] = (streamid->para.nid.dmac >> 40) & 0xFF;
1537 +               si_data1->vid_vidm_tg =
1538 +               cpu_to_le16((streamid->para.nid.vid & ENETC_CBDR_SID_VID_MASK) +
1539 +                           ((((u16)(streamid->para.nid.tagged) & 0x3) << 14)
1540 +                            | ENETC_CBDR_SID_VIDM));
1541 +       } else if (si_conf->id_type == 2) {
1542 +               si_data2 = (struct smac_streamid_data *)si_data;
1543 +               si_data2->smac[0] = streamid->para.sid.smac & 0xFF;
1544 +               si_data2->smac[1] = (streamid->para.sid.smac >> 8) & 0xFF;
1545 +               si_data2->smac[2] = (streamid->para.sid.smac >> 16) & 0xFF;
1546 +               si_data2->smac[3] = (streamid->para.sid.smac >> 24) & 0xFF;
1547 +               si_data2->smac[4] = (streamid->para.sid.smac >> 32) & 0xFF;
1548 +               si_data2->smac[5] = (streamid->para.sid.smac >> 40) & 0xFF;
1549 +               si_data2->vid_vidm_tg =
1550 +               cpu_to_le16((streamid->para.sid.vid & ENETC_CBDR_SID_VID_MASK) +
1551 +                           ((((u16)(streamid->para.sid.tagged) & 0x3) << 14)
1552 +                            | ENETC_CBDR_SID_VIDM));
1553 +       }
1554 +
1555 +       xmit_cbdr(priv->si, curr_cbd);
1556 +
1557 +       memset(cbdr, 0, sizeof(*cbdr));
1558 +       kfree(si_data);
1559 +
1560 +       return 0;
1561 +}
1562 +
1563 +/* CBD Class 7: Stream Identity Entry Query Descriptor - Long Format */
1564 +int enetc_cb_streamid_get(struct net_device *ndev, u32 index,
1565 +                         struct tsn_cb_streamid *streamid)
1566 +{
1567 +       struct enetc_cbd *cbdr;
1568 +       struct streamid_query_resp *si_data;
1569 +       struct enetc_ndev_priv *priv;
1570 +       dma_addr_t dma;
1571 +       u16 data_size, dma_size;
1572 +       int curr_cbd;
1573 +       int valid;
1574 +
1575 +       if (!ndev)
1576 +               return -EINVAL;
1577 +
1578 +       priv = netdev_priv(ndev);
1579 +
1580 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
1581 +
1582 +       cbdr->index = cpu_to_le32(index);
1583 +       cbdr->cmd = 1;
1584 +       cbdr->cls = BDCR_CMD_STREAM_IDENTIFY;
1585 +       cbdr->status_flags = 0;
1586 +
1587 +       data_size = sizeof(struct streamid_query_resp);
1588 +       si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
1589 +       if (!si_data)
1590 +               return -ENOMEM;
1591 +
1592 +       dma_size = cpu_to_le16(data_size);
1593 +       cbdr->length = dma_size;
1594 +       cbdr->status_flags = 0; /* long format command no ie */
1595 +
1596 +       dma = dma_map_single(&priv->si->pdev->dev, si_data,
1597 +                            data_size, DMA_FROM_DEVICE);
1598 +       if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
1599 +               netdev_err(priv->si->ndev, "DMA mapping failed!\n");
1600 +               kfree(si_data);
1601 +               return -ENOMEM;
1602 +       }
1603 +       cbdr->addr[0] = lower_32_bits(dma);
1604 +       cbdr->addr[1] = upper_32_bits(dma);
1605 +
1606 +       xmit_cbdr(priv->si, curr_cbd);
1607 +
1608 +       streamid->type = si_data->id_type;
1609 +
1610 +       if (streamid->type == 1) {
1611 +               streamid->para.nid.dmac = si_data->mac[0]
1612 +                       + ((u64)si_data->mac[1] << 8)
1613 +                       + ((u64)si_data->mac[2] << 16)
1614 +                       + ((u64)si_data->mac[3] << 24)
1615 +                       + ((u64)si_data->mac[4] << 32)
1616 +                       + ((u64)si_data->mac[5] << 40);
1617 +               /* VID Match. If set (b1) then the VID must match, otherwise
1618 +                * any VID is considered a match.
1619 +                */
1620 +               streamid->para.nid.vid =
1621 +                               le16_to_cpu(si_data->vid_vidm_tg
1622 +                                           & ENETC_CBDR_SID_VID_MASK);
1623 +               streamid->para.nid.tagged =
1624 +                               le16_to_cpu(si_data->vid_vidm_tg >> 14 & 0x3);
1625 +       } else if (streamid->type == 2) {
1626 +               streamid->para.sid.smac = si_data->mac[0]
1627 +                       + ((u64)si_data->mac[1] << 8)
1628 +                       + ((u64)si_data->mac[2] << 16)
1629 +                       + ((u64)si_data->mac[3] << 24)
1630 +                       + ((u64)si_data->mac[4] << 32)
1631 +                       + ((u64)si_data->mac[5] << 40);
1632 +               /* VID Match. If set (b1) then the VID must match, otherwise
1633 +                * any VID is considered a match.
1634 +                */
1635 +               streamid->para.sid.vid =
1636 +                               le16_to_cpu(si_data->vid_vidm_tg
1637 +                                           & ENETC_CBDR_SID_VID_MASK);
1638 +               streamid->para.sid.tagged =
1639 +                               le16_to_cpu(si_data->vid_vidm_tg >> 14 & 0x3);
1640 +       }
1641 +
1642 +       streamid->handle = le32_to_cpu(si_data->stream_handle);
1643 +       streamid->ifac_iport = le32_to_cpu(si_data->input_ports);
1644 +       valid = si_data->en ? 1 : 0;
1645 +
1646 +       memset(cbdr, 0, sizeof(*cbdr));
1647 +       kfree(si_data);
1648 +
1649 +       return valid;
1650 +}
1651 +
1652 +/*  CBD Class 7: Stream Identity Statistics Query Descriptor - Long Format */
1653 +int enetc_cb_streamid_counters_get(struct net_device *ndev, u32 index,
1654 +                                  struct tsn_cb_streamid_counters *counters)
1655 +{
1656 +       return 0;
1657 +}
1658 +
1659 +void enetc_qci_enable(struct enetc_hw *hw)
1660 +{
1661 +       enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR)
1662 +                | ENETC_PPSFPMR_PSFPEN | ENETC_PPSFPMR_VS
1663 +                | ENETC_PPSFPMR_PVC | ENETC_PPSFPMR_PVZC);
1664 +}
1665 +
1666 +void enetc_qci_disable(struct enetc_hw *hw)
1667 +{
1668 +       enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR)
1669 +                & ~ENETC_PPSFPMR_PSFPEN & ~ENETC_PPSFPMR_VS
1670 +                & ~ENETC_PPSFPMR_PVC & ~ENETC_PPSFPMR_PVZC);
1671 +}
1672 +
1673 +/* CBD Class 8: Stream Filter Instance Set Descriptor - Short Format */
1674 +int enetc_qci_sfi_set(struct net_device *ndev, u32 index, bool en,
1675 +                     struct tsn_qci_psfp_sfi_conf *tsn_qci_sfi)
1676 +{
1677 +       struct enetc_cbd *cbdr;
1678 +       struct sfi_conf *sfi_config;
1679 +
1680 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
1681 +       int curr_cbd;
1682 +
1683 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
1684 +
1685 +       cbdr->index = cpu_to_le16(index);
1686 +       cbdr->cmd = 0;
1687 +       cbdr->cls = BDCR_CMD_STREAM_FILTER;
1688 +       cbdr->status_flags = 0x80;
1689 +       cbdr->length = cpu_to_le16(1);
1690 +
1691 +       sfi_config = &cbdr->sfi_conf;
1692 +       if (en)
1693 +               sfi_config->en = 0x80;
1694 +
1695 +       if (tsn_qci_sfi->stream_handle_spec >= 0) {
1696 +               sfi_config->stream_handle =
1697 +                       cpu_to_le32(tsn_qci_sfi->stream_handle_spec);
1698 +               sfi_config->sthm |= 0x80;
1699 +       }
1700 +
1701 +       sfi_config->sg_inst_table_index =
1702 +               cpu_to_le16(tsn_qci_sfi->stream_gate_instance_id);
1703 +       sfi_config->input_ports = 1 << (priv->si->pdev->devfn & 0x7);
1704 +
1705 +       /* The priority value which may be matched against the
1706 +        * frame’s priority value to determine a match for this entry.
1707 +        */
1708 +       if (tsn_qci_sfi->priority_spec >= 0)
1709 +               sfi_config->multi |= (tsn_qci_sfi->priority_spec & 0x7) | 0x8;
1710 +
1711 +       /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
1712 +        * field as being either an MSDU value or an index into the Flow
1713 +        * Meter Instance table.
1714 +        */
1715 +       if (tsn_qci_sfi->stream_filter.maximum_sdu_size != 0) {
1716 +               sfi_config->msdu =
1717 +               cpu_to_le16(tsn_qci_sfi->stream_filter.maximum_sdu_size);
1718 +               sfi_config->multi |= 0x40;
1719 +       }
1720 +
1721 +       if (tsn_qci_sfi->stream_filter.flow_meter_instance_id >= 0) {
1722 +               sfi_config->fm_inst_table_index =
1723 +               cpu_to_le16(tsn_qci_sfi->stream_filter.flow_meter_instance_id);
1724 +               sfi_config->multi |= 0x80;
1725 +       }
1726 +
1727 +       /* Stream blocked due to oversized frame enable. TRUE or FALSE */
1728 +       if (tsn_qci_sfi->block_oversize_enable)
1729 +               sfi_config->multi |= 0x20;
1730 +
1731 +       /* Stream blocked due to oversized frame. TRUE or FALSE */
1732 +       if (tsn_qci_sfi->block_oversize)
1733 +               sfi_config->multi |= 0x10;
1734 +
1735 +       xmit_cbdr(priv->si, curr_cbd);
1736 +
1737 +       memset(cbdr, 0, sizeof(*cbdr));
1738 +       return 0;
1739 +}
1740 +
1741 +/* CBD Class 8: Stream Filter Instance Query Descriptor - Short Format */
1742 +int enetc_qci_sfi_get(struct net_device *ndev, u32 index,
1743 +                     struct tsn_qci_psfp_sfi_conf *tsn_qci_sfi)
1744 +{
1745 +       struct enetc_cbd *cbdr;
1746 +       struct sfi_conf *sfi_config;
1747 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
1748 +       int curr_cbd;
1749 +
1750 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
1751 +
1752 +       cbdr->index = cpu_to_le16(index);
1753 +       cbdr->cmd = 1;
1754 +       cbdr->cls = BDCR_CMD_STREAM_FILTER;
1755 +       cbdr->status_flags = 0x80;
1756 +
1757 +       xmit_cbdr(priv->si, curr_cbd);
1758 +
1759 +       sfi_config = &cbdr->sfi_conf;
1760 +       if (sfi_config->sthm & 0x80)
1761 +               tsn_qci_sfi->stream_handle_spec =
1762 +                       le32_to_cpu(sfi_config->stream_handle);
1763 +       else
1764 +               tsn_qci_sfi->stream_handle_spec = -1;
1765 +
1766 +       tsn_qci_sfi->stream_gate_instance_id =
1767 +               le16_to_cpu(sfi_config->sg_inst_table_index);
1768 +
1769 +       if (sfi_config->multi & 0x8)
1770 +               tsn_qci_sfi->priority_spec =
1771 +                       le16_to_cpu(sfi_config->multi & 0x7);
1772 +       else
1773 +               tsn_qci_sfi->priority_spec = -1;
1774 +
1775 +       /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
1776 +        * field as being either an MSDU value or an index into the Flow
1777 +        * Meter Instance table.
1778 +        */
1779 +       if (sfi_config->multi & 0x80)
1780 +               tsn_qci_sfi->stream_filter.flow_meter_instance_id =
1781 +                       le16_to_cpu(sfi_config->fm_inst_table_index);
1782 +       else
1783 +               tsn_qci_sfi->stream_filter.flow_meter_instance_id = -1;
1784 +
1785 +       if (sfi_config->multi & 0x40)
1786 +               tsn_qci_sfi->stream_filter.maximum_sdu_size =
1787 +                       le16_to_cpu(sfi_config->msdu);
1788 +
1789 +       /* Stream blocked due to oversized frame enable. TRUE or FALSE */
1790 +       if (sfi_config->multi & 0x20)
1791 +               tsn_qci_sfi->block_oversize_enable = true;
1792 +       /* Stream blocked due to oversized frame. TRUE or FALSE */
1793 +       if (sfi_config->multi & 0x10)
1794 +               tsn_qci_sfi->block_oversize = true;
1795 +
1796 +       if (sfi_config->en & 0x80) {
1797 +               memset(cbdr, 0, sizeof(*cbdr));
1798 +               return 1;
1799 +       }
1800 +
1801 +       memset(cbdr, 0, sizeof(*cbdr));
1802 +       return 0;
1803 +}
1804 +
1805 +/* CBD Class 8: Stream Filter Instance Query Statistics
1806 + * Descriptor - Long Format
1807 + */
1808 +int enetc_qci_sfi_counters_get(struct net_device *ndev, u32 index,
1809 +                              struct tsn_qci_psfp_sfi_counters *counters)
1810 +{
1811 +       struct enetc_cbd *cbdr;
1812 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
1813 +       int curr_cbd;
1814 +       struct sfi_counter_data *sfi_counter_data;
1815 +       dma_addr_t dma;
1816 +       u16 data_size, dma_size;
1817 +
1818 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
1819 +
1820 +       cbdr->index = cpu_to_le16((u16)index);
1821 +       cbdr->cmd = 2;
1822 +       cbdr->cls = BDCR_CMD_STREAM_FILTER;
1823 +       cbdr->status_flags = 0;
1824 +
1825 +       data_size = sizeof(struct sfi_counter_data);
1826 +       sfi_counter_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
1827 +       if (!sfi_counter_data)
1828 +               return -ENOMEM;
1829 +
1830 +       dma = dma_map_single(&priv->si->pdev->dev, sfi_counter_data,
1831 +                            data_size, DMA_FROM_DEVICE);
1832 +       if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
1833 +               netdev_err(priv->si->ndev, "DMA mapping failed!\n");
1834 +               kfree(sfi_counter_data);
1835 +               return -ENOMEM;
1836 +       }
1837 +       cbdr->addr[0] = lower_32_bits(dma);
1838 +       cbdr->addr[1] = upper_32_bits(dma);
1839 +
1840 +       dma_size = cpu_to_le16(data_size);
1841 +       cbdr->length = dma_size;
1842 +
1843 +       xmit_cbdr(priv->si, curr_cbd);
1844 +
1845 +       counters->matching_frames_count =
1846 +                       ((u64)le32_to_cpu(sfi_counter_data->matchh) << 32)
1847 +                       + sfi_counter_data->matchl;
1848 +
1849 +       counters->not_passing_sdu_count =
1850 +                       ((u64)le32_to_cpu(sfi_counter_data->msdu_droph) << 32)
1851 +                       + sfi_counter_data->msdu_dropl;
1852 +
1853 +       counters->passing_sdu_count = counters->matching_frames_count
1854 +                               - counters->not_passing_sdu_count;
1855 +
1856 +       counters->not_passing_frames_count =
1857 +               ((u64)le32_to_cpu(sfi_counter_data->stream_gate_droph) << 32)
1858 +               + le32_to_cpu(sfi_counter_data->stream_gate_dropl);
1859 +
1860 +       counters->passing_frames_count = counters->matching_frames_count
1861 +                               - counters->not_passing_sdu_count
1862 +                               - counters->not_passing_frames_count;
1863 +
1864 +       counters->red_frames_count =
1865 +               ((u64)le32_to_cpu(sfi_counter_data->flow_meter_droph) << 32)
1866 +               + le32_to_cpu(sfi_counter_data->flow_meter_dropl);
1867 +
1868 +       memset(cbdr, 0, sizeof(*cbdr));
1869 +       return 0;
1870 +}
1871 +
1872 +/* CBD Class 9: Stream Gate Instance Table Entry Set
1873 + * Descriptor - Short Format
1874 + */
1875 +int enetc_qci_sgi_set(struct net_device *ndev, u32 index,
1876 +                     struct tsn_qci_psfp_sgi_conf *tsn_qci_sgi)
1877 +{
1878 +       struct enetc_cbd *cbdr, *cbdr_sgcl;
1879 +       struct sgi_table *sgi_config;
1880 +       struct sgcl_conf *sgcl_config;
1881 +       struct sgcl_data *sgcl_data;
1882 +       struct sgce *sgce;
1883 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
1884 +
1885 +       dma_addr_t dma;
1886 +       u16 data_size, dma_size;
1887 +       int curr_cbd, i;
1888 +
1889 +       /* disable first */
1890 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
1891 +       memset(cbdr, 0, sizeof(*cbdr));
1892 +
1893 +       cbdr->index = cpu_to_le16(index);
1894 +       cbdr->cmd = 0;
1895 +       cbdr->cls = BDCR_CMD_STREAM_GCL;
1896 +       cbdr->status_flags = 0x80;
1897 +
1898 +       xmit_cbdr(priv->si, curr_cbd);
1899 +
1900 +       if (!tsn_qci_sgi->gate_enabled) {
1901 +               memset(cbdr, 0, sizeof(*cbdr));
1902 +               return 0;
1903 +       }
1904 +
1905 +       /* Re-enable */
1906 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
1907 +       memset(cbdr, 0, sizeof(*cbdr));
1908 +
1909 +       cbdr->index = cpu_to_le16(index);
1910 +       cbdr->cmd = 0;
1911 +       cbdr->cls = BDCR_CMD_STREAM_GCL;
1912 +       cbdr->status_flags = 0x80;
1913 +
1914 +       sgi_config = &cbdr->sgi_table;
1915 +
1916 +       sgi_config->ocgtst = tsn_qci_sgi->admin.control_list_length ?
1917 +                       0x80 : (tsn_qci_sgi->admin.gate_states ? 0x80 : 0x0);
1918 +
1919 +       sgi_config->oipv =
1920 +               tsn_qci_sgi->admin.control_list_length ?
1921 +               0x0 : ((tsn_qci_sgi->admin.init_ipv < 0) ?
1922 +                      0x0 : ((tsn_qci_sgi->admin.init_ipv & 0x7) | 0x8));
1923 +
1924 +       sgi_config->en = 0x80;
1925 +
1926 +       if (tsn_qci_sgi->block_invalid_rx_enable)
1927 +               sgi_config->gset |= 0x80;
1928 +       if (tsn_qci_sgi->block_invalid_rx)
1929 +               sgi_config->gset |= 0x40;
1930 +       if (tsn_qci_sgi->block_octets_exceeded)
1931 +               sgi_config->gset |= 0x10;
1932 +       if (tsn_qci_sgi->block_octets_exceeded_enable)
1933 +               sgi_config->gset |= 0x20;
1934 +
1935 +       xmit_cbdr(priv->si, curr_cbd);
1936 +
1937 +       if (tsn_qci_sgi->admin.control_list_length == 0)
1938 +               goto exit;
1939 +
1940 +       curr_cbd = alloc_cbdr(priv->si, &cbdr_sgcl);
1941 +       memset(cbdr, 0, sizeof(*cbdr));
1942 +
1943 +       cbdr_sgcl->index = cpu_to_le16(index);
1944 +       cbdr_sgcl->cmd = 1;
1945 +       cbdr_sgcl->cls = BDCR_CMD_STREAM_GCL;
1946 +       cbdr_sgcl->status_flags = 0;
1947 +
1948 +       sgcl_config = &cbdr_sgcl->sgcl_conf;
1949 +
1950 +       /* tsn_qci_sgi->admin.control_list_length is not zero now */
1951 +       if (tsn_qci_sgi->admin.control_list_length > 4)
1952 +               return -EINVAL;
1953 +
1954 +       sgcl_config->acl_len =
1955 +               (tsn_qci_sgi->admin.control_list_length - 1) & 0x3;
1956 +
1957 +       data_size = sizeof(struct sgcl_data) +
1958 +               (sgcl_config->acl_len + 1) * sizeof(struct sgce);
1959 +
1960 +       sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
1961 +       if (!sgcl_data)
1962 +               return -ENOMEM;
1963 +
1964 +       dma_size = cpu_to_le16(data_size);
1965 +       cbdr_sgcl->length = dma_size;
1966 +
1967 +       dma = dma_map_single(&priv->si->pdev->dev,
1968 +                            sgcl_data, data_size,
1969 +                            DMA_FROM_DEVICE);
1970 +       if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
1971 +               netdev_err(priv->si->ndev, "DMA mapping failed!\n");
1972 +               memset(cbdr, 0, sizeof(*cbdr));
1973 +               memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
1974 +               kfree(sgcl_data);
1975 +               return -ENOMEM;
1976 +       }
1977 +       cbdr_sgcl->addr[0] = lower_32_bits(dma);
1978 +       cbdr_sgcl->addr[1] = upper_32_bits(dma);
1979 +
1980 +       sgce = (struct sgce *)(sgcl_data + 1);
1981 +
1982 +       if (tsn_qci_sgi->admin.gate_states)
1983 +               sgcl_config->agtst = 0x80;
1984 +
1985 +       sgcl_data->ct = cpu_to_le32(tsn_qci_sgi->admin.cycle_time);
1986 +       sgcl_data->cte = cpu_to_le32(tsn_qci_sgi->admin.cycle_time_extension);
1987 +
1988 +       if (tsn_qci_sgi->admin.init_ipv >= 0)
1989 +               sgcl_config->aipv = (tsn_qci_sgi->admin.init_ipv & 0x7) | 0x8;
1990 +
1991 +       for (i = 0; i < tsn_qci_sgi->admin.control_list_length; i++) {
1992 +               struct tsn_qci_psfp_gcl *temp_sgcl = tsn_qci_sgi->admin.gcl + i;
1993 +               struct sgce *temp_entry = (struct sgce *)(sgce + i);
1994 +
1995 +               if (temp_sgcl->gate_state)
1996 +                       temp_entry->multi |= 0x10;
1997 +
1998 +               if (temp_sgcl->ipv >= 0)
1999 +                       temp_entry->multi |= ((temp_sgcl->ipv & 0x7) << 5)
2000 +                                               | 0x08;
2001 +
2002 +               if (temp_sgcl->octet_max)
2003 +                       temp_entry->multi |= 0x01;
2004 +
2005 +               temp_entry->interval = cpu_to_le32(temp_sgcl->time_interval);
2006 +               temp_entry->msdu[0] = temp_sgcl->octet_max & 0xFF;
2007 +               temp_entry->msdu[1] = (temp_sgcl->octet_max >> 8) & 0xFF;
2008 +               temp_entry->msdu[2] = (temp_sgcl->octet_max >> 16) & 0xFF;
2009 +       }
2010 +
2011 +       if (!tsn_qci_sgi->admin.base_time) {
2012 +               sgcl_data->btl =
2013 +                       cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0));
2014 +               sgcl_data->bth =
2015 +                       cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1));
2016 +       } else {
2017 +               u32 tempu, templ;
2018 +
2019 +               tempu = upper_32_bits(tsn_qci_sgi->admin.base_time);
2020 +               templ = lower_32_bits(tsn_qci_sgi->admin.base_time);
2021 +               sgcl_data->bth = cpu_to_le32(tempu);
2022 +               sgcl_data->btl = cpu_to_le32(templ);
2023 +       }
2024 +
2025 +       xmit_cbdr(priv->si, curr_cbd);
2026 +
2027 +       memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
2028 +       kfree(sgcl_data);
2029 +
2030 +exit:
2031 +       memset(cbdr, 0, sizeof(*cbdr));
2032 +       return 0;
2033 +}
2034 +
2035 +/* CBD Class 9: Stream Gate Instance Table Entry Query
2036 + * Descriptor - Short Format
2037 + */
2038 +int enetc_qci_sgi_get(struct net_device *ndev, u32 index,
2039 +                     struct tsn_qci_psfp_sgi_conf *tsn_qci_sgi)
2040 +{
2041 +       struct enetc_cbd *cbdr, *cbdr_sgcl;
2042 +       struct sgi_table *sgi_config;
2043 +       struct sgcl_query *sgcl_query;
2044 +       struct sgcl_query_resp *sgcl_data;
2045 +       struct sgce *sgce;
2046 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
2047 +       dma_addr_t dma;
2048 +       u16 data_size, dma_size, gcl_data_stat = 0;
2049 +       u8 admin_len = 0;
2050 +       int curr_cbd, i;
2051 +
2052 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
2053 +
2054 +       cbdr->index = cpu_to_le16(index);
2055 +       cbdr->cmd = 2;
2056 +       cbdr->cls = BDCR_CMD_STREAM_GCL;
2057 +       cbdr->status_flags = 0x80;
2058 +
2059 +       xmit_cbdr(priv->si, curr_cbd);
2060 +
2061 +       sgi_config = &cbdr->sgi_table;
2062 +
2063 +       tsn_qci_sgi->admin.gate_states = (sgi_config->ocgtst & 0x80) ?
2064 +                                               true : false;
2065 +       if (sgi_config->oipv & 0x08)
2066 +               tsn_qci_sgi->admin.init_ipv = sgi_config->oipv & 0x7;
2067 +       else
2068 +               tsn_qci_sgi->admin.init_ipv = -1;
2069 +
2070 +       if (sgi_config->en & 0x80)
2071 +               tsn_qci_sgi->gate_enabled = true;
2072 +       if (sgi_config->gset & 0x80)
2073 +               tsn_qci_sgi->block_invalid_rx_enable = true;
2074 +       if (sgi_config->gset & 0x40)
2075 +               tsn_qci_sgi->block_invalid_rx = true;
2076 +       if (sgi_config->gset & 0x20)
2077 +               tsn_qci_sgi->block_octets_exceeded_enable = true;
2078 +       if (sgi_config->gset & 0x10)
2079 +               tsn_qci_sgi->block_octets_exceeded = true;
2080 +
2081 +       /* Check gate list length is zero? */
2082 +       if (!(sgi_config->oacl_len & 0x30)) {
2083 +               tsn_qci_sgi->admin.control_list_length = 0;
2084 +               goto exit;
2085 +       }
2086 +
2087 +       curr_cbd = alloc_cbdr(priv->si, &cbdr_sgcl);
2088 +
2089 +       cbdr_sgcl->index = cpu_to_le16(index);
2090 +       cbdr_sgcl->cmd = 3;
2091 +       cbdr_sgcl->cls = BDCR_CMD_STREAM_GCL;
2092 +       cbdr_sgcl->status_flags = 0;
2093 +
2094 +       data_size = sizeof(struct sgcl_query_resp) + 4 * sizeof(struct sgce);
2095 +
2096 +       sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
2097 +       if (!sgcl_data)
2098 +               return -ENOMEM;
2099 +
2100 +       dma_size = cpu_to_le16(data_size);
2101 +       cbdr_sgcl->length = dma_size;
2102 +       cbdr_sgcl->status_flags = 0;
2103 +
2104 +       sgcl_query = &cbdr_sgcl->sgcl_query;
2105 +
2106 +       sgcl_query->oacl_len = 0x10;
2107 +
2108 +       dma = dma_map_single(&priv->si->pdev->dev, sgcl_data,
2109 +                            data_size, DMA_FROM_DEVICE);
2110 +       if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
2111 +               netdev_err(priv->si->ndev, "DMA mapping failed!\n");
2112 +               memset(cbdr, 0, sizeof(*cbdr));
2113 +               memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
2114 +               kfree(sgcl_data);
2115 +               return -ENOMEM;
2116 +       }
2117 +       cbdr_sgcl->addr[0] = lower_32_bits(dma);
2118 +       cbdr_sgcl->addr[1] = upper_32_bits(dma);
2119 +
2120 +       xmit_cbdr(priv->si, curr_cbd);
2121 +
2122 +       sgce = (struct sgce *)(sgcl_data + 1);
2123 +
2124 +       gcl_data_stat = le16_to_cpu(sgcl_data->stat);
2125 +       if (gcl_data_stat & 0x10)
2126 +               tsn_qci_sgi->admin.gate_states = true;
2127 +
2128 +       if (gcl_data_stat & 0x80)
2129 +               tsn_qci_sgi->admin.init_ipv = gcl_data_stat & 0x7;
2130 +       else
2131 +               tsn_qci_sgi->admin.init_ipv = -1;
2132 +
2133 +       /* admin_len can also get from gcl_data_stat bit 5,6
2134 +        * OR sgi_config->oacl_len
2135 +        */
2136 +       admin_len = (sgcl_query->oacl_len & 0x3) + 1;
2137 +       tsn_qci_sgi->admin.control_list_length = admin_len;
2138 +       tsn_qci_sgi->admin.cycle_time = le32_to_cpu(sgcl_data->act);
2139 +       tsn_qci_sgi->admin.cycle_time_extension = le32_to_cpu(sgcl_data->acte);
2140 +       tsn_qci_sgi->admin.base_time = ((u64)(le32_to_cpu(sgcl_data->abth))
2141 +                                             << 32)
2142 +                                       + le32_to_cpu(sgcl_data->abtl);
2143 +
2144 +       tsn_qci_sgi->admin.gcl = kcalloc(admin_len,
2145 +                                        sizeof(struct tsn_qci_psfp_gcl),
2146 +                                        GFP_KERNEL);
2147 +       if (!tsn_qci_sgi->admin.gcl) {
2148 +               kfree(sgcl_data);
2149 +               return -ENOMEM;
2150 +       }
2151 +
2152 +       for (i = 0; i < admin_len; i++) {
2153 +               struct tsn_qci_psfp_gcl *temp_sgcl = tsn_qci_sgi->admin.gcl + i;
2154 +               struct sgce *temp_entry = (struct sgce *)(sgce + i);
2155 +
2156 +               if (temp_entry->multi & 0x10)
2157 +                       temp_sgcl->gate_state = true;
2158 +
2159 +               if (temp_entry->multi & 0x08)
2160 +                       temp_sgcl->ipv = temp_entry->multi >> 5;
2161 +               else
2162 +                       temp_sgcl->ipv = -1;
2163 +
2164 +               temp_sgcl->time_interval = le32_to_cpu(temp_entry->interval);
2165 +
2166 +               if (temp_entry->multi & 0x01)
2167 +                       temp_sgcl->octet_max = (temp_entry->msdu[0] & 0xff)
2168 +                               | (((u32)temp_entry->msdu[1] << 8) & 0xff00)
2169 +                               | (((u32)temp_entry->msdu[1] << 16) & 0xff0000);
2170 +               else
2171 +                       temp_sgcl->octet_max = 0;
2172 +       }
2173 +
2174 +       memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
2175 +       kfree(sgcl_data);
2176 +
2177 +exit:
2178 +       memset(cbdr, 0, sizeof(*cbdr));
2179 +       return 0;
2180 +}
2181 +
2182 +/* CBD Class 9: Stream Gate Instance Table Entry Query Descriptor
2183 + * CBD Class 9: Stream Gate Control List Query Descriptor
2184 + */
2185 +int enetc_qci_sgi_status_get(struct net_device *ndev, u16 index,
2186 +                            struct tsn_psfp_sgi_status *status)
2187 +{
2188 +       struct enetc_cbd *cbdr_sgi, *cbdr_sgcl;
2189 +       struct sgi_table *sgi_config;
2190 +       struct sgcl_query *sgcl_query;
2191 +       struct sgcl_query_resp *sgcl_data;
2192 +       struct sgce *sgce;
2193 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
2194 +       dma_addr_t dma;
2195 +       u16 data_size, dma_size, gcl_data_stat = 0;
2196 +       u8 oper_len = 0;
2197 +       int curr_cbd, i;
2198 +
2199 +       curr_cbd = alloc_cbdr(priv->si, &cbdr_sgi);
2200 +
2201 +       cbdr_sgi->index = cpu_to_le16(index);
2202 +       cbdr_sgi->cmd = 2;
2203 +       cbdr_sgi->cls = BDCR_CMD_STREAM_GCL;
2204 +       cbdr_sgi->status_flags = 0x80;
2205 +
2206 +       sgi_config = &cbdr_sgi->sgi_table;
2207 +
2208 +       if (sgi_config->gset & 0x4)
2209 +               status->config_pending = true;
2210 +
2211 +       status->oper.gate_states = ((sgi_config->ocgtst & 0x80) ? true : false);
2212 +
2213 +       /* Check gate list length is zero */
2214 +       if (!(sgi_config->oacl_len & 0x30)) {
2215 +               status->oper.control_list_length = 0;
2216 +               goto cmd2quit;
2217 +       }
2218 +
2219 +       xmit_cbdr(priv->si, curr_cbd);
2220 +
2221 +       curr_cbd = alloc_cbdr(priv->si, &cbdr_sgcl);
2222 +
2223 +       cbdr_sgcl->index = cpu_to_le16(index);
2224 +       cbdr_sgcl->cmd = 3;
2225 +       cbdr_sgcl->cls = BDCR_CMD_STREAM_GCL;
2226 +       cbdr_sgcl->status_flags = 0;
2227 +
2228 +       /* Max size */
2229 +       data_size = sizeof(struct sgcl_query_resp) + 4 * sizeof(struct sgce);
2230 +
2231 +       sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
2232 +       if (!sgcl_data)
2233 +               return -ENOMEM;
2234 +
2235 +       dma_size = cpu_to_le16(data_size);
2236 +       cbdr_sgcl->length = dma_size;
2237 +       cbdr_sgcl->status_flags = 0;
2238 +
2239 +       sgcl_query = &cbdr_sgcl->sgcl_query;
2240 +
2241 +       sgcl_query->oacl_len = 0x20;
2242 +
2243 +       dma = dma_map_single(&priv->si->pdev->dev, sgcl_data,
2244 +                            data_size, DMA_FROM_DEVICE);
2245 +       if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
2246 +               netdev_err(priv->si->ndev, "DMA mapping failed!\n");
2247 +               memset(cbdr_sgi, 0, sizeof(*cbdr_sgi));
2248 +               memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
2249 +               kfree(sgcl_data);
2250 +               return -ENOMEM;
2251 +       }
2252 +       cbdr_sgcl->addr[0] = lower_32_bits(dma);
2253 +       cbdr_sgcl->addr[1] = upper_32_bits(dma);
2254 +
2255 +       xmit_cbdr(priv->si, curr_cbd);
2256 +
2257 +       sgce = (struct sgce *)(sgcl_data + 1);
2258 +
2259 +       /* oper_len can also get from gcl_data_stat bit 5,6
2260 +        * OR sgi_config->oacl_len
2261 +        */
2262 +       oper_len = ((sgcl_query->oacl_len & 0x0c) >> 2) + 1;
2263 +
2264 +       /* Get Stream Gate Control List */
2265 +       status->oper.cycle_time = le32_to_cpu(sgcl_data->oct);
2266 +       status->oper.cycle_time_extension = le32_to_cpu(sgcl_data->octe);
2267 +       status->oper.base_time = le32_to_cpu(sgcl_data->obtl)
2268 +                               + ((u64)le32_to_cpu(sgcl_data->obth) << 32);
2269 +       status->oper.control_list_length = oper_len;
2270 +
2271 +       gcl_data_stat = le16_to_cpu(sgcl_data->stat);
2272 +       if (gcl_data_stat & 0x400)
2273 +               status->oper.init_ipv = gcl_data_stat & 0x38 >> 7;
2274 +       else
2275 +               status->oper.init_ipv = -1;
2276 +
2277 +       if (gcl_data_stat & 0x800)
2278 +               status->oper.gate_states = true;
2279 +
2280 +       status->oper.gcl = kcalloc(oper_len,
2281 +                                  sizeof(struct tsn_qci_psfp_gcl),
2282 +                                  GFP_KERNEL);
2283 +       if (!status->oper.gcl) {
2284 +               memset(cbdr_sgi, 0, sizeof(*cbdr_sgi));
2285 +               memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
2286 +               kfree(sgcl_data);
2287 +               return -ENOMEM;
2288 +       }
2289 +
2290 +       for (i = 0; i < oper_len; i++) {
2291 +               struct tsn_qci_psfp_gcl *temp_sgcl = status->oper.gcl + i;
2292 +               struct sgce *temp_entry = (struct sgce *)(sgce + i);
2293 +
2294 +               if (temp_entry->multi & 0x10)
2295 +                       temp_sgcl->gate_state = true;
2296 +
2297 +               if (temp_entry->multi & 0x08)
2298 +                       temp_sgcl->ipv = temp_entry->multi >> 5;
2299 +               else
2300 +                       temp_sgcl->ipv = -1;
2301 +
2302 +               temp_sgcl->time_interval = le32_to_cpu(temp_entry->interval);
2303 +
2304 +               if (temp_entry->multi & 0x01)
2305 +                       temp_sgcl->octet_max = temp_entry->msdu[0]
2306 +                                       | ((((u32)temp_entry->msdu[1]) << 8)
2307 +                                          & 0xff00)
2308 +                                       | ((((u32)temp_entry->msdu[2]) << 16)
2309 +                                          & 0xff0000);
2310 +               else
2311 +                       temp_sgcl->octet_max = 0;
2312 +       }
2313 +
2314 +       status->config_change_time = le32_to_cpu(sgcl_data->cctl)
2315 +                               + ((u64)le32_to_cpu(sgcl_data->ccth) << 32);
2316 +
2317 +       memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
2318 +       kfree(sgcl_data);
2319 +
2320 +cmd2quit:
2321 +       /* changed to SITGTGR */
2322 +       status->tick_granularity = enetc_rd(&priv->si->hw, ENETC_SITGTGR);
2323 +
2324 +       /* current time */
2325 +       status->current_time = get_current_time(priv->si);
2326 +
2327 +       memset(cbdr_sgi, 0, sizeof(*cbdr_sgi));
2328 +
2329 +       return 0;
2330 +}
2331 +
2332 +/* CBD Class 10: Flow Meter Instance Set Descriptor - Short Format */
2333 +int enetc_qci_fmi_set(struct net_device *ndev, u32 index, bool enable,
2334 +                     struct tsn_qci_psfp_fmi *tsn_qci_fmi)
2335 +{
2336 +       struct enetc_cbd *cbdr;
2337 +       struct fmi_conf *fmi_config;
2338 +
2339 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
2340 +       int curr_cbd;
2341 +       u64 temp = 0;
2342 +
2343 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
2344 +
2345 +       cbdr->index = cpu_to_le16((u16)index);
2346 +       cbdr->cmd = 0;
2347 +       cbdr->cls = BDCR_CMD_FLOW_METER;
2348 +       cbdr->status_flags = 0x80;
2349 +
2350 +       xmit_cbdr(priv->si, curr_cbd);
2351 +
2352 +       if (!enable) {
2353 +               memset(cbdr, 0, sizeof(*cbdr));
2354 +               return 0;
2355 +       }
2356 +
2357 +       /* Re-enable */
2358 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
2359 +       memset(cbdr, 0, sizeof(*cbdr));
2360 +       cbdr->index = cpu_to_le16((u16)index);
2361 +       cbdr->cmd = 0;
2362 +       cbdr->cls = BDCR_CMD_FLOW_METER;
2363 +       cbdr->status_flags = 0x80;
2364 +
2365 +       fmi_config = &cbdr->fmi_conf;
2366 +       fmi_config->en = 0x80;
2367 +       if (tsn_qci_fmi->cir) {
2368 +               temp = (u64)1000 * tsn_qci_fmi->cir;
2369 +               temp = temp / 3725;
2370 +       }
2371 +       fmi_config->cir = cpu_to_le32((u32)temp);
2372 +       fmi_config->cbs = cpu_to_le32(tsn_qci_fmi->cbs);
2373 +       temp = 0;
2374 +       if (tsn_qci_fmi->eir) {
2375 +               temp = (u64)1000 * tsn_qci_fmi->eir;
2376 +               temp = temp / 3725;
2377 +       }
2378 +       fmi_config->eir = cpu_to_le32((u32)temp);
2379 +       fmi_config->ebs = cpu_to_le32(tsn_qci_fmi->ebs);
2380 +
2381 +       if (tsn_qci_fmi->mark_red)
2382 +               fmi_config->conf |= 0x1;
2383 +
2384 +       if (tsn_qci_fmi->mark_red_enable)
2385 +               fmi_config->conf |= 0x2;
2386 +
2387 +       if (tsn_qci_fmi->drop_on_yellow)
2388 +               fmi_config->conf |= 0x4;
2389 +
2390 +       if (tsn_qci_fmi->cm)
2391 +               fmi_config->conf |= 0x8;
2392 +
2393 +       if (tsn_qci_fmi->cf)
2394 +               fmi_config->conf |= 0x10;
2395 +
2396 +       xmit_cbdr(priv->si, curr_cbd);
2397 +
2398 +       memset(cbdr, 0, sizeof(*cbdr));
2399 +       return 0;
2400 +}
2401 +
2402 +/* CBD Class 10: Flow Meter Instance Query Descriptor - Short Format */
2403 +int enetc_qci_fmi_get(struct net_device *ndev, u32 index,
2404 +                     struct tsn_qci_psfp_fmi *tsn_qci_fmi,
2405 +                     struct tsn_qci_psfp_fmi_counters *counters)
2406 +{
2407 +       struct enetc_cbd *cbdr;
2408 +       struct fmi_conf *fmi_config;
2409 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
2410 +       int curr_cbd;
2411 +       u16 data_size, dma_size;
2412 +       dma_addr_t dma;
2413 +       struct fmi_query_stat_resp *fmi_counter_data;
2414 +       u64 temp = 0;
2415 +
2416 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
2417 +
2418 +       cbdr->index = cpu_to_le16(index);
2419 +       cbdr->cmd = 1;
2420 +       cbdr->cls = BDCR_CMD_FLOW_METER;
2421 +       cbdr->status_flags = 0x80;
2422 +
2423 +       xmit_cbdr(priv->si, curr_cbd);
2424 +
2425 +       fmi_config = &cbdr->fmi_conf;
2426 +       if (fmi_config->cir) {
2427 +               temp = (u64)3725 * fmi_config->cir;
2428 +               temp = temp / 1000;
2429 +       }
2430 +       tsn_qci_fmi->cir = le32_to_cpu((u32)temp);
2431 +       tsn_qci_fmi->cbs = le32_to_cpu(fmi_config->cbs);
2432 +       temp = 0;
2433 +       if (fmi_config->eir) {
2434 +               temp = (u64)3725 * fmi_config->eir;
2435 +               temp = temp / 1000;
2436 +       }
2437 +       tsn_qci_fmi->eir = le32_to_cpu((u32)temp);
2438 +       tsn_qci_fmi->ebs = le32_to_cpu(fmi_config->ebs);
2439 +
2440 +       if (fmi_config->conf & 0x1)
2441 +               tsn_qci_fmi->mark_red = true;
2442 +
2443 +       if (fmi_config->conf & 0x2)
2444 +               tsn_qci_fmi->mark_red_enable = true;
2445 +
2446 +       if (fmi_config->conf & 0x4)
2447 +               tsn_qci_fmi->drop_on_yellow = true;
2448 +
2449 +       if (fmi_config->conf & 0x8)
2450 +               tsn_qci_fmi->cm = true;
2451 +
2452 +       if (fmi_config->conf & 0x10)
2453 +               tsn_qci_fmi->cf = true;
2454 +
2455 +       memset(cbdr, 0, sizeof(*cbdr));
2456 +
2457 +       /* Get counters */
2458 +       curr_cbd = alloc_cbdr(priv->si, &cbdr);
2459 +
2460 +       cbdr->index = cpu_to_le16(index);
2461 +       cbdr->cmd = 2;
2462 +       cbdr->cls = BDCR_CMD_FLOW_METER;
2463 +       cbdr->status_flags = 0x0;
2464 +
2465 +       data_size = sizeof(struct fmi_query_stat_resp);
2466 +       fmi_counter_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
2467 +       if (!fmi_counter_data)
2468 +               return -ENOMEM;
2469 +
2470 +       dma = dma_map_single(&priv->si->pdev->dev, fmi_counter_data,
2471 +                            data_size, DMA_FROM_DEVICE);
2472 +       if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
2473 +               netdev_err(priv->si->ndev, "DMA mapping failed!\n");
2474 +               kfree(fmi_counter_data);
2475 +               return -ENOMEM;
2476 +       }
2477 +       cbdr->addr[0] = lower_32_bits(dma);
2478 +       cbdr->addr[1] = upper_32_bits(dma);
2479 +
2480 +       dma_size = cpu_to_le16(data_size);
2481 +       cbdr->length = dma_size;
2482 +
2483 +       xmit_cbdr(priv->si, curr_cbd);
2484 +
2485 +       memcpy(counters, fmi_counter_data, sizeof(*counters));
2486 +
2487 +       return 0;
2488 +}
2489 +
2490 +int enetc_qbu_set(struct net_device *ndev, u8 ptvector) /* 802.1Qbu: bit i of ptvector marks TC i preemptable; always returns 0 */
2491 +{
2492 +       u32 temp;
2493 +       int i;
2494 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
2495 +
2496 +       temp = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
2497 +       if (temp & ENETC_QBV_TGE) /* NOTE(review): tests TGE but clears TGPE below -- confirm the mask is intended */
2498 +               enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
2499 +                        temp & (~ENETC_QBV_TGPE));
2500 +
2501 +       for (i = 0; i < 8; i++) {
2502 +               /* 1 Enabled. Traffic is transmitted on the preemptive MAC. */
2503 +               temp = enetc_port_rd(&priv->si->hw, ENETC_PTCFPR(i)); /* read-modify-write FPE per traffic class */
2504 +
2505 +               if ((ptvector >> i) & 0x1)
2506 +                       enetc_port_wr(&priv->si->hw,
2507 +                                     ENETC_PTCFPR(i),
2508 +                                     temp | ENETC_FPE);
2509 +               else
2510 +                       enetc_port_wr(&priv->si->hw,
2511 +                                     ENETC_PTCFPR(i),
2512 +                                     temp & ~ENETC_FPE);
2513 +       }
2514 +
2515 +       return 0;
2516 +}
2517 +
2518 +int enetc_qbu_get(struct net_device *ndev, /* report frame-preemption status into *preemptstat; always returns 0 */
2519 +                 struct tsn_preempt_status *preemptstat)
2520 +{
2521 +       int i;
2522 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
2523 +
2524 +       if (enetc_port_rd(&priv->si->hw, ENETC_PFPMR) & ENETC_PFPMR_PMACE) {
2525 +               preemptstat->preemption_active = true;
2526 +               if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET)
2527 +                                                       & ENETC_QBV_TGE)
2528 +                       preemptstat->hold_request = 1;
2529 +               else
2530 +                       preemptstat->hold_request = 2;
2531 +       } else { /* preemption MAC disabled: nothing more to report */
2532 +               preemptstat->preemption_active = false;
2533 +               return 0;
2534 +       }
2535 +
2536 +       for (i = 0; i < 8; i++)
2537 +               if (enetc_port_rd(&priv->si->hw, ENETC_PTCFPR(i)) & 0x80000000) /* presumably the FPE bit -- confirm vs ENETC_FPE */
2538 +                       preemptstat->admin_state |= 1 << i;
2539 +
2540 +       preemptstat->hold_advance =
2541 +               enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & 0xFFFF;
2542 +       preemptstat->release_advance = /* NOTE(review): identical register/mask as hold_advance -- looks copy-pasted; verify */
2543 +               enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & 0xFFFF;
2544 +
2545 +       return 0;
2546 +}
2547 +
2548 +u32 __enetc_tsn_get_cap(struct enetc_si *si) /* map PCAPR0 hardware capability bits to TSN_CAP_* flags */
2549 +{
2550 +       u32 reg = 0;
2551 +       u32 cap = 0;
2552 +
2553 +       reg = enetc_port_rd(&si->hw, ENETC_PCAPR0);
2554 +
2555 +       if (reg & ENETC_PCAPR0_PSFP)
2556 +               cap |= TSN_CAP_QCI;
2557 +
2558 +       if (reg & ENETC_PCAPR0_TSN)
2559 +               cap |= TSN_CAP_QBV;
2560 +
2561 +       if (reg & ENETC_PCAPR0_QBU)
2562 +               cap |= TSN_CAP_QBU;
2563 +
2564 +       cap |= TSN_CAP_CBS; /* CBS and TBS are always advertised, independent of PCAPR0 */
2565 +       cap |= TSN_CAP_TBS;
2566 +
2567 +       return cap;
2568 +}
2569 +
2570 +u32 enetc_tsn_get_capability(struct net_device *ndev) /* tsn_ops wrapper around __enetc_tsn_get_cap() */
2571 +{
2572 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
2573 +
2574 +       return __enetc_tsn_get_cap(priv->si);
2575 +}
2576 +
2577 +static int  __enetc_get_max_cap(struct enetc_si *si, /* fill Qci max-capacity limits from port capability registers */
2578 +                               struct tsn_qci_psfp_stream_param *stream_para)
2579 +{
2580 +       u32 reg = 0;
2581 +
2582 +       /* Port stream filter capability */
2583 +       reg = enetc_port_rd(&si->hw, ENETC_PSFCAPR);
2584 +       stream_para->max_sf_instance = reg & ENETC_PSFCAPR_MSK;
2585 +       /* Port stream gate capability */
2586 +       reg = enetc_port_rd(&si->hw, ENETC_PSGCAPR);
2587 +       stream_para->max_sg_instance = (reg & ENETC_PSGCAPR_SGIT_MSK);
2588 +       stream_para->supported_list_max = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
2589 +       /* Port flow meter capability */
2590 +       reg = enetc_port_rd(&si->hw, ENETC_PFMCAPR);
2591 +       stream_para->max_fm_instance = reg & ENETC_PFMCAPR_MSK;
2592 +
2593 +       return 0;
2594 +}
2595 +
2596 +int enetc_get_max_cap(struct net_device *ndev, /* tsn_ops wrapper around __enetc_get_max_cap() */
2597 +                     struct tsn_qci_psfp_stream_param *stream_para)
2598 +{
2599 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
2600 +
2601 +       return __enetc_get_max_cap(priv->si, stream_para);
2602 +}
2603 +
2604 +static int enetc_set_cbs(struct net_device *ndev, u8 tc, u8 bw) /* set CBS bandwidth (percent) on TC tc; bw == 0 disables */
2605 +{
2606 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
2607 +       struct enetc_si *si = priv->si;
2608 +       struct enetc_cbs *ecbs = si->ecbs;
2609 +       struct cbs *cbs;
2610 +
2611 +       int bw_sum = 0;
2612 +       u32 port_transmit_rate;
2613 +       u32 port_frame_max_size;
2614 +       u8 tc_nums;
2615 +       int i;
2616 +
2617 +       u32 max_interfrence_size;
2618 +       u32 send_slope;
2619 +       u32 hi_credit;
2620 +
2621 +       if (!ecbs)
2622 +               return -ENOMEM; /* CBS state was never allocated at init */
2623 +
2624 +       port_transmit_rate = get_ndev_speed(si->ndev); /* refresh cached link speed */
2625 +       if (port_transmit_rate != ecbs->port_transmit_rate)
2626 +               ecbs->port_transmit_rate = port_transmit_rate;
2627 +       port_frame_max_size = ecbs->port_max_size_frame;
2628 +       tc_nums = ecbs->tc_nums;
2629 +       cbs = ecbs->cbs;
2630 +
2631 +       if (tc >= tc_nums) {
2632 +               dev_err(&ndev->dev, "Make sure the TC less than %d\n", tc_nums);
2633 +               return -EINVAL;
2634 +       }
2635 +
2636 +       if (!bw) {
2637 +               if (cbs[tc].enable) {
2638 +                       /* Make sure the other TC that are numerically
2639 +                        * lower than this TC have been disabled.
2640 +                        */
2641 +                       for (i = 0; i < tc; i++) {
2642 +                               if (cbs[i].enable)
2643 +                                       break;
2644 +                       }
2645 +                       if (i < tc) {
2646 +                               dev_err(&ndev->dev,
2647 +                                       "TC%d has been disabled first\n", i);
2648 +                               return -EINVAL;
2649 +                       }
2650 +                       memset(&cbs[tc], 0, sizeof(*cbs));
2651 +                       cbs[tc].enable = false;
2652 +                       enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
2653 +                       enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);
2654 +               }
2655 +               return 0;
2656 +       }
2657 +
2658 +       /* Make sure the other TC that are numerically
2659 +        * higher than this TC have been enabled.
2660 +        */
2661 +       for (i = tc_nums - 1; i > tc; i--) {
2662 +               if (!cbs[i].enable) {
2663 +                       dev_err(&ndev->dev,
2664 +                               "TC%d has been enabled first\n", i);
2665 +                       return -EINVAL;
2666 +               }
2667 +               bw_sum += cbs[i].bw;
2668 +       }
2669 +
2670 +       if (bw_sum + bw >= 100) {
2671 +               dev_err(&ndev->dev,
2672 +                       "The sum of all CBS Bandwidth must be less than 100\n");
2673 +               return -EINVAL;
2674 +       }
2675 +
2676 +       cbs[tc].bw = bw;
2677 +       cbs[tc].tc_max_sized_frame = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
2678 +       cbs[tc].idle_slope = port_transmit_rate / 100 * bw;
2679 +       cbs[tc].send_slope = port_transmit_rate - cbs[tc].idle_slope;
2680 +
2681 +       /* For TC7, the max_interfrence_size is ENETC_MAC_MAXFRM_SIZE.
2682 +        * For TC6, the max_interfrence_size is calculated as below:
2683 +        *
2684 +        *      max_interfrence_size = (M0 + Ma + Ra * M0 / (R0 - Ra))
2685 +        *
2686 +        * For other traffic class, for example SR class Q:
2687 +        *
2688 +        *                            R0 * (M0 + Ma + ... + Mp)
2689 +        *      max_interfrence_size =  ------------------------------
2690 +        *                            (R0 - Ra) + ... + (R0 - Rp)
2691 +        *
2692 +        */
2693 +
2694 +       if (tc == tc_nums - 1) {
2695 +               cbs[tc].max_interfrence_size = port_frame_max_size * 8;
2696 +
2697 +       } else if (tc == tc_nums - 2) {
2698 +               cbs[tc].max_interfrence_size = (port_frame_max_size
2699 +                               + cbs[tc + 1].tc_max_sized_frame
2700 +                               + (u64)port_frame_max_size * cbs[tc + 1].idle_slope
2701 +                               / cbs[tc + 1].send_slope) * 8; /* multiply before divide to avoid truncating Ra/(R0-Ra) to 0 */
2702 +       } else {
2703 +               max_interfrence_size = port_frame_max_size;
2704 +               send_slope = 0;
2705 +               for (i = tc + 1; i < tc_nums; i++) {
2706 +                       send_slope += cbs[i].send_slope;
2707 +                       max_interfrence_size += cbs[i].tc_max_sized_frame;
2708 +               }
2709 +               max_interfrence_size = ((u64)port_transmit_rate
2710 +                               * max_interfrence_size) / send_slope;
2711 +               cbs[tc].max_interfrence_size = max_interfrence_size * 8;
2712 +       }
2713 +
2714 +       cbs[tc].hi_credit = cbs[tc].max_interfrence_size * cbs[tc].bw / 100;
2715 +       cbs[tc].lo_credit = (u64)cbs[tc].tc_max_sized_frame * cbs[tc].send_slope
2716 +                       / port_transmit_rate; /* multiply first: send_slope / rate always truncated to 0 */
2717 +       cbs[tc].tc = tc;
2718 +
2719 +       hi_credit = (ENETC_CLK * 100L) * (u64)cbs[tc].hi_credit
2720 +                       / port_transmit_rate;
2721 +       enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit);
2722 +
2723 +       /* Set bw register and enable this traffic class*/
2724 +       enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc),
2725 +                     (cbs[tc].bw & 0x7F) | (1U << 31)); /* 1U: left-shifting 1 into the sign bit is UB */
2726 +       cbs[tc].enable = true;
2727 +
2728 +       return 0;
2729 +}
2730 +
2731 +static int enetc_get_cbs(struct net_device *ndev, u8 tc) /* return configured bandwidth percent of TC tc, or negative errno */
2732 +{
2733 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
2734 +       struct enetc_si *si = priv->si;
2735 +       struct enetc_cbs *ecbs = si->ecbs;
2736 +       struct cbs *cbs;
2737 +
2738 +       if (!ecbs)
2739 +               return -ENOMEM; /* CBS state was never allocated at init */
2740 +       cbs = ecbs->cbs;
2741 +       if (tc >= ecbs->tc_nums) {
2742 +               dev_err(&ndev->dev, "The maximum of TC is %d\n", ecbs->tc_nums);
2743 +               return -EINVAL;
2744 +       }
2745 +
2746 +       return cbs[tc].bw;
2747 +}
2748 +
2749 +static int enetc_set_tsd(struct net_device *ndev, struct tsn_tsd *ttsd) /* TSD set: stub, silently accepts */
2750 +{
2751 +       return 0;
2752 +}
2753 +
2754 +static int enetc_get_tsd(struct net_device *ndev, struct tsn_tsd_status *tts) /* TSD get: stub, reports nothing */
2755 +{
2756 +       return 0;
2757 +}
2758 +
2759 +static u32 get_ndev_speed(struct net_device *netdev) /* query link speed via ethtool ops; returns 0 on any failure */
2760 +{
2761 +       struct ethtool_link_ksettings ksettings;
2762 +       int rc = -1;
2763 +
2764 +       if (netdev->ethtool_ops->get_link_ksettings) {
2765 +               if (netdev->ethtool_ops->begin) {
2766 +                       rc = netdev->ethtool_ops->begin(netdev);
2767 +                       if (rc < 0)
2768 +                               return 0;
2769 +               }
2770 +
2771 +               memset(&ksettings, 0, sizeof(ksettings));
2772 +
2773 +               if (!netdev->ethtool_ops->get_link_ksettings) /* NOTE(review): redundant re-check; would also skip ->complete() after a successful ->begin() */
2774 +                       return 0;
2775 +
2776 +               rc = netdev->ethtool_ops->get_link_ksettings(netdev,
2777 +                                                            &ksettings);
2778 +
2779 +               if (netdev->ethtool_ops->complete)
2780 +                       netdev->ethtool_ops->complete(netdev);
2781 +       }
2782 +
2783 +       return (rc < 0) ? 0 : ksettings.base.speed;
2784 +}
2785 +
2786 +static void enetc_cbs_init(struct enetc_si *si) /* allocate per-TC CBS state; silently no-op on allocation failure */
2787 +{
2788 +       struct enetc_ndev_priv *priv = netdev_priv(si->ndev);
2789 +       u8 tc_nums;
2790 +
2791 +       tc_nums = priv->num_tx_rings; /* one struct cbs per TX ring / traffic class */
2792 +       si->ecbs = kzalloc(sizeof(*si->ecbs) +
2793 +                          sizeof(struct cbs) * tc_nums, GFP_KERNEL);
2794 +       if (!si->ecbs)
2795 +               return;
2796 +
2797 +       si->ecbs->port_max_size_frame = si->ndev->mtu + ETH_HLEN
2798 +                                               + VLAN_HLEN + ETH_FCS_LEN;
2799 +       si->ecbs->tc_nums = tc_nums;
2800 +       si->ecbs->port_transmit_rate = get_ndev_speed(si->ndev);
2801 +
2802 +       /*This trick is used only for CFP*/
2803 +       if (!si->ecbs->port_transmit_rate)
2804 +               si->ecbs->port_transmit_rate = 1000000000; /* assume 1 Gbps when speed is unknown */
2805 +
2806 +       if (!si->ecbs->port_transmit_rate) { /* NOTE(review): dead branch -- the fallback above already guarantees non-zero */
2807 +               dev_err(&si->pdev->dev, "Failure to get port speed for CBS\n");
2808 +               kfree(si->ecbs);
2809 +               si->ecbs = NULL;
2810 +       }
2811 +}
2812 +
2813 +static void enetc_qbv_init(struct enetc_hw *hw) /* program PMR PSPEED field for Qbv gate timing */
2814 +{
2815 +       /* Set PSPEED to be 1Gbps */
2816 +       enetc_port_wr(hw, ENETC_PMR,
2817 +                     (enetc_port_rd(hw, ENETC_PMR)
2818 +                     & (~ENETC_PMR_PSPEED_MASK))
2819 +                     | ENETC_PMR_PSPEED_1000M);
2820 +}
2821 +
2822 +void enetc_tsn_init(struct net_device *ndev) /* per-device TSN setup, gated on advertised hardware capabilities */
2823 +{
2824 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
2825 +       struct enetc_si *si = priv->si;
2826 +       u32 capability = 0;
2827 +
2828 +       capability = __enetc_tsn_get_cap(si);
2829 +
2830 +       if (capability & TSN_CAP_CBS)
2831 +               enetc_cbs_init(si);
2832 +
2833 +       if (capability & TSN_CAP_QBV)
2834 +               enetc_qbv_init(&si->hw);
2835 +
2836 +       if (capability & TSN_CAP_QCI)
2837 +               enetc_qci_enable(&si->hw);
2838 +
2839 +       dev_info(&si->pdev->dev, "%s: setup done\n", __func__);
2840 +}
2841 +
2842 +void enetc_tsn_deinit(struct net_device *ndev) /* counterpart of enetc_tsn_init(); currently only logs (ecbs is not freed here) */
2843 +{
2844 +       struct enetc_ndev_priv *priv = netdev_priv(ndev);
2845 +       struct enetc_si *si = priv->si;
2846 +
2847 +       dev_info(&si->pdev->dev, "%s: release\n", __func__);
2848 +}
2849 +
2850 +static struct tsn_ops enetc_tsn_ops_full = { /* full op set: Qbv + Qci + Qbu + CBS + TSD */
2851 +       .device_init = enetc_tsn_init,
2852 +       .device_deinit = enetc_tsn_deinit,
2853 +       .get_capability = enetc_tsn_get_capability,
2854 +       .qbv_set = enetc_qbv_set,
2855 +       .qbv_get = enetc_qbv_get,
2856 +       .qbv_get_status = enetc_qbv_get_status,
2857 +       .cb_streamid_set = enetc_cb_streamid_set,
2858 +       .cb_streamid_get = enetc_cb_streamid_get,
2859 +       .cb_streamid_counters_get = enetc_cb_streamid_counters_get,
2860 +       .qci_get_maxcap = enetc_get_max_cap,
2861 +       .qci_sfi_set = enetc_qci_sfi_set,
2862 +       .qci_sfi_get = enetc_qci_sfi_get,
2863 +       .qci_sfi_counters_get = enetc_qci_sfi_counters_get,
2864 +       .qci_sgi_set = enetc_qci_sgi_set,
2865 +       .qci_sgi_get = enetc_qci_sgi_get,
2866 +       .qci_sgi_status_get = enetc_qci_sgi_status_get,
2867 +       .qci_fmi_set = enetc_qci_fmi_set,
2868 +       .qci_fmi_get = enetc_qci_fmi_get,
2869 +       .qbu_set = enetc_qbu_set,
2870 +       .qbu_get = enetc_qbu_get,
2871 +       .cbs_set = enetc_set_cbs,
2872 +       .cbs_get = enetc_get_cbs,
2873 +       .tsd_set = enetc_set_tsd,
2874 +       .tsd_get = enetc_get_tsd,
2875 +};
2876 +
2877 +static struct tsn_ops enetc_tsn_ops_part = { /* reduced op set: streamid + Qci only (no Qbv/Qbu/CBS/TSD) */
2878 +       .device_init = enetc_tsn_init,
2879 +       .device_deinit = enetc_tsn_deinit,
2880 +       .get_capability = enetc_tsn_get_capability,
2881 +       .cb_streamid_set = enetc_cb_streamid_set,
2882 +       .cb_streamid_get = enetc_cb_streamid_get,
2883 +       .cb_streamid_counters_get = enetc_cb_streamid_counters_get,
2884 +       .qci_get_maxcap = enetc_get_max_cap,
2885 +       .qci_sfi_set = enetc_qci_sfi_set,
2886 +       .qci_sfi_get = enetc_qci_sfi_get,
2887 +       .qci_sfi_counters_get = enetc_qci_sfi_counters_get,
2888 +       .qci_sgi_set = enetc_qci_sgi_set,
2889 +       .qci_sgi_get = enetc_qci_sgi_get,
2890 +       .qci_sgi_status_get = enetc_qci_sgi_status_get,
2891 +       .qci_fmi_set = enetc_qci_fmi_set,
2892 +       .qci_fmi_get = enetc_qci_fmi_get,
2893 +};
2894 +
2895 +void enetc_tsn_pf_init(struct net_device *netdev, struct pci_dev *pdev) /* register netdev with the TSN core, keyed by PCI bus number */
2896 +{
2897 +       int port = pdev->devfn & 0x7; /* PCI function number selects the port */
2898 +
2899 +       if (port == 1 || port == 3) /* presumably these ports lack the Qbv/Qbu/CBS engines -- confirm against SoC docs */
2900 +               tsn_port_register(netdev, &enetc_tsn_ops_part,
2901 +                                 (u16)pdev->bus->number);
2902 +       else
2903 +               tsn_port_register(netdev, &enetc_tsn_ops_full,
2904 +                                 (u16)pdev->bus->number);
2905 +}
2906 +
2907 +void enetc_tsn_pf_deinit(struct net_device *netdev) /* unregister from the TSN core on PF teardown */
2908 +{
2909 +       tsn_port_unregister(netdev);
2910 +}
2911 +#endif /* #if IS_ENABLED(CONFIG_ENETC_TSN) */