1 From 90b3f1705785f0e30de6f41abc8764aae1391245 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Wed, 17 Apr 2019 18:58:28 +0800
4 Subject: [PATCH] dpaa2-ethernet: support layerscape
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 This is an integrated patch of the dpaa2-ethernet driver for layerscape
10
11 Signed-off-by: Biwen Li <biwen.li@nxp.com>
12 Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
13 Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
14 Signed-off-by: David S. Miller <davem@davemloft.net>
15 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
16 Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
17 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
18 Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
19 Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
20 Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
21 Signed-off-by: Valentin Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
22 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
23 ---
24  drivers/staging/fsl-dpaa2/Kconfig             |    7 +
25  drivers/staging/fsl-dpaa2/ethernet/Makefile   |    3 +
26  .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c      | 1187 ++++++++
27  .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h      |  183 ++
28  .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c    |  356 +++
29  .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h    |   60 +
30  .../fsl-dpaa2/ethernet/dpaa2-eth-trace.h      |   29 +-
31  .../staging/fsl-dpaa2/ethernet/dpaa2-eth.c    | 2509 +++++++++++++----
32  .../staging/fsl-dpaa2/ethernet/dpaa2-eth.h    |  394 ++-
33  .../fsl-dpaa2/ethernet/dpaa2-ethtool.c        |  716 ++++-
34  drivers/staging/fsl-dpaa2/ethernet/dpkg.h     |  380 ++-
35  drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h |  255 +-
36  drivers/staging/fsl-dpaa2/ethernet/dpni.c     |  704 ++++-
37  drivers/staging/fsl-dpaa2/ethernet/dpni.h     |  401 ++-
38  drivers/staging/fsl-dpaa2/ethernet/net.h      |   30 +-
39  15 files changed, 6315 insertions(+), 899 deletions(-)
40  create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
41  create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
42  create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
43  create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
44
45 --- a/drivers/staging/fsl-dpaa2/Kconfig
46 +++ b/drivers/staging/fsl-dpaa2/Kconfig
47 @@ -17,6 +17,13 @@ config FSL_DPAA2_ETH
48           Ethernet driver for Freescale DPAA2 SoCs, using the
49           Freescale MC bus driver
50  
51 +config FSL_DPAA2_ETH_CEETM
52 +       depends on NET_SCHED
53 +       bool "DPAA2 Ethernet CEETM QoS"
54 +       default n
55 +       ---help---
56 +         Enable QoS offloading support through the CEETM hardware block.
57 +
58  if FSL_DPAA2_ETH
59  config FSL_DPAA2_ETH_USE_ERR_QUEUE
60         bool "Enable Rx error queue"
61 --- a/drivers/staging/fsl-dpaa2/ethernet/Makefile
62 +++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
63 @@ -1,3 +1,4 @@
64 +# SPDX-License-Identifier: GPL-2.0
65  #
66  # Makefile for the Freescale DPAA2 Ethernet controller
67  #
68 @@ -5,6 +6,8 @@
69  obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
70  
71  fsl-dpaa2-eth-objs    := dpaa2-eth.o dpaa2-ethtool.o dpni.o
72 +fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
73 +fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_CEETM} += dpaa2-eth-ceetm.o
74  
75  # Needed by the tracing framework
76  CFLAGS_dpaa2-eth.o := -I$(src)
77 --- /dev/null
78 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
79 @@ -0,0 +1,1187 @@
80 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
81 +/*
82 + * Copyright 2017-2019 NXP
83 + *
84 + */
85 +
86 +#include <linux/init.h>
87 +#include <linux/module.h>
88 +
89 +#include "dpaa2-eth-ceetm.h"
90 +#include "dpaa2-eth.h"
91 +
92 +#define DPAA2_CEETM_DESCRIPTION "FSL DPAA2 CEETM qdisc"
93 +/* Conversion formula from userspace passed Bps to expected Mbit */
94 +#define dpaa2_eth_bps_to_mbit(rate) ((rate) >> 17)
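+/* (The shift approximates Bps -> Mbit/s: rate * 8 / 10^6 = rate / 125000,
+ * and 2^17 = 131072, so the result undershoots the exact value by ~5%.)
+ */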
95 +
96 +static const struct nla_policy dpaa2_ceetm_policy[DPAA2_CEETM_TCA_MAX] = {
97 +       [DPAA2_CEETM_TCA_COPT] = { .len = sizeof(struct dpaa2_ceetm_tc_copt) },
98 +       [DPAA2_CEETM_TCA_QOPS] = { .len = sizeof(struct dpaa2_ceetm_tc_qopt) },
99 +};
100 +
101 +struct Qdisc_ops dpaa2_ceetm_qdisc_ops;
102 +
103 +static inline int dpaa2_eth_set_ch_shaping(struct dpaa2_eth_priv *priv,
104 +                                          struct dpni_tx_shaping_cfg *scfg,
105 +                                          struct dpni_tx_shaping_cfg *ecfg,
106 +                                          int coupled, int ch_id)
107 +{
108 +       int err = 0;
109 +
110 +       netdev_dbg(priv->net_dev, "%s: ch_id %d rate %d mbps\n", __func__,
111 +                  ch_id, scfg->rate_limit);
112 +       err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, scfg,
113 +                                 ecfg, coupled);
114 +       if (err)
115 +               netdev_err(priv->net_dev, "dpni_set_tx_shaping err\n");
116 +
117 +       return err;
118 +}
119 +
120 +static inline int dpaa2_eth_reset_ch_shaping(struct dpaa2_eth_priv *priv,
121 +                                            int ch_id)
122 +{
123 +       struct dpni_tx_shaping_cfg cfg = { 0 };
124 +
125 +       return dpaa2_eth_set_ch_shaping(priv, &cfg, &cfg, 0, ch_id);
126 +}
127 +
128 +static inline int
129 +dpaa2_eth_update_shaping_cfg(struct net_device *dev,
130 +                            struct dpaa2_ceetm_shaping_cfg cfg,
131 +                            struct dpni_tx_shaping_cfg *scfg,
132 +                            struct dpni_tx_shaping_cfg *ecfg)
133 +{
134 +       scfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.cir);
135 +       ecfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.eir);
136 +
137 +       if (cfg.cbs > DPAA2_ETH_MAX_BURST_SIZE) {
138 +               netdev_err(dev, "Committed burst size must be under %d\n",
139 +                          DPAA2_ETH_MAX_BURST_SIZE);
140 +               return -EINVAL;
141 +       }
142 +
143 +       scfg->max_burst_size = cfg.cbs;
144 +
145 +       if (cfg.ebs > DPAA2_ETH_MAX_BURST_SIZE) {
146 +               netdev_err(dev, "Excess burst size must be under %d\n",
147 +                          DPAA2_ETH_MAX_BURST_SIZE);
148 +               return -EINVAL;
149 +       }
150 +
151 +       ecfg->max_burst_size = cfg.ebs;
152 +
153 +       if ((!cfg.cir || !cfg.eir) && cfg.coupled) {
154 +               netdev_err(dev, "Coupling can be set only when both CIR and EIR are finite\n");
155 +               return -EINVAL;
156 +       }
157 +
158 +       return 0;
159 +}
160 +
161 +enum update_tx_prio {
162 +       DPAA2_ETH_ADD_CQ,
163 +       DPAA2_ETH_DEL_CQ,
164 +};
165 +
166 +/* Normalize weights based on max passed value */
167 +static inline int dpaa2_eth_normalize_tx_prio(struct dpaa2_ceetm_qdisc *priv)
168 +{
169 +       struct dpni_tx_schedule_cfg *sched_cfg;
170 +       struct dpaa2_ceetm_class *cl;
171 +       u32 qpri;
172 +       u16 weight_max = 0, increment;
173 +       int i;
174 +
175 +       /* Check the boundaries of the provided values */
176 +       for (i = 0; i < priv->clhash.hashsize; i++)
177 +               hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
178 +                       weight_max = (weight_max == 0 ? cl->prio.weight :
179 +                                    (weight_max < cl->prio.weight ?
180 +                                     cl->prio.weight : weight_max));
181 +
182 +       /* If there are no elements, there's nothing to do */
183 +       if (weight_max == 0)
184 +               return 0;
185 +
186 +       increment = (DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) /
187 +                   weight_max;
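+       /* e.g. weight_max = 4 gives increment = (24800 - 100) / 4 = 6175,
+        * so a class of weight 2 maps to 100 + 2 * 6175 = 12450 and the
+        * heaviest class lands on DPAA2_CEETM_MAX_WEIGHT itself
+        */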
188 +
189 +       for (i = 0; i < priv->clhash.hashsize; i++) {
190 +               hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
191 +                       if (cl->prio.mode == STRICT_PRIORITY)
192 +                               continue;
193 +
194 +                       qpri = cl->prio.qpri;
195 +                       sched_cfg = &priv->prio.tx_prio_cfg.tc_sched[qpri];
196 +
197 +                       sched_cfg->delta_bandwidth =
198 +                               DPAA2_CEETM_MIN_WEIGHT +
199 +                               (cl->prio.weight * increment);
200 +
201 +                       pr_debug("%s: Normalized CQ qpri %d weight to %d\n",
202 +                                __func__, qpri, sched_cfg->delta_bandwidth);
203 +               }
204 +       }
205 +
206 +       return 0;
207 +}
208 +
209 +static inline int dpaa2_eth_update_tx_prio(struct dpaa2_eth_priv *priv,
210 +                                          struct dpaa2_ceetm_class *cl,
211 +                                          enum update_tx_prio type)
212 +{
213 +       struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
214 +       struct dpni_tx_schedule_cfg *sched_cfg;
215 +       struct dpni_taildrop td = {0};
216 +       u8 ch_id = 0, tc_id = 0;
217 +       u32 qpri = 0;
218 +       int err = 0;
219 +
220 +       qpri = cl->prio.qpri;
221 +       tc_id = DPNI_BUILD_CH_TC(ch_id, qpri);
222 +
223 +       switch (type) {
224 +       case DPAA2_ETH_ADD_CQ:
225 +               /* Enable taildrop */
226 +               td.enable = 1;
227 +               td.units = DPNI_CONGESTION_UNIT_FRAMES;
228 +               td.threshold = DPAA2_CEETM_TD_THRESHOLD;
229 +               err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
230 +                                       DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
231 +                                       0, &td);
232 +               if (err) {
233 +                       netdev_err(priv->net_dev, "Error enabling Tx taildrop %d\n",
234 +                                  err);
235 +                       return err;
236 +               }
237 +               break;
238 +       case DPAA2_ETH_DEL_CQ:
239 +               /* Disable taildrop */
240 +               td.enable = 0;
241 +               err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
242 +                                       DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
243 +                                       0, &td);
244 +               if (err) {
245 +                       netdev_err(priv->net_dev, "Error disabling Tx taildrop %d\n",
246 +                                  err);
247 +                       return err;
248 +               }
249 +               break;
250 +       }
251 +
252 +       /* We can zero out the structure in the tx_prio_conf array */
253 +       if (type == DPAA2_ETH_DEL_CQ) {
254 +               sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[qpri];
255 +               memset(sched_cfg, 0, sizeof(*sched_cfg));
256 +       }
257 +
258 +       /* Normalize priorities */
259 +       err = dpaa2_eth_normalize_tx_prio(sch);
260 +
261 +       /* Debug print goes here */
262 +       print_hex_dump_debug("tx_prio: ", DUMP_PREFIX_OFFSET, 16, 1,
263 +                            &sch->prio.tx_prio_cfg,
264 +                            sizeof(sch->prio.tx_prio_cfg), 0);
265 +
266 +       /* Call dpni_set_tx_priorities for the entire prio qdisc */
267 +       err = dpni_set_tx_priorities(priv->mc_io, 0, priv->mc_token,
268 +                                    &sch->prio.tx_prio_cfg);
269 +       if (err)
270 +               netdev_err(priv->net_dev, "dpni_set_tx_priorities err %d\n",
271 +                          err);
272 +
273 +       return err;
274 +}
275 +
276 +static void dpaa2_eth_ceetm_enable(struct dpaa2_eth_priv *priv)
277 +{
278 +       priv->ceetm_en = true;
279 +}
280 +
281 +static void dpaa2_eth_ceetm_disable(struct dpaa2_eth_priv *priv)
282 +{
283 +       priv->ceetm_en = false;
284 +}
285 +
286 +/* Find class in qdisc hash table using given handle */
287 +static inline struct dpaa2_ceetm_class *dpaa2_ceetm_find(u32 handle,
288 +                                                        struct Qdisc *sch)
289 +{
290 +       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
291 +       struct Qdisc_class_common *clc;
292 +
293 +       pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
294 +                __func__, handle, sch->handle);
295 +
296 +       clc = qdisc_class_find(&priv->clhash, handle);
297 +       return clc ? container_of(clc, struct dpaa2_ceetm_class, common) : NULL;
298 +}
299 +
300 +/* Insert a class in the qdisc's class hash */
301 +static void dpaa2_ceetm_link_class(struct Qdisc *sch,
302 +                                  struct Qdisc_class_hash *clhash,
303 +                                  struct Qdisc_class_common *common)
304 +{
305 +       sch_tree_lock(sch);
306 +       qdisc_class_hash_insert(clhash, common);
307 +       sch_tree_unlock(sch);
308 +       qdisc_class_hash_grow(sch, clhash);
309 +}
310 +
311 +/* Destroy a ceetm class */
312 +static void dpaa2_ceetm_cls_destroy(struct Qdisc *sch,
313 +                                   struct dpaa2_ceetm_class *cl)
314 +{
315 +       struct net_device *dev = qdisc_dev(sch);
316 +       struct dpaa2_eth_priv *priv = netdev_priv(dev);
317 +
318 +       if (!cl)
319 +               return;
320 +
321 +       pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
322 +                __func__, cl->common.classid, sch->handle);
323 +
324 +       /* Recurse into child first */
325 +       if (cl->child) {
326 +               qdisc_destroy(cl->child);
327 +               cl->child = NULL;
328 +       }
329 +
330 +       switch (cl->type) {
331 +       case CEETM_ROOT:
332 +               if (dpaa2_eth_reset_ch_shaping(priv, cl->root.ch_id))
333 +                       netdev_err(dev, "Error resetting channel shaping\n");
334 +
335 +               break;
336 +
337 +       case CEETM_PRIO:
338 +               if (dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_DEL_CQ))
339 +                       netdev_err(dev, "Error resetting tx_priorities\n");
340 +
341 +               if (cl->prio.cstats)
342 +                       free_percpu(cl->prio.cstats);
343 +
344 +               break;
345 +       }
346 +
347 +       tcf_block_put(cl->block);
348 +       kfree(cl);
349 +}
350 +
351 +/* Destroy a ceetm qdisc */
352 +static void dpaa2_ceetm_destroy(struct Qdisc *sch)
353 +{
354 +       unsigned int i;
355 +       struct hlist_node *next;
356 +       struct dpaa2_ceetm_class *cl;
357 +       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
358 +       struct net_device *dev = qdisc_dev(sch);
359 +       struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
360 +
361 +       pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
362 +                __func__, sch->handle);
363 +
364 +       /* All filters need to be removed before destroying the classes */
365 +       tcf_block_put(priv->block);
366 +
367 +       for (i = 0; i < priv->clhash.hashsize; i++) {
368 +               hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
369 +                       tcf_block_put(cl->block);
370 +       }
371 +
372 +       for (i = 0; i < priv->clhash.hashsize; i++) {
373 +               hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
374 +                                         common.hnode)
375 +                       dpaa2_ceetm_cls_destroy(sch, cl);
376 +       }
377 +
378 +       qdisc_class_hash_destroy(&priv->clhash);
379 +
380 +       switch (priv->type) {
381 +       case CEETM_ROOT:
382 +               dpaa2_eth_ceetm_disable(priv_eth);
383 +
384 +               if (priv->root.qstats)
385 +                       free_percpu(priv->root.qstats);
386 +
387 +               if (!priv->root.qdiscs)
388 +                       break;
389 +
390 +               /* Destroy the pfifo qdiscs in case they haven't been attached
391 +                * to the netdev queues yet.
392 +                */
393 +               for (i = 0; i < dev->num_tx_queues; i++)
394 +                       if (priv->root.qdiscs[i])
395 +                               qdisc_destroy(priv->root.qdiscs[i]);
396 +
397 +               kfree(priv->root.qdiscs);
398 +               break;
399 +
400 +       case CEETM_PRIO:
401 +               if (priv->prio.parent)
402 +                       priv->prio.parent->child = NULL;
403 +               break;
404 +       }
405 +}
406 +
407 +static int dpaa2_ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
408 +{
409 +       struct Qdisc *qdisc;
410 +       unsigned int ntx, i;
411 +       struct nlattr *nest;
412 +       struct dpaa2_ceetm_tc_qopt qopt;
413 +       struct dpaa2_ceetm_qdisc_stats *qstats;
414 +       struct net_device *dev = qdisc_dev(sch);
415 +       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
416 +
417 +       pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
418 +
419 +       sch_tree_lock(sch);
420 +       memset(&qopt, 0, sizeof(qopt));
421 +       qopt.type = priv->type;
422 +       qopt.shaped = priv->shaped;
423 +
424 +       switch (priv->type) {
425 +       case CEETM_ROOT:
426 +               /* Gather statistics from the underlying pfifo qdiscs */
427 +               sch->q.qlen = 0;
428 +               memset(&sch->bstats, 0, sizeof(sch->bstats));
429 +               memset(&sch->qstats, 0, sizeof(sch->qstats));
430 +
431 +               for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
432 +                       qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
433 +                       sch->q.qlen             += qdisc->q.qlen;
434 +                       sch->bstats.bytes       += qdisc->bstats.bytes;
435 +                       sch->bstats.packets     += qdisc->bstats.packets;
436 +                       sch->qstats.qlen        += qdisc->qstats.qlen;
437 +                       sch->qstats.backlog     += qdisc->qstats.backlog;
438 +                       sch->qstats.drops       += qdisc->qstats.drops;
439 +                       sch->qstats.requeues    += qdisc->qstats.requeues;
440 +                       sch->qstats.overlimits  += qdisc->qstats.overlimits;
441 +               }
442 +
443 +               for_each_online_cpu(i) {
444 +                       qstats = per_cpu_ptr(priv->root.qstats, i);
445 +                       sch->qstats.drops += qstats->drops;
446 +               }
447 +
448 +               break;
449 +
450 +       case CEETM_PRIO:
451 +               qopt.prio_group_A = priv->prio.tx_prio_cfg.prio_group_A;
452 +               qopt.prio_group_B = priv->prio.tx_prio_cfg.prio_group_B;
453 +               qopt.separate_groups = priv->prio.tx_prio_cfg.separate_groups;
454 +               break;
455 +
456 +       default:
457 +               pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
458 +               sch_tree_unlock(sch);
459 +               return -EINVAL;
460 +       }
461 +
462 +       nest = nla_nest_start(skb, TCA_OPTIONS);
463 +       if (!nest)
464 +               goto nla_put_failure;
465 +       if (nla_put(skb, DPAA2_CEETM_TCA_QOPS, sizeof(qopt), &qopt))
466 +               goto nla_put_failure;
467 +       nla_nest_end(skb, nest);
468 +
469 +       sch_tree_unlock(sch);
470 +       return skb->len;
471 +
472 +nla_put_failure:
473 +       sch_tree_unlock(sch);
474 +       nla_nest_cancel(skb, nest);
475 +       return -EMSGSIZE;
476 +}
477 +
478 +static int dpaa2_ceetm_change_prio(struct Qdisc *sch,
479 +                                  struct dpaa2_ceetm_qdisc *priv,
480 +                                  struct dpaa2_ceetm_tc_qopt *qopt)
481 +{
482 +       /* TODO: Once LX2 support is added */
483 +       /* priv->shaped = parent_cl->shaped; */
484 +       priv->prio.tx_prio_cfg.prio_group_A = qopt->prio_group_A;
485 +       priv->prio.tx_prio_cfg.prio_group_B = qopt->prio_group_B;
486 +       priv->prio.tx_prio_cfg.separate_groups = qopt->separate_groups;
487 +
488 +       return 0;
489 +}
490 +
491 +/* Edit a ceetm qdisc */
492 +static int dpaa2_ceetm_change(struct Qdisc *sch, struct nlattr *opt)
493 +{
494 +       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
495 +       struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
496 +       struct dpaa2_ceetm_tc_qopt *qopt;
497 +       int err;
498 +
499 +       pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
500 +
501 +       err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
502 +                              dpaa2_ceetm_policy, NULL);
503 +       if (err < 0) {
504 +               pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
505 +                      "nla_parse_nested");
506 +               return err;
507 +       }
508 +
509 +       if (!tb[DPAA2_CEETM_TCA_QOPS]) {
510 +               pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
511 +                      "tb");
512 +               return -EINVAL;
513 +       }
514 +
515 +       if (TC_H_MIN(sch->handle)) {
516 +               pr_err("CEETM: a qdisc should not have a minor\n");
517 +               return -EINVAL;
518 +       }
519 +
520 +       qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
521 +
522 +       if (priv->type != qopt->type) {
523 +               pr_err("CEETM: qdisc %X is not of the provided type\n",
524 +                      sch->handle);
525 +               return -EINVAL;
526 +       }
527 +
528 +       switch (priv->type) {
529 +       case CEETM_PRIO:
530 +               err = dpaa2_ceetm_change_prio(sch, priv, qopt);
531 +               break;
532 +       default:
533 +               pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
534 +               err = -EINVAL;
535 +       }
536 +
537 +       return err;
538 +}
539 +
540 +/* Configure a root ceetm qdisc */
541 +static int dpaa2_ceetm_init_root(struct Qdisc *sch,
542 +                                struct dpaa2_ceetm_qdisc *priv,
543 +                                struct dpaa2_ceetm_tc_qopt *qopt)
544 +{
545 +       struct net_device *dev = qdisc_dev(sch);
546 +       struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
547 +       struct netdev_queue *dev_queue;
548 +       unsigned int i, parent_id;
549 +       struct Qdisc *qdisc;
550 +
551 +       pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
552 +
553 +       /* Validate inputs */
554 +       if (sch->parent != TC_H_ROOT) {
555 +               pr_err("CEETM: a root ceetm qdisc must be root\n");
556 +               return -EINVAL;
557 +       }
558 +
559 +       /* Pre-allocate underlying pfifo qdiscs.
560 +        *
561 +        * We want to offload shaping and scheduling decisions to the hardware.
562 +        * The pfifo qdiscs will be attached to the netdev queues and will
563 +        * guide the traffic from the IP stack down to the driver with minimum
564 +        * interference.
565 +        *
566 +        * The CEETM qdiscs and classes will be crossed when the traffic
567 +        * reaches the driver.
568 +        */
569 +       priv->root.qdiscs = kcalloc(dev->num_tx_queues,
570 +                                   sizeof(priv->root.qdiscs[0]),
571 +                                   GFP_KERNEL);
572 +       if (!priv->root.qdiscs)
573 +               return -ENOMEM;
574 +
575 +       for (i = 0; i < dev->num_tx_queues; i++) {
576 +               dev_queue = netdev_get_tx_queue(dev, i);
577 +               parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
578 +                                     TC_H_MIN(i + PFIFO_MIN_OFFSET));
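+               /* queue i gets minor 0x21 + i, i.e. handles 1:21 and up,
+                * keeping minors 1:1 - 1:20 free for the CEETM channels
+                * (see the PFIFO_MIN_OFFSET comment in dpaa2-eth-ceetm.h)
+                */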
579 +
580 +               qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
581 +                                         parent_id);
582 +               if (!qdisc)
583 +                       return -ENOMEM;
584 +
585 +               priv->root.qdiscs[i] = qdisc;
586 +               qdisc->flags |= TCQ_F_ONETXQUEUE;
587 +       }
588 +
589 +       sch->flags |= TCQ_F_MQROOT;
590 +
591 +       priv->root.qstats = alloc_percpu(struct dpaa2_ceetm_qdisc_stats);
592 +       if (!priv->root.qstats) {
593 +               pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
594 +                      __func__);
595 +               return -ENOMEM;
596 +       }
597 +
598 +       dpaa2_eth_ceetm_enable(priv_eth);
599 +       return 0;
600 +}
601 +
602 +/* Configure a prio ceetm qdisc */
603 +static int dpaa2_ceetm_init_prio(struct Qdisc *sch,
604 +                                struct dpaa2_ceetm_qdisc *priv,
605 +                                struct dpaa2_ceetm_tc_qopt *qopt)
606 +{
607 +       struct net_device *dev = qdisc_dev(sch);
608 +       struct dpaa2_ceetm_class *parent_cl;
609 +       struct Qdisc *parent_qdisc;
610 +
611 +       pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
612 +
613 +       if (sch->parent == TC_H_ROOT) {
614 +               pr_err("CEETM: a prio ceetm qdisc cannot be root\n");
615 +               return -EINVAL;
616 +       }
617 +
618 +       parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
619 +       if (strcmp(parent_qdisc->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
620 +               pr_err("CEETM: a ceetm qdisc cannot be attached to other qdisc/class types\n");
621 +               return -EINVAL;
622 +       }
623 +
624 +       /* Obtain the parent root ceetm_class */
625 +       parent_cl = dpaa2_ceetm_find(sch->parent, parent_qdisc);
626 +
627 +       if (!parent_cl || parent_cl->type != CEETM_ROOT) {
628 +               pr_err("CEETM: a prio ceetm qdisc can be added only under a root ceetm class\n");
629 +               return -EINVAL;
630 +       }
631 +
632 +       priv->prio.parent = parent_cl;
633 +       parent_cl->child = sch;
634 +
635 +       return dpaa2_ceetm_change_prio(sch, priv, qopt);
636 +}
637 +
638 +/* Configure a generic ceetm qdisc */
639 +static int dpaa2_ceetm_init(struct Qdisc *sch, struct nlattr *opt)
640 +{
641 +       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
642 +       struct net_device *dev = qdisc_dev(sch);
643 +       struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
644 +       struct dpaa2_ceetm_tc_qopt *qopt;
645 +       int err;
646 +
647 +       pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
648 +
649 +       if (!netif_is_multiqueue(dev))
650 +               return -EOPNOTSUPP;
651 +
652 +       err = tcf_block_get(&priv->block, &priv->filter_list);
653 +       if (err) {
654 +               pr_err("CEETM: unable to get tcf_block\n");
655 +               return err;
656 +       }
657 +
658 +       if (!opt) {
659 +               pr_err(KBUILD_BASENAME " : %s : tc error - opt = NULL\n",
660 +                      __func__);
661 +               return -EINVAL;
662 +       }
663 +
664 +       err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
665 +                              dpaa2_ceetm_policy, NULL);
666 +       if (err < 0) {
667 +               pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
668 +                      "nla_parse_nested");
669 +               return err;
670 +       }
671 +
672 +       if (!tb[DPAA2_CEETM_TCA_QOPS]) {
673 +               pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
674 +                      "tb");
675 +               return -EINVAL;
676 +       }
677 +
678 +       if (TC_H_MIN(sch->handle)) {
679 +               pr_err("CEETM: a qdisc should not have a minor\n");
680 +               return -EINVAL;
681 +       }
682 +
683 +       qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
684 +
685 +       /* Initialize the class hash list. Each qdisc has its own class hash */
686 +       err = qdisc_class_hash_init(&priv->clhash);
687 +       if (err < 0) {
688 +               pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
689 +                      __func__);
690 +               return err;
691 +       }
692 +
693 +       priv->type = qopt->type;
694 +       priv->shaped = qopt->shaped;
695 +
696 +       switch (priv->type) {
697 +       case CEETM_ROOT:
698 +               err = dpaa2_ceetm_init_root(sch, priv, qopt);
699 +               break;
700 +       case CEETM_PRIO:
701 +               err = dpaa2_ceetm_init_prio(sch, priv, qopt);
702 +               break;
703 +       default:
704 +               pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
705 +               /* Note: dpaa2_ceetm_destroy() will be called by our caller */
706 +               err = -EINVAL;
707 +       }
708 +
709 +       return err;
710 +}
711 +
712 +/* Attach the underlying pfifo qdiscs */
713 +static void dpaa2_ceetm_attach(struct Qdisc *sch)
714 +{
715 +       struct net_device *dev = qdisc_dev(sch);
716 +       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
717 +       struct Qdisc *qdisc, *old_qdisc;
718 +       unsigned int i;
719 +
720 +       pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
721 +
722 +       for (i = 0; i < dev->num_tx_queues; i++) {
723 +               qdisc = priv->root.qdiscs[i];
724 +               old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
725 +               if (old_qdisc)
726 +                       qdisc_destroy(old_qdisc);
727 +       }
728 +
729 +       /* Remove the references to the pfifo qdiscs since the kernel will
730 +        * destroy them when needed. No cleanup on our part is required from
731 +        * this point on.
732 +        */
733 +       kfree(priv->root.qdiscs);
734 +       priv->root.qdiscs = NULL;
735 +}
736 +
737 +static unsigned long dpaa2_ceetm_cls_find(struct Qdisc *sch, u32 classid)
738 +{
739 +       struct dpaa2_ceetm_class *cl;
740 +
741 +       pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
742 +                __func__, classid, sch->handle);
743 +       cl = dpaa2_ceetm_find(classid, sch);
744 +
745 +       return (unsigned long)cl;
746 +}
747 +
748 +static int dpaa2_ceetm_cls_change_root(struct dpaa2_ceetm_class *cl,
749 +                                      struct dpaa2_ceetm_tc_copt *copt,
750 +                                      struct net_device *dev)
751 +{
752 +       struct dpaa2_eth_priv *priv = netdev_priv(dev);
753 +       struct dpni_tx_shaping_cfg scfg = { 0 }, ecfg = { 0 };
754 +       int err = 0;
755 +
756 +       pr_debug(KBUILD_BASENAME " : %s : class %X\n", __func__,
757 +                cl->common.classid);
758 +
759 +       if (!cl->shaped)
760 +               return 0;
761 +
762 +       if (dpaa2_eth_update_shaping_cfg(dev, copt->shaping_cfg,
763 +                                        &scfg, &ecfg))
764 +               return -EINVAL;
765 +
766 +       err = dpaa2_eth_set_ch_shaping(priv, &scfg, &ecfg,
767 +                                      copt->shaping_cfg.coupled,
768 +                                      cl->root.ch_id);
769 +       if (err)
770 +               return err;
771 +
772 +       memcpy(&cl->root.shaping_cfg, &copt->shaping_cfg,
773 +              sizeof(struct dpaa2_ceetm_shaping_cfg));
774 +
775 +       return err;
776 +}
777 +
778 +static int dpaa2_ceetm_cls_change_prio(struct dpaa2_ceetm_class *cl,
779 +                                      struct dpaa2_ceetm_tc_copt *copt,
780 +                                      struct net_device *dev)
781 +{
782 +       struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
783 +       struct dpni_tx_schedule_cfg *sched_cfg;
784 +       struct dpaa2_eth_priv *priv = netdev_priv(dev);
785 +       int err;
786 +
787 +       pr_debug(KBUILD_BASENAME " : %s : class %X mode %d weight %d\n",
788 +                __func__, cl->common.classid, copt->mode, copt->weight);
789 +
790 +       if (!cl->prio.cstats) {
791 +               cl->prio.cstats = alloc_percpu(struct dpaa2_ceetm_class_stats);
792 +               if (!cl->prio.cstats) {
793 +                       pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
794 +                              __func__);
795 +                       return -ENOMEM;
796 +               }
797 +       }
798 +
799 +       cl->prio.mode = copt->mode;
800 +       cl->prio.weight = copt->weight;
801 +
802 +       sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[cl->prio.qpri];
803 +
804 +       switch (copt->mode) {
805 +       case STRICT_PRIORITY:
806 +               sched_cfg->mode = DPNI_TX_SCHED_STRICT_PRIORITY;
807 +               break;
808 +       case WEIGHTED_A:
809 +               sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_A;
810 +               break;
811 +       case WEIGHTED_B:
812 +               sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_B;
813 +               break;
814 +       }
815 +
816 +       err = dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_ADD_CQ);
817 +
818 +       return err;
819 +}
820 +
821 +/* Add a new ceetm class */
822 +static int dpaa2_ceetm_cls_add(struct Qdisc *sch, u32 classid,
823 +                              struct dpaa2_ceetm_tc_copt *copt,
824 +                              unsigned long *arg)
825 +{
826 +       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
827 +       struct net_device *dev = qdisc_dev(sch);
828 +       struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
829 +       struct dpaa2_ceetm_class *cl;
830 +       int err;
831 +
832 +       if (copt->type == CEETM_ROOT &&
833 +           priv->clhash.hashelems == dpaa2_eth_ch_count(priv_eth)) {
834 +               pr_err("CEETM: only %d channel%s per DPNI allowed, sorry\n",
835 +                      dpaa2_eth_ch_count(priv_eth),
836 +                      dpaa2_eth_ch_count(priv_eth) == 1 ? "" : "s");
837 +               return -EINVAL;
838 +       }
839 +
840 +       if (copt->type == CEETM_PRIO &&
841 +           priv->clhash.hashelems == dpaa2_eth_tc_count(priv_eth)) {
842 +               pr_err("CEETM: only %d queue%s per channel allowed, sorry\n",
843 +                      dpaa2_eth_tc_count(priv_eth),
844 +                      dpaa2_eth_tc_count(priv_eth) == 1 ? "" : "s");
845 +               return -EINVAL;
846 +       }
847 +
848 +       cl = kzalloc(sizeof(*cl), GFP_KERNEL);
849 +       if (!cl)
850 +               return -ENOMEM;
851 +
852 +       err = tcf_block_get(&cl->block, &cl->filter_list);
853 +       if (err) {
854 +               pr_err("%s: Unable to get tcf_block\n", __func__);
855 +               goto out_free;
856 +       }
857 +
858 +       cl->common.classid = classid;
859 +       cl->parent = sch;
860 +       cl->child = NULL;
861 +
862 +       /* Add class handle in Qdisc */
863 +       dpaa2_ceetm_link_class(sch, &priv->clhash, &cl->common);
864 +
865 +       cl->shaped = copt->shaped;
866 +       cl->type = copt->type;
867 +
868 +       /* Claim a CEETM channel / tc - DPAA2 will assume the transition from
869 +        * classid to qdid/qpri, starting from qdid / qpri 0
870 +        */
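+       /* (e.g. under qdisc 1:, class 1:1 claims channel / queue 0,
+        * class 1:2 claims 1, and so on)
+        */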
871 +       switch (copt->type) {
872 +       case CEETM_ROOT:
873 +               cl->root.ch_id = classid - sch->handle - 1;
874 +               err = dpaa2_ceetm_cls_change_root(cl, copt, dev);
875 +               break;
876 +       case CEETM_PRIO:
877 +               cl->prio.qpri = classid - sch->handle - 1;
878 +               err = dpaa2_ceetm_cls_change_prio(cl, copt, dev);
879 +               break;
880 +       }
881 +
882 +       if (err) {
883 +               pr_err("%s: Unable to set new %s class\n", __func__,
884 +                      (copt->type == CEETM_ROOT ? "root" : "prio"));
885 +               goto out_free;
886 +       }
887 +
888 +       switch (copt->type) {
889 +       case CEETM_ROOT:
890 +               pr_debug(KBUILD_BASENAME " : %s : configured root class %X associated with channel qdid %d\n",
891 +                        __func__, classid, cl->root.ch_id);
892 +               break;
893 +       case CEETM_PRIO:
894 +               pr_debug(KBUILD_BASENAME " : %s : configured prio class %X associated with queue qpri %d\n",
895 +                        __func__, classid, cl->prio.qpri);
896 +               break;
897 +       }
898 +
899 +       *arg = (unsigned long)cl;
900 +       return 0;
901 +
902 +out_free:
903 +       kfree(cl);
904 +       return err;
905 +}
906 +
907 +/* Add or configure a ceetm class */
908 +static int dpaa2_ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
909 +                                 struct nlattr **tca, unsigned long *arg)
910 +{
911 +       struct dpaa2_ceetm_qdisc *priv;
912 +       struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)*arg;
913 +       struct nlattr *opt = tca[TCA_OPTIONS];
914 +       struct nlattr *tb[DPAA2_CEETM_TCA_MAX];
915 +       struct dpaa2_ceetm_tc_copt *copt;
916 +       struct net_device *dev = qdisc_dev(sch);
917 +       int err;
918 +
919 +       pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
920 +                __func__, classid, sch->handle);
921 +
922 +       if (strcmp(sch->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
923 +               pr_err("CEETM: a ceetm class cannot be attached to other qdisc/class types\n");
924 +               return -EINVAL;
925 +       }
926 +
927 +       priv = qdisc_priv(sch);
928 +
929 +       if (!opt) {
930 +               pr_err(KBUILD_BASENAME " : %s : tc error NULL opt\n", __func__);
931 +               return -EINVAL;
932 +       }
933 +
934 +       err = nla_parse_nested(tb, DPAA2_CEETM_TCA_COPT, opt,
935 +                              dpaa2_ceetm_policy, NULL);
936 +       if (err < 0) {
937 +               pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
938 +                      "nla_parse_nested");
939 +               return -EINVAL;
940 +       }
941 +
942 +       if (!tb[DPAA2_CEETM_TCA_COPT]) {
943 +               pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
944 +                      "tb");
945 +               return -EINVAL;
946 +       }
947 +
948 +       copt = nla_data(tb[DPAA2_CEETM_TCA_COPT]);
949 +
950 +       /* Configure an existing ceetm class */
951 +       if (cl) {
952 +               if (copt->type != cl->type) {
953 +                       pr_err("CEETM: class %X is not of the provided type\n",
954 +                              cl->common.classid);
955 +                       return -EINVAL;
956 +               }
957 +
958 +               switch (copt->type) {
959 +               case CEETM_ROOT:
960 +                       return dpaa2_ceetm_cls_change_root(cl, copt, dev);
961 +               case CEETM_PRIO:
962 +                       return dpaa2_ceetm_cls_change_prio(cl, copt, dev);
963 +
964 +               default:
965 +                       pr_err(KBUILD_BASENAME " : %s : invalid class\n",
966 +                              __func__);
967 +                       return -EINVAL;
968 +               }
969 +       }
970 +
971 +       return dpaa2_ceetm_cls_add(sch, classid, copt, arg);
972 +}
973 +
974 +static void dpaa2_ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
975 +{
976 +       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
977 +       struct dpaa2_ceetm_class *cl;
978 +       unsigned int i;
979 +
980 +       pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
981 +
982 +       if (arg->stop)
983 +               return;
984 +
985 +       for (i = 0; i < priv->clhash.hashsize; i++) {
986 +               hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
987 +                       if (arg->count < arg->skip) {
988 +                               arg->count++;
989 +                               continue;
990 +                       }
991 +                       if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
992 +                               arg->stop = 1;
993 +                               return;
994 +                       }
995 +                       arg->count++;
996 +               }
997 +       }
998 +}
999 +
1000 +static int dpaa2_ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
1001 +                               struct sk_buff *skb, struct tcmsg *tcm)
1002 +{
1003 +       struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1004 +       struct nlattr *nest;
1005 +       struct dpaa2_ceetm_tc_copt copt;
1006 +
1007 +       pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
1008 +                __func__, cl->common.classid, sch->handle);
1009 +
1010 +       sch_tree_lock(sch);
1011 +
1012 +       tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
1013 +       tcm->tcm_handle = cl->common.classid;
1014 +
1015 +       memset(&copt, 0, sizeof(copt));
1016 +
1017 +       copt.shaped = cl->shaped;
1018 +       copt.type = cl->type;
1019 +
1020 +       switch (cl->type) {
1021 +       case CEETM_ROOT:
1022 +               if (cl->child)
1023 +                       tcm->tcm_info = cl->child->handle;
1024 +
1025 +               memcpy(&copt.shaping_cfg, &cl->root.shaping_cfg,
1026 +                      sizeof(struct dpaa2_ceetm_shaping_cfg));
1027 +
1028 +               break;
1029 +
1030 +       case CEETM_PRIO:
1031 +               if (cl->child)
1032 +                       tcm->tcm_info = cl->child->handle;
1033 +
1034 +               copt.mode = cl->prio.mode;
1035 +               copt.weight = cl->prio.weight;
1036 +
1037 +               break;
1038 +       }
1039 +
1040 +       nest = nla_nest_start(skb, TCA_OPTIONS);
1041 +       if (!nest)
1042 +               goto nla_put_failure;
1043 +       if (nla_put(skb, DPAA2_CEETM_TCA_COPT, sizeof(copt), &copt))
1044 +               goto nla_put_failure;
1045 +       nla_nest_end(skb, nest);
1046 +       sch_tree_unlock(sch);
1047 +       return skb->len;
1048 +
1049 +nla_put_failure:
1050 +       sch_tree_unlock(sch);
1051 +       nla_nest_cancel(skb, nest);
1052 +       return -EMSGSIZE;
1053 +}
1054 +
1055 +static int dpaa2_ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
1056 +{
1057 +       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1058 +       struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1059 +
1060 +       pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
1061 +                __func__, cl->common.classid, sch->handle);
1062 +
1063 +       sch_tree_lock(sch);
1064 +       qdisc_class_hash_remove(&priv->clhash, &cl->common);
1065 +       sch_tree_unlock(sch);
1066 +       return 0;
1067 +}
1068 +
1069 +/* Get the class' child qdisc, if any */
1070 +static struct Qdisc *dpaa2_ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
1071 +{
1072 +       struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1073 +
1074 +       pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
1075 +                __func__, cl->common.classid, sch->handle);
1076 +
1077 +       switch (cl->type) {
1078 +       case CEETM_ROOT:
1079 +       case CEETM_PRIO:
1080 +               return cl->child;
1081 +       }
1082 +
1083 +       return NULL;
1084 +}
1085 +
1086 +static int dpaa2_ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
1087 +                                struct Qdisc *new, struct Qdisc **old)
1088 +{
1089 +       if (new && strcmp(new->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
1090 +               pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
1091 +               return -EOPNOTSUPP;
1092 +       }
1093 +
1094 +       return 0;
1095 +}
1096 +
1097 +static int dpaa2_ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
1098 +                                     struct gnet_dump *d)
1099 +{
1100 +       struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1101 +       struct gnet_stats_basic_packed tmp_bstats;
1102 +       struct dpaa2_ceetm_tc_xstats xstats;
1103 +       union dpni_statistics dpni_stats;
1104 +       struct net_device *dev = qdisc_dev(sch);
1105 +       struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
1106 +       u8 ch_id = 0;
1107 +       int err;
1108 +
1109 +       memset(&xstats, 0, sizeof(xstats));
1110 +       memset(&tmp_bstats, 0, sizeof(tmp_bstats));
1111 +
1112 +       if (cl->type == CEETM_ROOT)
1113 +               return 0;
1114 +
1115 +       err = dpni_get_statistics(priv_eth->mc_io, 0, priv_eth->mc_token, 3,
1116 +                                 DPNI_BUILD_CH_TC(ch_id, cl->prio.qpri),
1117 +                                 &dpni_stats);
1118 +       if (err)
1119 +               netdev_warn(dev, "dpni_get_statistics(%d) failed - %d\n", 3, err);
1120 +
1121 +       xstats.ceetm_dequeue_bytes = dpni_stats.page_3.ceetm_dequeue_bytes;
1122 +       xstats.ceetm_dequeue_frames = dpni_stats.page_3.ceetm_dequeue_frames;
1123 +       xstats.ceetm_reject_bytes = dpni_stats.page_3.ceetm_reject_bytes;
1124 +       xstats.ceetm_reject_frames = dpni_stats.page_3.ceetm_reject_frames;
1125 +
1126 +       return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
1127 +}
1128 +
1129 +static struct tcf_block *dpaa2_ceetm_tcf_block(struct Qdisc *sch,
1130 +                                              unsigned long arg)
1131 +{
1132 +       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1133 +       struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1134 +
1135 +       pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
1136 +                cl ? cl->common.classid : 0, sch->handle);
1137 +       return cl ? cl->block : priv->block;
1138 +}
1139 +
1140 +static unsigned long dpaa2_ceetm_tcf_bind(struct Qdisc *sch,
1141 +                                         unsigned long parent,
1142 +                                         u32 classid)
1143 +{
1144 +       struct dpaa2_ceetm_class *cl = dpaa2_ceetm_find(classid, sch);
1145 +
1146 +       pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
1147 +                cl ? cl->common.classid : 0, sch->handle);
1148 +       return (unsigned long)cl;
1149 +}
1150 +
1151 +static void dpaa2_ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
1152 +{
1153 +       struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1154 +
1155 +       pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
1156 +                cl ? cl->common.classid : 0, sch->handle);
1157 +}
1158 +
1159 +const struct Qdisc_class_ops dpaa2_ceetm_cls_ops = {
1160 +       .graft          =       dpaa2_ceetm_cls_graft,
1161 +       .leaf           =       dpaa2_ceetm_cls_leaf,
1162 +       .find           =       dpaa2_ceetm_cls_find,
1163 +       .change         =       dpaa2_ceetm_cls_change,
1164 +       .delete         =       dpaa2_ceetm_cls_delete,
1165 +       .walk           =       dpaa2_ceetm_cls_walk,
1166 +       .tcf_block      =       dpaa2_ceetm_tcf_block,
1167 +       .bind_tcf       =       dpaa2_ceetm_tcf_bind,
1168 +       .unbind_tcf     =       dpaa2_ceetm_tcf_unbind,
1169 +       .dump           =       dpaa2_ceetm_cls_dump,
1170 +       .dump_stats     =       dpaa2_ceetm_cls_dump_stats,
1171 +};
1172 +
1173 +struct Qdisc_ops dpaa2_ceetm_qdisc_ops __read_mostly = {
1174 +       .id             =       "ceetm",
1175 +       .priv_size      =       sizeof(struct dpaa2_ceetm_qdisc),
1176 +       .cl_ops         =       &dpaa2_ceetm_cls_ops,
1177 +       .init           =       dpaa2_ceetm_init,
1178 +       .destroy        =       dpaa2_ceetm_destroy,
1179 +       .change         =       dpaa2_ceetm_change,
1180 +       .dump           =       dpaa2_ceetm_dump,
1181 +       .attach         =       dpaa2_ceetm_attach,
1182 +       .owner          =       THIS_MODULE,
1183 +};
1184 +
1185 +/* Run the filters and classifiers attached to the qdisc on the provided skb */
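+/* Returns 0 with *qdid / *qpri updated for the matched class, or -1 when a
+ * filter action (TC_ACT_SHOT & co.) intercepted the skb; on no match, 0 is
+ * returned and the outputs are left at their defaults
+ */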
1186 +int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
1187 +                        int *qdid, u8 *qpri)
1188 +{
1189 +       struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1190 +       struct dpaa2_ceetm_class *cl = NULL;
1191 +       struct tcf_result res;
1192 +       struct tcf_proto *tcf;
1193 +       int result;
1194 +
1195 +       tcf = rcu_dereference_bh(priv->filter_list);
1196 +       while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
1197 +#ifdef CONFIG_NET_CLS_ACT
1198 +               switch (result) {
1199 +               case TC_ACT_QUEUED:
1200 +               case TC_ACT_STOLEN:
1201 +               case TC_ACT_SHOT:
1202 +                       /* No valid class found due to action */
1203 +                       return -1;
1204 +               }
1205 +#endif
1206 +               cl = (void *)res.class;
1207 +               if (!cl) {
1208 +                       /* The filter leads to the qdisc */
1209 +                       if (res.classid == sch->handle)
1210 +                               return 0;
1211 +
1212 +                       cl = dpaa2_ceetm_find(res.classid, sch);
1213 +                       /* The filter leads to an invalid class */
1214 +                       if (!cl)
1215 +                               break;
1216 +               }
1217 +
1218 +               /* The class might have its own filters attached */
1219 +               tcf = rcu_dereference_bh(cl->filter_list);
1220 +       }
1221 +
1222 +       /* No valid class found */
1223 +       if (!cl)
1224 +               return 0;
1225 +
1226 +       switch (cl->type) {
1227 +       case CEETM_ROOT:
1228 +               *qdid = cl->root.ch_id;
1229 +
1230 +               /* The root class does not have a child prio qdisc */
1231 +               if (!cl->child)
1232 +                       return 0;
1233 +
1234 +               /* Run the prio qdisc classifiers */
1235 +               return dpaa2_ceetm_classify(skb, cl->child, qdid, qpri);
1236 +
1237 +       case CEETM_PRIO:
1238 +               *qpri = cl->prio.qpri;
1239 +               break;
1240 +       }
1241 +
1242 +       return 0;
1243 +}
1244 +
1245 +int __init dpaa2_ceetm_register(void)
1246 +{
1247 +       int err = 0;
1248 +
1249 +       pr_debug(KBUILD_MODNAME ": " DPAA2_CEETM_DESCRIPTION "\n");
1250 +
1251 +       err = register_qdisc(&dpaa2_ceetm_qdisc_ops);
1252 +       if (unlikely(err))
1253 +               pr_err(KBUILD_MODNAME
1254 +                      ": %s:%hu:%s(): register_qdisc() = %d\n",
1255 +                      KBUILD_BASENAME ".c", __LINE__, __func__, err);
1256 +
1257 +       return err;
1258 +}
1259 +
1260 +void __exit dpaa2_ceetm_unregister(void)
1261 +{
1262 +       pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
1263 +                KBUILD_BASENAME ".c", __func__);
1264 +
1265 +       unregister_qdisc(&dpaa2_ceetm_qdisc_ops);
1266 +}
1267 --- /dev/null
1268 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
1269 @@ -0,0 +1,183 @@
1270 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
1271 +/*
1272 + * Copyright 2017 NXP
1273 + *
1274 + */
1275 +
1276 +#ifndef __DPAA2_ETH_CEETM_H
1277 +#define __DPAA2_ETH_CEETM_H
1278 +
1279 +#include <net/pkt_sched.h>
1280 +#include <net/pkt_cls.h>
1281 +#include <net/netlink.h>
1282 +
1283 +#include "dpaa2-eth.h"
1284 +
1285 +/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
1286 + * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
1287 + * are reserved for the maximum 32 CEETM channels (majors and minors are in
1288 + * hex).
1289 + */
1290 +#define PFIFO_MIN_OFFSET               0x21
1291 +
1292 +#define DPAA2_CEETM_MIN_WEIGHT         100
1293 +#define DPAA2_CEETM_MAX_WEIGHT         24800
1294 +
1295 +#define DPAA2_CEETM_TD_THRESHOLD       1000
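+/* (threshold is in frames - taildrop is set with DPNI_CONGESTION_UNIT_FRAMES) */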
1296 +
1297 +enum wbfs_group_type {
1298 +       WBFS_GRP_A,
1299 +       WBFS_GRP_B,
1300 +       WBFS_GRP_LARGE
1301 +};
1302 +
1303 +enum {
1304 +       DPAA2_CEETM_TCA_UNSPEC,
1305 +       DPAA2_CEETM_TCA_COPT,
1306 +       DPAA2_CEETM_TCA_QOPS,
1307 +       DPAA2_CEETM_TCA_MAX,
1308 +};
1309 +
1310 +/* CEETM configuration types */
1311 +enum dpaa2_ceetm_type {
1312 +       CEETM_ROOT = 1,
1313 +       CEETM_PRIO,
1314 +};
1315 +
1316 +enum {
1317 +       STRICT_PRIORITY = 0,
1318 +       WEIGHTED_A,
1319 +       WEIGHTED_B,
1320 +};
1321 +
1322 +struct dpaa2_ceetm_shaping_cfg {
1323 +       __u64 cir; /* committed information rate */
1324 +       __u64 eir; /* excess information rate */
1325 +       __u16 cbs; /* committed burst size */
1326 +       __u16 ebs; /* excess burst size */
1327 +       __u8 coupled; /* shaper coupling */
1328 +};
1329 +
1330 +extern const struct nla_policy ceetm_policy[DPAA2_CEETM_TCA_MAX];
1331 +
1332 +struct dpaa2_ceetm_class;
1333 +struct dpaa2_ceetm_qdisc_stats;
1334 +struct dpaa2_ceetm_class_stats;
1335 +
1336 +/* corresponds to CEETM shaping at LNI level */
1337 +struct dpaa2_root_q {
1338 +       struct Qdisc **qdiscs;
1339 +       struct dpaa2_ceetm_qdisc_stats __percpu *qstats;
1340 +};
1341 +
1342 +/* corresponds to the number of priorities a channel serves */
1343 +struct dpaa2_prio_q {
1344 +       struct dpaa2_ceetm_class *parent;
1345 +       struct dpni_tx_priorities_cfg tx_prio_cfg;
1346 +};
1347 +
1348 +struct dpaa2_ceetm_qdisc {
1349 +       struct Qdisc_class_hash clhash;
1350 +       struct tcf_proto *filter_list; /* qdisc attached filters */
1351 +       struct tcf_block *block;
1352 +
1353 +       enum dpaa2_ceetm_type type; /* ROOT/PRIO */
1354 +       bool shaped;
1355 +       union {
1356 +               struct dpaa2_root_q root;
1357 +               struct dpaa2_prio_q prio;
1358 +       };
1359 +};
1360 +
1361 +/* CEETM Qdisc configuration parameters */
1362 +struct dpaa2_ceetm_tc_qopt {
1363 +       enum dpaa2_ceetm_type type;
1364 +       __u16 shaped;
1365 +       __u8 prio_group_A;
1366 +       __u8 prio_group_B;
1367 +       __u8 separate_groups;
1368 +};
1369 +
1370 +/* root class - corresponds to a channel */
1371 +struct dpaa2_root_c {
1372 +       struct dpaa2_ceetm_shaping_cfg shaping_cfg;
1373 +       u32 ch_id;
1374 +};
1375 +
1376 +/* prio class - corresponds to a strict priority queue (group) */
1377 +struct dpaa2_prio_c {
1378 +       struct dpaa2_ceetm_class_stats __percpu *cstats;
1379 +       u32 qpri;
1380 +       u8 mode;
1381 +       u16 weight;
1382 +};
1383 +
1384 +struct dpaa2_ceetm_class {
1385 +       struct Qdisc_class_common common;
1386 +       struct tcf_proto *filter_list; /* class attached filters */
1387 +       struct tcf_block *block;
1388 +       struct Qdisc *parent;
1389 +       struct Qdisc *child;
1390 +
1391 +       enum dpaa2_ceetm_type type; /* ROOT/PRIO */
1392 +       bool shaped;
1393 +       union {
1394 +               struct dpaa2_root_c root;
1395 +               struct dpaa2_prio_c prio;
1396 +       };
1397 +};
1398 +
1399 +/* CEETM Class configuration parameters */
1400 +struct dpaa2_ceetm_tc_copt {
1401 +       enum dpaa2_ceetm_type type;
1402 +       struct dpaa2_ceetm_shaping_cfg shaping_cfg;
1403 +       __u16 shaped;
1404 +       __u8 mode;
1405 +       __u16 weight;
1406 +};
1407 +
1408 +/* CEETM stats */
1409 +struct dpaa2_ceetm_qdisc_stats {
1410 +       __u32 drops;
1411 +};
1412 +
1413 +struct dpaa2_ceetm_class_stats {
1414 +       /* Software counters */
1415 +       struct gnet_stats_basic_packed bstats;
1416 +       __u32 ern_drop_count;
1417 +       __u32 congested_count;
1418 +};
1419 +
1420 +struct dpaa2_ceetm_tc_xstats {
1421 +       __u64 ceetm_dequeue_bytes;
1422 +       __u64 ceetm_dequeue_frames;
1423 +       __u64 ceetm_reject_bytes;
1424 +       __u64 ceetm_reject_frames;
1425 +};
1426 +
1427 +#ifdef CONFIG_FSL_DPAA2_ETH_CEETM
1428 +int __init dpaa2_ceetm_register(void);
1429 +void __exit dpaa2_ceetm_unregister(void);
1430 +int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
1431 +                        int *qdid, u8 *qpri);
1432 +#else
1433 +static inline int dpaa2_ceetm_register(void)
1434 +{
1435 +       return 0;
1436 +}
1437 +
1438 +static inline void dpaa2_ceetm_unregister(void) {}
1439 +
1440 +static inline int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
1441 +                                      int *qdid, u8 *qpri)
1442 +{
1443 +       return 0;
1444 +}
1445 +#endif
1446 +
1447 +static inline bool dpaa2_eth_ceetm_is_enabled(struct dpaa2_eth_priv *priv)
1448 +{
1449 +       return priv->ceetm_en;
1450 +}
1451 +
1452 +#endif
1453 --- /dev/null
1454 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
1455 @@ -0,0 +1,356 @@
1456 +
1457 +/* Copyright 2015 Freescale Semiconductor Inc.
1458 + *
1459 + * Redistribution and use in source and binary forms, with or without
1460 + * modification, are permitted provided that the following conditions are met:
1461 + *     * Redistributions of source code must retain the above copyright
1462 + *      notice, this list of conditions and the following disclaimer.
1463 + *     * Redistributions in binary form must reproduce the above copyright
1464 + *      notice, this list of conditions and the following disclaimer in the
1465 + *      documentation and/or other materials provided with the distribution.
1466 + *     * Neither the name of Freescale Semiconductor nor the
1467 + *      names of its contributors may be used to endorse or promote products
1468 + *      derived from this software without specific prior written permission.
1469 + *
1470 + *
1471 + * ALTERNATIVELY, this software may be distributed under the terms of the
1472 + * GNU General Public License ("GPL") as published by the Free Software
1473 + * Foundation, either version 2 of that License or (at your option) any
1474 + * later version.
1475 + *
1476 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1477 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1478 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1479 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1480 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1481 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1482 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1483 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1484 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1485 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1486 + */
1487 +
1488 +#include <linux/module.h>
1489 +#include <linux/debugfs.h>
1490 +#include "dpaa2-eth.h"
1491 +#include "dpaa2-eth-debugfs.h"
1492 +
1493 +#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
1494 +
1495 +static struct dentry *dpaa2_dbg_root;
1496 +
1497 +static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
1498 +{
1499 +       struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
1500 +       struct rtnl_link_stats64 *stats;
1501 +       struct dpaa2_eth_drv_stats *extras;
1502 +       int i;
1503 +
1504 +       seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
1505 +       seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
1506 +                  "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
1507 +                  "Tx SG", "Tx realloc", "Enq busy");
1508 +
1509 +       for_each_online_cpu(i) {
1510 +               stats = per_cpu_ptr(priv->percpu_stats, i);
1511 +               extras = per_cpu_ptr(priv->percpu_extras, i);
1512 +               seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
1513 +                          i,
1514 +                          stats->rx_packets,
1515 +                          stats->rx_errors,
1516 +                          extras->rx_sg_frames,
1517 +                          stats->tx_packets,
1518 +                          stats->tx_errors,
1519 +                          extras->tx_conf_frames,
1520 +                          extras->tx_sg_frames,
1521 +                          extras->tx_reallocs,
1522 +                          extras->tx_portal_busy);
1523 +       }
1524 +
1525 +       return 0;
1526 +}
1527 +
1528 +static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
1529 +{
1530 +       int err;
1531 +       struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
1532 +
1533 +       err = single_open(file, dpaa2_dbg_cpu_show, priv);
1534 +       if (err < 0)
1535 +               netdev_err(priv->net_dev, "single_open() failed\n");
1536 +
1537 +       return err;
1538 +}
1539 +
1540 +static const struct file_operations dpaa2_dbg_cpu_ops = {
1541 +       .open = dpaa2_dbg_cpu_open,
1542 +       .read = seq_read,
1543 +       .llseek = seq_lseek,
1544 +       .release = single_release,
1545 +};
1546 +
1547 +static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
1548 +{
1549 +       switch (fq->type) {
1550 +       case DPAA2_RX_FQ:
1551 +               return "Rx";
1552 +       case DPAA2_TX_CONF_FQ:
1553 +               return "Tx conf";
1554 +       case DPAA2_RX_ERR_FQ:
1555 +               return "Rx err";
1556 +       default:
1557 +               return "N/A";
1558 +       }
1559 +}
1560 +
1561 +static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
1562 +{
1563 +       struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
1564 +       struct dpaa2_eth_fq *fq;
1565 +       u32 fcnt, bcnt;
1566 +       int i, err;
1567 +
1568 +       seq_printf(file, "non-zero FQ stats for %s:\n", priv->net_dev->name);
1569 +       seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
1570 +                  "VFQID", "CPU", "Traffic Class", "Type", "Frames",
1571 +                  "Pending frames");
1572 +
1573 +       for (i = 0; i < priv->num_fqs; i++) {
1574 +               fq = &priv->fq[i];
1575 +               err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
1576 +               if (err)
1577 +                       fcnt = 0;
1578 +
1579 +               /* There are a lot of queues, so skip the zero-traffic ones */
1580 +               if (!fq->stats.frames && !fcnt)
1581 +                       continue;
1582 +
1583 +               seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
1584 +                          fq->fqid,
1585 +                          fq->target_cpu,
1586 +                          fq->tc,
1587 +                          fq_type_to_str(fq),
1588 +                          fq->stats.frames,
1589 +                          fcnt);
1590 +       }
1591 +
1592 +       return 0;
1593 +}
1594 +
1595 +static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
1596 +{
1597 +       int err;
1598 +       struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
1599 +
1600 +       err = single_open(file, dpaa2_dbg_fqs_show, priv);
1601 +       if (err < 0)
1602 +               netdev_err(priv->net_dev, "single_open() failed\n");
1603 +
1604 +       return err;
1605 +}
1606 +
1607 +static const struct file_operations dpaa2_dbg_fq_ops = {
1608 +       .open = dpaa2_dbg_fqs_open,
1609 +       .read = seq_read,
1610 +       .llseek = seq_lseek,
1611 +       .release = single_release,
1612 +};
1613 +
1614 +static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
1615 +{
1616 +       struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
1617 +       struct dpaa2_eth_channel *ch;
1618 +       int i;
1619 +
1620 +       seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
1621 +       seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
1622 +                  "CHID", "CPU", "Deq busy", "Frames", "CDANs",
1623 +                  "Avg frm/CDAN", "Buf count");
1624 +
1625 +       for (i = 0; i < priv->num_channels; i++) {
1626 +               ch = priv->channel[i];
1627 +               seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
1628 +                          ch->ch_id,
1629 +                          ch->nctx.desired_cpu,
1630 +                          ch->stats.dequeue_portal_busy,
1631 +                          ch->stats.frames,
1632 +                          ch->stats.cdan,
1633 +                          ch->stats.cdan ? ch->stats.frames / ch->stats.cdan : 0,
1634 +                          ch->buf_count);
1635 +       }
1636 +
1637 +       return 0;
1638 +}
1639 +
1640 +static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
1641 +{
1642 +       int err;
1643 +       struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
1644 +
1645 +       err = single_open(file, dpaa2_dbg_ch_show, priv);
1646 +       if (err < 0)
1647 +               netdev_err(priv->net_dev, "single_open() failed\n");
1648 +
1649 +       return err;
1650 +}
1651 +
1652 +static const struct file_operations dpaa2_dbg_ch_ops = {
1653 +       .open = dpaa2_dbg_ch_open,
1654 +       .read = seq_read,
1655 +       .llseek = seq_lseek,
1656 +       .release = single_release,
1657 +};
1658 +
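+/* Writing anything to this file resets the driver's software counters:
+ * the per-CPU stats and extras, plus the per-FQ and per-channel counters
+ */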
1659 +static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
1660 +                                    size_t count, loff_t *offset)
1661 +{
1662 +       struct dpaa2_eth_priv *priv = file->private_data;
1663 +       struct rtnl_link_stats64 *percpu_stats;
1664 +       struct dpaa2_eth_drv_stats *percpu_extras;
1665 +       struct dpaa2_eth_fq *fq;
1666 +       struct dpaa2_eth_channel *ch;
1667 +       int i;
1668 +
1669 +       for_each_online_cpu(i) {
1670 +               percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1671 +               memset(percpu_stats, 0, sizeof(*percpu_stats));
1672 +
1673 +               percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
1674 +               memset(percpu_extras, 0, sizeof(*percpu_extras));
1675 +       }
1676 +
1677 +       for (i = 0; i < priv->num_fqs; i++) {
1678 +               fq = &priv->fq[i];
1679 +               memset(&fq->stats, 0, sizeof(fq->stats));
1680 +       }
1681 +
1682 +       for (i = 0; i < priv->num_channels; i++) {
1683 +               ch = priv->channel[i];
1684 +               memset(&ch->stats, 0, sizeof(ch->stats));
1685 +       }
1686 +
1687 +       return count;
1688 +}
1689 +
1690 +static const struct file_operations dpaa2_dbg_reset_ops = {
1691 +       .open = simple_open,
1692 +       .write = dpaa2_dbg_reset_write,
1693 +};
1694 +
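+/* Writing anything to this file asks the MC firmware to reset the
+ * hardware DPNI counters
+ */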
1695 +static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
1696 +                                       const char __user *buf,
1697 +                                       size_t count, loff_t *offset)
1698 +{
1699 +       struct dpaa2_eth_priv *priv = file->private_data;
1700 +       int err;
1701 +
1702 +       err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
1703 +       if (err)
1704 +               netdev_err(priv->net_dev,
1705 +                          "dpni_reset_statistics() failed %d\n", err);
1706 +
1707 +       return count;
1708 +}
1709 +
1710 +static const struct file_operations dpaa2_dbg_reset_mc_ops = {
1711 +       .open = simple_open,
1712 +       .write = dpaa2_dbg_reset_mc_write,
1713 +};
1714 +
1715 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
1716 +{
1717 +       if (!dpaa2_dbg_root)
1718 +               return;
1719 +
1720 +       /* Create a directory for the interface */
1721 +       priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
1722 +                                          dpaa2_dbg_root);
1723 +       if (!priv->dbg.dir) {
1724 +               netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
1725 +               return;
1726 +       }
1727 +
1728 +       /* per-cpu stats file */
1729 +       priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
1730 +                                                 priv->dbg.dir, priv,
1731 +                                                 &dpaa2_dbg_cpu_ops);
1732 +       if (!priv->dbg.cpu_stats) {
1733 +               netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
1734 +               goto err_cpu_stats;
1735 +       }
1736 +
1737 +       /* per-fq stats file */
1738 +       priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
1739 +                                                priv->dbg.dir, priv,
1740 +                                                &dpaa2_dbg_fq_ops);
1741 +       if (!priv->dbg.fq_stats) {
1742 +               netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
1743 +               goto err_fq_stats;
1744 +       }
1745 +
1746 +       /* per-channel stats file */
1747 +       priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
1748 +                                                priv->dbg.dir, priv,
1749 +                                                &dpaa2_dbg_ch_ops);
1750 +       if (!priv->dbg.ch_stats) {
1751 +               netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
1752 +               goto err_ch_stats;
1753 +       }
1754 +
1755 +       /* reset stats */
1756 +       priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
1757 +                                                   priv->dbg.dir, priv,
1758 +                                                   &dpaa2_dbg_reset_ops);
1759 +       if (!priv->dbg.reset_stats) {
1760 +               netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
1761 +               goto err_reset_stats;
1762 +       }
1763 +
1764 +       /* reset MC stats */
1765 +       priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
1766 +                                               0222, priv->dbg.dir, priv,
1767 +                                               &dpaa2_dbg_reset_mc_ops);
1768 +       if (!priv->dbg.reset_mc_stats) {
1769 +               netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
1770 +               goto err_reset_mc_stats;
1771 +       }
1772 +
1773 +       return;
1774 +
1775 +err_reset_mc_stats:
1776 +       debugfs_remove(priv->dbg.reset_stats);
1777 +err_reset_stats:
1778 +       debugfs_remove(priv->dbg.ch_stats);
1779 +err_ch_stats:
1780 +       debugfs_remove(priv->dbg.fq_stats);
1781 +err_fq_stats:
1782 +       debugfs_remove(priv->dbg.cpu_stats);
1783 +err_cpu_stats:
1784 +       debugfs_remove(priv->dbg.dir);
1785 +}
1786 +
1787 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
1788 +{
1789 +       debugfs_remove(priv->dbg.reset_mc_stats);
1790 +       debugfs_remove(priv->dbg.reset_stats);
1791 +       debugfs_remove(priv->dbg.fq_stats);
1792 +       debugfs_remove(priv->dbg.ch_stats);
1793 +       debugfs_remove(priv->dbg.cpu_stats);
1794 +       debugfs_remove(priv->dbg.dir);
1795 +}
1796 +
1797 +void dpaa2_eth_dbg_init(void)
1798 +{
1799 +       dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
1800 +       if (!dpaa2_dbg_root) {
1801 +               pr_err("DPAA2-ETH: debugfs create failed\n");
1802 +               return;
1803 +       }
1804 +
1805 +       pr_info("DPAA2-ETH: debugfs created\n");
1806 +}
1807 +
1808 +void __exit dpaa2_eth_dbg_exit(void)
1809 +{
1810 +       debugfs_remove(dpaa2_dbg_root);
1811 +}
1812 --- /dev/null
1813 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
1814 @@ -0,0 +1,60 @@
1815 +/* Copyright 2015 Freescale Semiconductor Inc.
1816 + *
1817 + * Redistribution and use in source and binary forms, with or without
1818 + * modification, are permitted provided that the following conditions are met:
1819 + *     * Redistributions of source code must retain the above copyright
1820 + *      notice, this list of conditions and the following disclaimer.
1821 + *     * Redistributions in binary form must reproduce the above copyright
1822 + *      notice, this list of conditions and the following disclaimer in the
1823 + *      documentation and/or other materials provided with the distribution.
1824 + *     * Neither the name of Freescale Semiconductor nor the
1825 + *      names of its contributors may be used to endorse or promote products
1826 + *      derived from this software without specific prior written permission.
1827 + *
1828 + *
1829 + * ALTERNATIVELY, this software may be distributed under the terms of the
1830 + * GNU General Public License ("GPL") as published by the Free Software
1831 + * Foundation, either version 2 of that License or (at your option) any
1832 + * later version.
1833 + *
1834 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1835 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1836 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1837 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1838 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1839 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1840 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1841 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1842 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1843 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1844 + */
1845 +
1846 +#ifndef DPAA2_ETH_DEBUGFS_H
1847 +#define DPAA2_ETH_DEBUGFS_H
1848 +
1849 +#include <linux/dcache.h>
1850 +
1851 +struct dpaa2_eth_priv;
1852 +
1853 +struct dpaa2_debugfs {
1854 +       struct dentry *dir;
1855 +       struct dentry *fq_stats;
1856 +       struct dentry *ch_stats;
1857 +       struct dentry *cpu_stats;
1858 +       struct dentry *reset_stats;
1859 +       struct dentry *reset_mc_stats;
1860 +};
1861 +
1862 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
1863 +void dpaa2_eth_dbg_init(void);
1864 +void dpaa2_eth_dbg_exit(void);
1865 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
1866 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
1867 +#else
1868 +static inline void dpaa2_eth_dbg_init(void) {}
1869 +static inline void dpaa2_eth_dbg_exit(void) {}
1870 +static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
1871 +static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
1872 +#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
1873 +
1874 +#endif /* DPAA2_ETH_DEBUGFS_H */
1875 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
1876 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
1877 @@ -1,32 +1,5 @@
1878 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
1879  /* Copyright 2014-2015 Freescale Semiconductor Inc.
1880 - *
1881 - * Redistribution and use in source and binary forms, with or without
1882 - * modification, are permitted provided that the following conditions are met:
1883 - *     * Redistributions of source code must retain the above copyright
1884 - *      notice, this list of conditions and the following disclaimer.
1885 - *     * Redistributions in binary form must reproduce the above copyright
1886 - *      notice, this list of conditions and the following disclaimer in the
1887 - *      documentation and/or other materials provided with the distribution.
1888 - *     * Neither the name of Freescale Semiconductor nor the
1889 - *      names of its contributors may be used to endorse or promote products
1890 - *      derived from this software without specific prior written permission.
1891 - *
1892 - *
1893 - * ALTERNATIVELY, this software may be distributed under the terms of the
1894 - * GNU General Public License ("GPL") as published by the Free Software
1895 - * Foundation, either version 2 of that License or (at your option) any
1896 - * later version.
1897 - *
1898 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1899 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1900 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1901 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1902 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1903 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1904 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1905 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1906 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1907 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1908   */
1909  
1910  #undef TRACE_SYSTEM
1911 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
1912 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
1913 @@ -1,33 +1,6 @@
1914 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
1915  /* Copyright 2014-2016 Freescale Semiconductor Inc.
1916   * Copyright 2016-2017 NXP
1917 - *
1918 - * Redistribution and use in source and binary forms, with or without
1919 - * modification, are permitted provided that the following conditions are met:
1920 - *     * Redistributions of source code must retain the above copyright
1921 - *      notice, this list of conditions and the following disclaimer.
1922 - *     * Redistributions in binary form must reproduce the above copyright
1923 - *      notice, this list of conditions and the following disclaimer in the
1924 - *      documentation and/or other materials provided with the distribution.
1925 - *     * Neither the name of Freescale Semiconductor nor the
1926 - *      names of its contributors may be used to endorse or promote products
1927 - *      derived from this software without specific prior written permission.
1928 - *
1929 - *
1930 - * ALTERNATIVELY, this software may be distributed under the terms of the
1931 - * GNU General Public License ("GPL") as published by the Free Software
1932 - * Foundation, either version 2 of that License or (at your option) any
1933 - * later version.
1934 - *
1935 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1936 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1937 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1938 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1939 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1940 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1941 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1942 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1943 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1944 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1945   */
1946  #include <linux/init.h>
1947  #include <linux/module.h>
1948 @@ -38,9 +11,14 @@
1949  #include <linux/msi.h>
1950  #include <linux/kthread.h>
1951  #include <linux/iommu.h>
1952 -
1953 +#include <linux/net_tstamp.h>
1954 +#include <linux/bpf.h>
1955 +#include <linux/filter.h>
1956 +#include <linux/atomic.h>
1957 +#include <net/sock.h>
1958  #include "../../fsl-mc/include/mc.h"
1959  #include "dpaa2-eth.h"
1960 +#include "dpaa2-eth-ceetm.h"
1961  
1962  /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
1963   * using trace events only need to #include <trace/events/sched.h>
1964 @@ -52,8 +30,6 @@ MODULE_LICENSE("Dual BSD/GPL");
1965  MODULE_AUTHOR("Freescale Semiconductor, Inc");
1966  MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
1967  
1968 -const char dpaa2_eth_drv_version[] = "0.1";
1969 -
1970  static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
1971                                 dma_addr_t iova_addr)
1972  {
1973 @@ -104,26 +80,27 @@ static void free_rx_fd(struct dpaa2_eth_
1974                 /* We don't support any other format */
1975                 return;
1976  
1977 -       /* For S/G frames, we first need to free all SG entries */
1978 +       /* For S/G frames, we first need to free all SG entries
1979 +        * except the first one, which was taken care of already
1980 +        */
1981         sgt = vaddr + dpaa2_fd_get_offset(fd);
1982 -       for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
1983 +       for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
1984                 addr = dpaa2_sg_get_addr(&sgt[i]);
1985                 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
1986 -               dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
1987 -                                DMA_FROM_DEVICE);
1988 +               dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
1989 +                              DMA_BIDIRECTIONAL);
1990  
1991 -               skb_free_frag(sg_vaddr);
1992 +               free_pages((unsigned long)sg_vaddr, 0);
1993                 if (dpaa2_sg_is_final(&sgt[i]))
1994                         break;
1995         }
1996  
1997  free_buf:
1998 -       skb_free_frag(vaddr);
1999 +       free_pages((unsigned long)vaddr, 0);
2000  }
2001  
2002  /* Build a linear skb based on a single-buffer frame descriptor */
2003 -static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
2004 -                                       struct dpaa2_eth_channel *ch,
2005 +static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
2006                                         const struct dpaa2_fd *fd,
2007                                         void *fd_vaddr)
2008  {
2009 @@ -133,8 +110,7 @@ static struct sk_buff *build_linear_skb(
2010  
2011         ch->buf_count--;
2012  
2013 -       skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
2014 -                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
2015 +       skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
2016         if (unlikely(!skb))
2017                 return NULL;
2018  
2019 @@ -169,16 +145,20 @@ static struct sk_buff *build_frag_skb(st
2020                 /* Get the address and length from the S/G entry */
2021                 sg_addr = dpaa2_sg_get_addr(sge);
2022                 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
2023 -               dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
2024 -                                DMA_FROM_DEVICE);
2025 +               dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
2026 +                              DMA_BIDIRECTIONAL);
2027  
2028                 sg_length = dpaa2_sg_get_len(sge);
2029  
2030                 if (i == 0) {
2031                         /* We build the skb around the first data buffer */
2032 -                       skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
2033 -                               SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
2034 +                       skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
2035                         if (unlikely(!skb)) {
2036 +                               /* Free the first SG entry now, since we already
2037 +                                * unmapped it and obtained the virtual address
2038 +                                */
2039 +                               free_pages((unsigned long)sg_vaddr, 0);
2040 +
2041                                 /* We still need to subtract the buffers used
2042                                  * by this FD from our software counter
2043                                  */
2044 @@ -213,17 +193,172 @@ static struct sk_buff *build_frag_skb(st
2045                         break;
2046         }
2047  
2048 +       WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
2049 +
2050         /* Count all data buffers + SG table buffer */
2051         ch->buf_count -= i + 2;
2052  
2053         return skb;
2054  }
2055  
2056 +static int dpaa2_eth_xdp_tx(struct dpaa2_eth_priv *priv,
2057 +                           struct dpaa2_fd *fd,
2058 +                           void *buf_start,
2059 +                           u16 queue_id)
2060 +{
2061 +       struct dpaa2_eth_fq *fq;
2062 +       struct rtnl_link_stats64 *percpu_stats;
2063 +       struct dpaa2_eth_drv_stats *percpu_extras;
2064 +       struct dpaa2_faead *faead;
2065 +       u32 ctrl, frc;
2066 +       int i, err;
2067 +
2068 +       /* Mark the egress frame annotation area as valid */
2069 +       frc = dpaa2_fd_get_frc(fd);
2070 +       dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
2071 +       dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
2072 +
2073 +       ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
2074 +       faead = dpaa2_get_faead(buf_start, false);
2075 +       faead->ctrl = cpu_to_le32(ctrl);
2076 +       faead->conf_fqid = 0;
2077 +
2078 +       percpu_stats = this_cpu_ptr(priv->percpu_stats);
2079 +       percpu_extras = this_cpu_ptr(priv->percpu_extras);
2080 +
2081 +       fq = &priv->fq[queue_id];
2082 +       for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
2083 +               err = priv->enqueue(priv, fq, fd, 0);
2084 +               if (err != -EBUSY)
2085 +                       break;
2086 +       }
2087 +
2088 +       percpu_extras->tx_portal_busy += i;
2089 +       if (unlikely(err)) {
2090 +               percpu_stats->tx_errors++;
2091 +       } else {
2092 +               percpu_stats->tx_packets++;
2093 +               percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
2094 +       }
2095 +
2096 +       return err;
2097 +}
2098 +
2099 +/* Free buffers acquired from the buffer pool or which were meant to
2100 + * be released back to the pool
2101 + */
2102 +static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
2103 +{
2104 +       struct device *dev = priv->net_dev->dev.parent;
2105 +       void *vaddr;
2106 +       int i;
2107 +
2108 +       for (i = 0; i < count; i++) {
2109 +               vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
2110 +               dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
2111 +                              DMA_BIDIRECTIONAL);
2112 +               free_pages((unsigned long)vaddr, 0);
2113 +       }
2114 +}
2115 +
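+/* Stash the buffer address for recycling; once a full batch of
+ * DPAA2_ETH_BUFS_PER_CMD buffers is gathered, release them back to the
+ * pool in a single command, falling back to freeing them on a real error
+ */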
2116 +static void release_fd_buf(struct dpaa2_eth_priv *priv,
2117 +                          struct dpaa2_eth_channel *ch,
2118 +                          dma_addr_t addr)
2119 +{
2120 +       int err;
2121 +
2122 +       ch->rel_buf_array[ch->rel_buf_cnt++] = addr;
2123 +       if (likely(ch->rel_buf_cnt < DPAA2_ETH_BUFS_PER_CMD))
2124 +               return;
2125 +
2126 +       while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
2127 +                                              ch->rel_buf_array,
2128 +                                              ch->rel_buf_cnt)) == -EBUSY)
2129 +               cpu_relax();
2130 +
2131 +       if (err)
2132 +               free_bufs(priv, ch->rel_buf_array, ch->rel_buf_cnt);
2133 +
2134 +       ch->rel_buf_cnt = 0;
2135 +}
2136 +
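+/* Run the XDP program, if one is attached, on a single-buffer Rx frame
+ * and act on its verdict. Returns the XDP action; for anything other than
+ * XDP_PASS the frame is consumed here and the caller must not touch it
+ */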
2137 +static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
2138 +                            struct dpaa2_eth_channel *ch,
2139 +                            struct dpaa2_fd *fd,
2140 +                            u16 queue_id,
2141 +                            void *vaddr)
2142 +{
2143 +       struct device *dev = priv->net_dev->dev.parent;
2144 +       dma_addr_t addr = dpaa2_fd_get_addr(fd);
2145 +       struct rtnl_link_stats64 *percpu_stats;
2146 +       struct bpf_prog *xdp_prog;
2147 +       struct xdp_buff xdp;
2148 +       u32 xdp_act = XDP_PASS;
2149 +
2150 +       rcu_read_lock();
+
+       /* The prog pointer must be read and run under the RCU read lock,
+        * otherwise it could be freed from under us by the teardown path
+        */
+       xdp_prog = READ_ONCE(ch->xdp_prog);
+       if (!xdp_prog) {
+               rcu_read_unlock();
+               return xdp_act;
+       }
2153 +
2154 +       percpu_stats = this_cpu_ptr(priv->percpu_stats);
2155 +
2156 +       xdp.data = vaddr + dpaa2_fd_get_offset(fd);
2157 +       xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
2158 +       /* Allow the XDP program to use the specially reserved headroom */
2159 +       xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
2160 +
2162 +       xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
2163 +
2164 +       /* xdp.data pointer may have changed */
2165 +       dpaa2_fd_set_offset(fd, xdp.data - vaddr);
2166 +       dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
2167 +
2168 +       switch (xdp_act) {
2169 +       case XDP_PASS:
2170 +               break;
2171 +       default:
2172 +               bpf_warn_invalid_xdp_action(xdp_act);
2173 +       case XDP_ABORTED:
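+               /* fall through */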
2174 +       case XDP_DROP:
2175 +               /* This is our buffer, so we can release it back to hardware */
2176 +               release_fd_buf(priv, ch, addr);
2177 +               percpu_stats->rx_dropped++;
2178 +               break;
2179 +       case XDP_TX:
2180 +               if (dpaa2_eth_xdp_tx(priv, fd, vaddr, queue_id)) {
2181 +                       dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2182 +                                        DMA_BIDIRECTIONAL);
2183 +                       free_rx_fd(priv, fd, vaddr);
2184 +                       ch->buf_count--;
2185 +               }
2186 +               break;
2187 +       case XDP_REDIRECT:
2188 +               dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2189 +                                DMA_BIDIRECTIONAL);
2190 +               ch->buf_count--;
2191 +               ch->flush = true;
2192 +               /* Mark the actual start of the data buffer */
2193 +               xdp.data_hard_start = vaddr;
2194 +               if (xdp_do_redirect(priv->net_dev, &xdp, xdp_prog))
2195 +                       free_rx_fd(priv, fd, vaddr);
2196 +               break;
2197 +       }
2198 +
2199 +       if (xdp_act == XDP_TX || xdp_act == XDP_REDIRECT) {
2200 +               percpu_stats->rx_packets++;
2201 +               percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
2202 +       }
2203 +
2204 +       rcu_read_unlock();
2205 +
2206 +       return xdp_act;
2207 +}
2208 +
2209  /* Main Rx frame processing routine */
2210  static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
2211                          struct dpaa2_eth_channel *ch,
2212                          const struct dpaa2_fd *fd,
2213 -                        struct napi_struct *napi)
2214 +                        struct dpaa2_eth_fq *fq)
2215  {
2216         dma_addr_t addr = dpaa2_fd_get_addr(fd);
2217         u8 fd_format = dpaa2_fd_get_format(fd);
2218 @@ -235,14 +370,16 @@ static void dpaa2_eth_rx(struct dpaa2_et
2219         struct dpaa2_fas *fas;
2220         void *buf_data;
2221         u32 status = 0;
2222 +       u32 xdp_act;
2223  
2224         /* Tracing point */
2225         trace_dpaa2_rx_fd(priv->net_dev, fd);
2226  
2227         vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
2228 -       dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
2229 +       dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2230 +                               DMA_BIDIRECTIONAL);
2231  
2232 -       fas = dpaa2_get_fas(vaddr);
2233 +       fas = dpaa2_get_fas(vaddr, false);
2234         prefetch(fas);
2235         buf_data = vaddr + dpaa2_fd_get_offset(fd);
2236         prefetch(buf_data);
2237 @@ -251,22 +388,43 @@ static void dpaa2_eth_rx(struct dpaa2_et
2238         percpu_extras = this_cpu_ptr(priv->percpu_extras);
2239  
2240         if (fd_format == dpaa2_fd_single) {
2241 -               skb = build_linear_skb(priv, ch, fd, vaddr);
2242 +               xdp_act = dpaa2_eth_run_xdp(priv, ch, (struct dpaa2_fd *)fd,
2243 +                                           fq->flowid, vaddr);
2244 +               if (xdp_act != XDP_PASS)
2245 +                       return;
2246 +
2247 +               dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2248 +                              DMA_BIDIRECTIONAL);
2249 +               skb = build_linear_skb(ch, fd, vaddr);
2250         } else if (fd_format == dpaa2_fd_sg) {
2251 +               dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2252 +                              DMA_BIDIRECTIONAL);
2253                 skb = build_frag_skb(priv, ch, buf_data);
2254 -               skb_free_frag(vaddr);
2255 +               free_pages((unsigned long)vaddr, 0);
2256                 percpu_extras->rx_sg_frames++;
2257                 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
2258         } else {
2259                 /* We don't support any other format */
2260 -               goto err_frame_format;
2261 +               goto drop_cnt;
2262         }
2263  
2264         if (unlikely(!skb))
2265 -               goto err_build_skb;
2266 +               goto drop_fd;
2267  
2268         prefetch(skb->data);
2269  
2270 +       /* Get the timestamp value */
2271 +       if (priv->ts_rx_en) {
2272 +               struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2273 +               __le64 *ts = dpaa2_get_ts(vaddr, false);
2274 +               u64 ns;
2275 +
2276 +               memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2277 +
2278 +               ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ts);
2279 +               shhwtstamps->hwtstamp = ns_to_ktime(ns);
2280 +       }
2281 +
2282         /* Check if we need to validate the L4 csum */
2283         if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
2284                 status = le32_to_cpu(fas->status);
2285 @@ -274,30 +432,80 @@ static void dpaa2_eth_rx(struct dpaa2_et
2286         }
2287  
2288         skb->protocol = eth_type_trans(skb, priv->net_dev);
2289 +       skb_record_rx_queue(skb, fq->flowid);
2290  
2291         percpu_stats->rx_packets++;
2292         percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
2293  
2294 -       napi_gro_receive(napi, skb);
2295 +       napi_gro_receive(&ch->napi, skb);
2296  
2297         return;
2298  
2299 -err_build_skb:
2300 +drop_fd:
2301         free_rx_fd(priv, fd, vaddr);
2302 -err_frame_format:
2303 +drop_cnt:
2304         percpu_stats->rx_dropped++;
2305  }
2306  
2307 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
2308 +/* Processing of Rx frames received on the error FQ
2309 + * We check and print the error bits and then free the frame
2310 + */
2311 +static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
2312 +                            struct dpaa2_eth_channel *ch,
2313 +                            const struct dpaa2_fd *fd,
2314 +                            struct napi_struct *napi __always_unused,
2315 +                            u16 queue_id __always_unused)
2316 +{
2317 +       struct device *dev = priv->net_dev->dev.parent;
2318 +       dma_addr_t addr = dpaa2_fd_get_addr(fd);
2319 +       void *vaddr;
2320 +       struct rtnl_link_stats64 *percpu_stats;
2321 +       struct dpaa2_fas *fas;
2322 +       u32 status = 0;
2323 +       u32 fd_errors;
2324 +       bool has_fas_errors = false;
2325 +
2326 +       vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
2327 +       dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
2328 +
2329 +       /* check frame errors in the FD field */
2330 +       fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_RX_ERR_MASK;
2331 +       if (likely(fd_errors)) {
2332 +               has_fas_errors = (fd_errors & FD_CTRL_FAERR) &&
2333 +                                !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
2334 +               if (net_ratelimit())
2335 +                       netdev_dbg(priv->net_dev, "Rx frame FD err: 0x%08x\n",
2336 +                                  fd_errors);
2337 +       }
2338 +
2339 +       /* check frame errors in the FAS field */
2340 +       if (has_fas_errors) {
2341 +               fas = dpaa2_get_fas(vaddr, false);
2342 +               status = le32_to_cpu(fas->status);
2343 +               if (net_ratelimit())
2344 +                       netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
2345 +                                  status & DPAA2_FAS_RX_ERR_MASK);
2346 +       }
2347 +       free_rx_fd(priv, fd, vaddr);
2348 +
2349 +       percpu_stats = this_cpu_ptr(priv->percpu_stats);
2350 +       percpu_stats->rx_errors++;
2351 +       ch->buf_count--;
2352 +}
2353 +#endif
2354 +
2355  /* Consume all frames pull-dequeued into the store. This is the simplest way to
2356   * make sure we don't accidentally issue another volatile dequeue which would
2357   * overwrite (leak) frames already in the store.
2358   *
2359   * Observance of NAPI budget is not our concern, leaving that to the caller.
2360   */
2361 -static int consume_frames(struct dpaa2_eth_channel *ch)
2362 +static int consume_frames(struct dpaa2_eth_channel *ch,
2363 +                         struct dpaa2_eth_fq **src)
2364  {
2365         struct dpaa2_eth_priv *priv = ch->priv;
2366 -       struct dpaa2_eth_fq *fq;
2367 +       struct dpaa2_eth_fq *fq = NULL;
2368         struct dpaa2_dq *dq;
2369         const struct dpaa2_fd *fd;
2370         int cleaned = 0;
2371 @@ -315,16 +523,51 @@ static int consume_frames(struct dpaa2_e
2372                 }
2373  
2374                 fd = dpaa2_dq_fd(dq);
2375 +               prefetch(fd);
2376 +
2377                 fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
2378 -               fq->stats.frames++;
2379  
2380 -               fq->consume(priv, ch, fd, &ch->napi);
2381 +               fq->consume(priv, ch, fd, fq);
2382                 cleaned++;
2383         } while (!is_last);
2384  
2385 +       if (!cleaned)
2386 +               return 0;
2387 +
2388 +       fq->stats.frames += cleaned;
2389 +       ch->stats.frames += cleaned;
2390 +
2391 +       /* A dequeue operation only pulls frames from a single queue
2392 +        * into the store. Return the frame queue as an out param.
2393 +        */
2394 +       if (src)
2395 +               *src = fq;
2396 +
2397         return cleaned;
2398  }
2399  
2400 +/* Configure the egress frame annotation for timestamp update */
2401 +static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
2402 +{
2403 +       struct dpaa2_faead *faead;
2404 +       u32 ctrl, frc;
2405 +
2406 +       /* Mark the egress frame annotation area as valid */
2407 +       frc = dpaa2_fd_get_frc(fd);
2408 +       dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
2409 +
2410 +       /* Set hardware annotation size */
2411 +       ctrl = dpaa2_fd_get_ctrl(fd);
2412 +       dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
2413 +
2414 +       /* enable UPD (update prepended data) bit in FAEAD field of
2415 +        * hardware frame annotation area
2416 +        */
2417 +       ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
2418 +       faead = dpaa2_get_faead(buf_start, true);
2419 +       faead->ctrl = cpu_to_le32(ctrl);
2420 +}
2421 +
2422  /* Create a frame descriptor based on a fragmented skb */
2423  static int build_sg_fd(struct dpaa2_eth_priv *priv,
2424                        struct sk_buff *skb,
2425 @@ -341,7 +584,6 @@ static int build_sg_fd(struct dpaa2_eth_
2426         int num_sg;
2427         int num_dma_bufs;
2428         struct dpaa2_eth_swa *swa;
2429 -       struct dpaa2_fas *fas;
2430  
2431         /* Create and map scatterlist.
2432          * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
2433 @@ -365,21 +607,14 @@ static int build_sg_fd(struct dpaa2_eth_
2434  
2435         /* Prepare the HW SGT structure */
2436         sgt_buf_size = priv->tx_data_offset +
2437 -                      sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
2438 -       sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
2439 +                      sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
2440 +       sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
2441         if (unlikely(!sgt_buf)) {
2442                 err = -ENOMEM;
2443                 goto sgt_buf_alloc_failed;
2444         }
2445         sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
2446 -
2447 -       /* PTA from egress side is passed as is to the confirmation side so
2448 -        * we need to clear some fields here in order to find consistent values
2449 -        * on TX confirmation. We are clearing FAS (Frame Annotation Status)
2450 -        * field from the hardware annotation area
2451 -        */
2452 -       fas = dpaa2_get_fas(sgt_buf);
2453 -       memset(fas, 0, DPAA2_FAS_SIZE);
2454 +       memset(sgt_buf, 0, sgt_buf_size);
2455  
2456         sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
2457  
2458 @@ -402,10 +637,11 @@ static int build_sg_fd(struct dpaa2_eth_
2459          * all of them on Tx Conf.
2460          */
2461         swa = (struct dpaa2_eth_swa *)sgt_buf;
2462 -       swa->skb = skb;
2463 -       swa->scl = scl;
2464 -       swa->num_sg = num_sg;
2465 -       swa->num_dma_bufs = num_dma_bufs;
2466 +       swa->type = DPAA2_ETH_SWA_SG;
2467 +       swa->sg.skb = skb;
2468 +       swa->sg.scl = scl;
2469 +       swa->sg.num_sg = num_sg;
2470 +       swa->sg.sgt_size = sgt_buf_size;
2471  
2472         /* Separately map the SGT buffer */
2473         addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
2474 @@ -417,13 +653,15 @@ static int build_sg_fd(struct dpaa2_eth_
2475         dpaa2_fd_set_format(fd, dpaa2_fd_sg);
2476         dpaa2_fd_set_addr(fd, addr);
2477         dpaa2_fd_set_len(fd, skb->len);
2478 -       dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
2479 -                         DPAA2_FD_CTRL_PTV1);
2480 +       dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
2481 +
2482 +       if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
2483 +               enable_tx_tstamp(fd, sgt_buf);
2484  
2485         return 0;
2486  
2487  dma_map_single_failed:
2488 -       kfree(sgt_buf);
2489 +       skb_free_frag(sgt_buf);
2490  sgt_buf_alloc_failed:
2491         dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
2492  dma_map_sg_failed:
2493 @@ -437,29 +675,27 @@ static int build_single_fd(struct dpaa2_
2494                            struct dpaa2_fd *fd)
2495  {
2496         struct device *dev = priv->net_dev->dev.parent;
2497 -       u8 *buffer_start;
2498 -       struct dpaa2_fas *fas;
2499 -       struct sk_buff **skbh;
2500 +       u8 *buffer_start, *aligned_start;
2501 +       struct dpaa2_eth_swa *swa;
2502         dma_addr_t addr;
2503  
2504 -       buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
2505 -                                DPAA2_ETH_TX_BUF_ALIGN,
2506 -                                DPAA2_ETH_TX_BUF_ALIGN);
2507 -
2508 -       /* PTA from egress side is passed as is to the confirmation side so
2509 -        * we need to clear some fields here in order to find consistent values
2510 -        * on TX confirmation. We are clearing FAS (Frame Annotation Status)
2511 -        * field from the hardware annotation area
2512 +       buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
2513 +
2514 +       /* If there's enough room to align the FD address, do it.
2515 +        * It will help hardware optimize accesses.
2516          */
2517 -       fas = dpaa2_get_fas(buffer_start);
2518 -       memset(fas, 0, DPAA2_FAS_SIZE);
2519 +       aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
2520 +                                 DPAA2_ETH_TX_BUF_ALIGN);
2521 +       if (aligned_start >= skb->head)
2522 +               buffer_start = aligned_start;
2523  
2524         /* Store a backpointer to the skb at the beginning of the buffer
2525          * (in the private data area) such that we can release it
2526          * on Tx confirm
2527          */
2528 -       skbh = (struct sk_buff **)buffer_start;
2529 -       *skbh = skb;
2530 +       swa = (struct dpaa2_eth_swa *)buffer_start;
2531 +       swa->type = DPAA2_ETH_SWA_SINGLE;
2532 +       swa->single.skb = skb;
2533  
2534         addr = dma_map_single(dev, buffer_start,
2535                               skb_tail_pointer(skb) - buffer_start,
2536 @@ -471,8 +707,10 @@ static int build_single_fd(struct dpaa2_
2537         dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
2538         dpaa2_fd_set_len(fd, skb->len);
2539         dpaa2_fd_set_format(fd, dpaa2_fd_single);
2540 -       dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
2541 -                         DPAA2_FD_CTRL_PTV1);
2542 +       dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
2543 +
2544 +       if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
2545 +               enable_tx_tstamp(fd, buffer_start);
2546  
2547         return 0;
2548  }
2549 @@ -483,72 +721,75 @@ static int build_single_fd(struct dpaa2_
2550   * back-pointed to is also freed.
2551   * This can be called either from dpaa2_eth_tx_conf() or on the error path of
2552   * dpaa2_eth_tx().
2553 - * Optionally, return the frame annotation status word (FAS), which needs
2554 - * to be checked if we're on the confirmation path.
2555   */
2556  static void free_tx_fd(const struct dpaa2_eth_priv *priv,
2557 -                      const struct dpaa2_fd *fd,
2558 -                      u32 *status)
2559 +                      const struct dpaa2_fd *fd, bool in_napi)
2560  {
2561         struct device *dev = priv->net_dev->dev.parent;
2562         dma_addr_t fd_addr;
2563 -       struct sk_buff **skbh, *skb;
2564 +       struct sk_buff *skb = NULL;
2565         unsigned char *buffer_start;
2566 -       int unmap_size;
2567 -       struct scatterlist *scl;
2568 -       int num_sg, num_dma_bufs;
2569         struct dpaa2_eth_swa *swa;
2570         u8 fd_format = dpaa2_fd_get_format(fd);
2571 -       struct dpaa2_fas *fas;
2572  
2573         fd_addr = dpaa2_fd_get_addr(fd);
2574 -       skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
2575 -       fas = dpaa2_get_fas(skbh);
2576 +       buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
2577 +       swa = (struct dpaa2_eth_swa *)buffer_start;
2578  
2579         if (fd_format == dpaa2_fd_single) {
2580 -               skb = *skbh;
2581 -               buffer_start = (unsigned char *)skbh;
2582 -               /* Accessing the skb buffer is safe before dma unmap, because
2583 -                * we didn't map the actual skb shell.
2584 -                */
2585 -               dma_unmap_single(dev, fd_addr,
2586 -                                skb_tail_pointer(skb) - buffer_start,
2587 -                                DMA_BIDIRECTIONAL);
2588 +               if (swa->type == DPAA2_ETH_SWA_SINGLE) {
2589 +                       skb = swa->single.skb;
2590 +                       /* Accessing the skb buffer is safe before dma unmap,
2591 +                        * because we didn't map the actual skb shell.
2592 +                        */
2593 +                       dma_unmap_single(dev, fd_addr,
2594 +                                        skb_tail_pointer(skb) - buffer_start,
2595 +                                        DMA_BIDIRECTIONAL);
2596 +               } else {
2597 +                       WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP,
2598 +                                 "Wrong SWA type");
2599 +                       dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
2600 +                                        DMA_BIDIRECTIONAL);
2601 +               }
2602         } else if (fd_format == dpaa2_fd_sg) {
2603 -               swa = (struct dpaa2_eth_swa *)skbh;
2604 -               skb = swa->skb;
2605 -               scl = swa->scl;
2606 -               num_sg = swa->num_sg;
2607 -               num_dma_bufs = swa->num_dma_bufs;
2608 +               skb = swa->sg.skb;
2609  
2610                 /* Unmap the scatterlist */
2611 -               dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
2612 -               kfree(scl);
2613 +               dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, DMA_BIDIRECTIONAL);
2614 +               kfree(swa->sg.scl);
2615  
2616                 /* Unmap the SGT buffer */
2617 -               unmap_size = priv->tx_data_offset +
2618 -                      sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
2619 -               dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
2620 +               dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
2621 +                                DMA_BIDIRECTIONAL);
2622         } else {
2623 -               /* Unsupported format, mark it as errored and give up */
2624 -               if (status)
2625 -                       *status = ~0;
2626 +               netdev_dbg(priv->net_dev, "Invalid FD format\n");
2627                 return;
2628         }
2629  
2630 -       /* Read the status from the Frame Annotation after we unmap the first
2631 -        * buffer but before we free it. The caller function is responsible
2632 -        * for checking the status value.
2633 -        */
2634 -       if (status)
2635 -               *status = le32_to_cpu(fas->status);
2636 +       if (swa->type == DPAA2_ETH_SWA_XDP) {
2637 +               page_frag_free(buffer_start);
2638 +               return;
2639 +       }
2640 +
2641 +       /* Get the timestamp value */
2642 +       if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
2643 +               struct skb_shared_hwtstamps shhwtstamps;
2644 +               __le64 *ts = dpaa2_get_ts(buffer_start, true);
2645 +               u64 ns;
2646 +
2647 +               memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2648 +
2649 +               ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ts);
2650 +               shhwtstamps.hwtstamp = ns_to_ktime(ns);
2651 +               skb_tstamp_tx(skb, &shhwtstamps);
2652 +       }
2653  
2654 -       /* Free SGT buffer kmalloc'ed on tx */
2655 +       /* Free SGT buffer allocated on tx */
2656         if (fd_format != dpaa2_fd_single)
2657 -               kfree(skbh);
2658 +               skb_free_frag(buffer_start);
2659  
2660         /* Move on with skb release */
2661 -       dev_kfree_skb(skb);
2662 +       napi_consume_skb(skb, in_napi);
2663  }
2664  
2665  static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
2666 @@ -558,20 +799,41 @@ static netdev_tx_t dpaa2_eth_tx(struct s
2667         struct rtnl_link_stats64 *percpu_stats;
2668         struct dpaa2_eth_drv_stats *percpu_extras;
2669         struct dpaa2_eth_fq *fq;
2670 +       struct netdev_queue *nq;
2671         u16 queue_mapping;
2672 -       int err, i;
2673 +       unsigned int needed_headroom;
2674 +       u32 fd_len;
2675 +       u8 prio;
2676 +       int err, i, ch_id = 0;
2677 +
2678 +       queue_mapping = skb_get_queue_mapping(skb);
2679 +       prio = netdev_txq_to_tc(net_dev, queue_mapping);
2680 +       /* Hardware interprets priority level 0 as being the highest,
2681 +        * so we need to do a reverse mapping to the netdev tc index
2682 +        */
2683 +       if (net_dev->num_tc)
2684 +               prio = net_dev->num_tc - prio - 1;
2685 +
2686 +       queue_mapping %= dpaa2_eth_queue_count(priv);
2687 +       fq = &priv->fq[queue_mapping];
2688  
2689         percpu_stats = this_cpu_ptr(priv->percpu_stats);
2690         percpu_extras = this_cpu_ptr(priv->percpu_extras);
2691  
2692 -       if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
2693 +       needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
2694 +       if (skb_headroom(skb) < needed_headroom) {
2695                 struct sk_buff *ns;
2696  
2697 -               ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
2698 +               ns = skb_realloc_headroom(skb, needed_headroom);
2699                 if (unlikely(!ns)) {
2700                         percpu_stats->tx_dropped++;
2701                         goto err_alloc_headroom;
2702                 }
2703 +               percpu_extras->tx_reallocs++;
2704 +
2705 +               if (skb->sk)
2706 +                       skb_set_owner_w(ns, skb->sk);
2707 +
2708                 dev_kfree_skb(skb);
2709                 skb = ns;
2710         }
2711 @@ -602,17 +864,24 @@ static netdev_tx_t dpaa2_eth_tx(struct s
2712                 goto err_build_fd;
2713         }
2714  
2715 +       if (dpaa2_eth_ceetm_is_enabled(priv)) {
2716 +               err = dpaa2_ceetm_classify(skb, net_dev->qdisc, &ch_id, &prio);
2717 +               if (err)
2718 +                       goto err_ceetm_classify;
2719 +       }
2720 +
2721         /* Tracing point */
2722         trace_dpaa2_tx_fd(net_dev, &fd);
2723  
2724 -       /* TxConf FQ selection primarily based on cpu affinity; this is
2725 -        * non-migratable context, so it's safe to call smp_processor_id().
2726 +       fd_len = dpaa2_fd_get_len(&fd);
2727 +       nq = netdev_get_tx_queue(net_dev, queue_mapping);
2728 +       netdev_tx_sent_queue(nq, fd_len);
2729 +
2730 +       /* Everything that happens after this enqueue might race with
2731 +        * the Tx confirmation callback for this frame
2732          */
2733 -       queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
2734 -       fq = &priv->fq[queue_mapping];
2735         for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
2736 -               err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
2737 -                                                 fq->tx_qdbin, &fd);
2738 +               err = priv->enqueue(priv, fq, &fd, 0);
2739                 if (err != -EBUSY)
2740                         break;
2741         }
2742 @@ -620,14 +889,17 @@ static netdev_tx_t dpaa2_eth_tx(struct s
2743         if (unlikely(err < 0)) {
2744                 percpu_stats->tx_errors++;
2745                 /* Clean up everything, including freeing the skb */
2746 -               free_tx_fd(priv, &fd, NULL);
2747 +               free_tx_fd(priv, &fd, false);
2748 +               netdev_tx_completed_queue(nq, 1, fd_len);
2749         } else {
2750                 percpu_stats->tx_packets++;
2751 -               percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
2752 +               percpu_stats->tx_bytes += fd_len;
2753         }
2754  
2755         return NETDEV_TX_OK;
2756  
2757 +err_ceetm_classify:
2758 +       free_tx_fd(priv, &fd, false);
+       /* free_tx_fd() has already released the skb, don't free it again */
+       return NETDEV_TX_OK;
2759  err_build_fd:
2760  err_alloc_headroom:
2761         dev_kfree_skb(skb);
2762 @@ -637,48 +909,39 @@ err_alloc_headroom:
2763  
2764  /* Tx confirmation frame processing routine */
2765  static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
2766 -                             struct dpaa2_eth_channel *ch,
2767 +                             struct dpaa2_eth_channel *ch __always_unused,
2768                               const struct dpaa2_fd *fd,
2769 -                             struct napi_struct *napi __always_unused)
2770 +                             struct dpaa2_eth_fq *fq)
2771  {
2772         struct rtnl_link_stats64 *percpu_stats;
2773         struct dpaa2_eth_drv_stats *percpu_extras;
2774 -       u32 status = 0;
2775 +       u32 fd_len = dpaa2_fd_get_len(fd);
2776         u32 fd_errors;
2777 -       bool has_fas_errors = false;
2778  
2779         /* Tracing point */
2780         trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
2781  
2782         percpu_extras = this_cpu_ptr(priv->percpu_extras);
2783         percpu_extras->tx_conf_frames++;
2784 -       percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
2785 +       percpu_extras->tx_conf_bytes += fd_len;
2786 +
2787 +       fq->dq_frames++;
2788 +       fq->dq_bytes += fd_len;
2789  
2790         /* Check frame errors in the FD field */
2791         fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
2792 -       if (unlikely(fd_errors)) {
2793 -               /* We only check error bits in the FAS field if corresponding
2794 -                * FAERR bit is set in FD and the FAS field is marked as valid
2795 -                */
2796 -               has_fas_errors = (fd_errors & DPAA2_FD_CTRL_FAERR) &&
2797 -                                !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
2798 -               if (net_ratelimit())
2799 -                       netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
2800 -                                  fd_errors);
2801 -       }
2802 -
2803 -       free_tx_fd(priv, fd, has_fas_errors ? &status : NULL);
2804 +       free_tx_fd(priv, fd, true);
2805  
2806         if (likely(!fd_errors))
2807                 return;
2808  
2809 +       if (net_ratelimit())
2810 +               netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
2811 +                          fd_errors);
2812 +
2813         percpu_stats = this_cpu_ptr(priv->percpu_stats);
2814         /* Tx-conf logically pertains to the egress path. */
2815         percpu_stats->tx_errors++;
2816 -
2817 -       if (has_fas_errors && net_ratelimit())
2818 -               netdev_dbg(priv->net_dev, "TX frame FAS error: 0x%08x\n",
2819 -                          status & DPAA2_FAS_TX_ERR_MASK);
2820  }
2821  
2822  static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
2823 @@ -728,26 +991,29 @@ static int set_tx_csum(struct dpaa2_eth_
2824  /* Perform a single release command to add buffers
2825   * to the specified buffer pool
2826   */
2827 -static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
2828 +static int add_bufs(struct dpaa2_eth_priv *priv,
2829 +                   struct dpaa2_eth_channel *ch, u16 bpid)
2830  {
2831         struct device *dev = priv->net_dev->dev.parent;
2832         u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
2833 -       void *buf;
2834 +       struct page *page;
2835         dma_addr_t addr;
2836 -       int i;
2837 +       int i, err;
2838  
2839         for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
2840                 /* Allocate buffer visible to WRIOP + skb shared info +
2841                  * alignment padding
2842                  */
2843 -               buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
2844 -               if (unlikely(!buf))
2845 +               /* allocate one page for each Rx buffer. WRIOP sees
2846 +                * the entire page except for a tailroom reserved for
2847 +                * skb shared info
2848 +                */
2849 +               page = dev_alloc_pages(0);
2850 +               if (!page)
2851                         goto err_alloc;
2852  
2853 -               buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
2854 -
2855 -               addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
2856 -                                     DMA_FROM_DEVICE);
2857 +               addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
2858 +                                   DMA_BIDIRECTIONAL);
2859                 if (unlikely(dma_mapping_error(dev, addr)))
2860                         goto err_map;
2861  
2862 @@ -755,28 +1021,33 @@ static int add_bufs(struct dpaa2_eth_pri
2863  
2864                 /* tracing point */
2865                 trace_dpaa2_eth_buf_seed(priv->net_dev,
2866 -                                        buf, DPAA2_ETH_BUF_RAW_SIZE,
2867 +                                        page, DPAA2_ETH_RX_BUF_RAW_SIZE,
2868                                          addr, DPAA2_ETH_RX_BUF_SIZE,
2869                                          bpid);
2870         }
2871  
2872  release_bufs:
2873 -       /* In case the portal is busy, retry until successful.
2874 -        * The buffer release function would only fail if the QBMan portal
2875 -        * was busy, which implies portal contention (i.e. more CPUs than
2876 -        * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
2877 -        * there is little we can realistically do, short of giving up -
2878 -        * in which case we'd risk depleting the buffer pool and never again
2879 -        * receiving the Rx interrupt which would kick-start the refill logic.
2880 -        * So just keep retrying, at the risk of being moved to ksoftirqd.
2881 -        */
2882 -       while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
2883 +       /* In case the portal is busy, retry until successful */
2884 +       while ((err = dpaa2_io_service_release(ch->dpio, bpid,
2885 +                                              buf_array, i)) == -EBUSY)
2886                 cpu_relax();
2887 +
2888 +       /* If release command failed, clean up and bail out;
2889 +        * not much else we can do about it
2890 +        */
2891 +       if (err) {
2892 +               free_bufs(priv, buf_array, i);
2893 +               return 0;
2894 +       }
2895 +
2896         return i;
2897  
2898  err_map:
2899 -       skb_free_frag(buf);
2900 +       __free_pages(page, 0);
2901  err_alloc:
2902 +       /* If we managed to allocate at least some buffers,
2903 +        * release them to hardware
2904 +        */
2905         if (i)
2906                 goto release_bufs;
2907  
2908 @@ -796,9 +1067,10 @@ static int seed_pool(struct dpaa2_eth_pr
2909          */
2910         preempt_disable();
2911         for (j = 0; j < priv->num_channels; j++) {
2912 -               for (i = 0; i < DPAA2_ETH_NUM_BUFS;
2913 +               priv->channel[j]->buf_count = 0;
2914 +               for (i = 0; i < priv->max_bufs_per_ch;
2915                      i += DPAA2_ETH_BUFS_PER_CMD) {
2916 -                       new_count = add_bufs(priv, bpid);
2917 +                       new_count = add_bufs(priv, priv->channel[j], bpid);
2918                         priv->channel[j]->buf_count += new_count;
2919  
2920                         if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
2921 @@ -818,10 +1090,8 @@ static int seed_pool(struct dpaa2_eth_pr
2922   */
2923  static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
2924  {
2925 -       struct device *dev = priv->net_dev->dev.parent;
2926         u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
2927 -       void *vaddr;
2928 -       int ret, i;
2929 +       int ret;
2930  
2931         do {
2932                 ret = dpaa2_io_service_acquire(NULL, priv->bpid,
2933 @@ -830,27 +1100,16 @@ static void drain_bufs(struct dpaa2_eth_
2934                         netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
2935                         return;
2936                 }
2937 -               for (i = 0; i < ret; i++) {
2938 -                       /* Same logic as on regular Rx path */
2939 -                       vaddr = dpaa2_iova_to_virt(priv->iommu_domain,
2940 -                                                  buf_array[i]);
2941 -                       dma_unmap_single(dev, buf_array[i],
2942 -                                        DPAA2_ETH_RX_BUF_SIZE,
2943 -                                        DMA_FROM_DEVICE);
2944 -                       skb_free_frag(vaddr);
2945 -               }
2946 +               free_bufs(priv, buf_array, ret);
2947         } while (ret);
2948  }
2949  
2950  static void drain_pool(struct dpaa2_eth_priv *priv)
2951  {
2952 -       int i;
2953 -
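+       /* Stay on one CPU so the buffer acquire calls in drain_bufs()
+        * keep using the same local DPIO portal
+        */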
2954 +       preempt_disable();
2955         drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
2956         drain_bufs(priv, 1);
2957 -
2958 -       for (i = 0; i < priv->num_channels; i++)
2959 -               priv->channel[i]->buf_count = 0;
2960 +       preempt_enable();
2961  }
2962  
2963  /* Function is called from softirq context only, so we don't need to guard
2964 @@ -862,19 +1121,19 @@ static int refill_pool(struct dpaa2_eth_
2965  {
2966         int new_count;
2967  
2968 -       if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
2969 +       if (likely(ch->buf_count >= priv->refill_thresh))
2970                 return 0;
2971  
2972         do {
2973 -               new_count = add_bufs(priv, bpid);
2974 +               new_count = add_bufs(priv, ch, bpid);
2975                 if (unlikely(!new_count)) {
2976                         /* Out of memory; abort for now, we'll try later on */
2977                         break;
2978                 }
2979                 ch->buf_count += new_count;
2980 -       } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
2981 +       } while (ch->buf_count < priv->max_bufs_per_ch);
2982  
2983 -       if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
2984 +       if (unlikely(ch->buf_count < priv->max_bufs_per_ch))
2985                 return -ENOMEM;
2986  
2987         return 0;
2988 @@ -887,7 +1146,8 @@ static int pull_channel(struct dpaa2_eth
2989  
2990         /* Retry while portal is busy */
2991         do {
2992 -               err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
2993 +               err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
2994 +                                                   ch->store);
2995                 dequeues++;
2996                 cpu_relax();
2997         } while (err == -EBUSY);
2998 @@ -908,14 +1168,17 @@ static int pull_channel(struct dpaa2_eth
2999  static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
3000  {
3001         struct dpaa2_eth_channel *ch;
3002 -       int cleaned = 0, store_cleaned;
3003         struct dpaa2_eth_priv *priv;
3004 +       int rx_cleaned = 0, txconf_cleaned = 0;
3005 +       struct dpaa2_eth_fq *fq, *txc_fq = NULL;
3006 +       struct netdev_queue *nq;
3007 +       int store_cleaned, work_done;
3008         int err;
3009  
3010         ch = container_of(napi, struct dpaa2_eth_channel, napi);
3011         priv = ch->priv;
3012  
3013 -       while (cleaned < budget) {
3014 +       do {
3015                 err = pull_channel(ch);
3016                 if (unlikely(err))
3017                         break;
3018 @@ -923,29 +1186,56 @@ static int dpaa2_eth_poll(struct napi_st
3019                 /* Refill pool if appropriate */
3020                 refill_pool(priv, ch, priv->bpid);
3021  
3022 -               store_cleaned = consume_frames(ch);
3023 -               cleaned += store_cleaned;
3024 +               store_cleaned = consume_frames(ch, &fq);
3025 +               if (!store_cleaned)
3026 +                       break;
3027 +               if (fq->type == DPAA2_RX_FQ) {
3028 +                       rx_cleaned += store_cleaned;
3029 +                       /* If these are XDP_REDIRECT frames, flush them now */
3030 +                       /* TODO: Do we need this? */
3031 +                       if (ch->flush) {
3032 +                               xdp_do_flush_map();
3033 +                               ch->flush = false;
3034 +                       }
3035 +               } else {
3036 +                       txconf_cleaned += store_cleaned;
3037 +                       /* We have a single Tx conf FQ on this channel */
3038 +                       txc_fq = fq;
3039 +               }
3040  
3041 -               /* If we have enough budget left for a full store,
3042 -                * try a new pull dequeue, otherwise we're done here
3043 +               /* If we either consumed the whole NAPI budget with Rx frames
3044 +                * or we reached the Tx confirmations threshold, we're done.
3045                  */
3046 -               if (store_cleaned == 0 ||
3047 -                   cleaned > budget - DPAA2_ETH_STORE_SIZE)
3048 -                       break;
3049 -       }
3050 +               if (rx_cleaned >= budget ||
3051 +                   txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
3052 +                       work_done = budget;
3053 +                       goto out;
3054 +               }
3055 +       } while (store_cleaned);
3056  
3057 -       if (cleaned < budget) {
3058 -               napi_complete_done(napi, cleaned);
3059 -               /* Re-enable data available notifications */
3060 -               do {
3061 -                       err = dpaa2_io_service_rearm(NULL, &ch->nctx);
3062 -                       cpu_relax();
3063 -               } while (err == -EBUSY);
3064 -       }
3065 +       /* We didn't consume the entire budget, so finish NAPI and
3066 +        * re-enable data availability notifications
3067 +        */
3068 +       napi_complete_done(napi, rx_cleaned);
3069 +       do {
3070 +               err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
3071 +               cpu_relax();
3072 +       } while (err == -EBUSY);
3073 +       WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
3074 +                 ch->nctx.desired_cpu);
3075  
3076 -       ch->stats.frames += cleaned;
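+       /* We may have processed only Tx confirmations in this poll, so
+        * report a nonzero work count nonetheless
+        */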
3077 +       work_done = max(rx_cleaned, 1);
3078  
3079 -       return cleaned;
3080 +out:
3081 +       if (txc_fq) {
3082 +               nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
3083 +               netdev_tx_completed_queue(nq, txc_fq->dq_frames,
3084 +                                         txc_fq->dq_bytes);
3085 +               txc_fq->dq_frames = 0;
3086 +               txc_fq->dq_bytes = 0;
3087 +       }
3088 +
3089 +       return work_done;
3090  }
3091  
3092  static void enable_ch_napi(struct dpaa2_eth_priv *priv)
3093 @@ -970,9 +1260,23 @@ static void disable_ch_napi(struct dpaa2
3094         }
3095  }
3096  
3097 +static void update_tx_fqids(struct dpaa2_eth_priv *priv);
3098 +
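+/* Update the Tx pause frame state; Rx taildrop configuration depends on
+ * whether link-level flow control is active, so recompute it on any change
+ */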
3099 +static void update_pf(struct dpaa2_eth_priv *priv,
3100 +                     struct dpni_link_state *state)
3101 +{
3102 +       bool pause_frames;
3103 +
3104 +       pause_frames = !!(state->options & DPNI_LINK_OPT_PAUSE);
3105 +       if (priv->tx_pause_frames != pause_frames) {
3106 +               priv->tx_pause_frames = pause_frames;
3107 +               set_rx_taildrop(priv);
3108 +       }
3109 +}
3110 +
3111  static int link_state_update(struct dpaa2_eth_priv *priv)
3112  {
3113 -       struct dpni_link_state state;
3114 +       struct dpni_link_state state = {0};
3115         int err;
3116  
3117         err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
3118 @@ -988,6 +1292,8 @@ static int link_state_update(struct dpaa
3119  
3120         priv->link_state = state;
3121         if (state.up) {
3122 +               update_tx_fqids(priv);
3123 +               update_pf(priv, &state);
3124                 netif_carrier_on(priv->net_dev);
3125                 netif_tx_start_all_queues(priv->net_dev);
3126         } else {
3127 @@ -1006,28 +1312,30 @@ static int dpaa2_eth_open(struct net_dev
3128         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3129         int err;
3130  
3131 -       err = seed_pool(priv, priv->bpid);
3132 -       if (err) {
3133 -               /* Not much to do; the buffer pool, though not filled up,
3134 -                * may still contain some buffers which would enable us
3135 -                * to limp on.
3136 -                */
3137 -               netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
3138 -                          priv->dpbp_dev->obj_desc.id, priv->bpid);
3139 -       }
3140 -
3141         /* We'll only start the txqs when the link is actually ready; make sure
3142          * we don't race against the link up notification, which may come
3143          * immediately after dpni_enable();
3144          */
3145         netif_tx_stop_all_queues(net_dev);
3146 -       enable_ch_napi(priv);
3147 +
3148         /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
3149          * return true and cause 'ip link show' to report the LOWER_UP flag,
3150          * even though the link notification wasn't even received.
3151          */
3152         netif_carrier_off(net_dev);
3153  
3154 +       err = seed_pool(priv, priv->bpid);
3155 +       if (err) {
3156 +               /* Not much to do; the buffer pool, though not filled up,
3157 +                * may still contain some buffers which would enable us
3158 +                * to limp on.
3159 +                */
3160 +               netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
3161 +                          priv->dpbp_dev->obj_desc.id, priv->bpid);
3162 +       }
3163 +
3164 +       priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
3165 +
3166         err = dpni_enable(priv->mc_io, 0, priv->mc_token);
3167         if (err < 0) {
3168                 netdev_err(net_dev, "dpni_enable() failed\n");
3169 @@ -1047,48 +1355,17 @@ static int dpaa2_eth_open(struct net_dev
3170  
3171  link_state_err:
3172  enable_err:
3173 -       disable_ch_napi(priv);
3174 +       priv->refill_thresh = 0;
3175         drain_pool(priv);
3176         return err;
3177  }
3178  
3179 -/* The DPIO store must be empty when we call this,
3180 - * at the end of every NAPI cycle.
3181 - */
3182 -static u32 drain_channel(struct dpaa2_eth_priv *priv,
3183 -                        struct dpaa2_eth_channel *ch)
3184 -{
3185 -       u32 drained = 0, total = 0;
3186 -
3187 -       do {
3188 -               pull_channel(ch);
3189 -               drained = consume_frames(ch);
3190 -               total += drained;
3191 -       } while (drained);
3192 -
3193 -       return total;
3194 -}
3195 -
3196 -static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
3197 -{
3198 -       struct dpaa2_eth_channel *ch;
3199 -       int i;
3200 -       u32 drained = 0;
3201 -
3202 -       for (i = 0; i < priv->num_channels; i++) {
3203 -               ch = priv->channel[i];
3204 -               drained += drain_channel(priv, ch);
3205 -       }
3206 -
3207 -       return drained;
3208 -}
3209 -
3210  static int dpaa2_eth_stop(struct net_device *net_dev)
3211  {
3212         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3213 -       int dpni_enabled;
3214 -       int retries = 10;
3215 -       u32 drained;
3216 +       int dpni_enabled = 0;
3217 +       int retries = 10, i;
3218 +       int err = 0;
3219  
3220         netif_tx_stop_all_queues(net_dev);
3221         netif_carrier_off(net_dev);
3222 @@ -1105,56 +1382,24 @@ static int dpaa2_eth_stop(struct net_dev
3223         } while (dpni_enabled && --retries);
3224         if (!retries) {
3225                 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
3226 -               /* Must go on and disable NAPI nonetheless, so we don't crash at
3227 -                * the next "ifconfig up"
3228 +               /* Must go on and finish processing pending frames, so we don't
3229 +                * crash at the next "ifconfig up"
3230                  */
3231 +               err = -ETIMEDOUT;
3232         }
3233  
3234 -       /* Wait for NAPI to complete on every core and disable it.
3235 -        * In particular, this will also prevent NAPI from being rescheduled if
3236 -        * a new CDAN is serviced, effectively discarding the CDAN. We therefore
3237 -        * don't even need to disarm the channels, except perhaps for the case
3238 -        * of a huge coalescing value.
3239 -        */
3240 -       disable_ch_napi(priv);
3241 +       priv->refill_thresh = 0;
3242  
3243 -        /* Manually drain the Rx and TxConf queues */
3244 -       drained = drain_ingress_frames(priv);
3245 -       if (drained)
3246 -               netdev_dbg(net_dev, "Drained %d frames.\n", drained);
3247 +       /* Wait for all running napi poll routines to finish, so that no
3248 +        * new refill operations are started
3249 +        */
3250 +       for (i = 0; i < priv->num_channels; i++)
3251 +               napi_synchronize(&priv->channel[i]->napi);
3252  
3253         /* Empty the buffer pool */
3254         drain_pool(priv);
3255  
3256 -       return 0;
3257 -}
3258 -
3259 -static int dpaa2_eth_init(struct net_device *net_dev)
3260 -{
3261 -       u64 supported = 0;
3262 -       u64 not_supported = 0;
3263 -       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3264 -       u32 options = priv->dpni_attrs.options;
3265 -
3266 -       /* Capabilities listing */
3267 -       supported |= IFF_LIVE_ADDR_CHANGE;
3268 -
3269 -       if (options & DPNI_OPT_NO_MAC_FILTER)
3270 -               not_supported |= IFF_UNICAST_FLT;
3271 -       else
3272 -               supported |= IFF_UNICAST_FLT;
3273 -
3274 -       net_dev->priv_flags |= supported;
3275 -       net_dev->priv_flags &= ~not_supported;
3276 -
3277 -       /* Features */
3278 -       net_dev->features = NETIF_F_RXCSUM |
3279 -                           NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3280 -                           NETIF_F_SG | NETIF_F_HIGHDMA |
3281 -                           NETIF_F_LLTX;
3282 -       net_dev->hw_features = net_dev->features;
3283 -
3284 -       return 0;
3285 +       return err;
3286  }
3287  
3288  static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
3289 @@ -1200,25 +1445,6 @@ static void dpaa2_eth_get_stats(struct n
3290         }
3291  }
3292  
3293 -static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
3294 -{
3295 -       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3296 -       int err;
3297 -
3298 -       /* Set the maximum Rx frame length to match the transmit side;
3299 -        * account for L2 headers when computing the MFL
3300 -        */
3301 -       err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
3302 -                                       (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
3303 -       if (err) {
3304 -               netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
3305 -               return err;
3306 -       }
3307 -
3308 -       net_dev->mtu = mtu;
3309 -       return 0;
3310 -}
3311 -
3312  /* Copy mac unicast addresses from @net_dev to @priv.
3313   * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
3314   */
3315 @@ -1380,16 +1606,430 @@ static int dpaa2_eth_set_features(struct
3316         return 0;
3317  }
3318  
3319 +static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3320 +{
3321 +       struct dpaa2_eth_priv *priv = netdev_priv(dev);
3322 +       struct hwtstamp_config config;
3323 +
3324 +       if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
3325 +               return -EFAULT;
3326 +
3327 +       switch (config.tx_type) {
3328 +       case HWTSTAMP_TX_OFF:
3329 +               priv->ts_tx_en = false;
3330 +               break;
3331 +       case HWTSTAMP_TX_ON:
3332 +               priv->ts_tx_en = true;
3333 +               break;
3334 +       default:
3335 +               return -ERANGE;
3336 +       }
3337 +
3338 +       if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
3339 +               priv->ts_rx_en = false;
3340 +       } else {
3341 +               priv->ts_rx_en = true;
3342 +               /* TS is set for all frame types, not only those requested */
3343 +               config.rx_filter = HWTSTAMP_FILTER_ALL;
3344 +       }
3345 +
3346 +       return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
3347 +                       -EFAULT : 0;
3348 +}
3349 +
3350 +static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3351 +{
3352 +       if (cmd == SIOCSHWTSTAMP)
3353 +               return dpaa2_eth_ts_ioctl(dev, rq, cmd);
3354 +
3355 +       return -EINVAL;
3356 +}
3357 +
3358 +static int set_buffer_layout(struct dpaa2_eth_priv *priv)
3359 +{
3360 +       struct device *dev = priv->net_dev->dev.parent;
3361 +       struct dpni_buffer_layout buf_layout = {0};
3362 +       u16 rx_buf_align;
3363 +       int err;
3364 +
3365 +       /* We need to check for WRIOP version 1.0.0, but depending on the MC
3366 +        * version, this number is not always provided correctly on rev1.
3367 +        * We need to check for both alternatives in this situation.
3368 +        */
3369 +       if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
3370 +           priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
3371 +               rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
3372 +       else
3373 +               rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
3374 +
3375 +       /* tx buffer */
3376 +       buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3377 +       buf_layout.pass_timestamp = true;
3378 +       buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3379 +                            DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3380 +       err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3381 +                                    DPNI_QUEUE_TX, &buf_layout);
3382 +       if (err) {
3383 +               dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
3384 +               return err;
3385 +       }
3386 +
3387 +       /* tx-confirm buffer */
3388 +       buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3389 +       err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3390 +                                    DPNI_QUEUE_TX_CONFIRM, &buf_layout);
3391 +       if (err) {
3392 +               dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
3393 +               return err;
3394 +       }
3395 +
3396 +       /* Now that we've set our tx buffer layout, retrieve the minimum
3397 +        * required tx data offset.
3398 +        */
3399 +       err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
3400 +                                     &priv->tx_data_offset);
3401 +       if (err) {
3402 +               dev_err(dev, "dpni_get_tx_data_offset() failed\n");
3403 +               return err;
3404 +       }
3405 +
3406 +       if ((priv->tx_data_offset % 64) != 0)
3407 +               dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
3408 +                        priv->tx_data_offset);
3409 +
3410 +       /* rx buffer */
3411 +       buf_layout.pass_frame_status = true;
3412 +       buf_layout.pass_parser_result = true;
3413 +       buf_layout.data_align = rx_buf_align;
3414 +       buf_layout.data_head_room = dpaa2_eth_rx_headroom(priv);
3415 +       buf_layout.private_data_size = 0;
3416 +       /* If XDP program is attached, reserve extra space for
3417 +        * potential header expansions
3418 +        */
3419 +       if (priv->has_xdp_prog)
3420 +               buf_layout.data_head_room += XDP_PACKET_HEADROOM;
3421 +       buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3422 +                            DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3423 +                            DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
3424 +                            DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
3425 +                            DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3426 +       err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3427 +                                    DPNI_QUEUE_RX, &buf_layout);
3428 +       if (err) {
3429 +               dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
3430 +               return err;
3431 +       }
3432 +
3433 +       return 0;
3434 +}
3435 +
3436 +#define DPNI_ENQUEUE_FQID_VER_MAJOR    7
3437 +#define DPNI_ENQUEUE_FQID_VER_MINOR    9
3438 +
3439 +static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
3440 +                                      struct dpaa2_eth_fq *fq,
3441 +                                      struct dpaa2_fd *fd, u8 prio)
3442 +{
3443 +       return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
3444 +                                          priv->tx_qdid, prio,
3445 +                                          fq->tx_qdbin, fd);
3446 +}
3447 +
3448 +static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
3449 +                                      struct dpaa2_eth_fq *fq,
3450 +                                      struct dpaa2_fd *fd,
3451 +                                      u8 prio __always_unused)
3452 +{
3453 +       return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
3454 +                                          fq->tx_fqid, fd);
3455 +}
3456 +
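+/* Direct FQID-based enqueue is only available on DPNI versions 7.9 and
+ * newer; on older firmware we must enqueue through the QDID
+ */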
3457 +static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
3458 +{
3459 +       if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3460 +                                  DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3461 +               priv->enqueue = dpaa2_eth_enqueue_qd;
3462 +       else
3463 +               priv->enqueue = dpaa2_eth_enqueue_fq;
3464 +}
3465 +
3466 +static void update_tx_fqids(struct dpaa2_eth_priv *priv)
3467 +{
3468 +       struct dpaa2_eth_fq *fq;
3469 +       struct dpni_queue queue;
3470 +       struct dpni_queue_id qid = {0};
3471 +       int i, err;
3472 +
3473 +       /* We only use Tx FQIDs for FQID-based enqueue, so check
3474 +        * if DPNI version supports it before updating FQIDs
3475 +        */
3476 +       if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3477 +                                  DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3478 +               return;
3479 +
3480 +       for (i = 0; i < priv->num_fqs; i++) {
3481 +               fq = &priv->fq[i];
3482 +               if (fq->type != DPAA2_TX_CONF_FQ)
3483 +                       continue;
3484 +               err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3485 +                                    DPNI_QUEUE_TX, 0, fq->flowid,
3486 +                                    &queue, &qid);
3487 +               if (err)
3488 +                       goto out_err;
3489 +
3490 +               fq->tx_fqid = qid.fqid;
3491 +               if (fq->tx_fqid == 0)
3492 +                       goto out_err;
3493 +       }
3494 +
3495 +       return;
3496 +
3497 +out_err:
3498 +       netdev_info(priv->net_dev,
3499 +                   "Error reading Tx FQID, falling back to QDID-based enqueue\n");
3500 +       priv->enqueue = dpaa2_eth_enqueue_qd;
3501 +}
3502 +
3503 +static int dpaa2_eth_set_xdp(struct net_device *net_dev, struct bpf_prog *prog)
3504 +{
3505 +       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3506 +       struct dpaa2_eth_channel *ch;
3507 +       struct bpf_prog *old_prog = NULL;
3508 +       int i, err;
3509 +
3510 +       /* No support for SG frames */
3511 +       if (DPAA2_ETH_L2_MAX_FRM(net_dev->mtu) > DPAA2_ETH_RX_BUF_SIZE)
3512 +               return -EINVAL;
3513 +
3514 +       if (netif_running(net_dev)) {
3515 +               err = dpaa2_eth_stop(net_dev);
3516 +               if (err)
3517 +                       return err;
3518 +       }
3519 +
3520 +       if (prog) {
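+               /* Take one extra reference per additional channel that
+                * will hold the program
+                */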
3521 +               prog = bpf_prog_add(prog, priv->num_channels - 1);
3522 +               if (IS_ERR(prog))
3523 +                       return PTR_ERR(prog);
3524 +       }
3525 +
3526 +       priv->has_xdp_prog = !!prog;
3527 +
3528 +       for (i = 0; i < priv->num_channels; i++) {
3529 +               ch = priv->channel[i];
3530 +               old_prog = xchg(&ch->xdp_prog, prog);
3531 +               if (old_prog)
3532 +                       bpf_prog_put(old_prog);
3533 +       }
3534 +
3535 +       /* When turning XDP on/off we need to do some reconfiguring
3536 +        * of the Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
3537 +        * so we are sure no old format buffers will be used from now on
3538 +        */
3539 +       if (priv->has_xdp_prog != !!old_prog)
3540 +               set_buffer_layout(priv);
3541 +
3542 +       if (netif_running(net_dev)) {
3543 +               err = dpaa2_eth_open(net_dev);
3544 +               if (err)
3545 +                       return err;
3546 +       }
3547 +
3548 +       return 0;
3549 +}
3550 +
3551 +static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_xdp *xdp)
3552 +{
3553 +       struct dpaa2_eth_priv *priv = netdev_priv(dev);
3554 +
3555 +       switch (xdp->command) {
3556 +       case XDP_SETUP_PROG:
3557 +               return dpaa2_eth_set_xdp(dev, xdp->prog);
3558 +       case XDP_QUERY_PROG:
3559 +               xdp->prog_attached = priv->has_xdp_prog;
3560 +               return 0;
3561 +       default:
3562 +               return -EINVAL;
3563 +       }
3564 +}
3565 +
3566 +static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, struct xdp_buff *xdp)
3567 +{
3568 +       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3569 +       struct device *dev = net_dev->dev.parent;
3570 +       struct rtnl_link_stats64 *percpu_stats;
3571 +       struct dpaa2_eth_drv_stats *percpu_extras;
3572 +       unsigned int needed_headroom;
3573 +       struct dpaa2_eth_swa *swa;
3574 +       struct dpaa2_eth_fq *fq;
3575 +       struct dpaa2_fd fd;
3576 +       void *buffer_start, *aligned_start;
3577 +       dma_addr_t addr;
3578 +       int err, i;
3579 +
3580 +       if (!netif_running(net_dev))
3581 +               return -ENETDOWN;
3582 +
3583 +       percpu_stats = this_cpu_ptr(priv->percpu_stats);
3584 +       percpu_extras = this_cpu_ptr(priv->percpu_extras);
3585 +
3586 +       /* We require a minimum headroom to be able to transmit the frame.
3587 +        * Otherwise return an error and let the original net_device handle it
3588 +        */
3589 +       /* TODO: Do we update i/f counters here or just on the Rx device? */
3590 +       needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
3591 +       if (xdp->data < xdp->data_hard_start ||
3592 +           xdp->data - xdp->data_hard_start < needed_headroom) {
3593 +               percpu_stats->tx_dropped++;
3594 +               return -EINVAL;
3595 +       }
3596 +
3597 +       /* Setup the FD fields */
3598 +       memset(&fd, 0, sizeof(fd));
3599 +
3600 +       /* Align FD address, if possible */
3601 +       buffer_start = xdp->data - needed_headroom;
3602 +       aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
3603 +                                 DPAA2_ETH_TX_BUF_ALIGN);
3604 +       if (aligned_start >= xdp->data_hard_start)
3605 +               buffer_start = aligned_start;
3606 +
3607 +       swa = (struct dpaa2_eth_swa *)buffer_start;
3608 +       /* fill in necessary fields here */
3609 +       swa->type = DPAA2_ETH_SWA_XDP;
3610 +       swa->xdp.dma_size = xdp->data_end - buffer_start;
3611 +
3612 +       addr = dma_map_single(dev, buffer_start,
3613 +                             xdp->data_end - buffer_start,
3614 +                             DMA_BIDIRECTIONAL);
3615 +       if (unlikely(dma_mapping_error(dev, addr))) {
3616 +               percpu_stats->tx_dropped++;
3617 +               return -ENOMEM;
3618 +       }
3619 +
3620 +       dpaa2_fd_set_addr(&fd, addr);
3621 +       dpaa2_fd_set_offset(&fd, xdp->data - buffer_start);
3622 +       dpaa2_fd_set_len(&fd, xdp->data_end - xdp->data);
3623 +       dpaa2_fd_set_format(&fd, dpaa2_fd_single);
3624 +       dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
3625 +
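+       /* Pick the Tx flow based on the current CPU id */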
3626 +       fq = &priv->fq[smp_processor_id()];
3627 +       for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
3628 +               err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
3629 +                                                 fq->tx_qdbin, &fd);
3630 +               if (err != -EBUSY)
3631 +                       break;
3632 +       }
3633 +       percpu_extras->tx_portal_busy += i;
3634 +       if (unlikely(err < 0)) {
3635 +               percpu_stats->tx_errors++;
3636 +               /* let the Rx device handle the cleanup */
3637 +               return err;
3638 +       }
3639 +
3640 +       percpu_stats->tx_packets++;
3641 +       percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
3642 +
3643 +       return 0;
3644 +}
3645 +
3646 +static void dpaa2_eth_xdp_flush(struct net_device *net_dev)
3647 +{
3648 +       /* We don't have hardware support for Tx batching,
3649 +        * so we do the actual frame enqueue in ndo_xdp_xmit
3650 +        */
3651 +}
+
3652 +static int dpaa2_eth_update_xps(struct dpaa2_eth_priv *priv)
3653 +{
3654 +       struct net_device *net_dev = priv->net_dev;
3655 +       unsigned int i, num_queues;
3656 +       struct cpumask xps_mask;
3657 +       struct dpaa2_eth_fq *fq;
3658 +       int err = 0;
3659 +
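+       /* Steer each Tx queue to the CPU its frames are processed on;
+        * with mqprio, the queues of all traffic classes map onto the
+        * same set of flows
+        */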
3660 +       num_queues = (net_dev->num_tc ? : 1) * dpaa2_eth_queue_count(priv);
3661 +       for (i = 0; i < num_queues; i++) {
3662 +               fq = &priv->fq[i % dpaa2_eth_queue_count(priv)];
3663 +               cpumask_clear(&xps_mask);
3664 +               cpumask_set_cpu(fq->target_cpu, &xps_mask);
3665 +               err = netif_set_xps_queue(net_dev, &xps_mask, i);
3666 +               if (err) {
3667 +                       dev_info_once(net_dev->dev.parent,
3668 +                                     "Error setting XPS queue\n");
3669 +                       break;
3670 +               }
3671 +       }
3672 +
3673 +       return err;
3674 +}
3675 +
3676 +static int dpaa2_eth_setup_tc(struct net_device *net_dev,
3677 +                             enum tc_setup_type type,
3678 +                             void *type_data)
3679 +{
3680 +       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3681 +       struct tc_mqprio_qopt *mqprio = (struct tc_mqprio_qopt *)type_data;
3682 +       int i, err = 0;
3683 +
3684 +       if (type != TC_SETUP_MQPRIO)
3685 +               return -EINVAL;
3686 +
3687 +       if (mqprio->num_tc > dpaa2_eth_tc_count(priv)) {
3688 +               netdev_err(net_dev, "Max %d traffic classes supported\n",
3689 +                          dpaa2_eth_tc_count(priv));
3690 +               return -EINVAL;
3691 +       }
3692 +
3693 +       if (mqprio->num_tc == net_dev->num_tc)
3694 +               return 0;
3695 +
3696 +       mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
3697 +
3698 +       if (!mqprio->num_tc) {
3699 +               netdev_reset_tc(net_dev);
3700 +               err = netif_set_real_num_tx_queues(net_dev,
3701 +                                                  dpaa2_eth_queue_count(priv));
3702 +               if (err)
3703 +                       return err;
3704 +
3705 +               goto update_xps;
3706 +       }
3707 +
3708 +       err = netdev_set_num_tc(net_dev, mqprio->num_tc);
3709 +       if (err)
3710 +               return err;
3711 +
3712 +       err = netif_set_real_num_tx_queues(net_dev, mqprio->num_tc *
3713 +                                          dpaa2_eth_queue_count(priv));
3714 +       if (err)
3715 +               return err;
3716 +
3717 +       for (i = 0; i < mqprio->num_tc; i++) {
3718 +               err = netdev_set_tc_queue(net_dev, i,
3719 +                                         dpaa2_eth_queue_count(priv),
3720 +                                         i * dpaa2_eth_queue_count(priv));
3721 +               if (err)
3722 +                       return err;
3723 +       }
3724 +
3725 +update_xps:
3726 +       err = dpaa2_eth_update_xps(priv);
3727 +       return err;
3728 +}
3729 +
3730  static const struct net_device_ops dpaa2_eth_ops = {
3731         .ndo_open = dpaa2_eth_open,
3732         .ndo_start_xmit = dpaa2_eth_tx,
3733         .ndo_stop = dpaa2_eth_stop,
3734 -       .ndo_init = dpaa2_eth_init,
3735         .ndo_set_mac_address = dpaa2_eth_set_addr,
3736         .ndo_get_stats64 = dpaa2_eth_get_stats,
3737 -       .ndo_change_mtu = dpaa2_eth_change_mtu,
3738         .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
3739         .ndo_set_features = dpaa2_eth_set_features,
3740 +       .ndo_do_ioctl = dpaa2_eth_ioctl,
3741 +       .ndo_xdp = dpaa2_eth_xdp,
3742 +       .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
3743 +       .ndo_xdp_flush = dpaa2_eth_xdp_flush,
3744 +       .ndo_setup_tc = dpaa2_eth_setup_tc,
3745  };
3746  
3747  static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
3748 @@ -1422,34 +2062,32 @@ static struct fsl_mc_device *setup_dpcon
3749         err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
3750         if (err) {
3751                 dev_err(dev, "dpcon_open() failed\n");
3752 -               goto err_open;
3753 +               goto free;
3754         }
3755  
3756         err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
3757         if (err) {
3758                 dev_err(dev, "dpcon_reset() failed\n");
3759 -               goto err_reset;
3760 +               goto close;
3761         }
3762  
3763         err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
3764         if (err) {
3765                 dev_err(dev, "dpcon_get_attributes() failed\n");
3766 -               goto err_get_attr;
3767 +               goto close;
3768         }
3769  
3770         err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
3771         if (err) {
3772                 dev_err(dev, "dpcon_enable() failed\n");
3773 -               goto err_enable;
3774 +               goto close;
3775         }
3776  
3777         return dpcon;
3778  
3779 -err_enable:
3780 -err_get_attr:
3781 -err_reset:
3782 +close:
3783         dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
3784 -err_open:
3785 +free:
3786         fsl_mc_object_free(dpcon);
3787  
3788         return NULL;
3789 @@ -1502,7 +2140,14 @@ err_setup:
3790  static void free_channel(struct dpaa2_eth_priv *priv,
3791                          struct dpaa2_eth_channel *channel)
3792  {
3793 +       struct bpf_prog *prog;
3794 +
3795         free_dpcon(priv, channel->dpcon);
3796 +
3797 +       prog = READ_ONCE(channel->xdp_prog);
3798 +       if (prog)
3799 +               bpf_prog_put(prog);
3800 +
3801         kfree(channel);
3802  }
3803  
3804 @@ -1546,7 +2191,8 @@ static int setup_dpio(struct dpaa2_eth_p
3805                 nctx->desired_cpu = i;
3806  
3807                 /* Register the new context */
3808 -               err = dpaa2_io_service_register(NULL, nctx);
3809 +               channel->dpio = dpaa2_io_service_select(i);
3810 +               err = dpaa2_io_service_register(channel->dpio, nctx, dev);
3811                 if (err) {
3812                         dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
3813                         /* If no affine DPIO for this core, there's probably
3814 @@ -1579,14 +2225,14 @@ static int setup_dpio(struct dpaa2_eth_p
3815                 /* Stop if we already have enough channels to accommodate all
3816                  * RX and TX conf queues
3817                  */
3818 -               if (priv->num_channels == dpaa2_eth_queue_count(priv))
3819 +               if (priv->num_channels == priv->dpni_attrs.num_queues)
3820                         break;
3821         }
3822  
3823         return 0;
3824  
3825  err_set_cdan:
3826 -       dpaa2_io_service_deregister(NULL, nctx);
3827 +       dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3828  err_service_reg:
3829         free_channel(priv, channel);
3830  err_alloc_ch:
3831 @@ -1603,13 +2249,14 @@ err_alloc_ch:
3832  
3833  static void free_dpio(struct dpaa2_eth_priv *priv)
3834  {
3835 -       int i;
3836 +       struct device *dev = priv->net_dev->dev.parent;
3837         struct dpaa2_eth_channel *ch;
3838 +       int i;
3839  
3840         /* deregister CDAN notifications and free channels */
3841         for (i = 0; i < priv->num_channels; i++) {
3842                 ch = priv->channel[i];
3843 -               dpaa2_io_service_deregister(NULL, &ch->nctx);
3844 +               dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
3845                 free_channel(priv, ch);
3846         }
3847  }
3848 @@ -1636,8 +2283,7 @@ static void set_fq_affinity(struct dpaa2
3849  {
3850         struct device *dev = priv->net_dev->dev.parent;
3851         struct dpaa2_eth_fq *fq;
3852 -       int rx_cpu, txc_cpu;
3853 -       int i;
3854 +       int rx_cpu, txc_cpu, i;
3855  
3856         /* For each FQ, pick one channel/CPU to deliver frames to.
3857          * This may well change at runtime, either through irqbalance or
3858 @@ -1649,6 +2295,7 @@ static void set_fq_affinity(struct dpaa2
3859                 fq = &priv->fq[i];
3860                 switch (fq->type) {
3861                 case DPAA2_RX_FQ:
3862 +               case DPAA2_RX_ERR_FQ:
3863                         fq->target_cpu = rx_cpu;
3864                         rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
3865                         if (rx_cpu >= nr_cpu_ids)
3866 @@ -1665,11 +2312,13 @@ static void set_fq_affinity(struct dpaa2
3867                 }
3868                 fq->channel = get_affine_channel(priv, fq->target_cpu);
3869         }
3870 +
3871 +       dpaa2_eth_update_xps(priv);
3872  }
3873  
3874  static void setup_fqs(struct dpaa2_eth_priv *priv)
3875  {
3876 -       int i;
3877 +       int i, j;
3878  
3879         /* We have one TxConf FQ per Tx flow.
3880          * The number of Tx and Rx queues is the same.
3881 @@ -1681,11 +2330,19 @@ static void setup_fqs(struct dpaa2_eth_p
3882                 priv->fq[priv->num_fqs++].flowid = (u16)i;
3883         }
3884  
3885 -       for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3886 -               priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
3887 -               priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
3888 -               priv->fq[priv->num_fqs++].flowid = (u16)i;
3889 -       }
3890 +       for (i = 0; i < dpaa2_eth_tc_count(priv); i++)
3891 +               for (j = 0; j < dpaa2_eth_queue_count(priv); j++) {
3892 +                       priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
3893 +                       priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
3894 +                       priv->fq[priv->num_fqs].tc = (u8)i;
3895 +                       priv->fq[priv->num_fqs++].flowid = (u16)j;
3896 +               }
3897 +
3898 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3899 +       /* We have exactly one Rx error queue per DPNI */
3900 +       priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
3901 +       priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
3902 +#endif
3903  
3904         /* For each FQ, decide on which core to process incoming frames */
3905         set_fq_affinity(priv);
3906 @@ -1735,6 +2392,9 @@ static int setup_dpbp(struct dpaa2_eth_p
3907         }
3908         priv->bpid = dpbp_attrs.bpid;
3909  
3910 +       /* By default we start with flow control enabled */
3911 +       priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
3912 +
3913         return 0;
3914  
3915  err_get_attr:
3916 @@ -1762,7 +2422,7 @@ static int setup_dpni(struct fsl_mc_devi
3917         struct device *dev = &ls_dev->dev;
3918         struct dpaa2_eth_priv *priv;
3919         struct net_device *net_dev;
3920 -       struct dpni_buffer_layout buf_layout = {0};
3921 +       struct dpni_link_cfg cfg = {0};
3922         int err;
3923  
3924         net_dev = dev_get_drvdata(dev);
3925 @@ -1772,7 +2432,22 @@ static int setup_dpni(struct fsl_mc_devi
3926         err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
3927         if (err) {
3928                 dev_err(dev, "dpni_open() failed\n");
3929 -               goto err_open;
3930 +               return err;
3931 +       }
3932 +
3933 +       /* Check if we can work with this DPNI object */
3934 +       err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
3935 +                                  &priv->dpni_ver_minor);
3936 +       if (err) {
3937 +               dev_err(dev, "dpni_get_api_version() failed\n");
3938 +               goto close;
3939 +       }
3940 +       if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
3941 +               dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
3942 +                       priv->dpni_ver_major, priv->dpni_ver_minor,
3943 +                       DPNI_VER_MAJOR, DPNI_VER_MINOR);
3944 +               err = -ENOTSUPP;
3945 +               goto close;
3946         }
3947  
3948         ls_dev->mc_io = priv->mc_io;
3949 @@ -1781,77 +2456,41 @@ static int setup_dpni(struct fsl_mc_devi
3950         err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3951         if (err) {
3952                 dev_err(dev, "dpni_reset() failed\n");
3953 -               goto err_reset;
3954 +               goto close;
3955         }
3956  
3957         err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3958                                   &priv->dpni_attrs);
3959         if (err) {
3960                 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3961 -               goto err_get_attr;
3962 +               goto close;
3963         }
3964  
3965 -       /* Configure buffer layouts */
3966 -       /* rx buffer */
3967 -       buf_layout.pass_parser_result = true;
3968 -       buf_layout.pass_frame_status = true;
3969 -       buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3970 -       buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
3971 -       buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3972 -                            DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3973 -                            DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3974 -                            DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
3975 -       err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3976 -                                    DPNI_QUEUE_RX, &buf_layout);
3977 -       if (err) {
3978 -               dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
3979 -               goto err_buf_layout;
3980 -       }
3981 +       err = set_buffer_layout(priv);
3982 +       if (err)
3983 +               goto close;
3984  
3985 -       /* tx buffer */
3986 -       buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3987 -                            DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
3988 -       err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3989 -                                    DPNI_QUEUE_TX, &buf_layout);
3990 -       if (err) {
3991 -               dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
3992 -               goto err_buf_layout;
3993 -       }
3994 +       set_enqueue_mode(priv);
3995  
3996 -       /* tx-confirm buffer */
3997 -       buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3998 -       err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3999 -                                    DPNI_QUEUE_TX_CONFIRM, &buf_layout);
4000 -       if (err) {
4001 -               dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
4002 -               goto err_buf_layout;
4003 -       }
4004 +       priv->cls_rule = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
4005 +                                     dpaa2_eth_fs_count(priv), GFP_KERNEL);
4006 +       if (!priv->cls_rule)
4007 +               goto close;
4008  
4009 -       /* Now that we've set our tx buffer layout, retrieve the minimum
4010 -        * required tx data offset.
4011 -        */
4012 -       err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
4013 -                                     &priv->tx_data_offset);
4014 +       /* Enable flow control */
4015 +       cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
4016 +       priv->tx_pause_frames = true;
4017 +       err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
4018         if (err) {
4019 -               dev_err(dev, "dpni_get_tx_data_offset() failed\n");
4020 -               goto err_data_offset;
4021 +               dev_err(dev, "dpni_set_link_cfg() failed\n");
4022 +               goto close;
4023         }
4024  
4025 -       if ((priv->tx_data_offset % 64) != 0)
4026 -               dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
4027 -                        priv->tx_data_offset);
4028 -
4029 -       /* Accommodate software annotation space (SWA) */
4030 -       priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
4031 -
4032         return 0;
4033  
4034 -err_data_offset:
4035 -err_buf_layout:
4036 -err_get_attr:
4037 -err_reset:
4038 +close:
4039         dpni_close(priv->mc_io, 0, priv->mc_token);
4040 -err_open:
4041 +
4042         return err;
4043  }
4044  
4053 @@ -1873,11 +2513,10 @@ static int setup_rx_flow(struct dpaa2_et
4054         struct device *dev = priv->net_dev->dev.parent;
4055         struct dpni_queue queue;
4056         struct dpni_queue_id qid;
4057 -       struct dpni_taildrop td;
4058         int err;
4059  
4060         err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
4061 -                            DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
4062 +                            DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
4063         if (err) {
4064                 dev_err(dev, "dpni_get_queue(RX) failed\n");
4065                 return err;
4066 @@ -1889,24 +2528,136 @@ static int setup_rx_flow(struct dpaa2_et
4067         queue.destination.type = DPNI_DEST_DPCON;
4068         queue.destination.priority = 1;
4069         queue.user_context = (u64)(uintptr_t)fq;
4070 +       queue.flc.stash_control = 1;
4071 +       queue.flc.value &= 0xFFFFFFFFFFFFFFC0;
4072 +       /* Stash control 01 01 00: 64B of frame data, 64B of annotation, no flow context */
4073 +       queue.flc.value |= 0x14;
4074 +
4075         err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
4076 -                            DPNI_QUEUE_RX, 0, fq->flowid,
4077 -                            DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
4078 +                            DPNI_QUEUE_RX, fq->tc, fq->flowid,
4079 +                            DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST |
4080 +                            DPNI_QUEUE_OPT_FLC,
4081                              &queue);
4082         if (err) {
4083                 dev_err(dev, "dpni_set_queue(RX) failed\n");
4084                 return err;
4085         }
4086  
4087 -       td.enable = 1;
4088 -       td.threshold = DPAA2_ETH_TAILDROP_THRESH;
4089 -       err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
4090 -                               DPNI_QUEUE_RX, 0, fq->flowid, &td);
4091 -       if (err) {
4092 -               dev_err(dev, "dpni_set_threshold() failed\n");
4093 -               return err;
4094 +       return 0;
4095 +}
4096 +
4097 +static int set_queue_taildrop(struct dpaa2_eth_priv *priv,
4098 +                             struct dpni_taildrop *td)
4099 +{
4100 +       struct device *dev = priv->net_dev->dev.parent;
4101 +       int i, err;
4102 +
4103 +       for (i = 0; i < priv->num_fqs; i++) {
4104 +               if (priv->fq[i].type != DPAA2_RX_FQ)
4105 +                       continue;
4106 +
4107 +               err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
4108 +                                       DPNI_CP_QUEUE, DPNI_QUEUE_RX,
4109 +                                       priv->fq[i].tc, priv->fq[i].flowid,
4110 +                                       td);
4111 +               if (err) {
4112 +                       dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
4113 +                       return err;
4114 +               }
4115 +
4116 +               dev_dbg(dev, "%s taildrop for Rx queue id %d tc %d\n",
4117 +                       (td->enable ? "Enabled" : "Disabled"),
4118 +                       priv->fq[i].flowid, priv->fq[i].tc);
4119 +       }
4120 +
4121 +       return 0;
4122 +}
4123 +
4124 +static int set_group_taildrop(struct dpaa2_eth_priv *priv,
4125 +                             struct dpni_taildrop *td)
4126 +{
4127 +       struct device *dev = priv->net_dev->dev.parent;
4128 +       struct dpni_taildrop disable_td, *tc_td;
4129 +       int i, err;
4130 +
4131 +       memset(&disable_td, 0, sizeof(struct dpni_taildrop));
4132 +       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4133 +               if (td->enable && dpaa2_eth_is_pfc_enabled(priv, i))
4134 +                       /* Do not set taildrop thresholds for PFC-enabled
4135 +                        * traffic classes. We will enable congestion
4136 +                        * notifications for them.
4137 +                        */
4138 +                       tc_td = &disable_td;
4139 +               else
4140 +                       tc_td = td;
4141 +
4142 +               err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
4143 +                                       DPNI_CP_GROUP, DPNI_QUEUE_RX,
4144 +                                       i, 0, tc_td);
4145 +               if (err) {
4146 +                       dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
4147 +                       return err;
4148 +               }
4149 +
4150 +               dev_dbg(dev, "%s taildrop for Rx group tc %d\n",
4151 +                       (tc_td->enable ? "Enabled" : "Disabled"),
4152 +                       i);
4153 +       }
4154 +
4155 +       return 0;
4156 +}
4157 +
4158 +/* Enable/disable Rx FQ taildrop
4159 + *
4160 + * Rx FQ taildrop is mutually exclusive with flow control, so it is
4161 + * disabled only while FC is active. Since the FC status also changes
4162 + * how the maximum number of buffers in the pool is computed, use this
4163 + * opportunity to update that value as well.
4164 + */
4165 +int set_rx_taildrop(struct dpaa2_eth_priv *priv)
4166 +{
4167 +       enum dpaa2_eth_td_cfg cfg = dpaa2_eth_get_td_type(priv);
4168 +       struct dpni_taildrop td_queue, td_group;
4169 +       int err = 0;
4170 +
4171 +       switch (cfg) {
4172 +       case DPAA2_ETH_TD_NONE:
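+               /* Flow control is enabled: leave taildrop disabled and cap
+                * the number of buffers in the pool instead
+                */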
4173 +               memset(&td_queue, 0, sizeof(struct dpni_taildrop));
4174 +               memset(&td_group, 0, sizeof(struct dpni_taildrop));
4175 +               priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC /
4176 +                                       priv->num_channels;
4177 +               break;
4178 +       case DPAA2_ETH_TD_QUEUE:
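+               /* Byte-based taildrop on each Rx queue, with the overall
+                * threshold split evenly between traffic classes
+                */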
4179 +               memset(&td_group, 0, sizeof(struct dpni_taildrop));
4180 +               td_queue.enable = 1;
4181 +               td_queue.units = DPNI_CONGESTION_UNIT_BYTES;
4182 +               td_queue.threshold = DPAA2_ETH_TAILDROP_THRESH /
4183 +                                    dpaa2_eth_tc_count(priv);
4184 +               priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_PER_CH;
4185 +               break;
4186 +       case DPAA2_ETH_TD_GROUP:
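+               /* Frame-based taildrop per TC group, sized to one NAPI
+                * budget's worth of frames for each Rx queue
+                */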
4187 +               memset(&td_queue, 0, sizeof(struct dpni_taildrop));
4188 +               td_group.enable = 1;
4189 +               td_group.units = DPNI_CONGESTION_UNIT_FRAMES;
4190 +               td_group.threshold = NAPI_POLL_WEIGHT *
4191 +                                    dpaa2_eth_queue_count(priv);
4192 +               priv->max_bufs_per_ch = NAPI_POLL_WEIGHT *
4193 +                                       dpaa2_eth_tc_count(priv);
4194 +               break;
4195 +       default:
4196 +               break;
4197         }
4198  
4199 +       err = set_queue_taildrop(priv, &td_queue);
4200 +       if (err)
4201 +               return err;
4202 +
4203 +       err = set_group_taildrop(priv, &td_group);
4204 +       if (err)
4205 +               return err;
4206 +
4207 +       priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
4208 +
4209         return 0;
4210  }
4211  
4212 @@ -1926,6 +2677,7 @@ static int setup_tx_flow(struct dpaa2_et
4213         }
4214  
4215         fq->tx_qdbin = qid.qdbin;
4216 +       fq->tx_fqid = qid.fqid;
4217  
4218         err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
4219                              DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
4220 @@ -1953,23 +2705,88 @@ static int setup_tx_flow(struct dpaa2_et
4221         return 0;
4222  }
4223  
4224 -/* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */
4225 -static const struct dpaa2_eth_hash_fields hash_fields[] = {
4226 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
4227 +static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
4228 +                            struct dpaa2_eth_fq *fq)
4229 +{
4230 +       struct device *dev = priv->net_dev->dev.parent;
4231 +       struct dpni_queue q = { { 0 } };
4232 +       struct dpni_queue_id qid;
4233 +       u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
4234 +       int err;
4235 +
4236 +       err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
4237 +                            DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
4238 +       if (err) {
4239 +               dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
4240 +               return err;
4241 +       }
4242 +
4243 +       fq->fqid = qid.fqid;
4244 +
4245 +       q.destination.id = fq->channel->dpcon_id;
4246 +       q.destination.type = DPNI_DEST_DPCON;
4247 +       q.destination.priority = 1;
4248 +       q.user_context = (u64)(uintptr_t)fq;
4249 +       err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
4250 +                            DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
4251 +       if (err) {
4252 +               dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
4253 +               return err;
4254 +       }
4255 +
4256 +       return 0;
4257 +}
4258 +#endif
4259 +
4260 +/* Supported header fields for Rx hash distribution key */
4261 +static const struct dpaa2_eth_dist_fields dist_fields[] = {
4262         {
4263 +               /* L2 header */
4264 +               .rxnfc_field = RXH_L2DA,
4265 +               .cls_prot = NET_PROT_ETH,
4266 +               .cls_field = NH_FLD_ETH_DA,
4267 +               .id = DPAA2_ETH_DIST_ETHDST,
4268 +               .size = 6,
4269 +       }, {
4270 +               .cls_prot = NET_PROT_ETH,
4271 +               .cls_field = NH_FLD_ETH_SA,
4272 +               .id = DPAA2_ETH_DIST_ETHSRC,
4273 +               .size = 6,
4274 +       }, {
4275 +               /* This is the last ethertype field parsed:
4276 +                * depending on frame format, it can be the MAC ethertype
4277 +                * or the VLAN etype.
4278 +                */
4279 +               .cls_prot = NET_PROT_ETH,
4280 +               .cls_field = NH_FLD_ETH_TYPE,
4281 +               .id = DPAA2_ETH_DIST_ETHTYPE,
4282 +               .size = 2,
4283 +       }, {
4284 +               /* VLAN header */
4285 +               .rxnfc_field = RXH_VLAN,
4286 +               .cls_prot = NET_PROT_VLAN,
4287 +               .cls_field = NH_FLD_VLAN_TCI,
4288 +               .id = DPAA2_ETH_DIST_VLAN,
4289 +               .size = 2,
4290 +       }, {
4291                 /* IP header */
4292                 .rxnfc_field = RXH_IP_SRC,
4293                 .cls_prot = NET_PROT_IP,
4294                 .cls_field = NH_FLD_IP_SRC,
4295 +               .id = DPAA2_ETH_DIST_IPSRC,
4296                 .size = 4,
4297         }, {
4298                 .rxnfc_field = RXH_IP_DST,
4299                 .cls_prot = NET_PROT_IP,
4300                 .cls_field = NH_FLD_IP_DST,
4301 +               .id = DPAA2_ETH_DIST_IPDST,
4302                 .size = 4,
4303         }, {
4304                 .rxnfc_field = RXH_L3_PROTO,
4305                 .cls_prot = NET_PROT_IP,
4306                 .cls_field = NH_FLD_IP_PROTO,
4307 +               .id = DPAA2_ETH_DIST_IPPROTO,
4308                 .size = 1,
4309         }, {
4310                 /* Using UDP ports, this is functionally equivalent to raw
4311 @@ -1978,41 +2795,170 @@ static const struct dpaa2_eth_hash_field
4312                 .rxnfc_field = RXH_L4_B_0_1,
4313                 .cls_prot = NET_PROT_UDP,
4314                 .cls_field = NH_FLD_UDP_PORT_SRC,
4315 +               .id = DPAA2_ETH_DIST_L4SRC,
4316                 .size = 2,
4317         }, {
4318                 .rxnfc_field = RXH_L4_B_2_3,
4319                 .cls_prot = NET_PROT_UDP,
4320                 .cls_field = NH_FLD_UDP_PORT_DST,
4321 +               .id = DPAA2_ETH_DIST_L4DST,
4322                 .size = 2,
4323         },
4324  };
4325  
4326 -/* Set RX hash options
4327 +/* Configure the Rx hash key using the legacy API */
4328 +static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4329 +{
4330 +       struct device *dev = priv->net_dev->dev.parent;
4331 +       struct dpni_rx_tc_dist_cfg dist_cfg;
4332 +       int i, err = 0;
4333 +
4334 +       memset(&dist_cfg, 0, sizeof(dist_cfg));
4335 +
4336 +       dist_cfg.key_cfg_iova = key;
4337 +       dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4338 +       dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
4339 +
4340 +       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4341 +               err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
4342 +                                         i, &dist_cfg);
4343 +               if (err) {
4344 +                       dev_err(dev, "dpni_set_rx_tc_dist failed\n");
4345 +                       break;
4346 +               }
4347 +       }
4348 +
4349 +       return err;
4350 +}
4351 +
4352 +/* Configure the Rx hash key using the new API */
4353 +static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4354 +{
4355 +       struct device *dev = priv->net_dev->dev.parent;
4356 +       struct dpni_rx_dist_cfg dist_cfg;
4357 +       int i, err = 0;
4358 +
4359 +       memset(&dist_cfg, 0, sizeof(dist_cfg));
4360 +
4361 +       dist_cfg.key_cfg_iova = key;
4362 +       dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4363 +       dist_cfg.enable = 1;
4364 +
4365 +       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4366 +               dist_cfg.tc = i;
4367 +               err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
4368 +                                           &dist_cfg);
4369 +               if (err) {
4370 +                       dev_err(dev, "dpni_set_rx_hash_dist failed\n");
4371 +                       break;
4372 +               }
4373 +       }
4374 +
4375 +       return err;
4376 +}
4377 +
4378 +/* Configure the Rx flow classification key */
4379 +static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4380 +{
4381 +       struct device *dev = priv->net_dev->dev.parent;
4382 +       struct dpni_rx_dist_cfg dist_cfg;
4383 +       int i, err = 0;
4384 +
4385 +       memset(&dist_cfg, 0, sizeof(dist_cfg));
4386 +
4387 +       dist_cfg.key_cfg_iova = key;
4388 +       dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4389 +       dist_cfg.enable = 1;
4390 +
4391 +       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4392 +               dist_cfg.tc = i;
4393 +               err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
4394 +                                         &dist_cfg);
4395 +               if (err) {
4396 +                       dev_err(dev, "dpni_set_rx_fs_dist failed\n");
4397 +                       break;
4398 +               }
4399 +       }
4400 +
4401 +       return err;
4402 +}
4403 +
4404 +/* Size of the Rx flow classification key */
4405 +int dpaa2_eth_cls_key_size(u64 fields)
4406 +{
4407 +       int i, size = 0;
4408 +
4409 +       for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4410 +               if (!(fields & dist_fields[i].id))
4411 +                       continue;
4412 +               size += dist_fields[i].size;
4413 +       }
4414 +
4415 +       return size;
4416 +}
4417 +
4418 +/* Offset of header field in Rx classification key */
4419 +int dpaa2_eth_cls_fld_off(int prot, int field)
4420 +{
4421 +       int i, off = 0;
4422 +
4423 +       for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4424 +               if (dist_fields[i].cls_prot == prot &&
4425 +                   dist_fields[i].cls_field == field)
4426 +                       return off;
4427 +               off += dist_fields[i].size;
4428 +       }
4429 +
4430 +       WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
4431 +       return 0;
4432 +}
4433 +
4434 +/* Prune unused fields from the classification rule.
4435 + * Used when masking is not supported
4436 + */
4437 +void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
4438 +{
4439 +       int off = 0, new_off = 0;
4440 +       int i, size;
4441 +
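+       /* Compact the key in place, keeping only the selected fields */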
4442 +       for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4443 +               size = dist_fields[i].size;
4444 +               if (dist_fields[i].id & fields) {
4445 +                       memcpy(key_mem + new_off, key_mem + off, size);
4446 +                       new_off += size;
4447 +               }
4448 +               off += size;
4449 +       }
4450 +}
4451 +
4452 +/* Set Rx distribution (hash or flow classification) key
4453   * flags is a combination of RXH_ bits
4454   */
4455 -static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
4456 +static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
4457 +                                 enum dpaa2_eth_rx_dist type, u64 flags)
4458  {
4459         struct device *dev = net_dev->dev.parent;
4460         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4461         struct dpkg_profile_cfg cls_cfg;
4462 -       struct dpni_rx_tc_dist_cfg dist_cfg;
4463 +       u32 rx_hash_fields = 0;
4464 +       dma_addr_t key_iova;
4465         u8 *dma_mem;
4466         int i;
4467         int err = 0;
4468  
4469 -       if (!dpaa2_eth_hash_enabled(priv)) {
4470 -               dev_dbg(dev, "Hashing support is not enabled\n");
4471 -               return 0;
4472 -       }
4473 -
4474         memset(&cls_cfg, 0, sizeof(cls_cfg));
4475  
4476 -       for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
4477 +       for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4478                 struct dpkg_extract *key =
4479                         &cls_cfg.extracts[cls_cfg.num_extracts];
4480  
4481 -               if (!(flags & hash_fields[i].rxnfc_field))
4482 +               /* For both Rx hashing and classification keys
4483 +                * we set only the selected fields.
4484 +                */
4485 +               if (!(flags & dist_fields[i].id))
4486                         continue;
4487 +               if (type == DPAA2_ETH_RX_DIST_HASH)
4488 +                       rx_hash_fields |= dist_fields[i].rxnfc_field;
4489  
4490                 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
4491                         dev_err(dev, "error adding key extraction rule, too many rules?\n");
4492 @@ -2020,12 +2966,10 @@ static int dpaa2_eth_set_hash(struct net
4493                 }
4494  
4495                 key->type = DPKG_EXTRACT_FROM_HDR;
4496 -               key->extract.from_hdr.prot = hash_fields[i].cls_prot;
4497 +               key->extract.from_hdr.prot = dist_fields[i].cls_prot;
4498                 key->extract.from_hdr.type = DPKG_FULL_FIELD;
4499 -               key->extract.from_hdr.field = hash_fields[i].cls_field;
4500 +               key->extract.from_hdr.field = dist_fields[i].cls_field;
4501                 cls_cfg.num_extracts++;
4502 -
4503 -               priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
4504         }
4505  
4506         dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
4507 @@ -2035,36 +2979,96 @@ static int dpaa2_eth_set_hash(struct net
4508         err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
4509         if (err) {
4510                 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
4511 -               goto err_prep_key;
4512 +               goto free_key;
4513         }
4514  
4515 -       memset(&dist_cfg, 0, sizeof(dist_cfg));
4516 -
4517         /* Prepare for setting the rx dist */
4518 -       dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
4519 -                                              DPAA2_CLASSIFIER_DMA_SIZE,
4520 -                                              DMA_TO_DEVICE);
4521 -       if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
4522 +       key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
4523 +                                 DMA_TO_DEVICE);
4524 +       if (dma_mapping_error(dev, key_iova)) {
4525                 dev_err(dev, "DMA mapping failed\n");
4526                 err = -ENOMEM;
4527 -               goto err_dma_map;
4528 +               goto free_key;
4529         }
4530  
4531 -       dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4532 -       dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
4533 +       if (type == DPAA2_ETH_RX_DIST_HASH) {
4534 +               if (dpaa2_eth_has_legacy_dist(priv))
4535 +                       err = config_legacy_hash_key(priv, key_iova);
4536 +               else
4537 +                       err = config_hash_key(priv, key_iova);
4538 +       } else {
4539 +               err = config_cls_key(priv, key_iova);
4540 +       }
4541  
4542 -       err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
4543 -       dma_unmap_single(dev, dist_cfg.key_cfg_iova,
4544 -                        DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
4545 -       if (err)
4546 -               dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
4547 +       dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
4548 +                        DMA_TO_DEVICE);
4549 +       if (!err && type == DPAA2_ETH_RX_DIST_HASH)
4550 +               priv->rx_hash_fields = rx_hash_fields;
4551  
4552 -err_dma_map:
4553 -err_prep_key:
4554 +free_key:
4555         kfree(dma_mem);
4556         return err;
4557  }
4558  
4559 +int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
4560 +{
4561 +       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4562 +       u64 key = 0;
4563 +       int i;
4564 +
4565 +       if (!dpaa2_eth_hash_enabled(priv))
4566 +               return -EOPNOTSUPP;
4567 +
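+       /* Translate RXH_* flags into our internal DPAA2_ETH_DIST_* field ids */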
4568 +       for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
4569 +               if (dist_fields[i].rxnfc_field & flags)
4570 +                       key |= dist_fields[i].id;
4571 +
4572 +       return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
4573 +}
4574 +
4575 +int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
4576 +{
4577 +       return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
4578 +}
4579 +
4580 +static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
4581 +{
4582 +       struct device *dev = priv->net_dev->dev.parent;
4583 +       int err;
4584 +
4585 +       /* Check if we actually support Rx flow classification */
4586 +       if (dpaa2_eth_has_legacy_dist(priv)) {
4587 +               dev_dbg(dev, "Rx cls not supported by current MC version\n");
4588 +               return -EOPNOTSUPP;
4589 +       }
4590 +
4591 +       if (!dpaa2_eth_fs_enabled(priv)) {
4592 +               dev_dbg(dev, "Rx cls disabled in DPNI options\n");
4593 +               return -EOPNOTSUPP;
4594 +       }
4595 +
4596 +       if (!dpaa2_eth_hash_enabled(priv)) {
4597 +               dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
4598 +               return -EOPNOTSUPP;
4599 +       }
4600 +
4601 +       /* If there is no support for masking in the classification table,
4602 +        * we don't set a default key, as it will depend on the rules
4603 +        * added by the user at runtime.
4604 +        */
4605 +       if (!dpaa2_eth_fs_mask_enabled(priv))
4606 +               goto out;
4607 +
4608 +       err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
4609 +       if (err)
4610 +               return err;
4611 +
4612 +out:
4613 +       priv->rx_cls_enabled = 1;
4614 +
4615 +       return 0;
4616 +}
4617 +
4618  /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
4619   * frame queues and channels
4620   */
4621 @@ -2080,6 +3084,7 @@ static int bind_dpni(struct dpaa2_eth_pr
4622         pools_params.num_dpbp = 1;
4623         pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
4624         pools_params.pools[0].backup_pool = 0;
4625 +       pools_params.pools[0].priority_mask = 0xff;
4626         pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
4627         err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
4628         if (err) {
4629 @@ -2087,17 +3092,28 @@ static int bind_dpni(struct dpaa2_eth_pr
4630                 return err;
4631         }
4632  
4633 -       /* have the interface implicitly distribute traffic based on supported
4634 -        * header fields
4635 +       /* have the interface implicitly distribute traffic based on
4636 +        * the default hash key
4637          */
4638 -       err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
4639 -       if (err)
4640 -               netdev_err(net_dev, "Failed to configure hashing\n");
4641 +       err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
4642 +       if (err && err != -EOPNOTSUPP)
4643 +               dev_err(dev, "Failed to configure hashing\n");
4644 +
4645 +       /* Configure the flow classification key; it includes all
4646 +        * supported header fields and cannot be modified at runtime
4647 +        */
4648 +       err = dpaa2_eth_set_default_cls(priv);
4649 +       if (err && err != -EOPNOTSUPP)
4650 +               dev_err(dev, "Failed to configure Rx classification key\n");
4651  
4652         /* Configure handling of error frames */
4653         err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
4654         err_cfg.set_frame_annotation = 1;
4655 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
4656 +       err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
4657 +#else
4658         err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
4659 +#endif
4660         err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
4661                                        &err_cfg);
4662         if (err) {
4663 @@ -2114,6 +3130,11 @@ static int bind_dpni(struct dpaa2_eth_pr
4664                 case DPAA2_TX_CONF_FQ:
4665                         err = setup_tx_flow(priv, &priv->fq[i]);
4666                         break;
4667 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
4668 +               case DPAA2_RX_ERR_FQ:
4669 +                       err = setup_rx_err_flow(priv, &priv->fq[i]);
4670 +                       break;
4671 +#endif
4672                 default:
4673                         dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
4674                         return -EINVAL;
4675 @@ -2237,11 +3258,14 @@ static int netdev_init(struct net_device
4676  {
4677         struct device *dev = net_dev->dev.parent;
4678         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4679 +       u32 options = priv->dpni_attrs.options;
4680 +       u64 supported = 0, not_supported = 0;
4681         u8 bcast_addr[ETH_ALEN];
4682         u8 num_queues;
4683         int err;
4684  
4685         net_dev->netdev_ops = &dpaa2_eth_ops;
4686 +       net_dev->ethtool_ops = &dpaa2_ethtool_ops;
4687  
4688         err = set_mac_addr(priv);
4689         if (err)
4690 @@ -2255,14 +3279,14 @@ static int netdev_init(struct net_device
4691                 return err;
4692         }
4693  
4694 -       /* Reserve enough space to align buffer as per hardware requirement;
4695 -        * NOTE: priv->tx_data_offset MUST be initialized at this point.
4696 -        */
4697 -       net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
4698 -
4699 -       /* Set MTU limits */
4700 -       net_dev->min_mtu = 68;
4701 +       /* Set MTU upper limit; lower limit is 68B (default value) */
4702         net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
4703 +       err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
4704 +                                       DPAA2_ETH_MFL);
4705 +       if (err) {
4706 +               dev_err(dev, "dpni_set_max_frame_length() failed\n");
4707 +               return err;
4708 +       }
4709  
4710         /* Set actual number of queues in the net device */
4711         num_queues = dpaa2_eth_queue_count(priv);
4712 @@ -2277,12 +3301,23 @@ static int netdev_init(struct net_device
4713                 return err;
4714         }
4715  
4716 -       /* Our .ndo_init will be called herein */
4717 -       err = register_netdev(net_dev);
4718 -       if (err < 0) {
4719 -               dev_err(dev, "register_netdev() failed\n");
4720 -               return err;
4721 -       }
4722 +       /* Capabilities listing */
4723 +       supported |= IFF_LIVE_ADDR_CHANGE;
4724 +
4725 +       if (options & DPNI_OPT_NO_MAC_FILTER)
4726 +               not_supported |= IFF_UNICAST_FLT;
4727 +       else
4728 +               supported |= IFF_UNICAST_FLT;
4729 +
4730 +       net_dev->priv_flags |= supported;
4731 +       net_dev->priv_flags &= ~not_supported;
4732 +
4733 +       /* Features */
4734 +       net_dev->features = NETIF_F_RXCSUM |
4735 +                           NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4736 +                           NETIF_F_SG | NETIF_F_HIGHDMA |
4737 +                           NETIF_F_LLTX;
4738 +       net_dev->hw_features = net_dev->features;
4739  
4740         return 0;
4741  }
4742 @@ -2303,14 +3338,9 @@ static int poll_link_state(void *arg)
4743         return 0;
4744  }
4745  
4746 -static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
4747 -{
4748 -       return IRQ_WAKE_THREAD;
4749 -}
4750 -
4751  static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
4752  {
4753 -       u32 status = 0, clear = 0;
4754 +       u32 status = ~0;
4755         struct device *dev = (struct device *)arg;
4756         struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
4757         struct net_device *net_dev = dev_get_drvdata(dev);
4758 @@ -2320,18 +3350,12 @@ static irqreturn_t dpni_irq0_handler_thr
4759                                   DPNI_IRQ_INDEX, &status);
4760         if (unlikely(err)) {
4761                 netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
4762 -               clear = 0xffffffff;
4763 -               goto out;
4764 +               return IRQ_HANDLED;
4765         }
4766  
4767 -       if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
4768 -               clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
4769 +       if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
4770                 link_state_update(netdev_priv(net_dev));
4771 -       }
4772  
4773 -out:
4774 -       dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
4775 -                             DPNI_IRQ_INDEX, clear);
4776         return IRQ_HANDLED;
4777  }
4778  
4779 @@ -2348,8 +3372,7 @@ static int setup_irqs(struct fsl_mc_devi
4780  
4781         irq = ls_dev->irqs[0];
4782         err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
4783 -                                       dpni_irq0_handler,
4784 -                                       dpni_irq0_handler_thread,
4785 +                                       NULL, dpni_irq0_handler_thread,
4786                                         IRQF_NO_SUSPEND | IRQF_ONESHOT,
4787                                         dev_name(&ls_dev->dev), &ls_dev->dev);
4788         if (err < 0) {
4789 @@ -2405,6 +3428,393 @@ static void del_ch_napi(struct dpaa2_eth
4790         }
4791  }
4792  
4793 +/* SysFS support */
4794 +static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
4795 +                                        struct device_attribute *attr,
4796 +                                        char *buf)
4797 +{
4798 +       struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4799 +       /* No MC API to read back the shaping config; report the last values set */
4800 +       struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
4801 +
4802 +       return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
4803 +}
4804 +
4805 +static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
4806 +                                         struct device_attribute *attr,
4807 +                                         const char *buf,
4808 +                                         size_t count)
4809 +{
4810 +       int err, items;
4811 +       struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4812 +       struct dpni_tx_shaping_cfg scfg, ercfg = { 0 };
4813 +
4814 +       items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
4815 +       if (items != 2) {
4816 +               pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
4817 +               return -EINVAL;
4818 +       }
4819 +       /* Size restriction as per MC API documentation */
4820 +       if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) {
4821 +               pr_err("max_burst_size must be <= %d\n",
4822 +                      DPAA2_ETH_MAX_BURST_SIZE);
4823 +               return -EINVAL;
4824 +       }
4825 +
4826 +       err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg,
4827 +                                 &ercfg, 0);
4828 +       if (err) {
4829 +               dev_err(dev, "dpni_set_tx_shaping() failed\n");
4830 +               return -EPERM;
4831 +       }
4832 +       /* If successful, save the current configuration for future inquiries */
4833 +       priv->shaping_cfg = scfg;
4834 +
4835 +       return count;
4836 +}
4837 +
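+/* Usage example for the tx_shaping attribute (interface name illustrative):
+ *   echo "1000 8000" > /sys/class/net/eth0/tx_shaping
+ * limits egress to 1000 Mbps with a maximum burst size of 8000 bytes
+ */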
4838 +static struct device_attribute dpaa2_eth_attrs[] = {
4839 +       __ATTR(tx_shaping,
4840 +              0600,
4841 +              dpaa2_eth_show_tx_shaping,
4842 +              dpaa2_eth_write_tx_shaping),
4843 +};
4844 +
4845 +static void dpaa2_eth_sysfs_init(struct device *dev)
4846 +{
4847 +       int i, err;
4848 +
4849 +       for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
4850 +               err = device_create_file(dev, &dpaa2_eth_attrs[i]);
4851 +               if (err) {
4852 +                       dev_err(dev, "ERROR creating sysfs file\n");
4853 +                       goto undo;
4854 +               }
4855 +       }
4856 +       return;
4857 +
4858 +undo:
4859 +       while (i > 0)
4860 +               device_remove_file(dev, &dpaa2_eth_attrs[--i]);
4861 +}
4862 +
4863 +static void dpaa2_eth_sysfs_remove(struct device *dev)
4864 +{
4865 +       int i;
4866 +
4867 +       for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
4868 +               device_remove_file(dev, &dpaa2_eth_attrs[i]);
4869 +}
4870 +
4871 +#ifdef CONFIG_FSL_DPAA2_ETH_DCB
4872 +static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
4873 +                                      struct ieee_pfc *pfc)
4874 +{
4875 +       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4876 +       struct dpni_congestion_notification_cfg notification_cfg;
4877 +       struct dpni_link_state state;
4878 +       int err, i;
4879 +
4880 +       priv->pfc.pfc_cap = dpaa2_eth_tc_count(priv);
4881 +
4882 +       err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
4883 +       if (err) {
4884 +               netdev_err(net_dev, "Error %d getting link state\n", err);
4885 +               return err;
4886 +       }
4887 +
4888 +       if (!(state.options & DPNI_LINK_OPT_PFC_PAUSE))
4889 +               return 0;
4890 +
4891 +       priv->pfc.pfc_en = 0;
4892 +       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4893 +               err = dpni_get_congestion_notification(priv->mc_io, 0,
4894 +                                                      priv->mc_token,
4895 +                                                      DPNI_QUEUE_RX,
4896 +                                                      i, &notification_cfg);
4897 +               if (err) {
4898 +                       netdev_err(net_dev, "Error %d getting congestion notif\n",
4899 +                                  err);
4900 +                       return err;
4901 +               }
4902 +
4903 +               if (notification_cfg.threshold_entry)
4904 +                       priv->pfc.pfc_en |= 1 << i;
4905 +       }
4906 +
4907 +       memcpy(pfc, &priv->pfc, sizeof(priv->pfc));
4908 +
4909 +       return 0;
4910 +}
4911 +
4912 +/* Configure ingress classification based on VLAN PCP */
4913 +static int set_vlan_qos(struct dpaa2_eth_priv *priv)
4914 +{
4915 +       struct device *dev = priv->net_dev->dev.parent;
4916 +       struct dpkg_profile_cfg kg_cfg = {0};
4917 +       struct dpni_qos_tbl_cfg qos_cfg = {0};
4918 +       struct dpni_rule_cfg key_params;
4919 +       u8 *params_iova, *key, *mask = NULL;
4920 +       /* We only need the trailing 16 bits, without the TPID */
4921 +       u8 key_size = VLAN_HLEN / 2;
4922 +       int err = 0, i, j = 0;
4923 +
4924 +       if (priv->vlan_clsf_set)
4925 +               return 0;
4926 +
4927 +       params_iova = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
4928 +       if (!params_iova)
4929 +               return -ENOMEM;
4930 +
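+       /* The QoS key extracts just the full VLAN TCI field from the header */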
4931 +       kg_cfg.num_extracts = 1;
4932 +       kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
4933 +       kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
4934 +       kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
4935 +       kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
4936 +
4937 +       err = dpni_prepare_key_cfg(&kg_cfg, params_iova);
4938 +       if (err) {
4939 +               dev_err(dev, "dpni_prepare_key_cfg failed: %d\n", err);
4940 +               goto out_free;
4941 +       }
4942 +
4943 +       /* Set QoS table */
4944 +       qos_cfg.default_tc = 0;
4945 +       qos_cfg.discard_on_miss = 0;
4946 +       qos_cfg.key_cfg_iova = dma_map_single(dev, params_iova,
4947 +                                             DPAA2_CLASSIFIER_DMA_SIZE,
4948 +                                             DMA_TO_DEVICE);
4949 +       if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
4950 +               dev_err(dev, "%s: DMA mapping failed\n", __func__);
4951 +               err = -ENOMEM;
4952 +               goto out_free;
4953 +       }
4954 +       err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
4955 +       dma_unmap_single(dev, qos_cfg.key_cfg_iova,
4956 +                        DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
4957 +
4958 +       if (err) {
4959 +               dev_err(dev, "dpni_set_qos_table failed: %d\n", err);
4960 +               goto out_free;
4961 +       }
4962 +
4963 +       key_params.key_size = key_size;
4964 +
4965 +       if (dpaa2_eth_fs_mask_enabled(priv)) {
4966 +               mask = kzalloc(key_size, GFP_KERNEL);
4967 +               if (!mask) {
+                       err = -ENOMEM;
4968 +                       goto out_free;
+               }
4969 +
4970 +               *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
4971 +
4972 +               key_params.mask_iova = dma_map_single(dev, mask, key_size,
4973 +                                                     DMA_TO_DEVICE);
4974 +               if (dma_mapping_error(dev, key_params.mask_iova)) {
4975 +                       dev_err(dev, "%s: DMA mapping failed\n", __func__);
4976 +                       err = -ENOMEM;
4977 +                       goto out_free_mask;
4978 +               }
4979 +       } else {
4980 +               key_params.mask_iova = 0;
4981 +       }
4982 +
4983 +       key = kzalloc(key_size, GFP_KERNEL);
4984 +       if (!key) {
+               err = -ENOMEM;
4985 +               goto out_cleanup_mask;
+       }
4986 +
4987 +       key_params.key_iova = dma_map_single(dev, key, key_size,
4988 +                                            DMA_TO_DEVICE);
4989 +       if (dma_mapping_error(dev, key_params.key_iova)) {
4990 +               dev_err(dev, "%s: DMA mapping failed\n", __func__);
4991 +               err = -ENOMEM;
4992 +               goto out_free_key;
4993 +       }
4994 +
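+       /* Add one QoS table entry per PCP value: frames with VLAN priority i
+        * are steered to traffic class i
+        */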
4995 +       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4996 +               *(__be16 *)key = cpu_to_be16(i << VLAN_PRIO_SHIFT);
4997 +
4998 +               dma_sync_single_for_device(dev, key_params.key_iova,
4999 +                                          key_size, DMA_TO_DEVICE);
5000 +
5001 +               err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
5002 +                                        &key_params, i, j++);
5003 +               if (err) {
5004 +                       dev_err(dev, "dpni_add_qos_entry failed: %d\n", err);
5005 +                       goto out_remove;
5006 +               }
5007 +       }
5008 +
5009 +       priv->vlan_clsf_set = true;
5010 +       dev_dbg(dev, "VLAN PCP QoS classification set\n");
5011 +       goto out_cleanup;
5012 +
5013 +out_remove:
5014 +       for (j = 0; j < i; j++) {
5015 +               *(__be16 *)key = cpu_to_be16(j << VLAN_PRIO_SHIFT);
5016 +
5017 +               dma_sync_single_for_device(dev, key_params.key_iova, key_size,
5018 +                                          DMA_TO_DEVICE);
5019 +
5020 +               /* Don't let a successful remove clobber the original error */
5021 +               if (dpni_remove_qos_entry(priv->mc_io, 0, priv->mc_token,
5022 +                                         &key_params))
5023 +                       dev_err(dev, "dpni_remove_qos_entry failed\n");
5024 +       }
5025 +
5026 +out_cleanup:
5027 +       dma_unmap_single(dev, key_params.key_iova, key_size, DMA_TO_DEVICE);
5028 +out_free_key:
5029 +       kfree(key);
5030 +out_cleanup_mask:
5031 +       if (key_params.mask_iova)
5032 +               dma_unmap_single(dev, key_params.mask_iova, key_size,
5033 +                                DMA_TO_DEVICE);
5034 +out_free_mask:
5035 +       kfree(mask);
5036 +out_free:
5037 +       kfree(params_iova);
5038 +       return err;
5039 +}
5040 +
5041 +static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
5042 +                                      struct ieee_pfc *pfc)
5043 +{
5044 +       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5045 +       struct dpni_congestion_notification_cfg notification_cfg = {0};
5046 +       struct dpni_link_state state = {0};
5047 +       struct dpni_link_cfg cfg = {0};
5048 +       struct ieee_pfc old_pfc;
5049 +       int err = 0, i;
5050 +
5051 +       if (dpaa2_eth_tc_count(priv) == 1) {
5052 +               netdev_dbg(net_dev, "DPNI has 1 TC, PFC configuration N/A\n");
5053 +               return 0;
5054 +       }
5055 +
5056 +       /* Mask off pfc_en bits for priorities beyond the supported TC count */
5057 +       pfc->pfc_en &= (1 << dpaa2_eth_tc_count(priv)) - 1;
5058 +
5059 +       if (priv->pfc.pfc_en == pfc->pfc_en)
5060 +               /* Same enabled mask, nothing to be done */
5061 +               return 0;
5062 +
5063 +       err = set_vlan_qos(priv);
5064 +       if (err)
5065 +               return err;
5066 +
5067 +       err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
5068 +       if (err) {
5069 +               netdev_err(net_dev, "Error %d getting link state\n", err);
5070 +               return err;
5071 +       }
5072 +
5073 +       cfg.rate = state.rate;
5074 +       cfg.options = state.options;
5075 +       if (pfc->pfc_en)
5076 +               cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
5077 +       else
5078 +               cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
5079 +
5080 +       err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
5081 +       if (err) {
5082 +               netdev_err(net_dev, "Error %d setting link cfg\n", err);
5083 +               return err;
5084 +       }
5085 +
5086 +       memcpy(&old_pfc, &priv->pfc, sizeof(priv->pfc));
5087 +       memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
5088 +
5089 +       err = set_rx_taildrop(priv);
5090 +       if (err)
5091 +               goto out_restore_config;
5092 +
5093 +       /* configure congestion notifications */
5094 +       notification_cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
5095 +       notification_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
5096 +       notification_cfg.message_iova = 0ULL;
5097 +       notification_cfg.message_ctx = 0ULL;
5098 +
5099 +       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
5100 +               if (dpaa2_eth_is_pfc_enabled(priv, i)) {
5101 +                       notification_cfg.threshold_entry = NAPI_POLL_WEIGHT;
5102 +                       notification_cfg.threshold_exit = NAPI_POLL_WEIGHT / 2;
5103 +               } else {
5104 +                       notification_cfg.threshold_entry = 0;
5105 +                       notification_cfg.threshold_exit = 0;
5106 +               }
5107 +
5108 +               err = dpni_set_congestion_notification(priv->mc_io, 0,
5109 +                                                      priv->mc_token,
5110 +                                                      DPNI_QUEUE_RX,
5111 +                                                      i, &notification_cfg);
5112 +               if (err) {
5113 +                       netdev_err(net_dev, "Error %d setting congestion notif\n",
5114 +                                  err);
5115 +                       goto out_restore_config;
5116 +               }
5117 +
5118 +               netdev_dbg(net_dev, "%s congestion notifications for tc %d\n",
5119 +                          (notification_cfg.threshold_entry ?
5120 +                           "Enabled" : "Disabled"), i);
5121 +       }
5122 +
5123 +       return 0;
5124 +
5125 +out_restore_config:
5126 +       memcpy(&priv->pfc, &old_pfc, sizeof(priv->pfc));
5127 +       return err;
5128 +}
5129 +
5130 +static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
5131 +{
5132 +       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5133 +
5134 +       return priv->dcbx_mode;
5135 +}
5136 +
5137 +static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
5138 +{
5139 +       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5140 +
5141 +       priv->dcbx_mode = mode;
5142 +       return 0;
5143 +}
5144 +
5145 +static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
5146 +{
5147 +       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5148 +
5149 +       switch (capid) {
5150 +       case DCB_CAP_ATTR_PFC:
5151 +               *cap = true;
5152 +               break;
5153 +       case DCB_CAP_ATTR_PFC_TCS:
5154 +               /* bitmap where each bit represents a number of traffic
5155 +                * classes the device can be configured to use for Priority
5156 +                * Flow Control
5157 +                */
5158 +               *cap = 1 << (dpaa2_eth_tc_count(priv) - 1);
5159 +               break;
5160 +       case DCB_CAP_ATTR_DCBX:
5161 +               *cap = priv->dcbx_mode;
5162 +               break;
5163 +       default:
5164 +               *cap = false;
5165 +               break;
5166 +       }
5167 +
5168 +       return 0;
5169 +}
5170 +
5171 +const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
5172 +       .ieee_getpfc    = dpaa2_eth_dcbnl_ieee_getpfc,
5173 +       .ieee_setpfc    = dpaa2_eth_dcbnl_ieee_setpfc,
5174 +       .getdcbx        = dpaa2_eth_dcbnl_getdcbx,
5175 +       .setdcbx        = dpaa2_eth_dcbnl_setdcbx,
5176 +       .getcap         = dpaa2_eth_dcbnl_getcap,
5177 +};
5178 +#endif
5179 +
5180  static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
5181  {
5182         struct device *dev;
5183 @@ -2415,7 +3825,7 @@ static int dpaa2_eth_probe(struct fsl_mc
5184         dev = &dpni_dev->dev;
5185  
5186         /* Net device */
5187 -       net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
5188 +       net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
5189         if (!net_dev) {
5190                 dev_err(dev, "alloc_etherdev_mq() failed\n");
5191                 return -ENOMEM;
5192 @@ -2433,7 +3843,10 @@ static int dpaa2_eth_probe(struct fsl_mc
5193         err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
5194                                      &priv->mc_io);
5195         if (err) {
5196 -               dev_err(dev, "MC portal allocation failed\n");
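+               /* -ENXIO: no MC portal available yet, so defer probing
+                * until the bus can provide one
+                */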
5197 +               if (err == -ENXIO)
5198 +                       err = -EPROBE_DEFER;
5199 +               else
5200 +                       dev_err(dev, "MC portal allocation failed\n");
5201                 goto err_portal_alloc;
5202         }
5203  
5204 @@ -2456,9 +3869,6 @@ static int dpaa2_eth_probe(struct fsl_mc
5205         if (err)
5206                 goto err_bind;
5207  
5208 -       /* Add a NAPI context for each channel */
5209 -       add_ch_napi(priv);
5210 -
5211         /* Percpu statistics */
5212         priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
5213         if (!priv->percpu_stats) {
5214 @@ -2491,7 +3901,14 @@ static int dpaa2_eth_probe(struct fsl_mc
5215         if (err)
5216                 goto err_alloc_rings;
5217  
5218 -       net_dev->ethtool_ops = &dpaa2_ethtool_ops;
5219 +#ifdef CONFIG_FSL_DPAA2_ETH_DCB
5220 +       net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
5221 +       priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
5222 +#endif
5223 +
5224 +       /* Add a NAPI context for each channel */
5225 +       add_ch_napi(priv);
5226 +       enable_ch_napi(priv);
5227  
5228         err = setup_irqs(dpni_dev);
5229         if (err) {
5230 @@ -2499,25 +3916,41 @@ static int dpaa2_eth_probe(struct fsl_mc
5231                 priv->poll_thread = kthread_run(poll_link_state, priv,
5232                                                 "%s_poll_link", net_dev->name);
5233                 if (IS_ERR(priv->poll_thread)) {
5234 -                       netdev_err(net_dev, "Error starting polling thread\n");
5235 +                       dev_err(dev, "Error starting polling thread\n");
5236                         goto err_poll_thread;
5237                 }
5238                 priv->do_link_poll = true;
5239         }
5240  
5241 +       err = register_netdev(net_dev);
5242 +       if (err < 0) {
5243 +               dev_err(dev, "register_netdev() failed\n");
5244 +               goto err_netdev_reg;
5245 +       }
5246 +
5247 +       dpaa2_eth_sysfs_init(&net_dev->dev);
5248 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
5249 +       dpaa2_dbg_add(priv);
5250 +#endif
5251 +
5252         dev_info(dev, "Probed interface %s\n", net_dev->name);
5253         return 0;
5254  
5255 +err_netdev_reg:
5256 +       if (priv->do_link_poll)
5257 +               kthread_stop(priv->poll_thread);
5258 +       else
5259 +               fsl_mc_free_irqs(dpni_dev);
5260  err_poll_thread:
5261         free_rings(priv);
5262  err_alloc_rings:
5263  err_csum:
5264 -       unregister_netdev(net_dev);
5265  err_netdev_init:
5266         free_percpu(priv->percpu_extras);
5267  err_alloc_percpu_extras:
5268         free_percpu(priv->percpu_stats);
5269  err_alloc_percpu_stats:
5270 +       disable_ch_napi(priv);
5271         del_ch_napi(priv);
5272  err_bind:
5273         free_dpbp(priv);
5274 @@ -2544,8 +3977,15 @@ static int dpaa2_eth_remove(struct fsl_m
5275         net_dev = dev_get_drvdata(dev);
5276         priv = netdev_priv(net_dev);
5277  
5278 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
5279 +       dpaa2_dbg_remove(priv);
5280 +#endif
5281 +       dpaa2_eth_sysfs_remove(&net_dev->dev);
5282 +
5283         unregister_netdev(net_dev);
5284 -       dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
5285 +
5286 +       disable_ch_napi(priv);
5287 +       del_ch_napi(priv);
5288  
5289         if (priv->do_link_poll)
5290                 kthread_stop(priv->poll_thread);
5291 @@ -2555,17 +3995,16 @@ static int dpaa2_eth_remove(struct fsl_m
5292         free_rings(priv);
5293         free_percpu(priv->percpu_stats);
5294         free_percpu(priv->percpu_extras);
5295 -
5296 -       del_ch_napi(priv);
5297         free_dpbp(priv);
5298         free_dpio(priv);
5299         free_dpni(priv);
5300  
5301         fsl_mc_portal_free(priv->mc_io);
5302  
5303 -       dev_set_drvdata(dev, NULL);
5304 +       dev_dbg(dev, "Removed interface %s\n", net_dev->name);
5305 +
5306         free_netdev(net_dev);
5307  
5308         return 0;
5309  }
5310  
5311 @@ -2588,4 +4027,34 @@ static struct fsl_mc_driver dpaa2_eth_dr
5312         .match_id_table = dpaa2_eth_match_id_table
5313  };
5314  
5315 -module_fsl_mc_driver(dpaa2_eth_driver);
5316 +static int __init dpaa2_eth_driver_init(void)
5317 +{
5318 +       int err;
5319 +
5320 +       dpaa2_eth_dbg_init();
5321 +       err = fsl_mc_driver_register(&dpaa2_eth_driver);
5322 +       if (err)
5323 +               goto out_debugfs_err;
5324 +
5325 +       err = dpaa2_ceetm_register();
5326 +       if (err)
5327 +               goto out_ceetm_err;
5328 +
5329 +       return 0;
5330 +
5331 +out_ceetm_err:
5332 +       fsl_mc_driver_unregister(&dpaa2_eth_driver);
5333 +out_debugfs_err:
5334 +       dpaa2_eth_dbg_exit();
5335 +       return err;
5336 +}
5337 +
5338 +static void __exit dpaa2_eth_driver_exit(void)
5339 +{
5340 +       dpaa2_ceetm_unregister();
5341 +       fsl_mc_driver_unregister(&dpaa2_eth_driver);
5342 +       dpaa2_eth_dbg_exit();
5343 +}
5344 +
5345 +module_init(dpaa2_eth_driver_init);
5346 +module_exit(dpaa2_eth_driver_exit);
5347 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
5348 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
5349 @@ -1,40 +1,15 @@
5350 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
5351  /* Copyright 2014-2016 Freescale Semiconductor Inc.
5352   * Copyright 2016 NXP
5353 - *
5354 - * Redistribution and use in source and binary forms, with or without
5355 - * modification, are permitted provided that the following conditions are met:
5356 - *     * Redistributions of source code must retain the above copyright
5357 - *      notice, this list of conditions and the following disclaimer.
5358 - *     * Redistributions in binary form must reproduce the above copyright
5359 - *      notice, this list of conditions and the following disclaimer in the
5360 - *      documentation and/or other materials provided with the distribution.
5361 - *     * Neither the name of Freescale Semiconductor nor the
5362 - *      names of its contributors may be used to endorse or promote products
5363 - *      derived from this software without specific prior written permission.
5364 - *
5365 - *
5366 - * ALTERNATIVELY, this software may be distributed under the terms of the
5367 - * GNU General Public License ("GPL") as published by the Free Software
5368 - * Foundation, either version 2 of that License or (at your option) any
5369 - * later version.
5370 - *
5371 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
5372 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
5373 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
5374 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
5375 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
5376 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
5377 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5378 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5379 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5380 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5381   */
5382  
5383  #ifndef __DPAA2_ETH_H
5384  #define __DPAA2_ETH_H
5385  
5386 +#include <linux/dcbnl.h>
5387  #include <linux/netdevice.h>
5388  #include <linux/if_vlan.h>
5389 +#include <linux/filter.h>
5390  
5391  #include "../../fsl-mc/include/dpaa2-io.h"
5392  #include "../../fsl-mc/include/dpaa2-fd.h"
5393 @@ -44,6 +19,9 @@
5394  #include "dpni-cmd.h"
5395  
5396  #include "dpaa2-eth-trace.h"
5397 +#include "dpaa2-eth-debugfs.h"
5398 +
5399 +#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
5400  
5401  #define DPAA2_ETH_STORE_SIZE           16
5402  
5403 @@ -60,43 +38,59 @@
5404  /* Convert L3 MTU to L2 MFL */
5405  #define DPAA2_ETH_L2_MAX_FRM(mtu)      ((mtu) + VLAN_ETH_HLEN)
5406  
5407 -/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
5408 - * frames in the Rx queues (length of the current frame is not
5409 - * taken into account when making the taildrop decision)
5410 - */
5411 -#define DPAA2_ETH_TAILDROP_THRESH      (64 * 1024)
5412 -
5413 -/* Buffer quota per queue. Must be large enough such that for minimum sized
5414 - * frames taildrop kicks in before the bpool gets depleted, so we compute
5415 - * how many 64B frames fit inside the taildrop threshold and add a margin
5416 - * to accommodate the buffer refill delay.
5417 - */
5418 -#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
5419 -#define DPAA2_ETH_NUM_BUFS             (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
5420 -#define DPAA2_ETH_REFILL_THRESH                DPAA2_ETH_MAX_FRAMES_PER_QUEUE
5421 +/* Maximum burst size value for Tx shaping */
5422 +#define DPAA2_ETH_MAX_BURST_SIZE       0xF7FF
5423  
5424  /* Maximum number of buffers that can be acquired/released through a single
5425   * QBMan command
5426   */
5427  #define DPAA2_ETH_BUFS_PER_CMD         7
5428  
5429 -/* Hardware requires alignment for ingress/egress buffer addresses
5430 - * and ingress buffer lengths.
5431 +/* Set the taildrop threshold to 1MB to allow the enqueue of a sufficiently
5432 + * large number of jumbo frames in the Rx queues (length of the current frame
5433 + * is not taken into account when making the taildrop decision)
5434 + */
5435 +#define DPAA2_ETH_TAILDROP_THRESH      (1024 * 1024)
5436 +
5437 +/* Maximum number of Tx confirmation frames to be processed
5438 + * in a single NAPI call
5439 + */
5440 +#define DPAA2_ETH_TXCONF_PER_NAPI      256
5441 +
5442 +/* Buffer quota per channel.
5443 + * We want to keep in check number of ingress frames in flight: for small
5444 + * sized frames, buffer pool depletion will kick in first; for large sizes,
5445 + * Rx FQ taildrop threshold will ensure only a reasonable number of frames
5446 + * will be pending at any given time.
5447   */
5448 -#define DPAA2_ETH_RX_BUF_SIZE          2048
5449 +#define DPAA2_ETH_NUM_BUFS_PER_CH      1024
5450 +#define DPAA2_ETH_REFILL_THRESH(priv)  \
5451 +       ((priv)->max_bufs_per_ch - DPAA2_ETH_BUFS_PER_CMD)
5452 +
5453 +/* Global buffer quota in case flow control is enabled */
5454 +#define DPAA2_ETH_NUM_BUFS_FC          256
5455 +
5456 +/* Hardware requires alignment for ingress/egress buffer addresses */
5457  #define DPAA2_ETH_TX_BUF_ALIGN         64
5458 -#define DPAA2_ETH_RX_BUF_ALIGN         256
5459 -#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
5460 -       ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
5461 -
5462 -/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
5463 - * buffers large enough to allow building an skb around them and also account
5464 - * for alignment restrictions
5465 - */
5466 -#define DPAA2_ETH_BUF_RAW_SIZE \
5467 -       (DPAA2_ETH_RX_BUF_SIZE + \
5468 -       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
5469 -       DPAA2_ETH_RX_BUF_ALIGN)
5470 +
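+/* Rx buffers are page-sized; reserving the skb_shared_info tailroom up
+ * front lets the stack build an skb directly around the buffer
+ */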
5471 +#define DPAA2_ETH_RX_BUF_RAW_SIZE      PAGE_SIZE
5472 +#define DPAA2_ETH_RX_BUF_TAILROOM \
5473 +       SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
5474 +#define DPAA2_ETH_RX_BUF_SIZE \
5475 +       (DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM)
5476 +
5477 +/* Hardware annotation area in RX/TX buffers */
5478 +#define DPAA2_ETH_RX_HWA_SIZE          64
5479 +#define DPAA2_ETH_TX_HWA_SIZE          128
5480 +
5481 +/* PTP nominal frequency 1GHz */
5482 +#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
5483 +
5484 +/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
5485 + * to 256B. For newer revisions, the requirement is only for 64B alignment
5486 + */
5487 +#define DPAA2_ETH_RX_BUF_ALIGN_REV1    256
5488 +#define DPAA2_ETH_RX_BUF_ALIGN         64
5489  
5490  /* We are accommodating a skb backpointer and some S/G info
5491   * in the frame's software annotation. The hardware
5492 @@ -104,12 +98,32 @@
5493   */
5494  #define DPAA2_ETH_SWA_SIZE             64
5495  
5496 +/* We store different information in the software annotation area of a Tx frame
5497 + * based on what type of frame it is
5498 + */
5499 +enum dpaa2_eth_swa_type {
5500 +       DPAA2_ETH_SWA_SINGLE,
5501 +       DPAA2_ETH_SWA_SG,
5502 +       DPAA2_ETH_SWA_XDP,
5503 +};
5504 +
5505  /* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
5506  struct dpaa2_eth_swa {
5507 -       struct sk_buff *skb;
5508 -       struct scatterlist *scl;
5509 -       int num_sg;
5510 -       int num_dma_bufs;
5511 +       enum dpaa2_eth_swa_type type;
5512 +       union {
5513 +               struct {
5514 +                       struct sk_buff *skb;
5515 +               } single;
5516 +               struct {
5517 +                       struct sk_buff *skb;
5518 +                       struct scatterlist *scl;
5519 +                       int num_sg;
5520 +                       int sgt_size;
5521 +               } sg;
5522 +               struct {
5523 +                       int dma_size;
5524 +               } xdp;
5525 +       };
5526  };
5527  
5528  /* Annotation valid bits in FD FRC */
5529 @@ -121,22 +135,14 @@ struct dpaa2_eth_swa {
5530  #define DPAA2_FD_FRC_FAICFDV           0x0400
5531  
5532  /* Error bits in FD CTRL */
5533 -#define DPAA2_FD_CTRL_UFD              0x00000004
5534 -#define DPAA2_FD_CTRL_SBE              0x00000008
5535 -#define DPAA2_FD_CTRL_FSE              0x00000020
5536 -#define DPAA2_FD_CTRL_FAERR            0x00000040
5537 -
5538 -#define DPAA2_FD_RX_ERR_MASK           (DPAA2_FD_CTRL_SBE      | \
5539 -                                        DPAA2_FD_CTRL_FAERR)
5540 -#define DPAA2_FD_TX_ERR_MASK           (DPAA2_FD_CTRL_UFD      | \
5541 -                                        DPAA2_FD_CTRL_SBE      | \
5542 -                                        DPAA2_FD_CTRL_FSE      | \
5543 -                                        DPAA2_FD_CTRL_FAERR)
5544 +#define DPAA2_FD_RX_ERR_MASK           (FD_CTRL_SBE | FD_CTRL_FAERR)
5545 +#define DPAA2_FD_TX_ERR_MASK           (FD_CTRL_UFD    | \
5546 +                                        FD_CTRL_SBE    | \
5547 +                                        FD_CTRL_FSE    | \
5548 +                                        FD_CTRL_FAERR)
5549  
5550  /* Annotation bits in FD CTRL */
5551 -#define DPAA2_FD_CTRL_ASAL             0x00020000      /* ASAL = 128 */
5552 -#define DPAA2_FD_CTRL_PTA              0x00800000
5553 -#define DPAA2_FD_CTRL_PTV1             0x00400000
5554 +#define DPAA2_FD_CTRL_ASAL             0x00020000      /* ASAL = 128B */
5555  
5556  /* Frame annotation status */
5557  struct dpaa2_fas {
5558 @@ -144,7 +150,7 @@ struct dpaa2_fas {
5559         u8 ppid;
5560         __le16 ifpid;
5561         __le32 status;
5562 -} __packed;
5563 +};
5564  
5565  /* Frame annotation status word is located in the first 8 bytes
5566  * of the buffer's hardware annotation area
5567 @@ -152,11 +158,45 @@ struct dpaa2_fas {
5568  #define DPAA2_FAS_OFFSET               0
5569  #define DPAA2_FAS_SIZE                 (sizeof(struct dpaa2_fas))
5570  
5571 +/* Timestamp is located in the next 8 bytes of the buffer's
5572 + * hardware annotation area
5573 + */
5574 +#define DPAA2_TS_OFFSET                        0x8
5575 +
5576 +/* Frame annotation egress action descriptor */
5577 +#define DPAA2_FAEAD_OFFSET             0x58
5578 +
5579 +struct dpaa2_faead {
5580 +       __le32 conf_fqid;
5581 +       __le32 ctrl;
5582 +};
5583 +
5584 +#define DPAA2_FAEAD_A2V                        0x20000000
5585 +#define DPAA2_FAEAD_A4V                        0x08000000
5586 +#define DPAA2_FAEAD_UPDV               0x00001000
5587 +#define DPAA2_FAEAD_EBDDV              0x00002000
5588 +#define DPAA2_FAEAD_UPD                        0x00000010
5589 +
5590  /* Accessors for the hardware annotation fields that we use */
5591 -#define dpaa2_get_hwa(buf_addr) \
5592 -       ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE)
5593 -#define dpaa2_get_fas(buf_addr) \
5594 -       (struct dpaa2_fas *)(dpaa2_get_hwa(buf_addr) + DPAA2_FAS_OFFSET)
5595 +static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
5596 +{
5597 +       return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0);
5598 +}
5599 +
5600 +static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
5601 +{
5602 +       return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
5603 +}
5604 +
5605 +static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa)
5606 +{
5607 +       return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
5608 +}
5609 +
5610 +static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
5611 +{
5612 +       return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
5613 +}
5614  
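Together these accessors fix the annotation layout: an optional 64 B software
annotation first (hence the swa flag, true for Tx buffers), then the hardware
annotation area with the FAS at offset 0x0, the timestamp at 0x8 and the
FAEAD at 0x58. A usage sketch for the Rx side, where no SWA is present:

    /* Rx: hardware annotation starts at the buffer, so swa == false */
    struct dpaa2_fas *fas = dpaa2_get_fas(vaddr, false);
    u32 status = le32_to_cpu(fas->status);

    if (status & DPAA2_FAS_RX_ERR_MASK)
            goto err_frame;             /* count and drop */

    /* Rx timestamp, meaningful only if Rx timestamping is enabled */
    u64 ns = le64_to_cpu(*dpaa2_get_ts(vaddr, false));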
5615  /* Error and status bits in the frame annotation status word */
5616  /* Debug frame, otherwise supposed to be discarded */
5617 @@ -203,11 +243,6 @@ struct dpaa2_fas {
5618                                          DPAA2_FAS_BLE          | \
5619                                          DPAA2_FAS_L3CE         | \
5620                                          DPAA2_FAS_L4CE)
5621 -/* Tx errors */
5622 -#define DPAA2_FAS_TX_ERR_MASK          (DPAA2_FAS_KSE          | \
5623 -                                        DPAA2_FAS_EOFHE        | \
5624 -                                        DPAA2_FAS_MNLE         | \
5625 -                                        DPAA2_FAS_TIDE)
5626  
5627  /* Time in milliseconds between link state updates */
5628  #define DPAA2_ETH_LINK_STATE_REFRESH   1000
5629 @@ -226,6 +261,7 @@ struct dpaa2_eth_drv_stats {
5630         __u64   tx_conf_bytes;
5631         __u64   tx_sg_frames;
5632         __u64   tx_sg_bytes;
5633 +       __u64   tx_reallocs;
5634         __u64   rx_sg_frames;
5635         __u64   rx_sg_bytes;
5636         /* Enqueues retried due to portal busy */
5637 @@ -250,17 +286,23 @@ struct dpaa2_eth_ch_stats {
5638         __u64 pull_err;
5639  };
5640  
5641 +#define DPAA2_ETH_MAX_TCS              8
5642 +
5643  /* Maximum number of queues associated with a DPNI */
5644 -#define DPAA2_ETH_MAX_RX_QUEUES                16
5645 -#define DPAA2_ETH_MAX_TX_QUEUES                NR_CPUS
5646 +#define DPAA2_ETH_MAX_RX_QUEUES                (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
5647 +#define DPAA2_ETH_MAX_TX_QUEUES                DPNI_MAX_SENDERS
5648 +#define DPAA2_ETH_MAX_RX_ERR_QUEUES    1
5649  #define DPAA2_ETH_MAX_QUEUES           (DPAA2_ETH_MAX_RX_QUEUES + \
5650 -                                       DPAA2_ETH_MAX_TX_QUEUES)
5651 +                                       DPAA2_ETH_MAX_TX_QUEUES + \
5652 +                                       DPAA2_ETH_MAX_RX_ERR_QUEUES)
5653 +#define DPAA2_ETH_MAX_NETDEV_QUEUES    (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
5654  
5655 -#define DPAA2_ETH_MAX_DPCONS           NR_CPUS
5656 +#define DPAA2_ETH_MAX_DPCONS           16
5657  
5658  enum dpaa2_eth_fq_type {
5659         DPAA2_RX_FQ = 0,
5660         DPAA2_TX_CONF_FQ,
5661 +       DPAA2_RX_ERR_FQ
5662  };
5663  
5664  struct dpaa2_eth_priv;
5665 @@ -268,15 +310,19 @@ struct dpaa2_eth_priv;
5666  struct dpaa2_eth_fq {
5667         u32 fqid;
5668         u32 tx_qdbin;
5669 +       u32 tx_fqid;
5670         u16 flowid;
5671 +       u8 tc;
5672         int target_cpu;
5673 +       u32 dq_frames;
5674 +       u32 dq_bytes;
5675         struct dpaa2_eth_channel *channel;
5676         enum dpaa2_eth_fq_type type;
5677  
5678 -       void (*consume)(struct dpaa2_eth_priv *,
5679 -                       struct dpaa2_eth_channel *,
5680 -                       const struct dpaa2_fd *,
5681 -                       struct napi_struct *);
5682 +       void (*consume)(struct dpaa2_eth_priv *priv,
5683 +                       struct dpaa2_eth_channel *ch,
5684 +                       const struct dpaa2_fd *fd,
5685 +                       struct dpaa2_eth_fq *fq);
5686         struct dpaa2_eth_fq_stats stats;
5687  };
5688  
5689 @@ -285,19 +331,29 @@ struct dpaa2_eth_channel {
5690         struct fsl_mc_device *dpcon;
5691         int dpcon_id;
5692         int ch_id;
5693 -       int dpio_id;
5694         struct napi_struct napi;
5695 +       struct dpaa2_io *dpio;
5696         struct dpaa2_io_store *store;
5697         struct dpaa2_eth_priv *priv;
5698         int buf_count;
5699         struct dpaa2_eth_ch_stats stats;
5700 +       struct bpf_prog *xdp_prog;
5701 +       u64 rel_buf_array[DPAA2_ETH_BUFS_PER_CMD];
5702 +       u8 rel_buf_cnt;
5703 +       bool flush;
5704  };
5705  
5706 -struct dpaa2_eth_hash_fields {
5707 +struct dpaa2_eth_dist_fields {
5708         u64 rxnfc_field;
5709         enum net_prot cls_prot;
5710         int cls_field;
5711         int size;
5712 +       u64 id;
5713 +};
5714 +
5715 +struct dpaa2_eth_cls_rule {
5716 +       struct ethtool_rx_flow_spec fs;
5717 +       u8 in_use;
5718  };
5719  
5720  /* Driver private data */
5721 @@ -306,17 +362,29 @@ struct dpaa2_eth_priv {
5722  
5723         u8 num_fqs;
5724         struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
5725 +       int (*enqueue)(struct dpaa2_eth_priv *priv,
5726 +                      struct dpaa2_eth_fq *fq,
5727 +                      struct dpaa2_fd *fd, u8 prio);
5728  
5729         u8 num_channels;
5730         struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
5731 +       int max_bufs_per_ch;
5732 +       int refill_thresh;
5733 +
5734 +       bool has_xdp_prog;
5735  
5736         struct dpni_attr dpni_attrs;
5737 +       u16 dpni_ver_major;
5738 +       u16 dpni_ver_minor;
5739         u16 tx_data_offset;
5740  
5741         struct fsl_mc_device *dpbp_dev;
5742         u16 bpid;
5743         struct iommu_domain *iommu_domain;
5744  
5745 +       bool ts_tx_en; /* Tx timestamping enabled */
5746 +       bool ts_rx_en; /* Rx timestamping enabled */
5747 +
5748         u16 tx_qdid;
5749         struct fsl_mc_io *mc_io;
5750         /* Cores which have an affine DPIO/DPCON.
5751 @@ -337,13 +405,30 @@ struct dpaa2_eth_priv {
5752  
5753         /* enabled ethtool hashing bits */
5754         u64 rx_hash_fields;
5755 +       u64 rx_cls_fields;
5756 +       struct dpaa2_eth_cls_rule *cls_rule;
5757 +       u8 rx_cls_enabled;
5758 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
5759 +       struct dpaa2_debugfs dbg;
5760 +#endif
5761 +       struct dpni_tx_shaping_cfg shaping_cfg;
5762 +
5763 +       u8 dcbx_mode;
5764 +       struct ieee_pfc pfc;
5765 +       bool vlan_clsf_set;
5766 +       bool tx_pause_frames;
5767 +
5768 +       bool ceetm_en;
5769  };
5770  
5771 -/* default Rx hash options, set during probing */
5772  #define DPAA2_RXH_SUPPORTED    (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
5773                                 | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
5774                                 | RXH_L4_B_2_3)
5775  
5776 +/* default Rx hash options, set during probing */
5777 +#define DPAA2_RXH_DEFAULT      (RXH_L3_PROTO | RXH_IP_SRC | RXH_IP_DST | \
5778 +                                RXH_L4_B_0_1 | RXH_L4_B_2_3)
5779 +
5780  #define dpaa2_eth_hash_enabled(priv)   \
5781         ((priv)->dpni_attrs.num_queues > 1)
5782  
5783 @@ -352,10 +437,127 @@ struct dpaa2_eth_priv {
5784  
5785  extern const struct ethtool_ops dpaa2_ethtool_ops;
5786  extern const char dpaa2_eth_drv_version[];
5787 +extern int dpaa2_phc_index;
5788 +
5789 +static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
5790 +                                        u16 ver_major, u16 ver_minor)
5791 +{
5792 +       if (priv->dpni_ver_major == ver_major)
5793 +               return priv->dpni_ver_minor - ver_minor;
5794 +       return priv->dpni_ver_major - ver_major;
5795 +}
5796 +
5797 +/* Minimum firmware version that supports a more flexible API
5798 + * for configuring the Rx flow hash key
5799 + */
5800 +#define DPNI_RX_DIST_KEY_VER_MAJOR     7
5801 +#define DPNI_RX_DIST_KEY_VER_MINOR     5
5802 +
5803 +#define dpaa2_eth_has_legacy_dist(priv)                                        \
5804 +       (dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR,     \
5805 +                               DPNI_RX_DIST_KEY_VER_MINOR) < 0)
5806 +
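dpaa2_eth_cmp_dpni_ver() follows the strcmp() convention: negative when the
DPNI firmware API is older than the version asked about, zero when equal,
positive when newer. For example, firmware 7.2 checked against the 7.5 Rx
distribution key API yields 2 - 5 = -3, so dpaa2_eth_has_legacy_dist() is
true and the driver must fall back to the older hash-key configuration call.
A hedged sketch of the pattern (helper names are hypothetical):

    if (dpaa2_eth_has_legacy_dist(priv))
            err = legacy_rx_dist_setup(priv);    /* pre-7.5 firmware */
    else
            err = flexible_rx_dist_setup(priv);  /* 7.5+ firmware */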
5807 +#define dpaa2_eth_fs_enabled(priv)     \
5808 +       (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
5809 +
5810 +#define dpaa2_eth_fs_mask_enabled(priv)        \
5811 +       ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
5812 +
5813 +#define dpaa2_eth_fs_count(priv)       \
5814 +       ((priv)->dpni_attrs.fs_entries)
5815 +
5816 +#define dpaa2_eth_queue_count(priv)    \
5817 +       ((priv)->num_channels)
5818 +
5819 +#define dpaa2_eth_tc_count(priv)       \
5820 +       ((priv)->dpni_attrs.num_tcs)
5821 +
5822 +enum dpaa2_eth_rx_dist {
5823 +       DPAA2_ETH_RX_DIST_HASH,
5824 +       DPAA2_ETH_RX_DIST_CLS
5825 +};
5826 +
5827 +/* Unique IDs for the supported Rx classification header fields */
5828 +#define DPAA2_ETH_DIST_ETHDST          BIT(0)
5829 +#define DPAA2_ETH_DIST_ETHSRC          BIT(1)
5830 +#define DPAA2_ETH_DIST_ETHTYPE         BIT(2)
5831 +#define DPAA2_ETH_DIST_VLAN            BIT(3)
5832 +#define DPAA2_ETH_DIST_IPSRC           BIT(4)
5833 +#define DPAA2_ETH_DIST_IPDST           BIT(5)
5834 +#define DPAA2_ETH_DIST_IPPROTO         BIT(6)
5835 +#define DPAA2_ETH_DIST_L4SRC           BIT(7)
5836 +#define DPAA2_ETH_DIST_L4DST           BIT(8)
5837 +#define DPAA2_ETH_DIST_ALL             (~0U)
5838 +
5839 +static inline
5840 +unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
5841 +                                      struct sk_buff *skb)
5842 +{
5843 +       unsigned int headroom = DPAA2_ETH_SWA_SIZE;
5844 +
5845 +       /* If we don't have an skb (e.g. XDP buffer), we only need space for
5846 +        * the software annotation area
5847 +        */
5848 +       if (!skb)
5849 +               return headroom;
5850  
5851 -static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
5852 +       /* For non-linear skbs we have no headroom requirement, as we build a
5853 +        * SG frame with a newly allocated SGT buffer
5854 +        */
5855 +       if (skb_is_nonlinear(skb))
5856 +               return 0;
5857 +
5858 +       /* If we have Tx timestamping, need 128B hardware annotation */
5859 +       if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
5860 +               headroom += DPAA2_ETH_TX_HWA_SIZE;
5861 +
5862 +       return headroom;
5863 +}
5864 +
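Working through dpaa2_eth_needed_headroom() for the common cases (the byte
counts follow from the DPAA2_ETH_SWA_SIZE and DPAA2_ETH_TX_HWA_SIZE constants
above):

    /* XDP buffer (skb == NULL)              ->  64 B, SWA only
     * nonlinear skb                         ->   0 B, SGT allocated anyway
     * linear skb, no Tx timestamp           ->  64 B, SWA only
     * linear skb, SKBTX_HW_TSTAMP + ts_tx   -> 192 B, 64 B SWA + 128 B HWA
     */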
5865 +/* Extra headroom space requested to hardware, in order to make sure there's
5866 + * no realloc'ing in forwarding scenarios
5867 + */
5868 +static inline unsigned int dpaa2_eth_rx_headroom(struct dpaa2_eth_priv *priv)
5869 +{
5870 +       return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
5871 +}
5872 +
5873 +static inline bool dpaa2_eth_is_pfc_enabled(struct dpaa2_eth_priv *priv,
5874 +                                           int traffic_class)
5875 +{
5876 +       return priv->pfc.pfc_en & (1 << traffic_class);
5877 +}
5878 +
5879 +enum dpaa2_eth_td_cfg {
5880 +       DPAA2_ETH_TD_NONE,
5881 +       DPAA2_ETH_TD_QUEUE,
5882 +       DPAA2_ETH_TD_GROUP
5883 +};
5884 +
5885 +static inline enum dpaa2_eth_td_cfg
5886 +dpaa2_eth_get_td_type(struct dpaa2_eth_priv *priv)
5887 +{
5888 +       bool pfc_enabled = !!(priv->pfc.pfc_en);
5889 +
5890 +       if (pfc_enabled)
5891 +               return DPAA2_ETH_TD_GROUP;
5892 +       else if (priv->tx_pause_frames)
5893 +               return DPAA2_ETH_TD_NONE;
5894 +       else
5895 +               return DPAA2_ETH_TD_QUEUE;
5896 +}
5897 +
5898 +static inline int dpaa2_eth_ch_count(struct dpaa2_eth_priv *priv)
5899  {
5900 -       return priv->dpni_attrs.num_queues;
5901 +       return 1;
5902  }
5903  
5904 +int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
5905 +int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
5906 +int dpaa2_eth_cls_key_size(u64 key);
5907 +int dpaa2_eth_cls_fld_off(int prot, int field);
5908 +void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
5909 +
5910 +int set_rx_taildrop(struct dpaa2_eth_priv *priv);
5911 +
5912  #endif /* __DPAA2_H */
5913 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
5914 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
5915 @@ -1,35 +1,10 @@
5916 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
5917  /* Copyright 2014-2016 Freescale Semiconductor Inc.
5918 - * Copyright 2016 NXP
5919 - *
5920 - * Redistribution and use in source and binary forms, with or without
5921 - * modification, are permitted provided that the following conditions are met:
5922 - *     * Redistributions of source code must retain the above copyright
5923 - *      notice, this list of conditions and the following disclaimer.
5924 - *     * Redistributions in binary form must reproduce the above copyright
5925 - *      notice, this list of conditions and the following disclaimer in the
5926 - *      documentation and/or other materials provided with the distribution.
5927 - *     * Neither the name of Freescale Semiconductor nor the
5928 - *      names of its contributors may be used to endorse or promote products
5929 - *      derived from this software without specific prior written permission.
5930 - *
5931 - *
5932 - * ALTERNATIVELY, this software may be distributed under the terms of the
5933 - * GNU General Public License ("GPL") as published by the Free Software
5934 - * Foundation, either version 2 of that License or (at your option) any
5935 - * later version.
5936 - *
5937 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
5938 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
5939 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
5940 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
5941 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
5942 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
5943 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5944 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5945 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5946 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5947 + * Copyright 2016-2017 NXP
5948   */
5949  
5950 +#include <linux/net_tstamp.h>
5951 +
5952  #include "dpni.h"      /* DPNI_LINK_OPT_* */
5953  #include "dpaa2-eth.h"
5954  
5955 @@ -52,6 +27,10 @@ static char dpaa2_ethtool_stats[][ETH_GS
5956         "[hw] rx nobuffer discards",
5957         "[hw] tx discarded frames",
5958         "[hw] tx confirmed frames",
5959 +       "[hw] tx dequeued bytes",
5960 +       "[hw] tx dequeued frames",
5961 +       "[hw] tx rejected bytes",
5962 +       "[hw] tx rejected frames",
5963  };
5964  
5965  #define DPAA2_ETH_NUM_STATS    ARRAY_SIZE(dpaa2_ethtool_stats)
5966 @@ -62,6 +41,7 @@ static char dpaa2_ethtool_extras[][ETH_G
5967         "[drv] tx conf bytes",
5968         "[drv] tx sg frames",
5969         "[drv] tx sg bytes",
5970 +       "[drv] tx realloc frames",
5971         "[drv] rx sg frames",
5972         "[drv] rx sg bytes",
5973         "[drv] enqueue portal busy",
5974 @@ -69,6 +49,12 @@ static char dpaa2_ethtool_extras[][ETH_G
5975         "[drv] dequeue portal busy",
5976         "[drv] channel pull errors",
5977         "[drv] cdan",
5978 +       /* FQ stats */
5979 +       "rx pending frames",
5980 +       "rx pending bytes",
5981 +       "tx conf pending frames",
5982 +       "tx conf pending bytes",
5983 +       "buffer count"
5984  };
5985  
5986  #define DPAA2_ETH_NUM_EXTRA_STATS      ARRAY_SIZE(dpaa2_ethtool_extras)
5987 @@ -76,14 +62,55 @@ static char dpaa2_ethtool_extras[][ETH_G
5988  static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
5989                                   struct ethtool_drvinfo *drvinfo)
5990  {
5991 +       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5992 +
5993         strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
5994 -       strlcpy(drvinfo->version, dpaa2_eth_drv_version,
5995 -               sizeof(drvinfo->version));
5996 -       strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
5997 +
5998 +       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
5999 +                "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);
6000 +
6001         strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
6002                 sizeof(drvinfo->bus_info));
6003  }
6004  
6005 +#define DPNI_LINK_AUTONEG_VER_MAJOR            7
6006 +#define DPNI_LINK_AUTONEG_VER_MINOR            8
6007 +
6008 +struct dpaa2_eth_link_mode_map {
6009 +       u64 dpni_lm;
6010 +       u64 ethtool_lm;
6011 +};
6012 +
6013 +static const struct dpaa2_eth_link_mode_map dpaa2_eth_lm_map[] = {
6014 +       {DPNI_ADVERTISED_10BASET_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT},
6015 +       {DPNI_ADVERTISED_100BASET_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT},
6016 +       {DPNI_ADVERTISED_1000BASET_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
6017 +       {DPNI_ADVERTISED_10000BASET_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT},
6018 +       {DPNI_ADVERTISED_2500BASEX_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT},
6019 +       {DPNI_ADVERTISED_AUTONEG, ETHTOOL_LINK_MODE_Autoneg_BIT},
6020 +};
6021 +
6022 +static void link_mode_dpni2ethtool(u64 dpni_lm, unsigned long *ethtool_lm)
6023 +{
6024 +       int i;
6025 +
6026 +       for (i = 0; i < ARRAY_SIZE(dpaa2_eth_lm_map); i++) {
6027 +               if (dpni_lm & dpaa2_eth_lm_map[i].dpni_lm)
6028 +                       __set_bit(dpaa2_eth_lm_map[i].ethtool_lm, ethtool_lm);
6029 +       }
6030 +}
6031 +
6032 +static void link_mode_ethtool2dpni(const unsigned long *ethtool_lm,
6033 +                                  u64 *dpni_lm)
6034 +{
6035 +       int i;
6036 +
6037 +       for (i = 0; i < ARRAY_SIZE(dpaa2_eth_lm_map); i++) {
6038 +               if (test_bit(dpaa2_eth_lm_map[i].ethtool_lm, ethtool_lm))
6039 +                       *dpni_lm |= dpaa2_eth_lm_map[i].dpni_lm;
6040 +       }
6041 +}
6042 +
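The two converters are plain bitmap translations between the DPNI_ADVERTISED_*
word and the kernel's ETHTOOL_LINK_MODE_* bit numbers, one per direction. A
usage sketch mirroring the get/set paths below; note that
link_mode_ethtool2dpni() only ORs bits in, so callers must start from a zeroed
destination:

    /* DPNI -> ethtool (get path) */
    link_mode_dpni2ethtool(state.advertising,
                           link_settings->link_modes.advertising);

    /* ethtool -> DPNI (set path) */
    cfg.advertising = 0;
    link_mode_ethtool2dpni(link_settings->link_modes.advertising,
                           &cfg.advertising);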
6043  static int
6044  dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
6045                              struct ethtool_link_ksettings *link_settings)
6046 @@ -92,17 +119,27 @@ dpaa2_eth_get_link_ksettings(struct net_
6047         int err = 0;
6048         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6049  
6050 -       err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
6051 -       if (err) {
6052 -               netdev_err(net_dev, "ERROR %d getting link state\n", err);
6053 -               goto out;
6054 +       if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_LINK_AUTONEG_VER_MAJOR,
6055 +                                  DPNI_LINK_AUTONEG_VER_MINOR) < 0) {
6056 +               err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token,
6057 +                                         &state);
6058 +               if (err) {
6059 +                       netdev_err(net_dev, "dpni_get_link_state failed\n");
6060 +                       goto out;
6061 +               }
6062 +       } else {
6063 +               err = dpni_get_link_state_v2(priv->mc_io, 0, priv->mc_token,
6064 +                                            &state);
6065 +               if (err) {
6066 +                       netdev_err(net_dev, "dpni_get_link_state_v2 failed\n");
6067 +                       goto out;
6068 +               }
6069 +               link_mode_dpni2ethtool(state.supported,
6070 +                                      link_settings->link_modes.supported);
6071 +               link_mode_dpni2ethtool(state.advertising,
6072 +                                      link_settings->link_modes.advertising);
6073         }
6074  
6075 -       /* At the moment, we have no way of interrogating the DPMAC
6076 -        * from the DPNI side - and for that matter there may exist
6077 -        * no DPMAC at all. So for now we just don't report anything
6078 -        * beyond the DPNI attributes.
6079 -        */
6080         if (state.options & DPNI_LINK_OPT_AUTONEG)
6081                 link_settings->base.autoneg = AUTONEG_ENABLE;
6082         if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
6083 @@ -113,25 +150,37 @@ out:
6084         return err;
6085  }
6086  
6087 +#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR                7
6088 +#define DPNI_DYNAMIC_LINK_SET_VER_MINOR                1
6089  static int
6090  dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
6091                              const struct ethtool_link_ksettings *link_settings)
6092  {
6093 -       struct dpni_link_cfg cfg = {0};
6094         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6095 +       struct dpni_link_state state = {0};
6096 +       struct dpni_link_cfg cfg = {0};
6097         int err = 0;
6098  
6099 -       netdev_dbg(net_dev, "Setting link parameters...");
6100 +       /* If using an older MC version, the DPNI must be down
6101 +        * in order to change link settings. Let the user know
6102 +        * about it.
6103 +        */
6104 +       if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR,
6105 +                                  DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) {
6106 +               if (netif_running(net_dev)) {
6107 +                       netdev_info(net_dev, "Interface must be brought down first.\n");
6108 +                       return -EACCES;
6109 +               }
6110 +       }
6111  
6112 -       /* Due to a temporary MC limitation, the DPNI must be down
6113 -        * in order to be able to change link settings. Taking steps to let
6114 -        * the user know that.
6115 -        */
6116 -       if (netif_running(net_dev)) {
6117 -               netdev_info(net_dev, "Sorry, interface must be brought down first.\n");
6118 -               return -EACCES;
6119 +       /* Need to interrogate link state to get flow control params */
6120 +       err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
6121 +       if (err) {
6122 +               netdev_err(net_dev, "Error getting link state\n");
6123 +               goto out;
6124         }
6125  
6126 +       cfg.options = state.options;
6127         cfg.rate = link_settings->base.speed;
6128         if (link_settings->base.autoneg == AUTONEG_ENABLE)
6129                 cfg.options |= DPNI_LINK_OPT_AUTONEG;
6130 @@ -142,13 +191,92 @@ dpaa2_eth_set_link_ksettings(struct net_
6131         else
6132                 cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
6133  
6134 +       if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_LINK_AUTONEG_VER_MAJOR,
6135 +                                  DPNI_LINK_AUTONEG_VER_MINOR) < 0) {
6136 +               err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
6137 +       } else {
6138 +               link_mode_ethtool2dpni(link_settings->link_modes.advertising,
6139 +                                      &cfg.advertising);
6140 +               err = dpni_set_link_cfg_v2(priv->mc_io, 0, priv->mc_token, &cfg);
6141 +       }
6142 +       if (err)
6143 +               netdev_err(net_dev, "dpni_set_link_cfg failed\n");
6144 +
6145 +out:
6146 +       return err;
6147 +}
6148 +
6149 +static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
6150 +                                    struct ethtool_pauseparam *pause)
6151 +{
6152 +       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6153 +       struct dpni_link_state state = {0};
6154 +       int err;
6155 +
6156 +       err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
6157 +       if (err)
6158 +               netdev_dbg(net_dev, "Error getting link state\n");
6159 +
6160 +       /* Report general port autonegotiation status */
6161 +       pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
6162 +       pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
6163 +       pause->tx_pause = pause->rx_pause ^
6164 +                         !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
6165 +}
6166 +
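The rx/tx pause decode above follows the standard two-bit PAUSE/ASYM_PAUSE
encoding; as a worked truth table (a reading aid, assuming the DPNI option
bits carry the usual IEEE 802.3 flow control semantics):

    /* PAUSE  ASYM_PAUSE  ->  rx_pause  tx_pause
     *   0        0              0         0
     *   0        1              0         1
     *   1        0              1         1
     *   1        1              1         0
     *
     * i.e. rx = PAUSE, tx = PAUSE ^ ASYM_PAUSE, matching both this decode
     * and the encode in dpaa2_eth_set_pauseparam() below.
     */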
6167 +static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
6168 +                                   struct ethtool_pauseparam *pause)
6169 +{
6170 +       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6171 +       struct dpni_link_state state = {0};
6172 +       struct dpni_link_cfg cfg = {0};
6173 +       u32 current_tx_pause;
6174 +       int err = 0;
6175 +
6176 +       err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
6177 +       if (err) {
6178 +               netdev_dbg(net_dev, "Error getting link state\n");
6179 +               goto out;
6180 +       }
6181 +
6182 +       cfg.rate = state.rate;
6183 +       cfg.options = state.options;
6184 +       current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
6185 +                          !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
6186 +
6187 +       /* We don't support changing pause frame autonegotiation separately
6188 +        * from general port autoneg
6189 +        */
6190 +       if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
6191 +               netdev_warn(net_dev,
6192 +                           "Cannot change pause frame autoneg separately\n");
6193 +
6194 +       if (pause->rx_pause)
6195 +               cfg.options |= DPNI_LINK_OPT_PAUSE;
6196 +       else
6197 +               cfg.options &= ~DPNI_LINK_OPT_PAUSE;
6198 +
6199 +       if (pause->rx_pause ^ pause->tx_pause)
6200 +               cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
6201 +       else
6202 +               cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
6203 +
6204         err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
6205 +       if (err) {
6206 +               netdev_dbg(net_dev, "Error setting link\n");
6207 +               goto out;
6208 +       }
6209 +
6210 +       /* Enable/disable Rx FQ taildrop if Tx pause frames have changed */
6211 +       if (current_tx_pause == pause->tx_pause)
6212 +               goto out;
6213 +
6214 +       priv->tx_pause_frames = pause->tx_pause;
6215 +       err = set_rx_taildrop(priv);
6216         if (err)
6217 -               /* ethtool will be loud enough if we return an error; no point
6218 -                * in putting our own error message on the console by default
6219 -                */
6220 -               netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
6221 +               netdev_dbg(net_dev, "Error configuring taildrop\n");
6222  
6223 +out:
6224         return err;
6225  }
6226  
6227 @@ -192,6 +320,10 @@ static void dpaa2_eth_get_ethtool_stats(
6228         int j, k, err;
6229         int num_cnt;
6230         union dpni_statistics dpni_stats;
6231 +       u32 fcnt, bcnt;
6232 +       u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
6233 +       u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
6234 +       u32 buf_cnt;
6235         u64 cdan = 0;
6236         u64 portal_busy = 0, pull_err = 0;
6237         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6238 @@ -202,9 +334,9 @@ static void dpaa2_eth_get_ethtool_stats(
6239                sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
6240  
6241         /* Print standard counters, from DPNI statistics */
6242 -       for (j = 0; j <= 2; j++) {
6243 +       for (j = 0; j <= 3; j++) {
6244                 err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
6245 -                                         j, &dpni_stats);
6246 +                                         j, 0, &dpni_stats);
6247                 if (err != 0)
6248                         netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
6249                 switch (j) {
6250 @@ -217,6 +349,9 @@ static void dpaa2_eth_get_ethtool_stats(
6251                 case 2:
6252                         num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
6253                         break;
6254 +               case 3:
6255 +                       num_cnt = sizeof(dpni_stats.page_3) / sizeof(u64);
6256 +                       break;
6257                 }
6258                 for (k = 0; k < num_cnt; k++)
6259                         *(data + i++) = dpni_stats.raw.counter[k];
6260 @@ -240,12 +375,410 @@ static void dpaa2_eth_get_ethtool_stats(
6261         *(data + i++) = portal_busy;
6262         *(data + i++) = pull_err;
6263         *(data + i++) = cdan;
6264 +
6265 +       for (j = 0; j < priv->num_fqs; j++) {
6266 +               /* Print FQ instantaneous counts */
6267 +               err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
6268 +                                             &fcnt, &bcnt);
6269 +               if (err) {
6270 +                       netdev_warn(net_dev, "FQ query error %d\n", err);
6271 +                       return;
6272 +               }
6273 +
6274 +               if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
6275 +                       fcnt_tx_total += fcnt;
6276 +                       bcnt_tx_total += bcnt;
6277 +               } else {
6278 +                       fcnt_rx_total += fcnt;
6279 +                       bcnt_rx_total += bcnt;
6280 +               }
6281 +       }
6282 +
6283 +       *(data + i++) = fcnt_rx_total;
6284 +       *(data + i++) = bcnt_rx_total;
6285 +       *(data + i++) = fcnt_tx_total;
6286 +       *(data + i++) = bcnt_tx_total;
6287 +
6288 +       err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
6289 +       if (err) {
6290 +               netdev_warn(net_dev, "Buffer count query error %d\n", err);
6291 +               return;
6292 +       }
6293 +       *(data + i++) = buf_cnt;
6294 +}
6295 +
6296 +static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
6297 +                        void *key, void *mask, u64 *fields)
6298 +{
6299 +       int off;
6300 +
6301 +       if (eth_mask->h_proto) {
6302 +               off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
6303 +               *(__be16 *)(key + off) = eth_value->h_proto;
6304 +               *(__be16 *)(mask + off) = eth_mask->h_proto;
6305 +               *fields |= DPAA2_ETH_DIST_ETHTYPE;
6306 +       }
6307 +
6308 +       if (!is_zero_ether_addr(eth_mask->h_source)) {
6309 +               off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
6310 +               ether_addr_copy(key + off, eth_value->h_source);
6311 +               ether_addr_copy(mask + off, eth_mask->h_source);
6312 +               *fields |= DPAA2_ETH_DIST_ETHSRC;
6313 +       }
6314 +
6315 +       if (!is_zero_ether_addr(eth_mask->h_dest)) {
6316 +               off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
6317 +               ether_addr_copy(key + off, eth_value->h_dest);
6318 +               ether_addr_copy(mask + off, eth_mask->h_dest);
6319 +               *fields |= DPAA2_ETH_DIST_ETHDST;
6320 +       }
6321 +
6322 +       return 0;
6323 +}
6324 +
6325 +static int prep_user_ip_rule(struct ethtool_usrip4_spec *uip_value,
6326 +                            struct ethtool_usrip4_spec *uip_mask,
6327 +                            void *key, void *mask, u64 *fields)
6328 +{
6329 +       int off;
6330 +       u32 tmp_value, tmp_mask;
6331 +
6332 +       if (uip_mask->tos || uip_mask->ip_ver)
6333 +               return -EOPNOTSUPP;
6334 +
6335 +       if (uip_mask->ip4src) {
6336 +               off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
6337 +               *(__be32 *)(key + off) = uip_value->ip4src;
6338 +               *(__be32 *)(mask + off) = uip_mask->ip4src;
6339 +               *fields |= DPAA2_ETH_DIST_IPSRC;
6340 +       }
6341 +
6342 +       if (uip_mask->ip4dst) {
6343 +               off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
6344 +               *(__be32 *)(key + off) = uip_value->ip4dst;
6345 +               *(__be32 *)(mask + off) = uip_mask->ip4dst;
6346 +               *fields |= DPAA2_ETH_DIST_IPDST;
6347 +       }
6348 +
6349 +       if (uip_mask->proto) {
6350 +               off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
6351 +               *(u8 *)(key + off) = uip_value->proto;
6352 +               *(u8 *)(mask + off) = uip_mask->proto;
6353 +               *fields |= DPAA2_ETH_DIST_IPPROTO;
6354 +       }
6355 +
6356 +       if (uip_mask->l4_4_bytes) {
6357 +               tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
6358 +               tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);
6359 +
6360 +               off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
6361 +               *(__be16 *)(key + off) = htons(tmp_value >> 16);
6362 +               *(__be16 *)(mask + off) = htons(tmp_mask >> 16);
6363 +               *fields |= DPAA2_ETH_DIST_L4SRC;
6364 +
6365 +               off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
6366 +               *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
6367 +               *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
6368 +               *fields |= DPAA2_ETH_DIST_L4DST;
6369 +       }
6370 +
6371 +       /* Only apply the rule for IPv4 frames */
6372 +       off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
6373 +       *(__be16 *)(key + off) = htons(ETH_P_IP);
6374 +       *(__be16 *)(mask + off) = htons(0xFFFF);
6375 +       *fields |= DPAA2_ETH_DIST_ETHTYPE;
6376 +
6377 +       return 0;
6378 +}
6379 +
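For IP_USER_FLOW rules, ethtool packs both L4 ports into the single 32-bit
l4_4_bytes field -- source port in the high 16 bits, destination in the low
16 -- which is why the code above converts to CPU order, splits, and converts
each half back to network order. A worked example with illustrative values
(src_off/dst_off stand in for the dpaa2_eth_cls_fld_off() lookups):

    /* user asked for src port 1234 (0x04d2) and dst port 5678 (0x162e) */
    u32 tmp = be32_to_cpu(uip_value->l4_4_bytes);        /* 0x04d2162e */

    *(__be16 *)(key + src_off) = htons(tmp >> 16);       /* port 1234 */
    *(__be16 *)(key + dst_off) = htons(tmp & 0xffff);    /* port 5678 */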
6380 +static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
6381 +                       struct ethtool_tcpip4_spec *l4_mask,
6382 +                       void *key, void *mask, u8 l4_proto, u64 *fields)
6383 +{
6384 +       int off;
6385 +
6386 +       if (l4_mask->tos)
6387 +               return -EOPNOTSUPP;
6388 +       if (l4_mask->ip4src) {
6389 +               off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
6390 +               *(__be32 *)(key + off) = l4_value->ip4src;
6391 +               *(__be32 *)(mask + off) = l4_mask->ip4src;
6392 +               *fields |= DPAA2_ETH_DIST_IPSRC;
6393 +       }
6394 +
6395 +       if (l4_mask->ip4dst) {
6396 +               off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
6397 +               *(__be32 *)(key + off) = l4_value->ip4dst;
6398 +               *(__be32 *)(mask + off) = l4_mask->ip4dst;
6399 +               *fields |= DPAA2_ETH_DIST_IPDST;
6400 +       }
6401 +
6402 +       if (l4_mask->psrc) {
6403 +               off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
6404 +               *(__be16 *)(key + off) = l4_value->psrc;
6405 +               *(__be16 *)(mask + off) = l4_mask->psrc;
6406 +               *fields |= DPAA2_ETH_DIST_L4SRC;
6407 +       }
6408 +
6409 +       if (l4_mask->pdst) {
6410 +               off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
6411 +               *(__be16 *)(key + off) = l4_value->pdst;
6412 +               *(__be16 *)(mask + off) = l4_mask->pdst;
6413 +               *fields |= DPAA2_ETH_DIST_L4DST;
6414 +       }
6415 +
6416 +       /* Only apply the rule for the user-specified L4 protocol
6417 +        * and if ethertype matches IPv4
6418 +        */
6419 +       off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
6420 +       *(__be16 *)(key + off) = htons(ETH_P_IP);
6421 +       *(__be16 *)(mask + off) = htons(0xFFFF);
6422 +       *fields |= DPAA2_ETH_DIST_ETHTYPE;
6423 +
6424 +       off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
6425 +       *(u8 *)(key + off) = l4_proto;
6426 +       *(u8 *)(mask + off) = 0xFF;
6427 +       *fields |= DPAA2_ETH_DIST_IPPROTO;
6428 +
6429 +       return 0;
6430 +}
6431 +
6432 +static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
6433 +                        struct ethtool_flow_ext *ext_mask,
6434 +                        void *key, void *mask, u64 *fields)
6435 +{
6436 +       int off;
6437 +
6438 +       if (ext_mask->vlan_etype)
6439 +               return -EOPNOTSUPP;
6440 +
6441 +       if (ext_mask->vlan_tci) {
6442 +               off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
6443 +               *(__be16 *)(key + off) = ext_value->vlan_tci;
6444 +               *(__be16 *)(mask + off) = ext_mask->vlan_tci;
6445 +               *fields |= DPAA2_ETH_DIST_VLAN;
6446 +       }
6447 +
6448 +       return 0;
6449 +}
6450 +
6451 +static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
6452 +                            struct ethtool_flow_ext *ext_mask,
6453 +                            void *key, void *mask, u64 *fields)
6454 +{
6455 +       int off;
6456 +
6457 +       if (!is_zero_ether_addr(ext_mask->h_dest)) {
6458 +               off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
6459 +               ether_addr_copy(key + off, ext_value->h_dest);
6460 +               ether_addr_copy(mask + off, ext_mask->h_dest);
6461 +               *fields |= DPAA2_ETH_DIST_ETHDST;
6462 +       }
6463 +
6464 +       return 0;
6465 +}
6466 +
6467 +static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
6468 +                        u64 *fields)
6469 +{
6470 +       int err;
6471 +
6472 +       switch (fs->flow_type & 0xFF) {
6473 +       case ETHER_FLOW:
6474 +               err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
6475 +                                   key, mask, fields);
6476 +               break;
6477 +       case IP_USER_FLOW:
6478 +               err = prep_user_ip_rule(&fs->h_u.usr_ip4_spec,
6479 +                                   &fs->m_u.usr_ip4_spec, key, mask, fields);
6480 +               break;
6481 +       case TCP_V4_FLOW:
6482 +               err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
6483 +                                  key, mask, IPPROTO_TCP, fields);
6484 +               break;
6485 +       case UDP_V4_FLOW:
6486 +               err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
6487 +                                  key, mask, IPPROTO_UDP, fields);
6488 +               break;
6489 +       case SCTP_V4_FLOW:
6490 +               err = prep_l4_rule(&fs->h_u.sctp_ip4_spec, &fs->m_u.sctp_ip4_spec,
6491 +                                  key, mask, IPPROTO_SCTP, fields);
6492 +               break;
6493 +       default:
6494 +               return -EOPNOTSUPP;
6495 +       }
6496 +
6497 +       if (err)
6498 +               return err;
6499 +
6500 +       if (fs->flow_type & FLOW_EXT) {
6501 +               err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
6502 +               if (err)
6503 +                       return err;
6504 +       }
6505 +
6506 +       if (fs->flow_type & FLOW_MAC_EXT) {
6507 +               err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
6508 +                                       fields);
6509 +               if (err)
6510 +                       return err;
6511 +       }
6512 +
6513 +       return 0;
6514 +}
6515 +
6516 +static int do_cls_rule(struct net_device *net_dev,
6517 +                      struct ethtool_rx_flow_spec *fs,
6518 +                      bool add)
6519 +{
6520 +       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6521 +       struct device *dev = net_dev->dev.parent;
6522 +       struct dpni_rule_cfg rule_cfg = { 0 };
6523 +       struct dpni_fs_action_cfg fs_act = { 0 };
6524 +       dma_addr_t key_iova;
6525 +       u64 fields = 0;
6526 +       void *key_buf;
6527 +       int i, err = 0;
6528 +
6529 +       if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
6530 +           fs->ring_cookie >= dpaa2_eth_queue_count(priv))
6531 +               return -EINVAL;
6532 +
6533 +       rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);
6534 +
6535 +       /* allocate twice the key size, for the actual key and for mask */
6536 +       key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
6537 +       if (!key_buf)
6538 +               return -ENOMEM;
6539 +
6540 +       /* Fill the key and mask memory areas */
6541 +       err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
6542 +       if (err)
6543 +               goto free_mem;
6544 +
6545 +       if (!dpaa2_eth_fs_mask_enabled(priv)) {
6546 +               /* Masking allows us to configure a maximal key during init and
6547 +                * use it for all flow steering rules. Without it, we include
6548 +                * in the key only the fields actually used, so we need to
6549 +                * extract the others from the final key buffer.
6550 +                *
6551 +                * Program the FS key if needed, or return error if previously
6552 +                * set key can't be used for the current rule. User needs to
6553 +                * delete existing rules in this case to allow for the new one.
6554 +                */
6555 +               if (!priv->rx_cls_fields) {
6556 +                       err = dpaa2_eth_set_cls(net_dev, fields);
6557 +                       if (err)
6558 +                               goto free_mem;
6559 +
6560 +                       priv->rx_cls_fields = fields;
6561 +               } else if (priv->rx_cls_fields != fields) {
6562 +                       netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
6563 +                       err = -EOPNOTSUPP;
6564 +                       goto free_mem;
6565 +               }
6566 +
6567 +               dpaa2_eth_cls_trim_rule(key_buf, fields);
6568 +               rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
6569 +       }
6570 +
6571 +       key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
6572 +                                 DMA_TO_DEVICE);
6573 +       if (dma_mapping_error(dev, key_iova)) {
6574 +               err = -ENOMEM;
6575 +               goto free_mem;
6576 +       }
6577 +
6578 +       rule_cfg.key_iova = key_iova;
6579 +       if (dpaa2_eth_fs_mask_enabled(priv))
6580 +               rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
6581 +
6582 +       if (add) {
6583 +               if (fs->ring_cookie == RX_CLS_FLOW_DISC)
6584 +                       fs_act.options |= DPNI_FS_OPT_DISCARD;
6585 +               else
6586 +                       fs_act.flow_id = fs->ring_cookie;
6587 +       }
6588 +       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
6589 +               if (add)
6590 +                       err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
6591 +                                               i, fs->location, &rule_cfg,
6592 +                                               &fs_act);
6593 +               else
6594 +                       err = dpni_remove_fs_entry(priv->mc_io, 0,
6595 +                                                  priv->mc_token, i,
6596 +                                                  &rule_cfg);
6597 +               if (err)
6598 +                       break;
6599 +       }
6600 +
6601 +       dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
6602 +
6603 +free_mem:
6604 +       kfree(key_buf);
6605 +
6606 +       return err;
6607 +}
6608 +
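do_cls_rule() keeps the key and its mask in one allocation and one DMA
mapping, laid out back to back -- which is why mask_iova is simply key_iova
plus the key size when masking is supported:

    /* key_buf layout (2 * key_size bytes, single dma_map_single()):
     *
     *   [ key ............ | mask ........... ]
     *     ^ key_iova         ^ key_iova + key_size  (== mask_iova)
     */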
6609 +static int num_rules(struct dpaa2_eth_priv *priv)
6610 +{
6611 +       int i, rules = 0;
6612 +
6613 +       for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
6614 +               if (priv->cls_rule[i].in_use)
6615 +                       rules++;
6616 +
6617 +       return rules;
6618 +}
6619 +
6620 +static int update_cls_rule(struct net_device *net_dev,
6621 +                          struct ethtool_rx_flow_spec *new_fs,
6622 +                          int location)
6623 +{
6624 +       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6625 +       struct dpaa2_eth_cls_rule *rule;
6626 +       int err = -EINVAL;
6627 +
6628 +       if (!priv->rx_cls_enabled)
6629 +               return -EOPNOTSUPP;
6630 +
6631 +       if (location >= dpaa2_eth_fs_count(priv))
6632 +               return -EINVAL;
6633 +
6634 +       rule = &priv->cls_rule[location];
6635 +
6636 +       /* If a rule is present at the specified location, delete it. */
6637 +       if (rule->in_use) {
6638 +               err = do_cls_rule(net_dev, &rule->fs, false);
6639 +               if (err)
6640 +                       return err;
6641 +
6642 +               rule->in_use = 0;
6643 +
6644 +               if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
6645 +                       priv->rx_cls_fields = 0;
6646 +       }
6647 +
6648 +       /* If no new entry to add, return here */
6649 +       if (!new_fs)
6650 +               return err;
6651 +
6652 +       err = do_cls_rule(net_dev, new_fs, true);
6653 +       if (err)
6654 +               return err;
6655 +
6656 +       rule->in_use = 1;
6657 +       rule->fs = *new_fs;
6658 +
6659 +       return 0;
6660  }
6661  
6662  static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
6663                                struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
6664  {
6665         struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6666 +       int rule_cnt = dpaa2_eth_fs_count(priv);
6667 +       int i, j = 0;
6668  
6669         switch (rxnfc->cmd) {
6670         case ETHTOOL_GRXFH:
6671 @@ -258,6 +791,29 @@ static int dpaa2_eth_get_rxnfc(struct ne
6672         case ETHTOOL_GRXRINGS:
6673                 rxnfc->data = dpaa2_eth_queue_count(priv);
6674                 break;
6675 +       case ETHTOOL_GRXCLSRLCNT:
6676 +               rxnfc->rule_cnt = 0;
6677 +               rxnfc->rule_cnt = num_rules(priv);
6678 +               rxnfc->data = rule_cnt;
6679 +               break;
6680 +       case ETHTOOL_GRXCLSRULE:
6681 +               if (rxnfc->fs.location >= rule_cnt)
6682 +                       return -EINVAL;
6683 +               if (!priv->cls_rule[rxnfc->fs.location].in_use)
6684 +                       return -EINVAL;
6685 +               rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
6686 +               break;
6687 +       case ETHTOOL_GRXCLSRLALL:
6688 +               for (i = 0; i < rule_cnt; i++) {
6689 +                       if (!priv->cls_rule[i].in_use)
6690 +                               continue;
6691 +                       if (j == rxnfc->rule_cnt)
6692 +                               return -EMSGSIZE;
6693 +                       rule_locs[j++] = i;
6694 +               }
6695 +               rxnfc->rule_cnt = j;
6696 +               rxnfc->data = rule_cnt;
6697 +               break;
6698         default:
6699                 return -EOPNOTSUPP;
6700         }
6701 @@ -265,13 +821,61 @@ static int dpaa2_eth_get_rxnfc(struct ne
6702         return 0;
6703  }
6704  
6705 +int dpaa2_phc_index = -1;
6706 +EXPORT_SYMBOL(dpaa2_phc_index);
6707 +
6708 +static int dpaa2_eth_get_ts_info(struct net_device *dev,
6709 +                                struct ethtool_ts_info *info)
6710 +{
6711 +       info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
6712 +                               SOF_TIMESTAMPING_RX_HARDWARE |
6713 +                               SOF_TIMESTAMPING_RAW_HARDWARE;
6714 +
6715 +       info->phc_index = dpaa2_phc_index;
6716 +
6717 +       info->tx_types = (1 << HWTSTAMP_TX_OFF) |
6718 +                        (1 << HWTSTAMP_TX_ON);
6719 +
6720 +       info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6721 +                          (1 << HWTSTAMP_FILTER_ALL);
6722 +       return 0;
6723 +}
6724 +
6725 +static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
6726 +                              struct ethtool_rxnfc *rxnfc)
6727 +{
6728 +       int err = 0;
6729 +
6730 +       switch (rxnfc->cmd) {
6731 +       case ETHTOOL_SRXFH:
6732 +               if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
6733 +                       return -EOPNOTSUPP;
6734 +               err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
6735 +               break;
6736 +       case ETHTOOL_SRXCLSRLINS:
6737 +               err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
6738 +               break;
6739 +       case ETHTOOL_SRXCLSRLDEL:
6740 +               err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
6741 +               break;
6742 +       default:
6743 +               err = -EOPNOTSUPP;
6744 +       }
6745 +
6746 +       return err;
6747 +}
6748 +
6749  const struct ethtool_ops dpaa2_ethtool_ops = {
6750         .get_drvinfo = dpaa2_eth_get_drvinfo,
6751         .get_link = ethtool_op_get_link,
6752         .get_link_ksettings = dpaa2_eth_get_link_ksettings,
6753         .set_link_ksettings = dpaa2_eth_set_link_ksettings,
6754 +       .get_pauseparam = dpaa2_eth_get_pauseparam,
6755 +       .set_pauseparam = dpaa2_eth_set_pauseparam,
6756         .get_sset_count = dpaa2_eth_get_sset_count,
6757         .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
6758         .get_strings = dpaa2_eth_get_strings,
6759         .get_rxnfc = dpaa2_eth_get_rxnfc,
6760 +       .set_rxnfc = dpaa2_eth_set_rxnfc,
6761 +       .get_ts_info = dpaa2_eth_get_ts_info,
6762  };
6763 --- a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
6764 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
6765 @@ -1,39 +1,10 @@
6766 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
6767  /* Copyright 2013-2015 Freescale Semiconductor Inc.
6768 - *
6769 - * Redistribution and use in source and binary forms, with or without
6770 - * modification, are permitted provided that the following conditions are met:
6771 - * * Redistributions of source code must retain the above copyright
6772 - * notice, this list of conditions and the following disclaimer.
6773 - * * Redistributions in binary form must reproduce the above copyright
6774 - * notice, this list of conditions and the following disclaimer in the
6775 - * documentation and/or other materials provided with the distribution.
6776 - * * Neither the name of the above-listed copyright holders nor the
6777 - * names of any contributors may be used to endorse or promote products
6778 - * derived from this software without specific prior written permission.
6779 - *
6780 - *
6781 - * ALTERNATIVELY, this software may be distributed under the terms of the
6782 - * GNU General Public License ("GPL") as published by the Free Software
6783 - * Foundation, either version 2 of that License or (at your option) any
6784 - * later version.
6785 - *
6786 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
6787 - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
6788 - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
6789 - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
6790 - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
6791 - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
6792 - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
6793 - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
6794 - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
6795 - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
6796 - * POSSIBILITY OF SUCH DAMAGE.
6797   */
6798  #ifndef __FSL_DPKG_H_
6799  #define __FSL_DPKG_H_
6800  
6801  #include <linux/types.h>
6802 -#include "net.h"
6803  
6804  /* Data Path Key Generator API
6805   * Contains initialization APIs and runtime APIs for the Key Generator
6806 @@ -86,6 +57,355 @@ struct dpkg_mask {
6807         u8 offset;
6808  };
6809  
6810 +/* Protocol fields */
6811 +
6812 +/* Ethernet fields */
6813 +#define NH_FLD_ETH_DA                          BIT(0)
6814 +#define NH_FLD_ETH_SA                          BIT(1)
6815 +#define NH_FLD_ETH_LENGTH                      BIT(2)
6816 +#define NH_FLD_ETH_TYPE                                BIT(3)
6817 +#define NH_FLD_ETH_FINAL_CKSUM                 BIT(4)
6818 +#define NH_FLD_ETH_PADDING                     BIT(5)
6819 +#define NH_FLD_ETH_ALL_FIELDS                  (BIT(6) - 1)
6820 +
6821 +/* VLAN fields */
6822 +#define NH_FLD_VLAN_VPRI                       BIT(0)
6823 +#define NH_FLD_VLAN_CFI                                BIT(1)
6824 +#define NH_FLD_VLAN_VID                                BIT(2)
6825 +#define NH_FLD_VLAN_LENGTH                     BIT(3)
6826 +#define NH_FLD_VLAN_TYPE                       BIT(4)
6827 +#define NH_FLD_VLAN_ALL_FIELDS                 (BIT(5) - 1)
6828 +
6829 +#define NH_FLD_VLAN_TCI                                (NH_FLD_VLAN_VPRI | \
6830 +                                                NH_FLD_VLAN_CFI | \
6831 +                                                NH_FLD_VLAN_VID)
6832 +
6833 +/* IP (generic) fields */
6834 +#define NH_FLD_IP_VER                          BIT(0)
6835 +#define NH_FLD_IP_DSCP                         BIT(2)
6836 +#define NH_FLD_IP_ECN                          BIT(3)
6837 +#define NH_FLD_IP_PROTO                                BIT(4)
6838 +#define NH_FLD_IP_SRC                          BIT(5)
6839 +#define NH_FLD_IP_DST                          BIT(6)
6840 +#define NH_FLD_IP_TOS_TC                       BIT(7)
6841 +#define NH_FLD_IP_ID                           BIT(8)
6842 +#define NH_FLD_IP_ALL_FIELDS                   (BIT(9) - 1)
6843 +
6844 +/* IPV4 fields */
6845 +#define NH_FLD_IPV4_VER                                BIT(0)
6846 +#define NH_FLD_IPV4_HDR_LEN                    BIT(1)
6847 +#define NH_FLD_IPV4_TOS                                BIT(2)
6848 +#define NH_FLD_IPV4_TOTAL_LEN                  BIT(3)
6849 +#define NH_FLD_IPV4_ID                         BIT(4)
6850 +#define NH_FLD_IPV4_FLAG_D                     BIT(5)
6851 +#define NH_FLD_IPV4_FLAG_M                     BIT(6)
6852 +#define NH_FLD_IPV4_OFFSET                     BIT(7)
6853 +#define NH_FLD_IPV4_TTL                                BIT(8)
6854 +#define NH_FLD_IPV4_PROTO                      BIT(9)
6855 +#define NH_FLD_IPV4_CKSUM                      BIT(10)
6856 +#define NH_FLD_IPV4_SRC_IP                     BIT(11)
6857 +#define NH_FLD_IPV4_DST_IP                     BIT(12)
6858 +#define NH_FLD_IPV4_OPTS                       BIT(13)
6859 +#define NH_FLD_IPV4_OPTS_COUNT                 BIT(14)
6860 +#define NH_FLD_IPV4_ALL_FIELDS                 (BIT(15) - 1)
6861 +
6862 +/* IPV6 fields */
6863 +#define NH_FLD_IPV6_VER                                BIT(0)
6864 +#define NH_FLD_IPV6_TC                         BIT(1)
6865 +#define NH_FLD_IPV6_SRC_IP                     BIT(2)
6866 +#define NH_FLD_IPV6_DST_IP                     BIT(3)
6867 +#define NH_FLD_IPV6_NEXT_HDR                   BIT(4)
6868 +#define NH_FLD_IPV6_FL                         BIT(5)
6869 +#define NH_FLD_IPV6_HOP_LIMIT                  BIT(6)
6870 +#define NH_FLD_IPV6_ID                         BIT(7)
6871 +#define NH_FLD_IPV6_ALL_FIELDS                 (BIT(8) - 1)
6872 +
6873 +/* ICMP fields */
6874 +#define NH_FLD_ICMP_TYPE                       BIT(0)
6875 +#define NH_FLD_ICMP_CODE                       BIT(1)
6876 +#define NH_FLD_ICMP_CKSUM                      BIT(2)
6877 +#define NH_FLD_ICMP_ID                         BIT(3)
6878 +#define NH_FLD_ICMP_SQ_NUM                     BIT(4)
6879 +#define NH_FLD_ICMP_ALL_FIELDS                 (BIT(5) - 1)
6880 +
6881 +/* IGMP fields */
6882 +#define NH_FLD_IGMP_VERSION                    BIT(0)
6883 +#define NH_FLD_IGMP_TYPE                       BIT(1)
6884 +#define NH_FLD_IGMP_CKSUM                      BIT(2)
6885 +#define NH_FLD_IGMP_DATA                       BIT(3)
6886 +#define NH_FLD_IGMP_ALL_FIELDS                 (BIT(4) - 1)
6887 +
6888 +/* TCP fields */
6889 +#define NH_FLD_TCP_PORT_SRC                    BIT(0)
6890 +#define NH_FLD_TCP_PORT_DST                    BIT(1)
6891 +#define NH_FLD_TCP_SEQ                         BIT(2)
6892 +#define NH_FLD_TCP_ACK                         BIT(3)
6893 +#define NH_FLD_TCP_OFFSET                      BIT(4)
6894 +#define NH_FLD_TCP_FLAGS                       BIT(5)
6895 +#define NH_FLD_TCP_WINDOW                      BIT(6)
6896 +#define NH_FLD_TCP_CKSUM                       BIT(7)
6897 +#define NH_FLD_TCP_URGPTR                      BIT(8)
6898 +#define NH_FLD_TCP_OPTS                                BIT(9)
6899 +#define NH_FLD_TCP_OPTS_COUNT                  BIT(10)
6900 +#define NH_FLD_TCP_ALL_FIELDS                  (BIT(11) - 1)
6901 +
6902 +/* UDP fields */
6903 +#define NH_FLD_UDP_PORT_SRC                    BIT(0)
6904 +#define NH_FLD_UDP_PORT_DST                    BIT(1)
6905 +#define NH_FLD_UDP_LEN                         BIT(2)
6906 +#define NH_FLD_UDP_CKSUM                       BIT(3)
6907 +#define NH_FLD_UDP_ALL_FIELDS                  (BIT(4) - 1)
6908 +
6909 +/* UDP-lite fields */
6910 +#define NH_FLD_UDP_LITE_PORT_SRC               BIT(0)
6911 +#define NH_FLD_UDP_LITE_PORT_DST               BIT(1)
6912 +#define NH_FLD_UDP_LITE_ALL_FIELDS             (BIT(2) - 1)
6913 +
6914 +/* UDP-encap-ESP fields */
6915 +#define NH_FLD_UDP_ENC_ESP_PORT_SRC            BIT(0)
6916 +#define NH_FLD_UDP_ENC_ESP_PORT_DST            BIT(1)
6917 +#define NH_FLD_UDP_ENC_ESP_LEN                 BIT(2)
6918 +#define NH_FLD_UDP_ENC_ESP_CKSUM               BIT(3)
6919 +#define NH_FLD_UDP_ENC_ESP_SPI                 BIT(4)
6920 +#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM                BIT(5)
6921 +#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS          (BIT(6) - 1)
6922 +
6923 +/* SCTP fields */
6924 +#define NH_FLD_SCTP_PORT_SRC                   BIT(0)
6925 +#define NH_FLD_SCTP_PORT_DST                   BIT(1)
6926 +#define NH_FLD_SCTP_VER_TAG                    BIT(2)
6927 +#define NH_FLD_SCTP_CKSUM                      BIT(3)
6928 +#define NH_FLD_SCTP_ALL_FIELDS                 (BIT(4) - 1)
6929 +
6930 +/* DCCP fields */
6931 +#define NH_FLD_DCCP_PORT_SRC                   BIT(0)
6932 +#define NH_FLD_DCCP_PORT_DST                   BIT(1)
6933 +#define NH_FLD_DCCP_ALL_FIELDS                 (BIT(2) - 1)
6934 +
6935 +/* IPHC fields */
6936 +#define NH_FLD_IPHC_CID                                BIT(0)
6937 +#define NH_FLD_IPHC_CID_TYPE                   BIT(1)
6938 +#define NH_FLD_IPHC_HCINDEX                    BIT(2)
6939 +#define NH_FLD_IPHC_GEN                                BIT(3)
6940 +#define NH_FLD_IPHC_D_BIT                      BIT(4)
6941 +#define NH_FLD_IPHC_ALL_FIELDS                 (BIT(5) - 1)
6942 +
6943 +/* SCTP chunk data fields */
6944 +#define NH_FLD_SCTP_CHUNK_DATA_TYPE            BIT(0)
6945 +#define NH_FLD_SCTP_CHUNK_DATA_FLAGS           BIT(1)
6946 +#define NH_FLD_SCTP_CHUNK_DATA_LENGTH          BIT(2)
6947 +#define NH_FLD_SCTP_CHUNK_DATA_TSN             BIT(3)
6948 +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID       BIT(4)
6949 +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN      BIT(5)
6950 +#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID     BIT(6)
6951 +#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED       BIT(7)
6952 +#define NH_FLD_SCTP_CHUNK_DATA_BEGINNING       BIT(8)
6953 +#define NH_FLD_SCTP_CHUNK_DATA_END             BIT(9)
6954 +#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS      (BIT(10) - 1)
6955 +
6956 +/* L2TPV2 fields */
6957 +#define NH_FLD_L2TPV2_TYPE_BIT                 BIT(0)
6958 +#define NH_FLD_L2TPV2_LENGTH_BIT               BIT(1)
6959 +#define NH_FLD_L2TPV2_SEQUENCE_BIT             BIT(2)
6960 +#define NH_FLD_L2TPV2_OFFSET_BIT               BIT(3)
6961 +#define NH_FLD_L2TPV2_PRIORITY_BIT             BIT(4)
6962 +#define NH_FLD_L2TPV2_VERSION                  BIT(5)
6963 +#define NH_FLD_L2TPV2_LEN                      BIT(6)
6964 +#define NH_FLD_L2TPV2_TUNNEL_ID                        BIT(7)
6965 +#define NH_FLD_L2TPV2_SESSION_ID               BIT(8)
6966 +#define NH_FLD_L2TPV2_NS                       BIT(9)
6967 +#define NH_FLD_L2TPV2_NR                       BIT(10)
6968 +#define NH_FLD_L2TPV2_OFFSET_SIZE              BIT(11)
6969 +#define NH_FLD_L2TPV2_FIRST_BYTE               BIT(12)
6970 +#define NH_FLD_L2TPV2_ALL_FIELDS               (BIT(13) - 1)
6971 +
6972 +/* L2TPV3 fields */
6973 +#define NH_FLD_L2TPV3_CTRL_TYPE_BIT            BIT(0)
6974 +#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT          BIT(1)
6975 +#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT                BIT(2)
6976 +#define NH_FLD_L2TPV3_CTRL_VERSION             BIT(3)
6977 +#define NH_FLD_L2TPV3_CTRL_LENGTH              BIT(4)
6978 +#define NH_FLD_L2TPV3_CTRL_CONTROL             BIT(5)
6979 +#define NH_FLD_L2TPV3_CTRL_SENT                        BIT(6)
6980 +#define NH_FLD_L2TPV3_CTRL_RECV                        BIT(7)
6981 +#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE          BIT(8)
6982 +#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS          (BIT(9) - 1)
6983 +
6984 +#define NH_FLD_L2TPV3_SESS_TYPE_BIT            BIT(0)
6985 +#define NH_FLD_L2TPV3_SESS_VERSION             BIT(1)
6986 +#define NH_FLD_L2TPV3_SESS_ID                  BIT(2)
6987 +#define NH_FLD_L2TPV3_SESS_COOKIE              BIT(3)
6988 +#define NH_FLD_L2TPV3_SESS_ALL_FIELDS          (BIT(4) - 1)
6989 +
6990 +/* PPP fields */
6991 +#define NH_FLD_PPP_PID                         BIT(0)
6992 +#define NH_FLD_PPP_COMPRESSED                  BIT(1)
6993 +#define NH_FLD_PPP_ALL_FIELDS                  (BIT(2) - 1)
6994 +
6995 +/* PPPoE fields */
6996 +#define NH_FLD_PPPOE_VER                       BIT(0)
6997 +#define NH_FLD_PPPOE_TYPE                      BIT(1)
6998 +#define NH_FLD_PPPOE_CODE                      BIT(2)
6999 +#define NH_FLD_PPPOE_SID                       BIT(3)
7000 +#define NH_FLD_PPPOE_LEN                       BIT(4)
7001 +#define NH_FLD_PPPOE_SESSION                   BIT(5)
7002 +#define NH_FLD_PPPOE_PID                       BIT(6)
7003 +#define NH_FLD_PPPOE_ALL_FIELDS                        (BIT(7) - 1)
7004 +
7005 +/* PPP-Mux fields */
7006 +#define NH_FLD_PPPMUX_PID                      BIT(0)
7007 +#define NH_FLD_PPPMUX_CKSUM                    BIT(1)
7008 +#define NH_FLD_PPPMUX_COMPRESSED               BIT(2)
7009 +#define NH_FLD_PPPMUX_ALL_FIELDS               (BIT(3) - 1)
7010 +
7011 +/* PPP-Mux sub-frame fields */
7012 +#define NH_FLD_PPPMUX_SUBFRM_PFF               BIT(0)
7013 +#define NH_FLD_PPPMUX_SUBFRM_LXT               BIT(1)
7014 +#define NH_FLD_PPPMUX_SUBFRM_LEN               BIT(2)
7015 +#define NH_FLD_PPPMUX_SUBFRM_PID               BIT(3)
7016 +#define NH_FLD_PPPMUX_SUBFRM_USE_PID           BIT(4)
7017 +#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS                (BIT(5) - 1)
7018 +
7019 +/* LLC fields */
7020 +#define NH_FLD_LLC_DSAP                                BIT(0)
7021 +#define NH_FLD_LLC_SSAP                                BIT(1)
7022 +#define NH_FLD_LLC_CTRL                                BIT(2)
7023 +#define NH_FLD_LLC_ALL_FIELDS                  (BIT(3) - 1)
7024 +
7025 +/* NLPID fields */
7026 +#define NH_FLD_NLPID_NLPID                     BIT(0)
7027 +#define NH_FLD_NLPID_ALL_FIELDS                        (BIT(1) - 1)
7028 +
7029 +/* SNAP fields */
7030 +#define NH_FLD_SNAP_OUI                                BIT(0)
7031 +#define NH_FLD_SNAP_PID                                BIT(1)
7032 +#define NH_FLD_SNAP_ALL_FIELDS                 (BIT(2) - 1)
7033 +
7034 +/* LLC SNAP fields */
7035 +#define NH_FLD_LLC_SNAP_TYPE                   BIT(0)
7036 +#define NH_FLD_LLC_SNAP_ALL_FIELDS             (BIT(1) - 1)
7037 +
7038 +/* ARP fields */
7039 +#define NH_FLD_ARP_HTYPE                       BIT(0)
7040 +#define NH_FLD_ARP_PTYPE                       BIT(1)
7041 +#define NH_FLD_ARP_HLEN                                BIT(2)
7042 +#define NH_FLD_ARP_PLEN                                BIT(3)
7043 +#define NH_FLD_ARP_OPER                                BIT(4)
7044 +#define NH_FLD_ARP_SHA                         BIT(5)
7045 +#define NH_FLD_ARP_SPA                         BIT(6)
7046 +#define NH_FLD_ARP_THA                         BIT(7)
7047 +#define NH_FLD_ARP_TPA                         BIT(8)
7048 +#define NH_FLD_ARP_ALL_FIELDS                  (BIT(9) - 1)
7049 +
7050 +/* RFC2684 fields */
7051 +#define NH_FLD_RFC2684_LLC                     BIT(0)
7052 +#define NH_FLD_RFC2684_NLPID                   BIT(1)
7053 +#define NH_FLD_RFC2684_OUI                     BIT(2)
7054 +#define NH_FLD_RFC2684_PID                     BIT(3)
7055 +#define NH_FLD_RFC2684_VPN_OUI                 BIT(4)
7056 +#define NH_FLD_RFC2684_VPN_IDX                 BIT(5)
7057 +#define NH_FLD_RFC2684_ALL_FIELDS              (BIT(6) - 1)
7058 +
7059 +/* User defined fields */
7060 +#define NH_FLD_USER_DEFINED_SRCPORT            BIT(0)
7061 +#define NH_FLD_USER_DEFINED_PCDID              BIT(1)
7062 +#define NH_FLD_USER_DEFINED_ALL_FIELDS         (BIT(2) - 1)
7063 +
7064 +/* Payload fields */
7065 +#define NH_FLD_PAYLOAD_BUFFER                  BIT(0)
7066 +#define NH_FLD_PAYLOAD_SIZE                    BIT(1)
7067 +#define NH_FLD_MAX_FRM_SIZE                    BIT(2)
7068 +#define NH_FLD_MIN_FRM_SIZE                    BIT(3)
7069 +#define NH_FLD_PAYLOAD_TYPE                    BIT(4)
7070 +#define NH_FLD_FRAME_SIZE                      BIT(5)
7071 +#define NH_FLD_PAYLOAD_ALL_FIELDS              (BIT(6) - 1)
7072 +
7073 +/* GRE fields */
7074 +#define NH_FLD_GRE_TYPE                                BIT(0)
7075 +#define NH_FLD_GRE_ALL_FIELDS                  (BIT(1) - 1)
7076 +
7077 +/* MINENCAP fields */
7078 +#define NH_FLD_MINENCAP_SRC_IP                 BIT(0)
7079 +#define NH_FLD_MINENCAP_DST_IP                 BIT(1)
7080 +#define NH_FLD_MINENCAP_TYPE                   BIT(2)
7081 +#define NH_FLD_MINENCAP_ALL_FIELDS             (BIT(3) - 1)
7082 +
7083 +/* IPSEC AH fields */
7084 +#define NH_FLD_IPSEC_AH_SPI                    BIT(0)
7085 +#define NH_FLD_IPSEC_AH_NH                     BIT(1)
7086 +#define NH_FLD_IPSEC_AH_ALL_FIELDS             (BIT(2) - 1)
7087 +
7088 +/* IPSEC ESP fields */
7089 +#define NH_FLD_IPSEC_ESP_SPI                   BIT(0)
7090 +#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM          BIT(1)
7091 +#define NH_FLD_IPSEC_ESP_ALL_FIELDS            (BIT(2) - 1)
7092 +
7093 +/* MPLS fields */
7094 +#define NH_FLD_MPLS_LABEL_STACK                        BIT(0)
7095 +#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS     (BIT(1) - 1)
7096 +
7097 +/* MACSEC fields */
7098 +#define NH_FLD_MACSEC_SECTAG                   BIT(0)
7099 +#define NH_FLD_MACSEC_ALL_FIELDS               (BIT(1) - 1)
7100 +
7101 +/* GTP fields */
7102 +#define NH_FLD_GTP_TEID                                BIT(0)
7103 +
7104 +/* Supported protocols */
7105 +enum net_prot {
7106 +       NET_PROT_NONE = 0,
7107 +       NET_PROT_PAYLOAD,
7108 +       NET_PROT_ETH,
7109 +       NET_PROT_VLAN,
7110 +       NET_PROT_IPV4,
7111 +       NET_PROT_IPV6,
7112 +       NET_PROT_IP,
7113 +       NET_PROT_TCP,
7114 +       NET_PROT_UDP,
7115 +       NET_PROT_UDP_LITE,
7116 +       NET_PROT_IPHC,
7117 +       NET_PROT_SCTP,
7118 +       NET_PROT_SCTP_CHUNK_DATA,
7119 +       NET_PROT_PPPOE,
7120 +       NET_PROT_PPP,
7121 +       NET_PROT_PPPMUX,
7122 +       NET_PROT_PPPMUX_SUBFRM,
7123 +       NET_PROT_L2TPV2,
7124 +       NET_PROT_L2TPV3_CTRL,
7125 +       NET_PROT_L2TPV3_SESS,
7126 +       NET_PROT_LLC,
7127 +       NET_PROT_LLC_SNAP,
7128 +       NET_PROT_NLPID,
7129 +       NET_PROT_SNAP,
7130 +       NET_PROT_MPLS,
7131 +       NET_PROT_IPSEC_AH,
7132 +       NET_PROT_IPSEC_ESP,
7133 +       NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
7134 +       NET_PROT_MACSEC,
7135 +       NET_PROT_GRE,
7136 +       NET_PROT_MINENCAP,
7137 +       NET_PROT_DCCP,
7138 +       NET_PROT_ICMP,
7139 +       NET_PROT_IGMP,
7140 +       NET_PROT_ARP,
7141 +       NET_PROT_CAPWAP_DATA,
7142 +       NET_PROT_CAPWAP_CTRL,
7143 +       NET_PROT_RFC2684,
7144 +       NET_PROT_ICMPV6,
7145 +       NET_PROT_FCOE,
7146 +       NET_PROT_FIP,
7147 +       NET_PROT_ISCSI,
7148 +       NET_PROT_GTP,
7149 +       NET_PROT_USER_DEFINED_L2,
7150 +       NET_PROT_USER_DEFINED_L3,
7151 +       NET_PROT_USER_DEFINED_L4,
7152 +       NET_PROT_USER_DEFINED_L5,
7153 +       NET_PROT_USER_DEFINED_SHIM1,
7154 +       NET_PROT_USER_DEFINED_SHIM2,
7155 +
7156 +       NET_PROT_DUMMY_LAST
7157 +};
7158 +
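The NET_PROT_* selectors above pair with the per-protocol NH_FLD_* masks to name individual header fields for key generation. A minimal sketch of that pairing, assuming the dpkg_profile_cfg/dpkg_extract types declared below in this header and the usual DPKG_* enum values (the two-field key is purely illustrative):

	/* Sketch: distribute on IPv4 source address + UDP destination port */
	struct dpkg_profile_cfg kg_cfg = { 0 };

	kg_cfg.num_extracts = 2;
	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
	kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
	kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
	kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;
	kg_cfg.extracts[1].type = DPKG_EXTRACT_FROM_HDR;
	kg_cfg.extracts[1].extract.from_hdr.prot = NET_PROT_UDP;
	kg_cfg.extracts[1].extract.from_hdr.type = DPKG_FULL_FIELD;
	kg_cfg.extracts[1].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;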
7159  /**
7160   * struct dpkg_extract - A structure for defining a single extraction
7161   * @type: Determines how the union below is interpreted:
7162 --- a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
7163 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
7164 @@ -1,34 +1,6 @@
7165 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
7166  /* Copyright 2013-2016 Freescale Semiconductor Inc.
7167   * Copyright 2016 NXP
7168 - *
7169 - * Redistribution and use in source and binary forms, with or without
7170 - * modification, are permitted provided that the following conditions are met:
7171 - * * Redistributions of source code must retain the above copyright
7172 - * notice, this list of conditions and the following disclaimer.
7173 - * * Redistributions in binary form must reproduce the above copyright
7174 - * notice, this list of conditions and the following disclaimer in the
7175 - * documentation and/or other materials provided with the distribution.
7176 - * * Neither the name of the above-listed copyright holders nor the
7177 - * names of any contributors may be used to endorse or promote products
7178 - * derived from this software without specific prior written permission.
7179 - *
7180 - *
7181 - * ALTERNATIVELY, this software may be distributed under the terms of the
7182 - * GNU General Public License ("GPL") as published by the Free Software
7183 - * Foundation, either version 2 of that License or (at your option) any
7184 - * later version.
7185 - *
7186 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
7187 - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
7188 - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
7189 - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
7190 - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
7191 - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
7192 - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
7193 - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
7194 - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
7195 - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
7196 - * POSSIBILITY OF SUCH DAMAGE.
7197   */
7198  #ifndef _FSL_DPNI_CMD_H
7199  #define _FSL_DPNI_CMD_H
7200 @@ -39,9 +11,11 @@
7201  #define DPNI_VER_MAJOR                         7
7202  #define DPNI_VER_MINOR                         0
7203  #define DPNI_CMD_BASE_VERSION                  1
7204 +#define DPNI_CMD_2ND_VERSION                   2
7205  #define DPNI_CMD_ID_OFFSET                     4
7206  
7207  #define DPNI_CMD(id)   (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
7208 +#define DPNI_CMD_V2(id)        (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
7209  
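The low nibble of every command id carries its ABI version, so the v1 and v2 encodings of the same id differ only there: DPNI_CMD(0x25D) expands to (0x25D << 4) | 1 = 0x25D1, while DPNI_CMD_V2(0x25D) yields 0x25D2. A compile-time check of that arithmetic (illustration only, using the macros defined above):

	BUILD_BUG_ON(DPNI_CMD(0x25D)    != 0x25D1);
	BUILD_BUG_ON(DPNI_CMD_V2(0x25D) != 0x25D2);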
7210  #define DPNI_CMDID_OPEN                                        DPNI_CMD(0x801)
7211  #define DPNI_CMDID_CLOSE                               DPNI_CMD(0x800)
7212 @@ -64,16 +38,18 @@
7213  #define DPNI_CMDID_GET_IRQ_STATUS                      DPNI_CMD(0x016)
7214  #define DPNI_CMDID_CLEAR_IRQ_STATUS                    DPNI_CMD(0x017)
7215  
7216 -#define DPNI_CMDID_SET_POOLS                           DPNI_CMD(0x200)
7217 +#define DPNI_CMDID_SET_POOLS                           DPNI_CMD_V2(0x200)
7218  #define DPNI_CMDID_SET_ERRORS_BEHAVIOR                 DPNI_CMD(0x20B)
7219  
7220  #define DPNI_CMDID_GET_QDID                            DPNI_CMD(0x210)
7221  #define DPNI_CMDID_GET_TX_DATA_OFFSET                  DPNI_CMD(0x212)
7222  #define DPNI_CMDID_GET_LINK_STATE                      DPNI_CMD(0x215)
7223 +#define DPNI_CMDID_GET_LINK_STATE_V2                   DPNI_CMD_V2(0x215)
7224  #define DPNI_CMDID_SET_MAX_FRAME_LENGTH                        DPNI_CMD(0x216)
7225  #define DPNI_CMDID_GET_MAX_FRAME_LENGTH                        DPNI_CMD(0x217)
7226  #define DPNI_CMDID_SET_LINK_CFG                                DPNI_CMD(0x21A)
7227 -#define DPNI_CMDID_SET_TX_SHAPING                      DPNI_CMD(0x21B)
7228 +#define DPNI_CMDID_SET_LINK_CFG_V2                     DPNI_CMD_V2(0x21A)
7229 +#define DPNI_CMDID_SET_TX_SHAPING                      DPNI_CMD_V2(0x21B)
7230  
7231  #define DPNI_CMDID_SET_MCAST_PROMISC                   DPNI_CMD(0x220)
7232  #define DPNI_CMDID_GET_MCAST_PROMISC                   DPNI_CMD(0x221)
7233 @@ -87,11 +63,16 @@
7234  
7235  #define DPNI_CMDID_SET_RX_TC_DIST                      DPNI_CMD(0x235)
7236  
7237 +#define DPNI_CMDID_SET_QOS_TBL                         DPNI_CMD(0x240)
7238 +#define DPNI_CMDID_ADD_QOS_ENT                         DPNI_CMD(0x241)
7239 +#define DPNI_CMDID_REMOVE_QOS_ENT                      DPNI_CMD(0x242)
7240  #define DPNI_CMDID_ADD_FS_ENT                          DPNI_CMD(0x244)
7241  #define DPNI_CMDID_REMOVE_FS_ENT                       DPNI_CMD(0x245)
7242  #define DPNI_CMDID_CLR_FS_ENT                          DPNI_CMD(0x246)
7243  
7244 -#define DPNI_CMDID_GET_STATISTICS                      DPNI_CMD(0x25D)
7245 +#define DPNI_CMDID_SET_TX_PRIORITIES                   DPNI_CMD_V2(0x250)
7246 +#define DPNI_CMDID_GET_STATISTICS                      DPNI_CMD_V2(0x25D)
7247 +#define DPNI_CMDID_RESET_STATISTICS                    DPNI_CMD(0x25E)
7248  #define DPNI_CMDID_GET_QUEUE                           DPNI_CMD(0x25F)
7249  #define DPNI_CMDID_SET_QUEUE                           DPNI_CMD(0x260)
7250  #define DPNI_CMDID_GET_TAILDROP                                DPNI_CMD(0x261)
7251 @@ -110,6 +91,9 @@
7252  #define DPNI_CMDID_GET_OFFLOAD                         DPNI_CMD(0x26B)
7253  #define DPNI_CMDID_SET_OFFLOAD                         DPNI_CMD(0x26C)
7254  
7255 +#define DPNI_CMDID_SET_RX_FS_DIST                      DPNI_CMD(0x273)
7256 +#define DPNI_CMDID_SET_RX_HASH_DIST                    DPNI_CMD(0x274)
7257 +
7258  /* Macros for accessing command fields smaller than 1byte */
7259  #define DPNI_MASK(field)       \
7260         GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
7261 @@ -126,13 +110,14 @@ struct dpni_cmd_open {
7262  
7263  #define DPNI_BACKUP_POOL(val, order)   (((val) & 0x1) << (order))
7264  struct dpni_cmd_set_pools {
7265 -       /* cmd word 0 */
7266         u8 num_dpbp;
7267         u8 backup_pool_mask;
7268         __le16 pad;
7269 -       /* cmd word 0..4 */
7270 -       __le32 dpbp_id[DPNI_MAX_DPBP];
7271 -       /* cmd word 4..6 */
7272 +       struct {
7273 +               __le16 dpbp_id;
7274 +               u8 priority_mask;
7275 +               u8 pad;
7276 +       } pool[DPNI_MAX_DPBP];
7277         __le16 buffer_size[DPNI_MAX_DPBP];
7278  };
7279  
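The flat dpbp_id[] array becomes an array of per-pool records so that each buffer pool can additionally be bound to a subset of frame priorities. A hedged sketch of how a caller might fill the matching dpni_pools_cfg (member names taken from this patch's dpni.h as marshalled by dpni_set_pools() below; ids and sizes are illustrative):

	struct dpni_pools_cfg pools_params = { 0 };
	int err;

	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = dpbp_attrs.id;	/* pool to attach */
	pools_params.pools[0].backup_pool = 0;
	pools_params.pools[0].priority_mask = 0xff;	/* serve all priorities */
	pools_params.pools[0].buffer_size = 2048;
	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);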
7280 @@ -303,6 +288,7 @@ struct dpni_rsp_get_tx_data_offset {
7281  
7282  struct dpni_cmd_get_statistics {
7283         u8 page_number;
7284 +       u8 param;
7285  };
7286  
7287  struct dpni_rsp_get_statistics {
7288 @@ -319,8 +305,22 @@ struct dpni_cmd_set_link_cfg {
7289         __le64 options;
7290  };
7291  
7292 +struct dpni_cmd_set_link_cfg_v2 {
7293 +       /* cmd word 0 */
7294 +       __le64 pad0;
7295 +       /* cmd word 1 */
7296 +       __le32 rate;
7297 +       __le32 pad1;
7298 +       /* cmd word 2 */
7299 +       __le64 options;
7300 +       /* cmd word 3 */
7301 +       __le64 advertising;
7302 +};
7303 +
7304  #define DPNI_LINK_STATE_SHIFT          0
7305  #define DPNI_LINK_STATE_SIZE           1
7306 +#define DPNI_STATE_VALID_SHIFT         1
7307 +#define DPNI_STATE_VALID_SIZE          1
7308  
7309  struct dpni_rsp_get_link_state {
7310         /* response word 0 */
7311 @@ -335,6 +335,39 @@ struct dpni_rsp_get_link_state {
7312         __le64 options;
7313  };
7314  
7315 +struct dpni_rsp_get_link_state_v2 {
7316 +       /* response word 0 */
7317 +       __le32 pad0;
7318 +       /* from LSB: up:1, valid:1 */
7319 +       u8 flags;
7320 +       u8 pad1[3];
7321 +       /* response word 1 */
7322 +       __le32 rate;
7323 +       __le32 pad2;
7324 +       /* response word 2 */
7325 +       __le64 options;
7326 +       /* response word 3 */
7327 +       __le64 supported;
7328 +       /* response word 4 */
7329 +       __le64 advertising;
7330 +};
7331 +
7332 +#define DPNI_COUPLED_SHIFT     0
7333 +#define DPNI_COUPLED_SIZE      1
7334 +
7335 +struct dpni_cmd_set_tx_shaping {
7336 +       /* cmd word 0 */
7337 +       __le16 tx_cr_max_burst_size;
7338 +       __le16 tx_er_max_burst_size;
7339 +       __le32 pad;
7340 +       /* cmd word 1 */
7341 +       __le32 tx_cr_rate_limit;
7342 +       __le32 tx_er_rate_limit;
7343 +       /* cmd word 2 */
7344 +       /* from LSB: coupled:1 */
7345 +       u8 coupled;
7346 +};
7347 +
7348  struct dpni_cmd_set_max_frame_length {
7349         __le16 max_frame_length;
7350  };
7351 @@ -394,6 +427,24 @@ struct dpni_cmd_clear_mac_filters {
7352         u8 flags;
7353  };
7354  
7355 +#define DPNI_SEPARATE_GRP_SHIFT 0
7356 +#define DPNI_SEPARATE_GRP_SIZE  1
7357 +#define DPNI_MODE_1_SHIFT              0
7358 +#define DPNI_MODE_1_SIZE               4
7359 +#define DPNI_MODE_2_SHIFT              4
7360 +#define DPNI_MODE_2_SIZE               4
7361 +
7362 +struct dpni_cmd_set_tx_priorities {
7363 +       __le16 flags;
7364 +       u8 prio_group_A;
7365 +       u8 prio_group_B;
7366 +       __le32 pad0;
7367 +       u8 modes[4];
7368 +       __le32 pad1;
7369 +       __le64 pad2;
7370 +       __le16 delta_bandwidth[8];
7371 +};
7372 +
7373  #define DPNI_DIST_MODE_SHIFT           0
7374  #define DPNI_DIST_MODE_SIZE            4
7375  #define DPNI_MISS_ACTION_SHIFT         4
7376 @@ -503,6 +554,63 @@ struct dpni_cmd_set_queue {
7377         __le64 user_context;
7378  };
7379  
7380 +#define DPNI_DISCARD_ON_MISS_SHIFT     0
7381 +#define DPNI_DISCARD_ON_MISS_SIZE      1
7382 +
7383 +struct dpni_cmd_set_qos_table {
7384 +       __le32 pad;
7385 +       u8 default_tc;
7386 +       /* only the LSB */
7387 +       u8 discard_on_miss;
7388 +       __le16 pad1[21];
7389 +       __le64 key_cfg_iova;
7390 +};
7391 +
7392 +struct dpni_cmd_add_qos_entry {
7393 +       __le16 pad;
7394 +       u8 tc_id;
7395 +       u8 key_size;
7396 +       __le16 index;
7397 +       __le16 pad2;
7398 +       __le64 key_iova;
7399 +       __le64 mask_iova;
7400 +};
7401 +
7402 +struct dpni_cmd_remove_qos_entry {
7403 +       u8 pad1[3];
7404 +       u8 key_size;
7405 +       __le32 pad2;
7406 +       __le64 key_iova;
7407 +       __le64 mask_iova;
7408 +};
7409 +
7410 +struct dpni_cmd_add_fs_entry {
7411 +       /* cmd word 0 */
7412 +       __le16 options;
7413 +       u8 tc_id;
7414 +       u8 key_size;
7415 +       __le16 index;
7416 +       __le16 flow_id;
7417 +       /* cmd word 1 */
7418 +       __le64 key_iova;
7419 +       /* cmd word 2 */
7420 +       __le64 mask_iova;
7421 +       /* cmd word 3 */
7422 +       __le64 flc;
7423 +};
7424 +
7425 +struct dpni_cmd_remove_fs_entry {
7426 +       /* cmd word 0 */
7427 +       __le16 pad0;
7428 +       u8 tc_id;
7429 +       u8 key_size;
7430 +       __le32 pad1;
7431 +       /* cmd word 1 */
7432 +       __le64 key_iova;
7433 +       /* cmd word 2 */
7434 +       __le64 mask_iova;
7435 +};
7436 +
7437  struct dpni_cmd_set_taildrop {
7438         /* cmd word 0 */
7439         u8 congestion_point;
7440 @@ -538,4 +646,79 @@ struct dpni_rsp_get_taildrop {
7441         __le32 threshold;
7442  };
7443  
7444 +struct dpni_rsp_get_api_version {
7445 +       u16 major;
7446 +       u16 minor;
7447 +};
7448 +
7449 +#define DPNI_DEST_TYPE_SHIFT           0
7450 +#define DPNI_DEST_TYPE_SIZE            4
7451 +#define DPNI_CONG_UNITS_SHIFT          4
7452 +#define DPNI_CONG_UNITS_SIZE           2
7453 +
7454 +struct dpni_cmd_set_congestion_notification {
7455 +       /* cmd word 0 */
7456 +       u8 qtype;
7457 +       u8 tc;
7458 +       u8 pad[6];
7459 +       /* cmd word 1 */
7460 +       __le32 dest_id;
7461 +       __le16 notification_mode;
7462 +       u8 dest_priority;
7463 +       /* from LSB: dest_type: 4 units:2 */
7464 +       u8 type_units;
7465 +       /* cmd word 2 */
7466 +       __le64 message_iova;
7467 +       /* cmd word 3 */
7468 +       __le64 message_ctx;
7469 +       /* cmd word 4 */
7470 +       __le32 threshold_entry;
7471 +       __le32 threshold_exit;
7472 +};
7473 +
7474 +struct dpni_cmd_get_congestion_notification {
7475 +       /* cmd word 0 */
7476 +       u8 qtype;
7477 +       u8 tc;
7478 +};
7479 +
7480 +struct dpni_rsp_get_congestion_notification {
7481 +       /* cmd word 0 */
7482 +       __le64 pad;
7483 +       /* cmd word 1 */
7484 +       __le32 dest_id;
7485 +       __le16 notification_mode;
7486 +       u8 dest_priority;
7487 +       /* from LSB: dest_type: 4 units:2 */
7488 +       u8 type_units;
7489 +       /* cmd word 2 */
7490 +       __le64 message_iova;
7491 +       /* cmd word 3 */
7492 +       __le64 message_ctx;
7493 +       /* cmd word 4 */
7494 +       __le32 threshold_entry;
7495 +       __le32 threshold_exit;
7496 +};
7497 +
7498 +#define DPNI_RX_FS_DIST_ENABLE_SHIFT   0
7499 +#define DPNI_RX_FS_DIST_ENABLE_SIZE    1
7500 +struct dpni_cmd_set_rx_fs_dist {
7501 +       __le16 dist_size;
7502 +       u8 enable;
7503 +       u8 tc;
7504 +       __le16 miss_flow_id;
7505 +       __le16 pad;
7506 +       __le64 key_cfg_iova;
7507 +};
7508 +
7509 +#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
7510 +#define DPNI_RX_HASH_DIST_ENABLE_SIZE  1
7511 +struct dpni_cmd_set_rx_hash_dist {
7512 +       __le16 dist_size;
7513 +       u8 enable;
7514 +       u8 tc;
7515 +       __le32 pad;
7516 +       __le64 key_cfg_iova;
7517 +};
7518 +
7519  #endif /* _FSL_DPNI_CMD_H */
7520 --- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c
7521 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
7522 @@ -1,34 +1,6 @@
7523 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
7524  /* Copyright 2013-2016 Freescale Semiconductor Inc.
7525   * Copyright 2016 NXP
7526 - *
7527 - * Redistribution and use in source and binary forms, with or without
7528 - * modification, are permitted provided that the following conditions are met:
7529 - * * Redistributions of source code must retain the above copyright
7530 - * notice, this list of conditions and the following disclaimer.
7531 - * * Redistributions in binary form must reproduce the above copyright
7532 - * notice, this list of conditions and the following disclaimer in the
7533 - * documentation and/or other materials provided with the distribution.
7534 - * * Neither the name of the above-listed copyright holders nor the
7535 - * names of any contributors may be used to endorse or promote products
7536 - * derived from this software without specific prior written permission.
7537 - *
7538 - *
7539 - * ALTERNATIVELY, this software may be distributed under the terms of the
7540 - * GNU General Public License ("GPL") as published by the Free Software
7541 - * Foundation, either version 2 of that License or (at your option) any
7542 - * later version.
7543 - *
7544 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
7545 - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
7546 - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
7547 - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
7548 - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
7549 - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
7550 - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
7551 - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
7552 - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
7553 - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
7554 - * POSSIBILITY OF SUCH DAMAGE.
7555   */
7556  #include <linux/kernel.h>
7557  #include <linux/errno.h>
7558 @@ -122,7 +94,7 @@ int dpni_open(struct fsl_mc_io *mc_io,
7559               int dpni_id,
7560               u16 *token)
7561  {
7562 -       struct mc_command cmd = { 0 };
7563 +       struct fsl_mc_command cmd = { 0 };
7564         struct dpni_cmd_open *cmd_params;
7565  
7566         int err;
7567 @@ -160,7 +132,7 @@ int dpni_close(struct fsl_mc_io *mc_io,
7568                u32 cmd_flags,
7569                u16 token)
7570  {
7571 -       struct mc_command cmd = { 0 };
7572 +       struct fsl_mc_command cmd = { 0 };
7573  
7574         /* prepare command */
7575         cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
7576 @@ -188,7 +160,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_
7577                    u16 token,
7578                    const struct dpni_pools_cfg *cfg)
7579  {
7580 -       struct mc_command cmd = { 0 };
7581 +       struct fsl_mc_command cmd = { 0 };
7582         struct dpni_cmd_set_pools *cmd_params;
7583         int i;
7584  
7585 @@ -199,7 +171,10 @@ int dpni_set_pools(struct fsl_mc_io *mc_
7586         cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
7587         cmd_params->num_dpbp = cfg->num_dpbp;
7588         for (i = 0; i < DPNI_MAX_DPBP; i++) {
7589 -               cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
7590 +               cmd_params->pool[i].dpbp_id =
7591 +                       cpu_to_le16(cfg->pools[i].dpbp_id);
7592 +               cmd_params->pool[i].priority_mask =
7593 +                       cfg->pools[i].priority_mask;
7594                 cmd_params->buffer_size[i] =
7595                         cpu_to_le16(cfg->pools[i].buffer_size);
7596                 cmd_params->backup_pool_mask |=
7597 @@ -222,7 +197,7 @@ int dpni_enable(struct fsl_mc_io *mc_io,
7598                 u32 cmd_flags,
7599                 u16 token)
7600  {
7601 -       struct mc_command cmd = { 0 };
7602 +       struct fsl_mc_command cmd = { 0 };
7603  
7604         /* prepare command */
7605         cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
7606 @@ -245,7 +220,7 @@ int dpni_disable(struct fsl_mc_io *mc_io
7607                  u32 cmd_flags,
7608                  u16 token)
7609  {
7610 -       struct mc_command cmd = { 0 };
7611 +       struct fsl_mc_command cmd = { 0 };
7612  
7613         /* prepare command */
7614         cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
7615 @@ -270,7 +245,7 @@ int dpni_is_enabled(struct fsl_mc_io *mc
7616                     u16 token,
7617                     int *en)
7618  {
7619 -       struct mc_command cmd = { 0 };
7620 +       struct fsl_mc_command cmd = { 0 };
7621         struct dpni_rsp_is_enabled *rsp_params;
7622         int err;
7623  
7624 @@ -303,7 +278,7 @@ int dpni_reset(struct fsl_mc_io *mc_io,
7625                u32 cmd_flags,
7626                u16 token)
7627  {
7628 -       struct mc_command cmd = { 0 };
7629 +       struct fsl_mc_command cmd = { 0 };
7630  
7631         /* prepare command */
7632         cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
7633 @@ -335,7 +310,7 @@ int dpni_set_irq_enable(struct fsl_mc_io
7634                         u8 irq_index,
7635                         u8 en)
7636  {
7637 -       struct mc_command cmd = { 0 };
7638 +       struct fsl_mc_command cmd = { 0 };
7639         struct dpni_cmd_set_irq_enable *cmd_params;
7640  
7641         /* prepare command */
7642 @@ -366,7 +341,7 @@ int dpni_get_irq_enable(struct fsl_mc_io
7643                         u8 irq_index,
7644                         u8 *en)
7645  {
7646 -       struct mc_command cmd = { 0 };
7647 +       struct fsl_mc_command cmd = { 0 };
7648         struct dpni_cmd_get_irq_enable *cmd_params;
7649         struct dpni_rsp_get_irq_enable *rsp_params;
7650  
7651 @@ -413,7 +388,7 @@ int dpni_set_irq_mask(struct fsl_mc_io *
7652                       u8 irq_index,
7653                       u32 mask)
7654  {
7655 -       struct mc_command cmd = { 0 };
7656 +       struct fsl_mc_command cmd = { 0 };
7657         struct dpni_cmd_set_irq_mask *cmd_params;
7658  
7659         /* prepare command */
7660 @@ -447,7 +422,7 @@ int dpni_get_irq_mask(struct fsl_mc_io *
7661                       u8 irq_index,
7662                       u32 *mask)
7663  {
7664 -       struct mc_command cmd = { 0 };
7665 +       struct fsl_mc_command cmd = { 0 };
7666         struct dpni_cmd_get_irq_mask *cmd_params;
7667         struct dpni_rsp_get_irq_mask *rsp_params;
7668         int err;
7669 @@ -489,7 +464,7 @@ int dpni_get_irq_status(struct fsl_mc_io
7670                         u8 irq_index,
7671                         u32 *status)
7672  {
7673 -       struct mc_command cmd = { 0 };
7674 +       struct fsl_mc_command cmd = { 0 };
7675         struct dpni_cmd_get_irq_status *cmd_params;
7676         struct dpni_rsp_get_irq_status *rsp_params;
7677         int err;
7678 @@ -532,7 +507,7 @@ int dpni_clear_irq_status(struct fsl_mc_
7679                           u8 irq_index,
7680                           u32 status)
7681  {
7682 -       struct mc_command cmd = { 0 };
7683 +       struct fsl_mc_command cmd = { 0 };
7684         struct dpni_cmd_clear_irq_status *cmd_params;
7685  
7686         /* prepare command */
7687 @@ -561,7 +536,7 @@ int dpni_get_attributes(struct fsl_mc_io
7688                         u16 token,
7689                         struct dpni_attr *attr)
7690  {
7691 -       struct mc_command cmd = { 0 };
7692 +       struct fsl_mc_command cmd = { 0 };
7693         struct dpni_rsp_get_attr *rsp_params;
7694  
7695         int err;
7696 @@ -609,7 +584,7 @@ int dpni_set_errors_behavior(struct fsl_
7697                              u16 token,
7698                              struct dpni_error_cfg *cfg)
7699  {
7700 -       struct mc_command cmd = { 0 };
7701 +       struct fsl_mc_command cmd = { 0 };
7702         struct dpni_cmd_set_errors_behavior *cmd_params;
7703  
7704         /* prepare command */
7705 @@ -641,7 +616,7 @@ int dpni_get_buffer_layout(struct fsl_mc
7706                            enum dpni_queue_type qtype,
7707                            struct dpni_buffer_layout *layout)
7708  {
7709 -       struct mc_command cmd = { 0 };
7710 +       struct fsl_mc_command cmd = { 0 };
7711         struct dpni_cmd_get_buffer_layout *cmd_params;
7712         struct dpni_rsp_get_buffer_layout *rsp_params;
7713         int err;
7714 @@ -689,7 +664,7 @@ int dpni_set_buffer_layout(struct fsl_mc
7715                            enum dpni_queue_type qtype,
7716                            const struct dpni_buffer_layout *layout)
7717  {
7718 -       struct mc_command cmd = { 0 };
7719 +       struct fsl_mc_command cmd = { 0 };
7720         struct dpni_cmd_set_buffer_layout *cmd_params;
7721  
7722         /* prepare command */
7723 @@ -731,7 +706,7 @@ int dpni_set_offload(struct fsl_mc_io *m
7724                      enum dpni_offload type,
7725                      u32 config)
7726  {
7727 -       struct mc_command cmd = { 0 };
7728 +       struct fsl_mc_command cmd = { 0 };
7729         struct dpni_cmd_set_offload *cmd_params;
7730  
7731         cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
7732 @@ -750,7 +725,7 @@ int dpni_get_offload(struct fsl_mc_io *m
7733                      enum dpni_offload type,
7734                      u32 *config)
7735  {
7736 -       struct mc_command cmd = { 0 };
7737 +       struct fsl_mc_command cmd = { 0 };
7738         struct dpni_cmd_get_offload *cmd_params;
7739         struct dpni_rsp_get_offload *rsp_params;
7740         int err;
7741 @@ -792,7 +767,7 @@ int dpni_get_qdid(struct fsl_mc_io *mc_i
7742                   enum dpni_queue_type qtype,
7743                   u16 *qdid)
7744  {
7745 -       struct mc_command cmd = { 0 };
7746 +       struct fsl_mc_command cmd = { 0 };
7747         struct dpni_cmd_get_qdid *cmd_params;
7748         struct dpni_rsp_get_qdid *rsp_params;
7749         int err;
7750 @@ -830,7 +805,7 @@ int dpni_get_tx_data_offset(struct fsl_m
7751                             u16 token,
7752                             u16 *data_offset)
7753  {
7754 -       struct mc_command cmd = { 0 };
7755 +       struct fsl_mc_command cmd = { 0 };
7756         struct dpni_rsp_get_tx_data_offset *rsp_params;
7757         int err;
7758  
7759 @@ -865,7 +840,7 @@ int dpni_set_link_cfg(struct fsl_mc_io *
7760                       u16 token,
7761                       const struct dpni_link_cfg *cfg)
7762  {
7763 -       struct mc_command cmd = { 0 };
7764 +       struct fsl_mc_command cmd = { 0 };
7765         struct dpni_cmd_set_link_cfg *cmd_params;
7766  
7767         /* prepare command */
7768 @@ -881,6 +856,36 @@ int dpni_set_link_cfg(struct fsl_mc_io *
7769  }
7770  
7771  /**
7772 + * dpni_set_link_cfg_v2() - Set the link configuration
7773 + * @mc_io:      Pointer to MC portal's I/O object
7774 + * @cmd_flags:  Command flags; one or more of 'MC_CMD_FLAG_'
7775 + * @token:      Token of DPNI object
7776 + * @cfg:        Link configuration
7777 + *
7778 + * Return:      '0' on Success; Error code otherwise.
7779 + */
7780 +int dpni_set_link_cfg_v2(struct fsl_mc_io *mc_io,
7781 +                        u32 cmd_flags,
7782 +                        u16 token,
7783 +                        const struct dpni_link_cfg *cfg)
7784 +{
7785 +       struct fsl_mc_command cmd = { 0 };
7786 +       struct dpni_cmd_set_link_cfg_v2 *cmd_params;
7787 +
7788 +       /* prepare command */
7789 +       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG_V2,
7790 +                                         cmd_flags,
7791 +                                         token);
7792 +       cmd_params = (struct dpni_cmd_set_link_cfg_v2 *)cmd.params;
7793 +       cmd_params->rate = cpu_to_le32(cfg->rate);
7794 +       cmd_params->options = cpu_to_le64(cfg->options);
7795 +       cmd_params->advertising = cpu_to_le64(cfg->advertising);
7796 +
7797 +       /* send command to mc*/
7798 +       return mc_send_command(mc_io, &cmd);
7799 +}
7800 +
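A hedged caller sketch, assuming the driver's usual priv->mc_io/priv->mc_token pair and net_dev; the advertising flag name is hypothetical, the rest follows the dpni_link_cfg fields used above:

	struct dpni_link_cfg cfg = { 0 };
	int err;

	cfg.rate = 1000;				/* Mbps */
	cfg.options = DPNI_LINK_OPT_AUTONEG;
	cfg.advertising = DPNI_ADVERTISED_1000BASET_FULL;	/* hypothetical name */
	err = dpni_set_link_cfg_v2(priv->mc_io, 0, priv->mc_token, &cfg);
	if (err)
		netdev_err(net_dev, "dpni_set_link_cfg_v2() failed\n");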
7801 +/**
7802   * dpni_get_link_state() - Return the link state (either up or down)
7803   * @mc_io:     Pointer to MC portal's I/O object
7804   * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7805 @@ -894,7 +899,7 @@ int dpni_get_link_state(struct fsl_mc_io
7806                         u16 token,
7807                         struct dpni_link_state *state)
7808  {
7809 -       struct mc_command cmd = { 0 };
7810 +       struct fsl_mc_command cmd = { 0 };
7811         struct dpni_rsp_get_link_state *rsp_params;
7812         int err;
7813  
7814 @@ -918,6 +923,84 @@ int dpni_get_link_state(struct fsl_mc_io
7815  }
7816  
7817  /**
7818 + * dpni_get_link_state_v2() - Return the link state (either up or down)
7819 + * @mc_io:      Pointer to MC portal's I/O object
7820 + * @cmd_flags:  Command flags; one or more of 'MC_CMD_FLAG_'
7821 + * @token:      Token of DPNI object
7822 + * @state:      Returned link state;
7823 + *
7824 + * Return:      '0' on Success; Error code otherwise.
7825 + */
7826 +int dpni_get_link_state_v2(struct fsl_mc_io *mc_io,
7827 +                          u32 cmd_flags,
7828 +                          u16 token,
7829 +                          struct dpni_link_state *state)
7830 +{
7831 +       struct fsl_mc_command cmd = { 0 };
7832 +       struct dpni_rsp_get_link_state_v2 *rsp_params;
7833 +       int err;
7834 +
7835 +       /* prepare command */
7836 +       cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE_V2,
7837 +                                         cmd_flags,
7838 +                                         token);
7839 +
7840 +       /* send command to mc*/
7841 +       err = mc_send_command(mc_io, &cmd);
7842 +       if (err)
7843 +               return err;
7844 +
7845 +       /* retrieve response parameters */
7846 +       rsp_params = (struct dpni_rsp_get_link_state_v2 *)cmd.params;
7847 +       state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
7848 +       state->state_valid = dpni_get_field(rsp_params->flags, STATE_VALID);
7849 +       state->rate = le32_to_cpu(rsp_params->rate);
7850 +       state->options = le64_to_cpu(rsp_params->options);
7851 +       state->supported = le64_to_cpu(rsp_params->supported);
7852 +       state->advertising = le64_to_cpu(rsp_params->advertising);
7853 +
7854 +       return 0;
7855 +}
7856 +
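Unlike v1, the v2 response carries a validity bit, so callers should gate on state_valid before trusting the extended fields. A minimal sketch under the same assumptions as the previous example:

	struct dpni_link_state state = { 0 };
	int err;

	err = dpni_get_link_state_v2(priv->mc_io, 0, priv->mc_token, &state);
	if (err)
		return err;
	if (state.state_valid && state.up)
		netdev_info(net_dev, "link up at %u Mbps\n", state.rate);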
7857 +/**
7858 + * dpni_set_tx_shaping() - Set the transmit shaping
7859 + * @mc_io:             Pointer to MC portal's I/O object
7860 + * @cmd_flags:         Command flags; one or more of 'MC_CMD_FLAG_'
7861 + * @token:             Token of DPNI object
7862 + * @tx_cr_shaper:      TX committed rate shaping configuration
7863 + * @tx_er_shaper:      TX excess rate shaping configuration
7864 + * @coupled:           Committed and excess rate shapers are coupled
7865 + *
7866 + * Return:     '0' on Success; Error code otherwise.
7867 + */
7868 +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
7869 +                       u32 cmd_flags,
7870 +                       u16 token,
7871 +                       const struct dpni_tx_shaping_cfg *tx_cr_shaper,
7872 +                       const struct dpni_tx_shaping_cfg *tx_er_shaper,
7873 +                       int coupled)
7874 +{
7875 +       struct fsl_mc_command cmd = { 0 };
7876 +       struct dpni_cmd_set_tx_shaping *cmd_params;
7877 +
7878 +       /* prepare command */
7879 +       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
7880 +                                         cmd_flags,
7881 +                                         token);
7882 +       cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
7883 +       cmd_params->tx_cr_max_burst_size =
7884 +                               cpu_to_le16(tx_cr_shaper->max_burst_size);
7885 +       cmd_params->tx_er_max_burst_size =
7886 +                               cpu_to_le16(tx_er_shaper->max_burst_size);
7887 +       cmd_params->tx_cr_rate_limit = cpu_to_le32(tx_cr_shaper->rate_limit);
7888 +       cmd_params->tx_er_rate_limit = cpu_to_le32(tx_er_shaper->rate_limit);
7889 +       dpni_set_field(cmd_params->coupled, COUPLED, coupled);
7890 +
7891 +       /* send command to mc*/
7892 +       return mc_send_command(mc_io, &cmd);
7893 +}
7894 +
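A sketch capping egress to a committed 100 Mbps with a ~64 KB burst and the excess-rate shaper left idle (units assumed per the dpni_tx_shaping_cfg comments elsewhere in this patch; values illustrative):

	struct dpni_tx_shaping_cfg cr_shaper = {
		.rate_limit = 100,		/* Mbps, illustrative */
		.max_burst_size = 0xffff,	/* bytes */
	};
	struct dpni_tx_shaping_cfg er_shaper = { 0 };	/* excess rate unused */
	int err;

	err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token,
				  &cr_shaper, &er_shaper, 0 /* not coupled */);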
7895 +/**
7896   * dpni_set_max_frame_length() - Set the maximum received frame length.
7897   * @mc_io:     Pointer to MC portal's I/O object
7898   * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7899 @@ -933,7 +1016,7 @@ int dpni_set_max_frame_length(struct fsl
7900                               u16 token,
7901                               u16 max_frame_length)
7902  {
7903 -       struct mc_command cmd = { 0 };
7904 +       struct fsl_mc_command cmd = { 0 };
7905         struct dpni_cmd_set_max_frame_length *cmd_params;
7906  
7907         /* prepare command */
7908 @@ -963,7 +1046,7 @@ int dpni_get_max_frame_length(struct fsl
7909                               u16 token,
7910                               u16 *max_frame_length)
7911  {
7912 -       struct mc_command cmd = { 0 };
7913 +       struct fsl_mc_command cmd = { 0 };
7914         struct dpni_rsp_get_max_frame_length *rsp_params;
7915         int err;
7916  
7917 @@ -998,7 +1081,7 @@ int dpni_set_multicast_promisc(struct fs
7918                                u16 token,
7919                                int en)
7920  {
7921 -       struct mc_command cmd = { 0 };
7922 +       struct fsl_mc_command cmd = { 0 };
7923         struct dpni_cmd_set_multicast_promisc *cmd_params;
7924  
7925         /* prepare command */
7926 @@ -1026,7 +1109,7 @@ int dpni_get_multicast_promisc(struct fs
7927                                u16 token,
7928                                int *en)
7929  {
7930 -       struct mc_command cmd = { 0 };
7931 +       struct fsl_mc_command cmd = { 0 };
7932         struct dpni_rsp_get_multicast_promisc *rsp_params;
7933         int err;
7934  
7935 @@ -1061,7 +1144,7 @@ int dpni_set_unicast_promisc(struct fsl_
7936                              u16 token,
7937                              int en)
7938  {
7939 -       struct mc_command cmd = { 0 };
7940 +       struct fsl_mc_command cmd = { 0 };
7941         struct dpni_cmd_set_unicast_promisc *cmd_params;
7942  
7943         /* prepare command */
7944 @@ -1089,7 +1172,7 @@ int dpni_get_unicast_promisc(struct fsl_
7945                              u16 token,
7946                              int *en)
7947  {
7948 -       struct mc_command cmd = { 0 };
7949 +       struct fsl_mc_command cmd = { 0 };
7950         struct dpni_rsp_get_unicast_promisc *rsp_params;
7951         int err;
7952  
7953 @@ -1124,7 +1207,7 @@ int dpni_set_primary_mac_addr(struct fsl
7954                               u16 token,
7955                               const u8 mac_addr[6])
7956  {
7957 -       struct mc_command cmd = { 0 };
7958 +       struct fsl_mc_command cmd = { 0 };
7959         struct dpni_cmd_set_primary_mac_addr *cmd_params;
7960         int i;
7961  
7962 @@ -1154,7 +1237,7 @@ int dpni_get_primary_mac_addr(struct fsl
7963                               u16 token,
7964                               u8 mac_addr[6])
7965  {
7966 -       struct mc_command cmd = { 0 };
7967 +       struct fsl_mc_command cmd = { 0 };
7968         struct dpni_rsp_get_primary_mac_addr *rsp_params;
7969         int i, err;
7970  
7971 @@ -1193,7 +1276,7 @@ int dpni_get_port_mac_addr(struct fsl_mc
7972                            u16 token,
7973                            u8 mac_addr[6])
7974  {
7975 -       struct mc_command cmd = { 0 };
7976 +       struct fsl_mc_command cmd = { 0 };
7977         struct dpni_rsp_get_port_mac_addr *rsp_params;
7978         int i, err;
7979  
7980 @@ -1229,7 +1312,7 @@ int dpni_add_mac_addr(struct fsl_mc_io *
7981                       u16 token,
7982                       const u8 mac_addr[6])
7983  {
7984 -       struct mc_command cmd = { 0 };
7985 +       struct fsl_mc_command cmd = { 0 };
7986         struct dpni_cmd_add_mac_addr *cmd_params;
7987         int i;
7988  
7989 @@ -1259,7 +1342,7 @@ int dpni_remove_mac_addr(struct fsl_mc_i
7990                          u16 token,
7991                          const u8 mac_addr[6])
7992  {
7993 -       struct mc_command cmd = { 0 };
7994 +       struct fsl_mc_command cmd = { 0 };
7995         struct dpni_cmd_remove_mac_addr *cmd_params;
7996         int i;
7997  
7998 @@ -1293,7 +1376,7 @@ int dpni_clear_mac_filters(struct fsl_mc
7999                            int unicast,
8000                            int multicast)
8001  {
8002 -       struct mc_command cmd = { 0 };
8003 +       struct fsl_mc_command cmd = { 0 };
8004         struct dpni_cmd_clear_mac_filters *cmd_params;
8005  
8006         /* prepare command */
8007 @@ -1309,6 +1392,55 @@ int dpni_clear_mac_filters(struct fsl_mc
8008  }
8009  
8010  /**
8011 + * dpni_set_tx_priorities() - Set transmission TC priority configuration
8012 + * @mc_io:     Pointer to MC portal's I/O object
8013 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8014 + * @token:     Token of DPNI object
8015 + * @cfg:       Transmission selection configuration
8016 + *
8017 + * warning:    Allowed only when DPNI is disabled
8018 + *
8019 + * Return:     '0' on Success; Error code otherwise.
8020 + */
8021 +int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
8022 +                          u32 cmd_flags,
8023 +                          u16 token,
8024 +                          const struct dpni_tx_priorities_cfg *cfg)
8025 +{
8026 +       struct dpni_cmd_set_tx_priorities *cmd_params;
8027 +       struct fsl_mc_command cmd = { 0 };
8028 +       int i;
8029 +
8030 +       /* prepare command */
8031 +       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_PRIORITIES,
8032 +                                         cmd_flags,
8033 +                                         token);
8034 +       cmd_params = (struct dpni_cmd_set_tx_priorities *)cmd.params;
8035 +       dpni_set_field(cmd_params->flags,
8036 +                      SEPARATE_GRP,
8037 +                      cfg->separate_groups);
8038 +       cmd_params->prio_group_A = cfg->prio_group_A;
8039 +       cmd_params->prio_group_B = cfg->prio_group_B;
8040 +
8041 +       for (i = 0; i + 1 < DPNI_MAX_TC; i += 2) {
8042 +               dpni_set_field(cmd_params->modes[i / 2],
8043 +                              MODE_1,
8044 +                              cfg->tc_sched[i].mode);
8045 +               dpni_set_field(cmd_params->modes[i / 2],
8046 +                              MODE_2,
8047 +                              cfg->tc_sched[i + 1].mode);
8048 +       }
8049 +
8050 +       for (i = 0; i < DPNI_MAX_TC; i++) {
8051 +               cmd_params->delta_bandwidth[i] =
8052 +                               cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
8053 +       }
8054 +
8055 +       /* send command to mc*/
8056 +       return mc_send_command(mc_io, &cmd);
8057 +}
8058 +
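Note the packing above: two 4-bit scheduling modes per byte of modes[], plus one 16-bit delta_bandwidth word per traffic class. A hedged sketch that leaves every TC in strict priority (the enum value name is assumed from this patch's dpni.h):

	struct dpni_tx_priorities_cfg prio_cfg = { 0 };
	int i, err;

	for (i = 0; i < DPNI_MAX_TC; i++)
		prio_cfg.tc_sched[i].mode = DPNI_TX_SCHED_STRICT_PRIORITY; /* name assumed */
	err = dpni_set_tx_priorities(priv->mc_io, 0, priv->mc_token, &prio_cfg);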
8059 +/**
8060   * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
8061   * @mc_io:     Pointer to MC portal's I/O object
8062   * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8063 @@ -1327,7 +1459,7 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io
8064                         u8 tc_id,
8065                         const struct dpni_rx_tc_dist_cfg *cfg)
8066  {
8067 -       struct mc_command cmd = { 0 };
8068 +       struct fsl_mc_command cmd = { 0 };
8069         struct dpni_cmd_set_rx_tc_dist *cmd_params;
8070  
8071         /* prepare command */
8072 @@ -1346,6 +1478,215 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io
8073         return mc_send_command(mc_io, &cmd);
8074  }
8075  
8076 +/**
8077 + * dpni_set_qos_table() - Set QoS mapping table
8078 + * @mc_io:     Pointer to MC portal's I/O object
8079 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8080 + * @token:     Token of DPNI object
8081 + * @cfg:       QoS table configuration
8082 + *
8083 + * This function and all QoS-related functions require that
8084 + * 'max_tcs > 1' was set at DPNI creation.
8085 + *
8086 + * warning: Before calling this function, call dpkg_prepare_key_cfg() to
8087 + * prepare the key_cfg_iova parameter
8088 + *
8089 + * Return:     '0' on Success; Error code otherwise.
8090 + */
8091 +int dpni_set_qos_table(struct fsl_mc_io *mc_io,
8092 +                      u32 cmd_flags,
8093 +                      u16 token,
8094 +                      const struct dpni_qos_tbl_cfg *cfg)
8095 +{
8096 +       struct dpni_cmd_set_qos_table *cmd_params;
8097 +       struct fsl_mc_command cmd = { 0 };
8098 +
8099 +       /* prepare command */
8100 +       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
8101 +                                         cmd_flags,
8102 +                                         token);
8103 +       cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
8104 +       cmd_params->default_tc = cfg->default_tc;
8105 +       cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
8106 +       dpni_set_field(cmd_params->discard_on_miss,
8107 +                      ENABLE,
8108 +                      cfg->discard_on_miss);
8109 +
8110 +       /* send command to mc*/
8111 +       return mc_send_command(mc_io, &cmd);
8112 +}
8113 +
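A hedged end-to-end sketch: serialize the key profile with dpkg_prepare_key_cfg(), DMA-map the buffer, then point the QoS table at it. DPAA2_CLASSIFIER_DMA_SIZE and the kg_cfg variable are assumed from earlier sketches and this patch's dpaa2-eth.h; error unwinding is abbreviated:

	struct dpni_qos_tbl_cfg qos_cfg = { 0 };
	u8 *dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); /* size macro assumed */
	int err;

	err = dpkg_prepare_key_cfg(&kg_cfg, dma_mem);
	if (err)
		return err;	/* real code would free dma_mem */
	qos_cfg.default_tc = 0;
	qos_cfg.discard_on_miss = 0;	/* miss falls back to default_tc */
	qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
					      DPAA2_CLASSIFIER_DMA_SIZE,
					      DMA_TO_DEVICE);
	err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);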
8114 +/**
8115 + * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
8116 + * @mc_io:     Pointer to MC portal's I/O object
8117 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8118 + * @token:     Token of DPNI object
8119 + * @cfg:       QoS rule to add
8120 + * @tc_id:     Traffic class selection (0-7)
8121 + * @index:     Location in the QoS table at which to insert the entry.
8122 + *             Only relevant if MASKING is enabled for QoS classification
8123 + *             on this DPNI; it is ignored for exact match.
8124 + *
8125 + * Return:     '0' on Success; Error code otherwise.
8126 + */
8127 +int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
8128 +                      u32 cmd_flags,
8129 +                      u16 token,
8130 +                      const struct dpni_rule_cfg *cfg,
8131 +                      u8 tc_id,
8132 +                      u16 index)
8133 +{
8134 +       struct dpni_cmd_add_qos_entry *cmd_params;
8135 +       struct fsl_mc_command cmd = { 0 };
8136 +
8137 +       /* prepare command */
8138 +       cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
8139 +                                         cmd_flags,
8140 +                                         token);
8141 +       cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
8142 +       cmd_params->tc_id = tc_id;
8143 +       cmd_params->key_size = cfg->key_size;
8144 +       cmd_params->index = cpu_to_le16(index);
8145 +       cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
8146 +       cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
8147 +
8148 +       /* send command to mc*/
8149 +       return mc_send_command(mc_io, &cmd);
8150 +}
8151 +
8152 +/**
8153 + * dpni_remove_qos_entry() - Remove QoS mapping entry
8154 + * @mc_io:     Pointer to MC portal's I/O object
8155 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8156 + * @token:     Token of DPNI object
8157 + * @cfg:       QoS rule to remove
8158 + *
8159 + * Return:     '0' on Success; Error code otherwise.
8160 + */
8161 +int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
8162 +                         u32 cmd_flags,
8163 +                         u16 token,
8164 +                         const struct dpni_rule_cfg *cfg)
8165 +{
8166 +       struct dpni_cmd_remove_qos_entry *cmd_params;
8167 +       struct fsl_mc_command cmd = { 0 };
8168 +
8169 +       /* prepare command */
8170 +       cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
8171 +                                         cmd_flags,
8172 +                                         token);
8173 +       cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
8174 +       cmd_params->key_size = cfg->key_size;
8175 +       cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
8176 +       cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
8177 +
8178 +       /* send command to mc*/
8179 +       return mc_send_command(mc_io, &cmd);
8180 +}
8181 +
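Add and remove are keyed by the same (key, mask, size) triple, so the rule passed to removal must match the one that was inserted. A paired sketch (DMA mapping of the key and mask bytes is assumed done elsewhere; tc_id and index values are illustrative):

	struct dpni_rule_cfg rule = {
		.key_iova = key_iova,	/* DMA address of key bytes */
		.mask_iova = mask_iova,	/* DMA address of mask bytes */
		.key_size = key_size,
	};
	int err;

	err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token, &rule,
				 3 /* tc_id */, 0 /* index */);
	if (err)
		return err;
	/* ... later, tear the same rule down ... */
	err = dpni_remove_qos_entry(priv->mc_io, 0, priv->mc_token, &rule);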
8182 +/**
8183 + * dpni_set_congestion_notification() - Set traffic class congestion
8184 + *                                     notification configuration
8185 + * @mc_io:     Pointer to MC portal's I/O object
8186 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8187 + * @token:     Token of DPNI object
8188 + * @qtype:     Type of queue - Rx, Tx and Tx confirm types are supported
8189 + * @tc_id:     Traffic class selection (0-7)
8190 + * @cfg:       Congestion notification configuration
8191 + *
8192 + * Return:     '0' on Success; error code otherwise.
8193 + */
8194 +int dpni_set_congestion_notification(
8195 +                       struct fsl_mc_io *mc_io,
8196 +                       u32 cmd_flags,
8197 +                       u16 token,
8198 +                       enum dpni_queue_type qtype,
8199 +                       u8 tc_id,
8200 +                       const struct dpni_congestion_notification_cfg *cfg)
8201 +{
8202 +       struct dpni_cmd_set_congestion_notification *cmd_params;
8203 +       struct fsl_mc_command cmd = { 0 };
8204 +
8205 +       /* prepare command */
8206 +       cmd.header = mc_encode_cmd_header(
8207 +                       DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
8208 +                       cmd_flags,
8209 +                       token);
8210 +       cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
8211 +       cmd_params->qtype = qtype;
8212 +       cmd_params->tc = tc_id;
8213 +       cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
8214 +       cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
8215 +       cmd_params->dest_priority = cfg->dest_cfg.priority;
8216 +       dpni_set_field(cmd_params->type_units, DEST_TYPE,
8217 +                      cfg->dest_cfg.dest_type);
8218 +       dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
8219 +       cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
8220 +       cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
8221 +       cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
8222 +       cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
8223 +
8224 +       /* send command to mc */
8225 +       return mc_send_command(mc_io, &cmd);
8226 +}
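
A brief usage sketch may help here. The helper name, thresholds and channel/TC values below are illustrative only; an initialized MC portal ('mc_io'), an open DPNI ('token'), the driver's usual includes, and DPNI_CONGESTION_UNIT_BYTES from the existing enum dpni_congestion_unit are assumed. The sketch arms a Tx congestion notification that writes a CSCN message to DMA-able memory on both entry and exit:

static int example_arm_cscn(struct fsl_mc_io *mc_io, u16 token,
                            dma_addr_t cscn_iova)
{
        struct dpni_congestion_notification_cfg cfg = {
                .units = DPNI_CONGESTION_UNIT_BYTES,
                .threshold_entry = 64 * 1024,
                .threshold_exit = 32 * 1024,
                .message_iova = cscn_iova, /* 16B-aligned, DMA-able */
                .notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
                                     DPNI_CONG_OPT_WRITE_MEM_ON_EXIT,
        };

        /* CEETM channel 0, traffic class 0 */
        return dpni_set_congestion_notification(mc_io, 0, token,
                                                DPNI_QUEUE_TX,
                                                DPNI_BUILD_CH_TC(0, 0),
                                                &cfg);
}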
8227 +
8228 +/**
8229 + * dpni_get_congestion_notification() - Get traffic class congestion
8230 + *     notification configuration
8231 + * @mc_io:     Pointer to MC portal's I/O object
8232 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8233 + * @token:     Token of DPNI object
8234 + * @qtype:     Type of queue - Rx, Tx and Tx confirm types are supported
8235 + * @tc_id:     bits 7-4 contain ceetm channel index (valid only for TX);
8236 + *             bits 3-0 contain traffic class.
8237 + *             Use the DPNI_BUILD_CH_TC() macro to build the correct
8238 + *             value for the tc_id parameter.
8239 + * @cfg:       congestion notification configuration
8240 + *
8241 + * Return:     '0' on Success; error code otherwise.
8242 + */
8243 +int dpni_get_congestion_notification(
8244 +                       struct fsl_mc_io *mc_io,
8245 +                       u32 cmd_flags,
8246 +                       u16 token,
8247 +                       enum dpni_queue_type qtype,
8248 +                       u8 tc_id,
8249 +                       struct dpni_congestion_notification_cfg *cfg)
8250 +{
8251 +       struct dpni_rsp_get_congestion_notification *rsp_params;
8252 +       struct dpni_cmd_get_congestion_notification *cmd_params;
8253 +       struct fsl_mc_command cmd = { 0 };
8254 +       int err;
8255 +
8256 +       /* prepare command */
8257 +       cmd.header = mc_encode_cmd_header(
8258 +                               DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
8259 +                               cmd_flags,
8260 +                               token);
8261 +       cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
8262 +       cmd_params->qtype = qtype;
8263 +       cmd_params->tc = tc_id;
8264 +
8265 +       /* send command to mc */
8266 +       err = mc_send_command(mc_io, &cmd);
8267 +       if (err)
8268 +               return err;
8269 +
8270 +       rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params;
8271 +       cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS);
8272 +       cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
8273 +       cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
8274 +       cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
8275 +       cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
8276 +       cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
8277 +       cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
8278 +       cfg->dest_cfg.priority = rsp_params->dest_priority;
8279 +       cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units,
8280 +                                                DEST_TYPE);
8281 +
8282 +       return 0;
8283 +}
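
The getter pairs naturally with the setter for read-modify-write updates. In this sketch (the doubling of the thresholds is arbitrary, and 'ch_tc' is assumed to be a value built with DPNI_BUILD_CH_TC()), only the thresholds are touched while the rest of the configuration is preserved:

static int example_raise_thresholds(struct fsl_mc_io *mc_io, u16 token,
                                    u8 ch_tc)
{
        struct dpni_congestion_notification_cfg cfg;
        int err;

        /* the getter fills in every field of cfg */
        err = dpni_get_congestion_notification(mc_io, 0, token,
                                               DPNI_QUEUE_TX, ch_tc, &cfg);
        if (err)
                return err;

        cfg.threshold_entry *= 2;
        cfg.threshold_exit *= 2;

        return dpni_set_congestion_notification(mc_io, 0, token,
                                                DPNI_QUEUE_TX, ch_tc, &cfg);
}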
8284 +
8285  /**
8286   * dpni_set_queue() - Set queue parameters
8287   * @mc_io:     Pointer to MC portal's I/O object
8288 @@ -1371,7 +1712,7 @@ int dpni_set_queue(struct fsl_mc_io *mc_
8289                    u8 options,
8290                    const struct dpni_queue *queue)
8291  {
8292 -       struct mc_command cmd = { 0 };
8293 +       struct fsl_mc_command cmd = { 0 };
8294         struct dpni_cmd_set_queue *cmd_params;
8295  
8296         /* prepare command */
8297 @@ -1419,7 +1760,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_
8298                    struct dpni_queue *queue,
8299                    struct dpni_queue_id *qid)
8300  {
8301 -       struct mc_command cmd = { 0 };
8302 +       struct fsl_mc_command cmd = { 0 };
8303         struct dpni_cmd_get_queue *cmd_params;
8304         struct dpni_rsp_get_queue *rsp_params;
8305         int err;
8306 @@ -1463,6 +1804,8 @@ int dpni_get_queue(struct fsl_mc_io *mc_
8307   * @token:     Token of DPNI object
8308   * @page:      Selects the statistics page to retrieve, see
8309   *             DPNI_GET_STATISTICS output. Pages are numbered 0 to 3.
8310 + * @param:     Custom parameter for some pages used to select a certain
8311 + *             statistic source, for example the TC.
8312   * @stat:      Structure containing the statistics
8313   *
8314   * Return:     '0' on Success; Error code otherwise.
8315 @@ -1471,9 +1814,10 @@ int dpni_get_statistics(struct fsl_mc_io
8316                         u32 cmd_flags,
8317                         u16 token,
8318                         u8 page,
8319 +                       u8 param,
8320                         union dpni_statistics *stat)
8321  {
8322 -       struct mc_command cmd = { 0 };
8323 +       struct fsl_mc_command cmd = { 0 };
8324         struct dpni_cmd_get_statistics *cmd_params;
8325         struct dpni_rsp_get_statistics *rsp_params;
8326         int i, err;
8327 @@ -1484,6 +1828,7 @@ int dpni_get_statistics(struct fsl_mc_io
8328                                           token);
8329         cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
8330         cmd_params->page_number = page;
8331 +       cmd_params->param = param;
8332  
8333         /* send command to mc */
8334         err = mc_send_command(mc_io, &cmd);
8335 @@ -1499,6 +1844,29 @@ int dpni_get_statistics(struct fsl_mc_io
8336  }
8337  
8338  /**
8339 + * dpni_reset_statistics() - Clears DPNI statistics
8340 + * @mc_io:             Pointer to MC portal's I/O object
8341 + * @cmd_flags:         Command flags; one or more of 'MC_CMD_FLAG_'
8342 + * @token:             Token of DPNI object
8343 + *
8344 + * Return:  '0' on Success; Error code otherwise.
8345 + */
8346 +int dpni_reset_statistics(struct fsl_mc_io *mc_io,
8347 +                         u32 cmd_flags,
8348 +                         u16 token)
8349 +{
8350 +       struct fsl_mc_command cmd = { 0 };
8351 +
8352 +       /* prepare command */
8353 +       cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
8354 +                                         cmd_flags,
8355 +                                         token);
8356 +
8357 +       /* send command to mc */
8358 +       return mc_send_command(mc_io, &cmd);
8359 +}
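
Since page 3 is the first statistics page that uses the new 'param' argument, a short sketch of the intended calling pattern may help; 'example_dump_tc_stats' is hypothetical and assumes an initialized 'mc_io' and 'token'. It reads the CEETM counters of one traffic class, then clears all counters:

static int example_dump_tc_stats(struct fsl_mc_io *mc_io, u16 token, u8 tc)
{
        union dpni_statistics stats;
        int err;

        /* page 3 returns CEETM counters; 'param' selects the TC */
        err = dpni_get_statistics(mc_io, 0, token, 3, tc, &stats);
        if (err)
                return err;

        pr_info("TC %u: %llu frames dequeued, %llu enqueues rejected\n",
                tc, stats.page_3.ceetm_dequeue_frames,
                stats.page_3.ceetm_reject_frames);

        /* zero the counters on all pages */
        return dpni_reset_statistics(mc_io, 0, token);
}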
8360 +
8361 +/**
8362   * dpni_set_taildrop() - Set taildrop per queue or TC
8363   * @mc_io:     Pointer to MC portal's I/O object
8364   * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8365 @@ -1506,7 +1874,10 @@ int dpni_get_statistics(struct fsl_mc_io
8366   * @cg_point:  Congestion point
8367   * @q_type:    Queue type on which the taildrop is configured.
8368   *             Only Rx queues are supported for now
8369 - * @tc:                Traffic class to apply this taildrop to
8370 + * @tc:                bits 7-4 contain ceetm channel index (valid only for TX);
8371 + *             bits 3-0 contain traffic class.
8372 + *             Use the DPNI_BUILD_CH_TC() macro to build the correct
8373 + *             value for the tc parameter.
8374   * @q_index:   Index of the queue if the DPNI supports multiple queues for
8375   *             traffic distribution. Ignored if CONGESTION_POINT is not 0.
8376   * @taildrop:  Taildrop structure
8377 @@ -1522,7 +1893,7 @@ int dpni_set_taildrop(struct fsl_mc_io *
8378                       u8 index,
8379                       struct dpni_taildrop *taildrop)
8380  {
8381 -       struct mc_command cmd = { 0 };
8382 +       struct fsl_mc_command cmd = { 0 };
8383         struct dpni_cmd_set_taildrop *cmd_params;
8384  
8385         /* prepare command */
8386 @@ -1550,7 +1921,10 @@ int dpni_set_taildrop(struct fsl_mc_io *
8387   * @cg_point:  Congestion point
8388   * @q_type:    Queue type on which the taildrop is configured.
8389   *             Only Rx queues are supported for now
8390 - * @tc:                Traffic class to apply this taildrop to
8391 + * @tc:                bits 7-4 contain ceetm channel index (valid only for TX);
8392 + *             bits 3-0 contain traffic class.
8393 + *             Use the DPNI_BUILD_CH_TC() macro to build the correct
8394 + *             value for the tc parameter.
8395   * @q_index:   Index of the queue if the DPNI supports multiple queues for
8396   *             traffic distribution. Ignored if CONGESTION_POINT is not 0.
8397   * @taildrop:  Taildrop structure
8398 @@ -1566,7 +1940,7 @@ int dpni_get_taildrop(struct fsl_mc_io *
8399                       u8 index,
8400                       struct dpni_taildrop *taildrop)
8401  {
8402 -       struct mc_command cmd = { 0 };
8403 +       struct fsl_mc_command cmd = { 0 };
8404         struct dpni_cmd_get_taildrop *cmd_params;
8405         struct dpni_rsp_get_taildrop *rsp_params;
8406         int err;
8407 @@ -1594,3 +1968,187 @@ int dpni_get_taildrop(struct fsl_mc_io *
8408  
8409         return 0;
8410  }
8411 +
8412 +/**
8413 + * dpni_get_api_version() - Get Data Path Network Interface API version
8414 + * @mc_io:     Pointer to MC portal's I/O object
8415 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8416 + * @major_ver: Major version of data path network interface API
8417 + * @minor_ver: Minor version of data path network interface API
8418 + *
8419 + * Return:     '0' on Success; Error code otherwise.
8420 + */
8421 +int dpni_get_api_version(struct fsl_mc_io *mc_io,
8422 +                        u32 cmd_flags,
8423 +                        u16 *major_ver,
8424 +                        u16 *minor_ver)
8425 +{
8426 +       struct dpni_rsp_get_api_version *rsp_params;
8427 +       struct fsl_mc_command cmd = { 0 };
8428 +       int err;
8429 +
8430 +       cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
8431 +                                         cmd_flags, 0);
8432 +
8433 +       err = mc_send_command(mc_io, &cmd);
8434 +       if (err)
8435 +               return err;
8436 +
8437 +       rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
8438 +       *major_ver = le16_to_cpu(rsp_params->major);
8439 +       *minor_ver = le16_to_cpu(rsp_params->minor);
8440 +
8441 +       return 0;
8442 +}
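
A typical consumer gates optional commands on the reported version. In this sketch the helper is hypothetical and the version cut-off is supplied by the caller:

static bool example_has_api(struct fsl_mc_io *mc_io, u16 major, u16 minor)
{
        u16 maj = 0, min = 0;

        if (dpni_get_api_version(mc_io, 0, &maj, &min))
                return false;

        return maj > major || (maj == major && min >= minor);
}

A caller would then test, e.g., example_has_api(mc_io, 7, 3) before issuing commands that older MC firmware rejects (the 7.3 figure is illustrative, not taken from this patch).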
8443 +
8444 +/**
8445 + * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
8446 + * @mc_io:     Pointer to MC portal's I/O object
8447 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8448 + * @token:     Token of DPNI object
8449 + * @cfg: Distribution configuration
8450 + * If FS is already enabled from a previous call, the classification
8451 + * key is changed but all existing table rules are kept. If the
8452 + * existing rules do not match the new key, the results are not
8453 + * predictable; it is the user's responsibility to keep the key
8454 + * consistent. If cfg.enable is set to 1, the command creates a flow
8455 + * steering table and classifies packets according to it. Packets that
8456 + * miss all the table rules are classified according to the settings
8457 + * made in dpni_set_rx_hash_dist().
8458 + * If cfg.enable is set to 0, the command clears the flow steering
8459 + * table. Packets are then classified according to the settings made
8460 + * in dpni_set_rx_hash_dist().
8461 + */
8462 +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
8463 +                       u32 cmd_flags,
8464 +                       u16 token,
8465 +                       const struct dpni_rx_dist_cfg *cfg)
8466 +{
8467 +       struct dpni_cmd_set_rx_fs_dist *cmd_params;
8468 +       struct fsl_mc_command cmd = { 0 };
8469 +
8470 +       /* prepare command */
8471 +       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
8472 +                                         cmd_flags,
8473 +                                         token);
8474 +       cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
8475 +       cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
8476 +       dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
8477 +       cmd_params->tc = cfg->tc;
8478 +       cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
8479 +       cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
8480 +
8481 +       /* send command to mc */
8482 +       return mc_send_command(mc_io, &cmd);
8483 +}
8484 +
8485 +/**
8486 + * dpni_set_rx_hash_dist() - Set Rx traffic class HASH distribution
8487 + * @mc_io:     Pointer to MC portal's I/O object
8488 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8489 + * @token:     Token of DPNI object
8490 + * @cfg: Distribution configuration
8491 + * If cfg.enable is set to 1, packets are classified using a hash
8492 + * function based on the key received in the cfg.key_cfg_iova parameter.
8493 + * If cfg.enable is set to 0, packets are sent to the queue configured
8494 + * by the dpni_set_rx_dist_default_queue() call.
8495 + */
8496 +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
8497 +                         u32 cmd_flags,
8498 +                         u16 token,
8499 +                         const struct dpni_rx_dist_cfg *cfg)
8500 +{
8501 +       struct dpni_cmd_set_rx_hash_dist *cmd_params;
8502 +       struct fsl_mc_command cmd = { 0 };
8503 +
8504 +       /* prepare command */
8505 +       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
8506 +                                         cmd_flags,
8507 +                                         token);
8508 +       cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
8509 +       cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
8510 +       dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
8511 +       cmd_params->tc = cfg->tc;
8512 +       cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
8513 +
8514 +       /* send command to mc */
8515 +       return mc_send_command(mc_io, &cmd);
8516 +}
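
To show how the two calls compose, here is a sketch (the helper is hypothetical, and 'key_iova' is assumed to already point to a key configuration prepared as described above): hashing is enabled first so that it serves as the miss path, then flow steering is layered on top for the same traffic class:

static int example_setup_rx_dist(struct fsl_mc_io *mc_io, u16 token,
                                 u8 tc, u16 nqueues, dma_addr_t key_iova)
{
        struct dpni_rx_dist_cfg dist = {
                .dist_size = nqueues,   /* must be a supported size, e.g. 8 */
                .key_cfg_iova = key_iova,
                .enable = 1,
                .tc = tc,
                .fs_miss_flow_id = 0,   /* ignored while hashing is enabled */
        };
        int err;

        err = dpni_set_rx_hash_dist(mc_io, 0, token, &dist);
        if (err)
                return err;

        return dpni_set_rx_fs_dist(mc_io, 0, token, &dist);
}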
8517 +
8518 +/**
8519 + * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
8520 + *                     (to select a flow ID)
8521 + * @mc_io:     Pointer to MC portal's I/O object
8522 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8523 + * @token:     Token of DPNI object
8524 + * @tc_id:     Traffic class selection (0-7)
8525 + * @index:     Location in the FS table where the entry is inserted.
8526 + *             Only relevant if MASKING is enabled for FS
8527 + *             classification on this DPNI; it is ignored for exact match.
8528 + * @cfg:       Flow steering rule to add
8529 + * @action:    Action to be taken as result of a classification hit
8530 + *
8531 + * Return:     '0' on Success; Error code otherwise.
8532 + */
8533 +int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
8534 +                     u32 cmd_flags,
8535 +                     u16 token,
8536 +                     u8 tc_id,
8537 +                     u16 index,
8538 +                     const struct dpni_rule_cfg *cfg,
8539 +                     const struct dpni_fs_action_cfg *action)
8540 +{
8541 +       struct dpni_cmd_add_fs_entry *cmd_params;
8542 +       struct fsl_mc_command cmd = { 0 };
8543 +
8544 +       /* prepare command */
8545 +       cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
8546 +                                         cmd_flags,
8547 +                                         token);
8548 +       cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
8549 +       cmd_params->tc_id = tc_id;
8550 +       cmd_params->key_size = cfg->key_size;
8551 +       cmd_params->index = cpu_to_le16(index);
8552 +       cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
8553 +       cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
8554 +       cmd_params->options = cpu_to_le16(action->options);
8555 +       cmd_params->flow_id = cpu_to_le16(action->flow_id);
8556 +       cmd_params->flc = cpu_to_le64(action->flc);
8557 +
8558 +       /* send command to mc */
8559 +       return mc_send_command(mc_io, &cmd);
8560 +}
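
As a sketch of a minimal rule (the queue number and index are arbitrary; 'key_iova'/'mask_iova' are assumed to be DMA addresses of buffers laid out to match this TC's classification key):

static int example_steer_to_queue(struct fsl_mc_io *mc_io, u16 token,
                                  u8 tc, dma_addr_t key_iova,
                                  dma_addr_t mask_iova, u8 key_size)
{
        struct dpni_rule_cfg rule = {
                .key_iova = key_iova,
                .mask_iova = mask_iova,
                .key_size = key_size,
        };
        struct dpni_fs_action_cfg action = {
                .flow_id = 3,   /* target Rx queue */
                .options = 0,   /* no FLC override, no discard */
        };

        return dpni_add_fs_entry(mc_io, 0, token, tc, 0, &rule, &action);
}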
8561 +
8562 +/**
8563 + * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
8564 + *                         traffic class
8565 + * @mc_io:     Pointer to MC portal's I/O object
8566 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8567 + * @token:     Token of DPNI object
8568 + * @tc_id:     Traffic class selection (0-7)
8569 + * @cfg:       Flow steering rule to remove
8570 + *
8571 + * Return:     '0' on Success; Error code otherwise.
8572 + */
8573 +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
8574 +                        u32 cmd_flags,
8575 +                        u16 token,
8576 +                        u8 tc_id,
8577 +                        const struct dpni_rule_cfg *cfg)
8578 +{
8579 +       struct dpni_cmd_remove_fs_entry *cmd_params;
8580 +       struct fsl_mc_command cmd = { 0 };
8581 +
8582 +       /* prepare command */
8583 +       cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
8584 +                                         cmd_flags,
8585 +                                         token);
8586 +       cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
8587 +       cmd_params->tc_id = tc_id;
8588 +       cmd_params->key_size = cfg->key_size;
8589 +       cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
8590 +       cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
8591 +
8592 +       /* send command to mc */
8593 +       return mc_send_command(mc_io, &cmd);
8594 +}
8595 --- a/drivers/staging/fsl-dpaa2/ethernet/dpni.h
8596 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
8597 @@ -1,34 +1,6 @@
8598 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
8599  /* Copyright 2013-2016 Freescale Semiconductor Inc.
8600   * Copyright 2016 NXP
8601 - *
8602 - * Redistribution and use in source and binary forms, with or without
8603 - * modification, are permitted provided that the following conditions are met:
8604 - * * Redistributions of source code must retain the above copyright
8605 - * notice, this list of conditions and the following disclaimer.
8606 - * * Redistributions in binary form must reproduce the above copyright
8607 - * notice, this list of conditions and the following disclaimer in the
8608 - * documentation and/or other materials provided with the distribution.
8609 - * * Neither the name of the above-listed copyright holders nor the
8610 - * names of any contributors may be used to endorse or promote products
8611 - * derived from this software without specific prior written permission.
8612 - *
8613 - *
8614 - * ALTERNATIVELY, this software may be distributed under the terms of the
8615 - * GNU General Public License ("GPL") as published by the Free Software
8616 - * Foundation, either version 2 of that License or (at your option) any
8617 - * later version.
8618 - *
8619 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
8620 - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8621 - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8622 - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
8623 - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
8624 - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
8625 - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
8626 - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
8627 - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
8628 - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
8629 - * POSSIBILITY OF SUCH DAMAGE.
8630   */
8631  #ifndef __FSL_DPNI_H
8632  #define __FSL_DPNI_H
8633 @@ -52,6 +24,14 @@ struct fsl_mc_io;
8634   * Maximum number of buffer pools per DPNI
8635   */
8636  #define DPNI_MAX_DPBP                          8
8637 +/**
8638 + * Maximum number of senders
8639 + */
8640 +#define DPNI_MAX_SENDERS                       16
8641 +/**
8642 + * Maximum distribution size
8643 + */
8644 +#define DPNI_MAX_DIST_SIZE                     16
8645  
8646  /**
8647   * All traffic classes considered; see dpni_set_queue()
8648 @@ -123,13 +103,15 @@ struct dpni_pools_cfg {
8649         /**
8650          * struct pools - Buffer pools parameters
8651          * @dpbp_id: DPBP object ID
8652 +        * @priority_mask: priorities served by DPBP
8653          * @buffer_size: Buffer size
8654          * @backup_pool: Backup pool
8655          */
8656         struct {
8657 -               int     dpbp_id;
8658 +               u16     dpbp_id;
8659 +               u8      priority_mask;
8660                 u16     buffer_size;
8661 -               int     backup_pool;
8662 +               u8      backup_pool;
8663         } pools[DPNI_MAX_DPBP];
8664  };
8665  
8666 @@ -476,6 +458,24 @@ union dpni_statistics {
8667                 u64 egress_confirmed_frames;
8668         } page_2;
8669         /**
8670 +        * struct page_3 - Page_3 statistics structure with values for the
8671 +        *                 selected TC
8672 +        * @ceetm_dequeue_bytes: Cumulative count of the number of bytes
8673 +        *                       dequeued
8674 +        * @ceetm_dequeue_frames: Cumulative count of the number of frames
8675 +        *                        dequeued
8676 +        * @ceetm_reject_bytes: Cumulative count of the number of bytes in all
8677 +        *                      frames whose enqueue was rejected
8678 +        * @ceetm_reject_frames: Cumulative count of all frame enqueues
8679 +        *                       rejected
8680 +        */
8681 +       struct {
8682 +               u64 ceetm_dequeue_bytes;
8683 +               u64 ceetm_dequeue_frames;
8684 +               u64 ceetm_reject_bytes;
8685 +               u64 ceetm_reject_frames;
8686 +       } page_3;
8687 +       /**
8688          * struct raw - raw statistics structure
8689          */
8690         struct {
8691 @@ -487,8 +487,13 @@ int dpni_get_statistics(struct fsl_mc_io
8692                         u32                     cmd_flags,
8693                         u16                     token,
8694                         u8                      page,
8695 +                       u8                      param,
8696                         union dpni_statistics   *stat);
8697  
8698 +int dpni_reset_statistics(struct fsl_mc_io *mc_io,
8699 +                         u32 cmd_flags,
8700 +                         u16 token);
8701 +
8702  /**
8703   * Enable auto-negotiation
8704   */
8705 @@ -505,6 +510,23 @@ int dpni_get_statistics(struct fsl_mc_io
8706   * Enable a-symmetric pause frames
8707   */
8708  #define DPNI_LINK_OPT_ASYM_PAUSE       0x0000000000000008ULL
8709 +/**
8710 + * Enable priority flow control pause frames
8711 + */
8712 +#define DPNI_LINK_OPT_PFC_PAUSE                0x0000000000000010ULL
8713 +/**
8714 + * Advertised link speeds
8715 + */
8716 +#define DPNI_ADVERTISED_10BASET_FULL           0x0000000000000001ULL
8717 +#define DPNI_ADVERTISED_100BASET_FULL          0x0000000000000002ULL
8718 +#define DPNI_ADVERTISED_1000BASET_FULL         0x0000000000000004ULL
8719 +#define DPNI_ADVERTISED_10000BASET_FULL        0x0000000000000010ULL
8720 +#define DPNI_ADVERTISED_2500BASEX_FULL         0x0000000000000020ULL
8721 +
8722 +/**
8723 + * Advertise auto-negotiation enabled
8724 + */
8725 +#define DPNI_ADVERTISED_AUTONEG                0x0000000000000008ULL
8726  
8727  /**
8728   * struct - Structure representing DPNI link configuration
8729 @@ -514,6 +536,7 @@ int dpni_get_statistics(struct fsl_mc_io
8730  struct dpni_link_cfg {
8731         u32 rate;
8732         u64 options;
8733 +       u64 advertising;
8734  };
8735  
8736  int dpni_set_link_cfg(struct fsl_mc_io                 *mc_io,
8737 @@ -521,6 +544,11 @@ int dpni_set_link_cfg(struct fsl_mc_io
8738                       u16                               token,
8739                       const struct dpni_link_cfg        *cfg);
8740  
8741 +int dpni_set_link_cfg_v2(struct fsl_mc_io              *mc_io,
8742 +                        u32                            cmd_flags,
8743 +                        u16                            token,
8744 +                        const struct dpni_link_cfg     *cfg);
8745 +
8746  /**
8747   * struct dpni_link_state - Structure representing DPNI link state
8748   * @rate: Rate
8749 @@ -530,7 +558,10 @@ int dpni_set_link_cfg(struct fsl_mc_io
8750  struct dpni_link_state {
8751         u32     rate;
8752         u64     options;
8753 +       u64     supported;
8754 +       u64     advertising;
8755         int     up;
8756 +       int     state_valid;
8757  };
8758  
8759  int dpni_get_link_state(struct fsl_mc_io       *mc_io,
8760 @@ -538,6 +569,28 @@ int dpni_get_link_state(struct fsl_mc_io
8761                         u16                     token,
8762                         struct dpni_link_state  *state);
8763  
8764 +int dpni_get_link_state_v2(struct fsl_mc_io    *mc_io,
8765 +                          u32                  cmd_flags,
8766 +                          u16                  token,
8767 +                          struct dpni_link_state       *state);
8768 +
8769 +/**
8770 + * struct dpni_tx_shaping_cfg - Structure representing DPNI Tx shaping configuration
8771 + * @rate_limit: rate in Mbps
8772 + * @max_burst_size: burst size in bytes (up to 64KB)
8773 + */
8774 +struct dpni_tx_shaping_cfg {
8775 +       u32     rate_limit;
8776 +       u16     max_burst_size;
8777 +};
8778 +
8779 +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
8780 +                       u32 cmd_flags,
8781 +                       u16 token,
8782 +                       const struct dpni_tx_shaping_cfg *tx_cr_shaper,
8783 +                       const struct dpni_tx_shaping_cfg *tx_er_shaper,
8784 +                       int coupled);
8785 +
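
A sketch of intended use, with placeholder rates and a hypothetical helper: shape the committed rate to 100 Mbps and the excess rate to 400 Mbps, without coupling the two shapers:

static int example_shape_tx(struct fsl_mc_io *mc_io, u16 token)
{
        struct dpni_tx_shaping_cfg cr = {
                .rate_limit = 100,              /* Mbps */
                .max_burst_size = 8 * 1024,     /* bytes */
        };
        struct dpni_tx_shaping_cfg er = {
                .rate_limit = 400,
                .max_burst_size = 8 * 1024,
        };

        return dpni_set_tx_shaping(mc_io, 0, token, &cr, &er, 0);
}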
8786  int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
8787                               u32               cmd_flags,
8788                               u16               token,
8789 @@ -639,6 +692,70 @@ int dpni_prepare_key_cfg(const struct dp
8790                          u8 *key_cfg_buf);
8791  
8792  /**
8793 + * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
8794 + * @key_cfg_iova: I/O virtual address of 256 bytes of DMA-able memory filled
8795 + *             with the key extractions to be used as the QoS criteria,
8796 + *             prepared by calling dpkg_prepare_key_cfg()
8797 + * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
8798 + *             '0' to use the 'default_tc' in such cases
8799 + * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
8800 + */
8801 +struct dpni_qos_tbl_cfg {
8802 +       u64 key_cfg_iova;
8803 +       int discard_on_miss;
8804 +       u8 default_tc;
8805 +};
8806 +
8807 +int dpni_set_qos_table(struct fsl_mc_io *mc_io,
8808 +                      u32 cmd_flags,
8809 +                      u16 token,
8810 +                      const struct dpni_qos_tbl_cfg *cfg);
8811 +
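
A minimal sketch, assuming 'key_cfg_iova' references a 256-byte DMA-able buffer already filled by the key-preparation helper; misses fall back to TC 0 instead of being discarded:

static int example_enable_qos(struct fsl_mc_io *mc_io, u16 token,
                              dma_addr_t key_cfg_iova)
{
        struct dpni_qos_tbl_cfg qos = {
                .key_cfg_iova = key_cfg_iova,
                .discard_on_miss = 0,   /* route misses to default_tc */
                .default_tc = 0,
        };

        return dpni_set_qos_table(mc_io, 0, token, &qos);
}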
8812 +/**
8813 + * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode
8814 + * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority
8815 + * @DPNI_TX_SCHED_WEIGHTED_A: weighted based scheduling in group A
8816 + * @DPNI_TX_SCHED_WEIGHTED_B: weighted based scheduling in group B
8817 + */
8818 +enum dpni_tx_schedule_mode {
8819 +       DPNI_TX_SCHED_STRICT_PRIORITY = 0,
8820 +       DPNI_TX_SCHED_WEIGHTED_A,
8821 +       DPNI_TX_SCHED_WEIGHTED_B,
8822 +};
8823 +
8824 +/**
8825 + * struct dpni_tx_schedule_cfg - Structure representing Tx scheduling conf
8826 + * @mode:              Scheduling mode
8827 + * @delta_bandwidth:   Bandwidth represented in weights from 100 to 10000;
8828 + *     not applicable for 'strict-priority' mode;
8829 + */
8830 +struct dpni_tx_schedule_cfg {
8831 +       enum dpni_tx_schedule_mode mode;
8832 +       u16 delta_bandwidth;
8833 +};
8834 +
8835 +/**
8836 + * struct dpni_tx_priorities_cfg - Structure representing transmission
8837 + *                                     priorities for DPNI TCs
8838 + * @tc_sched:  An array of traffic-classes
8839 + * @prio_group_A: Priority of group A
8840 + * @prio_group_B: Priority of group B
8841 + * @separate_groups: Treat A and B groups as separate
8842 + * @ceetm_ch_idx: ceetm channel index to apply the changes
8843 + */
8844 +struct dpni_tx_priorities_cfg {
8845 +       struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
8846 +       u8 prio_group_A;
8847 +       u8 prio_group_B;
8848 +       u8 separate_groups;
+       u8 ceetm_ch_idx;
8849 +};
8850 +
8851 +int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
8852 +                          u32 cmd_flags,
8853 +                          u16 token,
8854 +                          const struct dpni_tx_priorities_cfg *cfg);
8855 +
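
To make the scheduling knobs concrete, a sketch (the helper is hypothetical and the weights are illustrative, within the documented 100-10000 range) that runs TC 0 at strict priority and spreads the remaining classes across weighted group A:

static int example_set_tx_prios(struct fsl_mc_io *mc_io, u16 token)
{
        struct dpni_tx_priorities_cfg cfg = { 0 };
        int i;

        cfg.tc_sched[0].mode = DPNI_TX_SCHED_STRICT_PRIORITY;
        for (i = 1; i < DPNI_MAX_TC; i++) {
                cfg.tc_sched[i].mode = DPNI_TX_SCHED_WEIGHTED_A;
                cfg.tc_sched[i].delta_bandwidth = 100 * i;
        }
        cfg.prio_group_A = 1;
        cfg.separate_groups = 0;

        return dpni_set_tx_priorities(mc_io, 0, token, &cfg);
}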
8856 +/**
8857   * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
8858   * @dist_size: Set the distribution size;
8859   *     supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
8860 @@ -784,6 +901,108 @@ enum dpni_congestion_point {
8861  };
8862  
8863  /**
8864 + * struct dpni_dest_cfg - Structure representing DPNI destination parameters
8865 + * @dest_type: Destination type
8866 + * @dest_id:   Either DPIO ID or DPCON ID, depending on the destination type
8867 + * @priority:  Priority selection within the DPIO or DPCON channel; valid
8868 + *             values are 0-1 or 0-7, depending on the number of priorities
8869 + *             in that channel; not relevant for 'DPNI_DEST_NONE' option
8870 + */
8871 +struct dpni_dest_cfg {
8872 +       enum dpni_dest dest_type;
8873 +       int dest_id;
8874 +       u8 priority;
8875 +};
8876 +
8877 +/* DPNI congestion options */
8878 +
8879 +/**
8880 + * CSCN message is written to message_iova once entering a
8881 + * congestion state (see 'threshold_entry')
8882 + */
8883 +#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER        0x00000001
8884 +/**
8885 + * CSCN message is written to message_iova once exiting a
8886 + * congestion state (see 'threshold_exit')
8887 + */
8888 +#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT         0x00000002
8889 +/**
8890 + * CSCN write will attempt to allocate into a cache (coherent write);
8891 + * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
8892 + */
8893 +#define DPNI_CONG_OPT_COHERENT_WRITE            0x00000004
8894 +/**
8895 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
8896 + * DPIO/DPCON's WQ channel once entering a congestion state
8897 + * (see 'threshold_entry')
8898 + */
8899 +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER      0x00000008
8900 +/**
8901 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
8902 + * DPIO/DPCON's WQ channel once exiting a congestion state
8903 + * (see 'threshold_exit')
8904 + */
8905 +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT       0x00000010
8906 +/**
8907 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
8908 + * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
8909 + */
8910 +#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED  0x00000020
8911 +/**
8912 + * This congestion will trigger flow control or priority flow control.
8913 + * This will have effect only if flow control is enabled with
8914 + * dpni_set_link_cfg().
8915 + */
8916 +#define DPNI_CONG_OPT_FLOW_CONTROL     0x00000040
8917 +
8918 +/**
8919 + * struct dpni_congestion_notification_cfg - congestion notification
8920 + *                                     configuration
8921 + * @units: Units type
8922 + * @threshold_entry: Above this threshold we enter a congestion state.
8923 + *             Set it to '0' to disable it.
8924 + * @threshold_exit: Below this threshold we exit the congestion state.
8925 + * @message_ctx: The context that will be part of the CSCN message
8926 + * @message_iova: I/O virtual address (must be in DMA-able memory),
8927 + *             must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>'
8928 + *             is contained in 'options'
8929 + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
8930 + * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
8931 + */
8932 +
8933 +struct dpni_congestion_notification_cfg {
8934 +       enum dpni_congestion_unit units;
8935 +       u32 threshold_entry;
8936 +       u32 threshold_exit;
8937 +       u64 message_ctx;
8938 +       u64 message_iova;
8939 +       struct dpni_dest_cfg dest_cfg;
8940 +       u16 notification_mode;
8941 +};
8942 +
8943 +/** Compose the TC parameter for dpni_set_congestion_notification()
8944 + * and dpni_get_congestion_notification().
8945 + */
8946 +#define DPNI_BUILD_CH_TC(ceetm_ch_idx, tc) \
8947 +       ((((ceetm_ch_idx) & 0x0F) << 4) | ((tc) & 0x0F))
8948 +
8949 +int dpni_set_congestion_notification(
8950 +                       struct fsl_mc_io *mc_io,
8951 +                       u32 cmd_flags,
8952 +                       u16 token,
8953 +                       enum dpni_queue_type qtype,
8954 +                       u8 tc_id,
8955 +                       const struct dpni_congestion_notification_cfg *cfg);
8956 +
8957 +int dpni_get_congestion_notification(
8958 +                       struct fsl_mc_io *mc_io,
8959 +                       u32 cmd_flags,
8960 +                       u16 token,
8961 +                       enum dpni_queue_type qtype,
8962 +                       u8 tc_id,
8963 +                       struct dpni_congestion_notification_cfg *cfg);
8964 +
8965 +/**
8966   * struct dpni_taildrop - Structure representing the taildrop
8967   * @enable:    Indicates whether the taildrop is active or not.
8968   * @units:     Indicates the unit of THRESHOLD. Queue taildrop only supports
8969 @@ -829,4 +1048,124 @@ struct dpni_rule_cfg {
8970         u8      key_size;
8971  };
8972  
8973 +int dpni_get_api_version(struct fsl_mc_io *mc_io,
8974 +                        u32 cmd_flags,
8975 +                        u16 *major_ver,
8976 +                        u16 *minor_ver);
8977 +
8978 +int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
8979 +                      u32 cmd_flags,
8980 +                      u16 token,
8981 +                      const struct dpni_rule_cfg *cfg,
8982 +                      u8 tc_id,
8983 +                      u16 index);
8984 +
8985 +int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
8986 +                         u32 cmd_flags,
8987 +                         u16 token,
8988 +                         const struct dpni_rule_cfg *cfg);
8989 +
8990 +int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
8991 +                        u32 cmd_flags,
8992 +                        u16 token);
8993 +
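
A short sketch of the add side (the TC and index are arbitrary; key/mask buffers are assumed DMA-mapped and laid out per the QoS key configuration). The matching removal would rebuild the same rule and pass it to dpni_remove_qos_entry():

static int example_map_to_tc(struct fsl_mc_io *mc_io, u16 token,
                             dma_addr_t key_iova, dma_addr_t mask_iova,
                             u8 key_size)
{
        struct dpni_rule_cfg rule = {
                .key_iova = key_iova,
                .mask_iova = mask_iova,
                .key_size = key_size,
        };

        /* classify matching frames to traffic class 5, at index 0 */
        return dpni_add_qos_entry(mc_io, 0, token, &rule, 5, 0);
}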
8994 +/**
8995 + * Discard matching traffic. If set, this takes precedence over any other
8996 + * configuration and matching traffic is always discarded.
8997 + */
8998 +#define DPNI_FS_OPT_DISCARD            0x1
8999 +
9000 +/**
9001 + * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
9002 + * override the FLC value set per queue.
9003 + * For more details check the Frame Descriptor section in the hardware
9004 + * documentation.
9005 + */
9006 +#define DPNI_FS_OPT_SET_FLC            0x2
9007 +
9008 +/**
9009 + * Indicates whether the 6 lowest significant bits of FLC are used for stash
9010 + * control. If set, the 6 least significant bits in value are interpreted as
9011 + * follows:
9012 + *     - bits 0-1: indicates the number of 64 byte units of context that are
9013 + *     stashed. FLC value is interpreted as a memory address in this case,
9014 + *     excluding the 6 LS bits.
9015 + *     - bits 2-3: indicates the number of 64 byte units of frame annotation
9016 + *     to be stashed. Annotation is placed at FD[ADDR].
9017 + *     - bits 4-5: indicates the number of 64 byte units of frame data to be
9018 + *     stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
9019 + * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
9020 + */
9021 +#define DPNI_FS_OPT_SET_STASH_CONTROL  0x4
9022 +
9023 +/**
9024 + * struct dpni_fs_action_cfg - Action configuration for table look-up
9025 + * @flc:       FLC value for traffic matching this rule. Please check the
9026 + *             Frame Descriptor section in the hardware documentation for
9027 + *             more information.
9028 + * @flow_id:   Identifies the Rx queue used for matching traffic. Supported
9029 + *             values are in range 0 to num_queue-1.
9030 + * @options:   Any combination of DPNI_FS_OPT_ values.
9031 + */
9032 +struct dpni_fs_action_cfg {
9033 +       u64 flc;
9034 +       u16 flow_id;
9035 +       u16 options;
9036 +};
9037 +
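
The stash-control encoding above is easiest to see worked out. In this sketch (the helper and the context address are placeholders), one 64-byte unit of context, one unit of annotation and two units of frame data are requested, with the 64B-aligned context address occupying the upper bits of FLC:

static void example_fill_flc(struct dpni_fs_action_cfg *action,
                             u64 ctx_addr /* 64B-aligned */)
{
        /* bits 0-1: 1 unit of context; bits 2-3: 1 unit of annotation;
         * bits 4-5: 2 units of frame data; remaining bits: context address
         */
        action->flc = (ctx_addr & ~0x3FULL) | (2 << 4) | (1 << 2) | 1;
        action->options = DPNI_FS_OPT_SET_FLC |
                          DPNI_FS_OPT_SET_STASH_CONTROL;
}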
9038 +int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
9039 +                     u32 cmd_flags,
9040 +                     u16 token,
9041 +                     u8 tc_id,
9042 +                     u16 index,
9043 +                     const struct dpni_rule_cfg *cfg,
9044 +                     const struct dpni_fs_action_cfg *action);
9045 +
9046 +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
9047 +                        u32 cmd_flags,
9048 +                        u16 token,
9049 +                        u8 tc_id,
9050 +                        const struct dpni_rule_cfg *cfg);
9051 +
9052 +/**
9053 + * When used as the queue_idx in dpni_set_rx_dist_default_queue(),
9054 + * this value signals the DPNI to drop all unclassified frames.
9055 + */
9056 +#define DPNI_FS_MISS_DROP              ((uint16_t)-1)
9057 +
9058 +/**
9059 + * struct dpni_rx_dist_cfg - distribution configuration
9060 + * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
9061 + *             12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
9062 + *             512,768,896,1024
9063 + * @key_cfg_iova: I/O virtual address of 256 bytes of DMA-able memory filled
9064 + *             with the extractions to be used for the distribution key, by
9065 + *             calling dpkg_prepare_key_cfg(); relevant only when enable != 0,
9066 + *             otherwise it can be '0'
9067 + * @enable: enable/disable the distribution.
9068 + * @tc: TC ID for which the distribution is set
9069 + * @fs_miss_flow_id: when a packet misses all rules of the flow steering table
9070 + *             and hashing is disabled, it is enqueued to this queue ID; use
9071 + *             DPNI_FS_MISS_DROP to drop such frames. The value of this field
9072 + *             is used only when flow steering distribution is enabled and
9073 + *             hash distribution is disabled
9074 + */
9075 +struct dpni_rx_dist_cfg {
9076 +       u16 dist_size;
9077 +       u64 key_cfg_iova;
9078 +       u8 enable;
9079 +       u8 tc;
9080 +       u16 fs_miss_flow_id;
9081 +};
9082 +
9083 +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
9084 +                       u32 cmd_flags,
9085 +                       u16 token,
9086 +                       const struct dpni_rx_dist_cfg *cfg);
9087 +
9088 +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
9089 +                         u32 cmd_flags,
9090 +                         u16 token,
9091 +                         const struct dpni_rx_dist_cfg *cfg);
9092 +
9093  #endif /* __FSL_DPNI_H */
9094 --- a/drivers/staging/fsl-dpaa2/ethernet/net.h
9095 +++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
9096 @@ -1,33 +1,5 @@
9097 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
9098  /* Copyright 2013-2015 Freescale Semiconductor Inc.
9099 - *
9100 - * Redistribution and use in source and binary forms, with or without
9101 - * modification, are permitted provided that the following conditions are met:
9102 - * * Redistributions of source code must retain the above copyright
9103 - * notice, this list of conditions and the following disclaimer.
9104 - * * Redistributions in binary form must reproduce the above copyright
9105 - * notice, this list of conditions and the following disclaimer in the
9106 - * documentation and/or other materials provided with the distribution.
9107 - * * Neither the name of the above-listed copyright holders nor the
9108 - * names of any contributors may be used to endorse or promote products
9109 - * derived from this software without specific prior written permission.
9110 - *
9111 - *
9112 - * ALTERNATIVELY, this software may be distributed under the terms of the
9113 - * GNU General Public License ("GPL") as published by the Free Software
9114 - * Foundation, either version 2 of that License or (at your option) any
9115 - * later version.
9116 - *
9117 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
9118 - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
9119 - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
9120 - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
9121 - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
9122 - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
9123 - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
9124 - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
9125 - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
9126 - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
9127 - * POSSIBILITY OF SUCH DAMAGE.
9128   */
9129  #ifndef __FSL_NET_H
9130  #define __FSL_NET_H