kernel: bump 4.14 to 4.14.125 (FS#2305 FS#2297)
target/linux/layerscape/patches-4.14/710-pfe-eth-support-layerscape.patch
From 35745905430a4c9827c235d42f3a61bef34043e8 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
Date: Fri, 19 Apr 2019 13:21:09 +0800
Subject: [PATCH] pfe-eth: support layerscape

This is an integrated patch of pfe-eth for layerscape

Signed-off-by: Akhila Kavi <akhila.kavi@nxp.com>
Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Signed-off-by: Anjaneyulu Jagarlmudi <anji.jagarlmudi@nxp.com>
Signed-off-by: Archana Madhavan <archana.madhavan@nxp.com>
Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@nxp.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Calvin Johnson <calvin.johnson@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
 .../devicetree/bindings/net/fsl_ppfe/pfe.txt  |  199 ++
 drivers/staging/fsl_ppfe/Kconfig              |   21 +
 drivers/staging/fsl_ppfe/Makefile             |   20 +
 drivers/staging/fsl_ppfe/TODO                 |    2 +
 drivers/staging/fsl_ppfe/include/pfe/cbus.h   |   78 +
 .../staging/fsl_ppfe/include/pfe/cbus/bmu.h   |   55 +
 .../fsl_ppfe/include/pfe/cbus/class_csr.h     |  289 ++
 .../fsl_ppfe/include/pfe/cbus/emac_mtip.h     |  242 ++
 .../staging/fsl_ppfe/include/pfe/cbus/gpi.h   |   86 +
 .../staging/fsl_ppfe/include/pfe/cbus/hif.h   |  100 +
 .../fsl_ppfe/include/pfe/cbus/hif_nocpy.h     |   50 +
 .../fsl_ppfe/include/pfe/cbus/tmu_csr.h       |  168 ++
 .../fsl_ppfe/include/pfe/cbus/util_csr.h      |   61 +
 drivers/staging/fsl_ppfe/include/pfe/pfe.h    |  372 +++
 drivers/staging/fsl_ppfe/pfe_cdev.c           |  258 ++
 drivers/staging/fsl_ppfe/pfe_cdev.h           |   41 +
 drivers/staging/fsl_ppfe/pfe_ctrl.c           |  226 ++
 drivers/staging/fsl_ppfe/pfe_ctrl.h           |  100 +
 drivers/staging/fsl_ppfe/pfe_debugfs.c        |   99 +
 drivers/staging/fsl_ppfe/pfe_debugfs.h        |   13 +
 drivers/staging/fsl_ppfe/pfe_eth.c            | 2554 +++++++++++++++++
 drivers/staging/fsl_ppfe/pfe_eth.h            |  175 ++
 drivers/staging/fsl_ppfe/pfe_firmware.c       |  302 ++
 drivers/staging/fsl_ppfe/pfe_firmware.h       |   20 +
 drivers/staging/fsl_ppfe/pfe_hal.c            | 1516 ++++++++++
 drivers/staging/fsl_ppfe/pfe_hif.c            | 1060 +++++++
 drivers/staging/fsl_ppfe/pfe_hif.h            |  200 ++
 drivers/staging/fsl_ppfe/pfe_hif_lib.c        |  628 ++++
 drivers/staging/fsl_ppfe/pfe_hif_lib.h        |  229 ++
 drivers/staging/fsl_ppfe/pfe_hw.c             |  164 ++
 drivers/staging/fsl_ppfe/pfe_hw.h             |   15 +
 .../staging/fsl_ppfe/pfe_ls1012a_platform.c   |  368 +++
 drivers/staging/fsl_ppfe/pfe_mod.c            |  158 +
 drivers/staging/fsl_ppfe/pfe_mod.h            |  103 +
 drivers/staging/fsl_ppfe/pfe_perfmon.h        |   26 +
 drivers/staging/fsl_ppfe/pfe_sysfs.c          |  806 ++++++
 drivers/staging/fsl_ppfe/pfe_sysfs.h          |   17 +
 37 files changed, 10821 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/net/fsl_ppfe/pfe.txt
 create mode 100644 drivers/staging/fsl_ppfe/Kconfig
 create mode 100644 drivers/staging/fsl_ppfe/Makefile
 create mode 100644 drivers/staging/fsl_ppfe/TODO
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/pfe.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_cdev.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_cdev.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hal.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_perfmon.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.h

--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl_ppfe/pfe.txt
@@ -0,0 +1,199 @@
+=============================================================================
+NXP Programmable Packet Forwarding Engine Device Bindings
+
+CONTENTS
+  - PFE Node
+  - Ethernet Node
+
+=============================================================================
+PFE Node
+
+DESCRIPTION
+
+The PFE node has all the properties associated with the Packet Forwarding
+Engine block.
+
+PROPERTIES
+
+- compatible
+               Usage: required
+               Value type: <stringlist>
+               Definition: Must include "fsl,pfe"
+
+- reg
+               Usage: required
+               Value type: <prop-encoded-array>
+               Definition: A standard property.
+               Specifies the offset of the following registers:
+               - PFE configuration registers
+               - DDR memory used by PFE
+
+- fsl,pfe-num-interfaces
+               Usage: required
+               Value type: <u32>
+               Definition: Must be present. Value can be either one or two.
+
+- interrupts
+               Usage: required
+               Value type: <prop-encoded-array>
+               Definition: Three interrupts are specified in this property.
+               - HIF interrupt
+               - HIF NO COPY interrupt
+               - Wake On LAN interrupt
+
+- interrupt-names
+               Usage: required
+               Value type: <stringlist>
+               Definition: The following strings are defined for the 3 interrupts.
+               "pfe_hif" - HIF interrupt
+               "pfe_hif_nocpy" - HIF NO COPY interrupt
+               "pfe_wol" - Wake On LAN interrupt
+
+- memory-region
+               Usage: required
+               Value type: <phandle>
+               Definition: phandle to a node describing reserved memory used by the PFE.
+               Refer to Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+
+- fsl,pfe-scfg
+               Usage: required
+               Value type: <phandle>
+               Definition: phandle for scfg.
+
+- fsl,rcpm-wakeup
+               Usage: required
+               Value type: <phandle>
+               Definition: phandle for rcpm.
+
+- clocks
+               Usage: required
+               Value type: <phandle>
+               Definition: phandle for clockgen.
+
+- clock-names
+               Usage: required
+               Value type: <string>
+               Definition: name of the input clock. Must be "pfe".
+
+EXAMPLE
+
+pfe: pfe@04000000 {
+       compatible = "fsl,pfe";
+       reg =   <0x0 0x04000000 0x0 0xc00000>,  /* AXI 16M */
+               <0x0 0x83400000 0x0 0xc00000>;  /* PFE DDR 12M */
+       reg-names = "pfe", "pfe-ddr";
+       fsl,pfe-num-interfaces = <0x2>;
+       interrupts = <0 172 0x4>,    /* HIF interrupt */
+                    <0 173 0x4>,    /* HIF_NOCPY interrupt */
+                    <0 174 0x4>;    /* WoL interrupt */
+       interrupt-names = "pfe_hif", "pfe_hif_nocpy", "pfe_wol";
+       memory-region = <&pfe_reserved>;
+       fsl,pfe-scfg = <&scfg 0>;
+       fsl,rcpm-wakeup = <&rcpm 0xf0000020>;
+       clocks = <&clockgen 4 0>;
+       clock-names = "pfe";
+
+       status = "okay";
+       pfe_mac0: ethernet@0 {
+       };
+
+       pfe_mac1: ethernet@1 {
+       };
+};
+
+=============================================================================
+Ethernet Node
+
+DESCRIPTION
+
+The Ethernet node has all the properties associated with a PFE interface,
+used by platforms to connect to a PHY.
+
+PROPERTIES
+
+- compatible
+               Usage: required
+               Value type: <stringlist>
+               Definition: Must include "fsl,pfe-gemac-port"
+
+- reg
+               Usage: required
+               Value type: <prop-encoded-array>
+               Definition: A standard property.
+               Specifies the GEMAC id of the interface.
+
+- fsl,gemac-bus-id
+               Usage: required
+               Value type: <u32>
+               Definition: Must be present. Value should be the id of the bus
+               connected to the GEMAC.
+
+- fsl,gemac-phy-id (deprecated binding)
+               Usage: required
+               Value type: <u32>
+               Definition: This binding shouldn't be used with new platforms.
+               Must be present. Value should be the id of the PHY
+               connected to the GEMAC.
+
+- fsl,mdio-mux-val
+               Usage: required
+               Value type: <u32>
+               Definition: Must be present. Value can be 0, 2, or 3.
+               This value is used to configure the mux to enable mdio.
+
+- phy-mode
+               Usage: required
+               Value type: <string>
+               Definition: Must include "sgmii"
+
+- fsl,pfe-phy-if-flags (deprecated binding)
+               Usage: required
+               Value type: <u32>
+               Definition: This binding shouldn't be used with new platforms.
+               Must be present. Value should be 0 by default.
+               If there is no PHY connected, this needs to be 1.
+
+- phy-handle
+               Usage: optional
+               Value type: <phandle>
+               Definition: phandle to the PHY device connected to this device.
+
+- mdio : A required subnode which specifies the MDIO bus in the PFE, used as
+a container for PHY nodes as described in ../phy.txt.
+
+EXAMPLE
+
+ethernet@0 {
+       compatible = "fsl,pfe-gemac-port";
+       #address-cells = <1>;
+       #size-cells = <0>;
+       reg = <0x0>;    /* GEM_ID */
+       fsl,gemac-bus-id = <0x0>;       /* BUS_ID */
+       fsl,mdio-mux-val = <0x0>;
+       phy-mode = "sgmii";
+       phy-handle = <&sgmii_phy1>;
+};
+
+
+ethernet@1 {
+       compatible = "fsl,pfe-gemac-port";
+       #address-cells = <1>;
+       #size-cells = <0>;
+       reg = <0x1>;    /* GEM_ID */
+       fsl,gemac-bus-id = <0x1>;       /* BUS_ID */
+       fsl,mdio-mux-val = <0x0>;
+       phy-mode = "sgmii";
+       phy-handle = <&sgmii_phy2>;
+};
+
+mdio@0 {
+       #address-cells = <1>;
+       #size-cells = <0>;
+
+       sgmii_phy1: ethernet-phy@2 {
+               reg = <0x2>;
+       };
+
+       sgmii_phy2: ethernet-phy@1 {
+               reg = <0x1>;
+       };
+};
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/Kconfig
@@ -0,0 +1,21 @@
+#
+# Freescale Programmable Packet Forwarding Engine driver
+#
+config FSL_PPFE
+       bool "Freescale PPFE Driver"
+       select FSL_GUTS
+       default n
+       ---help---
+       The Freescale LS1012A SoC has a Programmable Packet Forwarding Engine.
+       It provides two high-performance Ethernet interfaces.
+       This driver initializes, programs and controls the PPFE.
+       Use this driver to enable network connectivity on LS1012A platforms.
+
+if FSL_PPFE
+
+config FSL_PPFE_UTIL_DISABLED
+       bool "Disable PPFE UTIL Processor Engine"
+       ---help---
+       The UTIL PE should be enabled only if required.
+
+endif # FSL_PPFE
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the Freescale PPFE driver
+#
+
+ccflags-y +=  -I$(src)/include  -I$(src)
+
+obj-m += pfe.o
+
+pfe-y += pfe_mod.o \
+       pfe_hw.o \
+       pfe_firmware.o \
+       pfe_ctrl.o \
+       pfe_hif.o \
+       pfe_hif_lib.o \
+       pfe_eth.o \
+       pfe_sysfs.o \
+       pfe_debugfs.o \
+       pfe_ls1012a_platform.o \
+       pfe_hal.o \
+       pfe_cdev.o
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/TODO
@@ -0,0 +1,2 @@
+TODO:
+       - provide pfe pe monitoring support
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CBUS_H_
+#define _CBUS_H_
+
+#define EMAC1_BASE_ADDR        (CBUS_BASE_ADDR + 0x200000)
+#define EGPI1_BASE_ADDR        (CBUS_BASE_ADDR + 0x210000)
+#define EMAC2_BASE_ADDR        (CBUS_BASE_ADDR + 0x220000)
+#define EGPI2_BASE_ADDR        (CBUS_BASE_ADDR + 0x230000)
+#define BMU1_BASE_ADDR (CBUS_BASE_ADDR + 0x240000)
+#define BMU2_BASE_ADDR (CBUS_BASE_ADDR + 0x250000)
+#define ARB_BASE_ADDR  (CBUS_BASE_ADDR + 0x260000)
+#define DDR_CONFIG_BASE_ADDR   (CBUS_BASE_ADDR + 0x270000)
+#define HIF_BASE_ADDR  (CBUS_BASE_ADDR + 0x280000)
+#define HGPI_BASE_ADDR (CBUS_BASE_ADDR + 0x290000)
+#define LMEM_BASE_ADDR (CBUS_BASE_ADDR + 0x300000)
+#define LMEM_SIZE      0x10000
+#define LMEM_END       (LMEM_BASE_ADDR + LMEM_SIZE)
+#define TMU_CSR_BASE_ADDR      (CBUS_BASE_ADDR + 0x310000)
+#define CLASS_CSR_BASE_ADDR    (CBUS_BASE_ADDR + 0x320000)
+#define HIF_NOCPY_BASE_ADDR    (CBUS_BASE_ADDR + 0x350000)
+#define UTIL_CSR_BASE_ADDR     (CBUS_BASE_ADDR + 0x360000)
+#define CBUS_GPT_BASE_ADDR     (CBUS_BASE_ADDR + 0x370000)
+
+/*
+ * XXX_MEM_ACCESS_ADDR: PE memory access through CSR.
+ * XXX_MEM_ACCESS_ADDR register bit definitions.
+ */
+#define PE_MEM_ACCESS_WRITE    BIT(31) /* Internal Memory Write. */
+#define PE_MEM_ACCESS_IMEM     BIT(15)
+#define PE_MEM_ACCESS_DMEM     BIT(16)
+
+/* Byte enables of the internal memory access. These are interpreted in BE */
+#define PE_MEM_ACCESS_BYTE_ENABLE(offset, size)        \
+       ({ typeof(size) size_ = (size);         \
+       (((BIT(size_) - 1) << (4 - (offset) - (size_))) & 0xf) << 24; })
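+
+/*
+ * For example, PE_MEM_ACCESS_BYTE_ENABLE(0, 4) enables all four byte
+ * lanes of the access word (0x0f000000), while
+ * PE_MEM_ACCESS_BYTE_ENABLE(2, 2) enables only the last two bytes
+ * (0x03000000), since the lanes are counted from the big-endian side.
+ */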
+
+#include "cbus/emac_mtip.h"
+#include "cbus/gpi.h"
+#include "cbus/bmu.h"
+#include "cbus/hif.h"
+#include "cbus/tmu_csr.h"
+#include "cbus/class_csr.h"
+#include "cbus/hif_nocpy.h"
+#include "cbus/util_csr.h"
+
+/* PFE cores states */
+#define CORE_DISABLE   0x00000000
+#define CORE_ENABLE    0x00000001
+#define CORE_SW_RESET  0x00000002
+
+/* LMEM defines */
+#define LMEM_HDR_SIZE  0x0010
+#define LMEM_BUF_SIZE_LN2      0x7
+#define LMEM_BUF_SIZE  BIT(LMEM_BUF_SIZE_LN2)
+
+/* DDR defines */
+#define DDR_HDR_SIZE   0x0100
+#define DDR_BUF_SIZE_LN2       0xb
+#define DDR_BUF_SIZE   BIT(DDR_BUF_SIZE_LN2)
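+/* i.e. 128-byte LMEM buffers and 2048-byte (2 KiB) DDR buffers */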
+
+#endif /* _CBUS_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _BMU_H_
+#define _BMU_H_
+
+#define BMU_VERSION    0x000
+#define BMU_CTRL       0x004
+#define BMU_UCAST_CONFIG       0x008
+#define BMU_UCAST_BASE_ADDR    0x00c
+#define BMU_BUF_SIZE   0x010
+#define BMU_BUF_CNT    0x014
+#define BMU_THRES      0x018
+#define BMU_INT_SRC    0x020
+#define BMU_INT_ENABLE 0x024
+#define BMU_ALLOC_CTRL 0x030
+#define BMU_FREE_CTRL  0x034
+#define BMU_FREE_ERR_ADDR      0x038
+#define BMU_CURR_BUF_CNT       0x03c
+#define BMU_MCAST_CNT  0x040
+#define BMU_MCAST_ALLOC_CTRL   0x044
+#define BMU_REM_BUF_CNT        0x048
+#define BMU_LOW_WATERMARK      0x050
+#define BMU_HIGH_WATERMARK     0x054
+#define BMU_INT_MEM_ACCESS     0x100
+
+struct BMU_CFG {
+       unsigned long baseaddr;
+       u32 count;
+       u32 size;
+       u32 low_watermark;
+       u32 high_watermark;
+};
+
+#define BMU1_BUF_SIZE  LMEM_BUF_SIZE_LN2
+#define BMU2_BUF_SIZE  DDR_BUF_SIZE_LN2
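+
+/*
+ * Illustrative BMU1 (LMEM pool) configuration built from the defines
+ * above; the values actually programmed are set up by the driver
+ * elsewhere in this patch, so treat this as a sketch only:
+ *
+ *     struct BMU_CFG bmu1_cfg = {
+ *             .baseaddr = LMEM_BASE_ADDR,
+ *             .count = LMEM_SIZE / LMEM_BUF_SIZE,  // 512 buffers
+ *             .size = BMU1_BUF_SIZE,  // ln2(buffer size), per the define above
+ *     };
+ */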
+
+#define BMU2_MCAST_ALLOC_CTRL  (BMU2_BASE_ADDR + BMU_MCAST_ALLOC_CTRL)
+
+#endif /* _BMU_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CLASS_CSR_H_
+#define _CLASS_CSR_H_
+
+/* @file class_csr.h.
+ * class_csr - block containing all the classifier control and status
+ * registers. Mapped on CBUS and accessible from all PEs and the ARM.
+ */
+#define CLASS_VERSION  (CLASS_CSR_BASE_ADDR + 0x000)
+#define CLASS_TX_CTRL  (CLASS_CSR_BASE_ADDR + 0x004)
+#define CLASS_INQ_PKTPTR       (CLASS_CSR_BASE_ADDR + 0x010)
+
+/* (ddr_hdr_size[24:16], lmem_hdr_size[5:0]) */
+#define CLASS_HDR_SIZE (CLASS_CSR_BASE_ADDR + 0x014)
+
+/* LMEM header size for the Classifier block. Data in the LMEM
+ * is written from this offset.
+ */
+#define CLASS_HDR_SIZE_LMEM(off)       ((off) & 0x3f)
+
+/* DDR header size for the Classifier block. Data in the DDR
+ * is written from this offset.
+ */
+#define CLASS_HDR_SIZE_DDR(off)        (((off) & 0x1ff) << 16)
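+
+/*
+ * Illustratively, the header sizes from cbus.h would be packed into
+ * CLASS_HDR_SIZE as:
+ *
+ *     CLASS_HDR_SIZE_LMEM(LMEM_HDR_SIZE) | CLASS_HDR_SIZE_DDR(DDR_HDR_SIZE)
+ *
+ * i.e. (0x10 & 0x3f) | ((0x100 & 0x1ff) << 16) = 0x01000010.
+ */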
+
+#define CLASS_PE0_QB_DM_ADDR0  (CLASS_CSR_BASE_ADDR + 0x020)
+
+/* DMEM address of first [15:0] and second [31:16] buffers on QB side. */
+#define CLASS_PE0_QB_DM_ADDR1  (CLASS_CSR_BASE_ADDR + 0x024)
+
+/* DMEM address of third [15:0] and fourth [31:16] buffers on QB side. */
+#define CLASS_PE0_RO_DM_ADDR0  (CLASS_CSR_BASE_ADDR + 0x060)
+
+/* DMEM address of first [15:0] and second [31:16] buffers on RO side. */
+#define CLASS_PE0_RO_DM_ADDR1  (CLASS_CSR_BASE_ADDR + 0x064)
+
+/* DMEM address of third [15:0] and fourth [31:16] buffers on RO side. */
+
+/* @name Class PE memory access. Allows external PEs and the host to
+ * read/write PMEM/DMEM memory ranges for each classifier PE.
+ */
+/* {csr_pe_mem_cmd[31], csr_pe_mem_wren[27:24], csr_pe_mem_addr[23:0]},
+ * See \ref XXX_MEM_ACCESS_ADDR for details.
+ */
+#define CLASS_MEM_ACCESS_ADDR  (CLASS_CSR_BASE_ADDR + 0x100)
+
+/* Internal Memory Access Write Data [31:0] */
+#define CLASS_MEM_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x104)
+
+/* Internal Memory Access Read Data [31:0] */
+#define CLASS_MEM_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x108)
+#define CLASS_TM_INQ_ADDR      (CLASS_CSR_BASE_ADDR + 0x114)
+#define CLASS_PE_STATUS        (CLASS_CSR_BASE_ADDR + 0x118)
+
+#define CLASS_PHY1_RX_PKTS     (CLASS_CSR_BASE_ADDR + 0x11c)
+#define CLASS_PHY1_TX_PKTS     (CLASS_CSR_BASE_ADDR + 0x120)
+#define CLASS_PHY1_LP_FAIL_PKTS        (CLASS_CSR_BASE_ADDR + 0x124)
+#define CLASS_PHY1_INTF_FAIL_PKTS      (CLASS_CSR_BASE_ADDR + 0x128)
+#define CLASS_PHY1_INTF_MATCH_PKTS     (CLASS_CSR_BASE_ADDR + 0x12c)
+#define CLASS_PHY1_L3_FAIL_PKTS        (CLASS_CSR_BASE_ADDR + 0x130)
+#define CLASS_PHY1_V4_PKTS     (CLASS_CSR_BASE_ADDR + 0x134)
+#define CLASS_PHY1_V6_PKTS     (CLASS_CSR_BASE_ADDR + 0x138)
+#define CLASS_PHY1_CHKSUM_ERR_PKTS     (CLASS_CSR_BASE_ADDR + 0x13c)
+#define CLASS_PHY1_TTL_ERR_PKTS        (CLASS_CSR_BASE_ADDR + 0x140)
+#define CLASS_PHY2_RX_PKTS     (CLASS_CSR_BASE_ADDR + 0x144)
+#define CLASS_PHY2_TX_PKTS     (CLASS_CSR_BASE_ADDR + 0x148)
+#define CLASS_PHY2_LP_FAIL_PKTS        (CLASS_CSR_BASE_ADDR + 0x14c)
+#define CLASS_PHY2_INTF_FAIL_PKTS      (CLASS_CSR_BASE_ADDR + 0x150)
+#define CLASS_PHY2_INTF_MATCH_PKTS     (CLASS_CSR_BASE_ADDR + 0x154)
+#define CLASS_PHY2_L3_FAIL_PKTS        (CLASS_CSR_BASE_ADDR + 0x158)
+#define CLASS_PHY2_V4_PKTS     (CLASS_CSR_BASE_ADDR + 0x15c)
+#define CLASS_PHY2_V6_PKTS     (CLASS_CSR_BASE_ADDR + 0x160)
+#define CLASS_PHY2_CHKSUM_ERR_PKTS     (CLASS_CSR_BASE_ADDR + 0x164)
+#define CLASS_PHY2_TTL_ERR_PKTS        (CLASS_CSR_BASE_ADDR + 0x168)
+#define CLASS_PHY3_RX_PKTS     (CLASS_CSR_BASE_ADDR + 0x16c)
+#define CLASS_PHY3_TX_PKTS     (CLASS_CSR_BASE_ADDR + 0x170)
+#define CLASS_PHY3_LP_FAIL_PKTS        (CLASS_CSR_BASE_ADDR + 0x174)
+#define CLASS_PHY3_INTF_FAIL_PKTS      (CLASS_CSR_BASE_ADDR + 0x178)
+#define CLASS_PHY3_INTF_MATCH_PKTS     (CLASS_CSR_BASE_ADDR + 0x17c)
+#define CLASS_PHY3_L3_FAIL_PKTS        (CLASS_CSR_BASE_ADDR + 0x180)
+#define CLASS_PHY3_V4_PKTS     (CLASS_CSR_BASE_ADDR + 0x184)
+#define CLASS_PHY3_V6_PKTS     (CLASS_CSR_BASE_ADDR + 0x188)
+#define CLASS_PHY3_CHKSUM_ERR_PKTS     (CLASS_CSR_BASE_ADDR + 0x18c)
+#define CLASS_PHY3_TTL_ERR_PKTS        (CLASS_CSR_BASE_ADDR + 0x190)
+#define CLASS_PHY1_ICMP_PKTS   (CLASS_CSR_BASE_ADDR + 0x194)
+#define CLASS_PHY1_IGMP_PKTS   (CLASS_CSR_BASE_ADDR + 0x198)
+#define CLASS_PHY1_TCP_PKTS    (CLASS_CSR_BASE_ADDR + 0x19c)
+#define CLASS_PHY1_UDP_PKTS    (CLASS_CSR_BASE_ADDR + 0x1a0)
+#define CLASS_PHY2_ICMP_PKTS   (CLASS_CSR_BASE_ADDR + 0x1a4)
+#define CLASS_PHY2_IGMP_PKTS   (CLASS_CSR_BASE_ADDR + 0x1a8)
+#define CLASS_PHY2_TCP_PKTS    (CLASS_CSR_BASE_ADDR + 0x1ac)
+#define CLASS_PHY2_UDP_PKTS    (CLASS_CSR_BASE_ADDR + 0x1b0)
+#define CLASS_PHY3_ICMP_PKTS   (CLASS_CSR_BASE_ADDR + 0x1b4)
+#define CLASS_PHY3_IGMP_PKTS   (CLASS_CSR_BASE_ADDR + 0x1b8)
+#define CLASS_PHY3_TCP_PKTS    (CLASS_CSR_BASE_ADDR + 0x1bc)
+#define CLASS_PHY3_UDP_PKTS    (CLASS_CSR_BASE_ADDR + 0x1c0)
+#define CLASS_PHY4_ICMP_PKTS   (CLASS_CSR_BASE_ADDR + 0x1c4)
+#define CLASS_PHY4_IGMP_PKTS   (CLASS_CSR_BASE_ADDR + 0x1c8)
+#define CLASS_PHY4_TCP_PKTS    (CLASS_CSR_BASE_ADDR + 0x1cc)
+#define CLASS_PHY4_UDP_PKTS    (CLASS_CSR_BASE_ADDR + 0x1d0)
+#define CLASS_PHY4_RX_PKTS     (CLASS_CSR_BASE_ADDR + 0x1d4)
+#define CLASS_PHY4_TX_PKTS     (CLASS_CSR_BASE_ADDR + 0x1d8)
+#define CLASS_PHY4_LP_FAIL_PKTS        (CLASS_CSR_BASE_ADDR + 0x1dc)
+#define CLASS_PHY4_INTF_FAIL_PKTS      (CLASS_CSR_BASE_ADDR + 0x1e0)
+#define CLASS_PHY4_INTF_MATCH_PKTS     (CLASS_CSR_BASE_ADDR + 0x1e4)
+#define CLASS_PHY4_L3_FAIL_PKTS        (CLASS_CSR_BASE_ADDR + 0x1e8)
+#define CLASS_PHY4_V4_PKTS     (CLASS_CSR_BASE_ADDR + 0x1ec)
+#define CLASS_PHY4_V6_PKTS     (CLASS_CSR_BASE_ADDR + 0x1f0)
+#define CLASS_PHY4_CHKSUM_ERR_PKTS     (CLASS_CSR_BASE_ADDR + 0x1f4)
+#define CLASS_PHY4_TTL_ERR_PKTS        (CLASS_CSR_BASE_ADDR + 0x1f8)
+
+#define CLASS_PE_SYS_CLK_RATIO (CLASS_CSR_BASE_ADDR + 0x200)
+#define CLASS_AFULL_THRES      (CLASS_CSR_BASE_ADDR + 0x204)
+#define CLASS_GAP_BETWEEN_READS        (CLASS_CSR_BASE_ADDR + 0x208)
+#define CLASS_MAX_BUF_CNT      (CLASS_CSR_BASE_ADDR + 0x20c)
+#define CLASS_TSQ_FIFO_THRES   (CLASS_CSR_BASE_ADDR + 0x210)
+#define CLASS_TSQ_MAX_CNT      (CLASS_CSR_BASE_ADDR + 0x214)
+#define CLASS_IRAM_DATA_0      (CLASS_CSR_BASE_ADDR + 0x218)
+#define CLASS_IRAM_DATA_1      (CLASS_CSR_BASE_ADDR + 0x21c)
+#define CLASS_IRAM_DATA_2      (CLASS_CSR_BASE_ADDR + 0x220)
+#define CLASS_IRAM_DATA_3      (CLASS_CSR_BASE_ADDR + 0x224)
+
+#define CLASS_BUS_ACCESS_ADDR  (CLASS_CSR_BASE_ADDR + 0x228)
+
+#define CLASS_BUS_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x22c)
+#define CLASS_BUS_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x230)
+
+/* (route_entry_size[9:0], route_hash_size[23:16];
+ * the hash size is actually ln2(size))
+ */
+#define CLASS_ROUTE_HASH_ENTRY_SIZE    (CLASS_CSR_BASE_ADDR + 0x234)
+
+#define CLASS_ROUTE_ENTRY_SIZE(size)    ((size) & 0x1ff)
+#define CLASS_ROUTE_HASH_SIZE(hash_bits) (((hash_bits) & 0xff) << 16)
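+
+/*
+ * For example, with the route defines further below, a 128-byte entry
+ * and a 2^20-entry hash table would be encoded as
+ * CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
+ * CLASS_ROUTE_HASH_SIZE(CLASS_ROUTE_HASH_BITS), since the hash size
+ * field takes ln2 of the table size.
+ */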
+
+#define CLASS_ROUTE_TABLE_BASE (CLASS_CSR_BASE_ADDR + 0x238)
+
+#define CLASS_ROUTE_MULTI      (CLASS_CSR_BASE_ADDR + 0x23c)
+#define CLASS_SMEM_OFFSET      (CLASS_CSR_BASE_ADDR + 0x240)
+#define CLASS_LMEM_BUF_SIZE    (CLASS_CSR_BASE_ADDR + 0x244)
+#define CLASS_VLAN_ID  (CLASS_CSR_BASE_ADDR + 0x248)
+#define CLASS_BMU1_BUF_FREE    (CLASS_CSR_BASE_ADDR + 0x24c)
+#define CLASS_USE_TMU_INQ      (CLASS_CSR_BASE_ADDR + 0x250)
+#define CLASS_VLAN_ID1 (CLASS_CSR_BASE_ADDR + 0x254)
+
+#define CLASS_BUS_ACCESS_BASE  (CLASS_CSR_BASE_ADDR + 0x258)
+#define CLASS_BUS_ACCESS_BASE_MASK     (0xFF000000)
+/* bit 31:24 of PE peripheral address are stored in CLASS_BUS_ACCESS_BASE */
+
+#define CLASS_HIF_PARSE        (CLASS_CSR_BASE_ADDR + 0x25c)
+
+#define CLASS_HOST_PE0_GP      (CLASS_CSR_BASE_ADDR + 0x260)
+#define CLASS_PE0_GP   (CLASS_CSR_BASE_ADDR + 0x264)
+#define CLASS_HOST_PE1_GP      (CLASS_CSR_BASE_ADDR + 0x268)
+#define CLASS_PE1_GP   (CLASS_CSR_BASE_ADDR + 0x26c)
+#define CLASS_HOST_PE2_GP      (CLASS_CSR_BASE_ADDR + 0x270)
+#define CLASS_PE2_GP   (CLASS_CSR_BASE_ADDR + 0x274)
+#define CLASS_HOST_PE3_GP      (CLASS_CSR_BASE_ADDR + 0x278)
+#define CLASS_PE3_GP   (CLASS_CSR_BASE_ADDR + 0x27c)
+#define CLASS_HOST_PE4_GP      (CLASS_CSR_BASE_ADDR + 0x280)
+#define CLASS_PE4_GP   (CLASS_CSR_BASE_ADDR + 0x284)
+#define CLASS_HOST_PE5_GP      (CLASS_CSR_BASE_ADDR + 0x288)
+#define CLASS_PE5_GP   (CLASS_CSR_BASE_ADDR + 0x28c)
+
+#define CLASS_PE_INT_SRC       (CLASS_CSR_BASE_ADDR + 0x290)
+#define CLASS_PE_INT_ENABLE    (CLASS_CSR_BASE_ADDR + 0x294)
+
+#define CLASS_TPID0_TPID1      (CLASS_CSR_BASE_ADDR + 0x298)
+#define CLASS_TPID2    (CLASS_CSR_BASE_ADDR + 0x29c)
+
+#define CLASS_L4_CHKSUM_ADDR   (CLASS_CSR_BASE_ADDR + 0x2a0)
+
+#define CLASS_PE0_DEBUG        (CLASS_CSR_BASE_ADDR + 0x2a4)
+#define CLASS_PE1_DEBUG        (CLASS_CSR_BASE_ADDR + 0x2a8)
+#define CLASS_PE2_DEBUG        (CLASS_CSR_BASE_ADDR + 0x2ac)
+#define CLASS_PE3_DEBUG        (CLASS_CSR_BASE_ADDR + 0x2b0)
+#define CLASS_PE4_DEBUG        (CLASS_CSR_BASE_ADDR + 0x2b4)
+#define CLASS_PE5_DEBUG        (CLASS_CSR_BASE_ADDR + 0x2b8)
+
+#define CLASS_STATE    (CLASS_CSR_BASE_ADDR + 0x2bc)
+
+/* CLASS defines */
+#define CLASS_PBUF_SIZE        0x100   /* Fixed by hardware */
+#define CLASS_PBUF_HEADER_OFFSET       0x80    /* Can be configured */
+
+/* Can be configured */
+#define CLASS_PBUF0_BASE_ADDR  0x000
+/* Can be configured */
+#define CLASS_PBUF1_BASE_ADDR  (CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_SIZE)
+/* Can be configured */
+#define CLASS_PBUF2_BASE_ADDR  (CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_SIZE)
+/* Can be configured */
+#define CLASS_PBUF3_BASE_ADDR  (CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_SIZE)
+
+#define CLASS_PBUF0_HEADER_BASE_ADDR   (CLASS_PBUF0_BASE_ADDR + \
+                                               CLASS_PBUF_HEADER_OFFSET)
+#define CLASS_PBUF1_HEADER_BASE_ADDR   (CLASS_PBUF1_BASE_ADDR + \
+                                               CLASS_PBUF_HEADER_OFFSET)
+#define CLASS_PBUF2_HEADER_BASE_ADDR   (CLASS_PBUF2_BASE_ADDR + \
+                                               CLASS_PBUF_HEADER_OFFSET)
+#define CLASS_PBUF3_HEADER_BASE_ADDR   (CLASS_PBUF3_BASE_ADDR + \
+                                               CLASS_PBUF_HEADER_OFFSET)
+
+#define CLASS_PE0_RO_DM_ADDR0_VAL      ((CLASS_PBUF1_BASE_ADDR << 16) | \
+                                               CLASS_PBUF0_BASE_ADDR)
+#define CLASS_PE0_RO_DM_ADDR1_VAL      ((CLASS_PBUF3_BASE_ADDR << 16) | \
+                                               CLASS_PBUF2_BASE_ADDR)
+
+#define CLASS_PE0_QB_DM_ADDR0_VAL      ((CLASS_PBUF1_HEADER_BASE_ADDR << 16) |\
+                                               CLASS_PBUF0_HEADER_BASE_ADDR)
+#define CLASS_PE0_QB_DM_ADDR1_VAL      ((CLASS_PBUF3_HEADER_BASE_ADDR << 16) |\
+                                               CLASS_PBUF2_HEADER_BASE_ADDR)
+
+#define CLASS_ROUTE_SIZE       128
+#define CLASS_MAX_ROUTE_SIZE   256
+#define CLASS_ROUTE_HASH_BITS  20
+#define CLASS_ROUTE_HASH_MASK  (BIT(CLASS_ROUTE_HASH_BITS) - 1)
+
+/* Can be configured */
+#define        CLASS_ROUTE0_BASE_ADDR  0x400
+/* Can be configured */
+#define CLASS_ROUTE1_BASE_ADDR (CLASS_ROUTE0_BASE_ADDR + CLASS_ROUTE_SIZE)
+/* Can be configured */
+#define CLASS_ROUTE2_BASE_ADDR (CLASS_ROUTE1_BASE_ADDR + CLASS_ROUTE_SIZE)
+/* Can be configured */
+#define CLASS_ROUTE3_BASE_ADDR (CLASS_ROUTE2_BASE_ADDR + CLASS_ROUTE_SIZE)
+
+#define CLASS_SA_SIZE  128
+#define CLASS_IPSEC_SA0_BASE_ADDR      0x600
+/* not used */
+#define CLASS_IPSEC_SA1_BASE_ADDR  (CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE)
+/* not used */
+#define CLASS_IPSEC_SA2_BASE_ADDR  (CLASS_IPSEC_SA1_BASE_ADDR + CLASS_SA_SIZE)
+/* not used */
+#define CLASS_IPSEC_SA3_BASE_ADDR  (CLASS_IPSEC_SA2_BASE_ADDR + CLASS_SA_SIZE)
+
+/* general-purpose free DMEM buffer, last portion of the 2 KiB DMEM pbuf */
+#define CLASS_GP_DMEM_BUF_SIZE (2048 - (CLASS_PBUF_SIZE * 4) - \
+                               (CLASS_ROUTE_SIZE * 4) - (CLASS_SA_SIZE))
+#define CLASS_GP_DMEM_BUF      ((void *)(CLASS_IPSEC_SA0_BASE_ADDR + \
+                                       CLASS_SA_SIZE))
+
+#define TWO_LEVEL_ROUTE                BIT(0)
+#define PHYNO_IN_HASH          BIT(1)
+#define HW_ROUTE_FETCH         BIT(3)
+#define HW_BRIDGE_FETCH                BIT(5)
+#define IP_ALIGNED             BIT(6)
+#define ARC_HIT_CHECK_EN       BIT(7)
+#define CLASS_TOE              BIT(11)
+#define HASH_NORMAL            (0 << 12)
+#define HASH_CRC_PORT          BIT(12)
+#define HASH_CRC_IP            (2 << 12)
+#define HASH_CRC_PORT_IP       (3 << 12)
+#define QB2BUS_LE              BIT(15)
+
+#define TCP_CHKSUM_DROP                BIT(0)
+#define UDP_CHKSUM_DROP                BIT(1)
+#define IPV4_CHKSUM_DROP       BIT(9)
+
+/* CLASS_HIF_PARSE bits */
+#define HIF_PKT_CLASS_EN       BIT(0)
+#define HIF_PKT_OFFSET(ofst)   (((ofst) & 0xF) << 1)
+
+struct class_cfg {
+       u32 toe_mode;
+       unsigned long route_table_baseaddr;
+       u32 route_table_hash_bits;
+       u32 pe_sys_clk_ratio;
+       u32 resume;
+};
+
+#endif /* _CLASS_CSR_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _EMAC_H_
+#define _EMAC_H_
+
+#include <linux/ethtool.h>
+
+#define EMAC_IEVENT_REG                0x004
+#define EMAC_IMASK_REG         0x008
+#define EMAC_R_DES_ACTIVE_REG  0x010
+#define EMAC_X_DES_ACTIVE_REG  0x014
+#define EMAC_ECNTRL_REG                0x024
+#define EMAC_MII_DATA_REG      0x040
+#define EMAC_MII_CTRL_REG      0x044
+#define EMAC_MIB_CTRL_STS_REG  0x064
+#define EMAC_RCNTRL_REG                0x084
+#define EMAC_TCNTRL_REG                0x0C4
+#define EMAC_PHY_ADDR_LOW      0x0E4
+#define EMAC_PHY_ADDR_HIGH     0x0E8
+#define EMAC_GAUR              0x120
+#define EMAC_GALR              0x124
+#define EMAC_TFWR_STR_FWD      0x144
+#define EMAC_RX_SECTION_FULL   0x190
+#define EMAC_RX_SECTION_EMPTY  0x194
+#define EMAC_TX_SECTION_EMPTY  0x1A0
+#define EMAC_TRUNC_FL          0x1B0
+
+#define RMON_T_DROP    0x200 /* Count of frames not counted correctly */
+#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
+#define RMON_T_BC_PKT  0x208 /* RMON TX broadcast pkts */
+#define RMON_T_MC_PKT  0x20c /* RMON TX multicast pkts */
+#define RMON_T_CRC_ALIGN       0x210 /* RMON TX pkts with CRC align err */
+#define RMON_T_UNDERSIZE       0x214 /* RMON TX pkts < 64 bytes, good CRC */
+#define RMON_T_OVERSIZE        0x218 /* RMON TX pkts > MAX_FL bytes, good CRC */
+#define RMON_T_FRAG    0x21c /* RMON TX pkts < 64 bytes, bad CRC */
+#define RMON_T_JAB     0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
+#define RMON_T_COL     0x224 /* RMON TX collision count */
+#define RMON_T_P64     0x228 /* RMON TX 64 byte pkts */
+#define RMON_T_P65TO127        0x22c /* RMON TX 65 to 127 byte pkts */
+#define RMON_T_P128TO255       0x230 /* RMON TX 128 to 255 byte pkts */
+#define RMON_T_P256TO511       0x234 /* RMON TX 256 to 511 byte pkts */
+#define RMON_T_P512TO1023      0x238 /* RMON TX 512 to 1023 byte pkts */
+#define RMON_T_P1024TO2047     0x23c /* RMON TX 1024 to 2047 byte pkts */
+#define RMON_T_P_GTE2048       0x240 /* RMON TX pkts > 2048 bytes */
+#define RMON_T_OCTETS  0x244 /* RMON TX octets */
+#define IEEE_T_DROP    0x248 /* Count of frames not counted correctly */
+#define IEEE_T_FRAME_OK        0x24c /* Frames tx'd OK */
+#define IEEE_T_1COL    0x250 /* Frames tx'd with single collision */
+#define IEEE_T_MCOL    0x254 /* Frames tx'd with multiple collision */
+#define IEEE_T_DEF     0x258 /* Frames tx'd after deferral delay */
+#define IEEE_T_LCOL    0x25c /* Frames tx'd with late collision */
+#define IEEE_T_EXCOL   0x260 /* Frames tx'd with excessive collisions */
+#define IEEE_T_MACERR  0x264 /* Frames tx'd with TX FIFO underrun */
+#define IEEE_T_CSERR   0x268 /* Frames tx'd with carrier sense err */
+#define IEEE_T_SQE     0x26c /* Frames tx'd with SQE err */
+#define IEEE_T_FDXFC   0x270 /* Flow control pause frames tx'd */
+#define IEEE_T_OCTETS_OK       0x274 /* Octet count for frames tx'd w/o err */
+#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
+#define RMON_R_BC_PKT  0x288 /* RMON RX broadcast pkts */
+#define RMON_R_MC_PKT  0x28c /* RMON RX multicast pkts */
+#define RMON_R_CRC_ALIGN       0x290 /* RMON RX pkts with CRC alignment err */
+#define RMON_R_UNDERSIZE       0x294 /* RMON RX pkts < 64 bytes, good CRC */
+#define RMON_R_OVERSIZE        0x298 /* RMON RX pkts > MAX_FL bytes, good CRC */
+#define RMON_R_FRAG    0x29c /* RMON RX pkts < 64 bytes, bad CRC */
+#define RMON_R_JAB     0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
+#define RMON_R_RESVD_O 0x2a4 /* Reserved */
+#define RMON_R_P64     0x2a8 /* RMON RX 64 byte pkts */
+#define RMON_R_P65TO127        0x2ac /* RMON RX 65 to 127 byte pkts */
+#define RMON_R_P128TO255       0x2b0 /* RMON RX 128 to 255 byte pkts */
+#define RMON_R_P256TO511       0x2b4 /* RMON RX 256 to 511 byte pkts */
+#define RMON_R_P512TO1023      0x2b8 /* RMON RX 512 to 1023 byte pkts */
+#define RMON_R_P1024TO2047     0x2bc /* RMON RX 1024 to 2047 byte pkts */
+#define RMON_R_P_GTE2048       0x2c0 /* RMON RX pkts > 2048 bytes */
+#define RMON_R_OCTETS  0x2c4 /* RMON RX octets */
+#define IEEE_R_DROP    0x2c8 /* Count of frames not counted correctly */
+#define IEEE_R_FRAME_OK        0x2cc /* Frames rx'd OK */
+#define IEEE_R_CRC     0x2d0 /* Frames rx'd with CRC err */
+#define IEEE_R_ALIGN   0x2d4 /* Frames rx'd with alignment err */
+#define IEEE_R_MACERR  0x2d8 /* Receive FIFO overflow count */
+#define IEEE_R_FDXFC   0x2dc /* Flow control pause frames rx'd */
+#define IEEE_R_OCTETS_OK       0x2e0 /* Octet cnt for frames rx'd w/o err */
+
+#define EMAC_SMAC_0_0  0x500 /* Supplemental MAC Address 0 (RW). */
+#define EMAC_SMAC_0_1  0x504 /* Supplemental MAC Address 0 (RW). */
+
+/* GEMAC definitions and settings */
+
+#define EMAC_PORT_0    0
+#define EMAC_PORT_1    1
+
+/* GEMAC Bit definitions */
+#define EMAC_IEVENT_HBERR               0x80000000
+#define EMAC_IEVENT_BABR                0x40000000
+#define EMAC_IEVENT_BABT                0x20000000
+#define EMAC_IEVENT_GRA                         0x10000000
+#define EMAC_IEVENT_TXF                         0x08000000
+#define EMAC_IEVENT_TXB                         0x04000000
+#define EMAC_IEVENT_RXF                         0x02000000
+#define EMAC_IEVENT_RXB                         0x01000000
+#define EMAC_IEVENT_MII                         0x00800000
+#define EMAC_IEVENT_EBERR               0x00400000
+#define EMAC_IEVENT_LC                  0x00200000
+#define EMAC_IEVENT_RL                  0x00100000
+#define EMAC_IEVENT_UN                  0x00080000
+
+#define EMAC_IMASK_HBERR                 0x80000000
+#define EMAC_IMASK_BABR                  0x40000000
+#define EMAC_IMASKT_BABT                 0x20000000
+#define EMAC_IMASK_GRA                   0x10000000
+#define EMAC_IMASKT_TXF                  0x08000000
+#define EMAC_IMASK_TXB                   0x04000000
+#define EMAC_IMASKT_RXF                  0x02000000
+#define EMAC_IMASK_RXB                   0x01000000
+#define EMAC_IMASK_MII                   0x00800000
+#define EMAC_IMASK_EBERR                 0x00400000
+#define EMAC_IMASK_LC                    0x00200000
+#define EMAC_IMASKT_RL                   0x00100000
+#define EMAC_IMASK_UN                    0x00080000
+
+#define EMAC_RCNTRL_MAX_FL_SHIFT         16
+#define EMAC_RCNTRL_LOOP                 0x00000001
+#define EMAC_RCNTRL_DRT                  0x00000002
+#define EMAC_RCNTRL_MII_MODE             0x00000004
+#define EMAC_RCNTRL_PROM                 0x00000008
+#define EMAC_RCNTRL_BC_REJ               0x00000010
+#define EMAC_RCNTRL_FCE                  0x00000020
+#define EMAC_RCNTRL_RGMII                0x00000040
+#define EMAC_RCNTRL_SGMII                0x00000080
+#define EMAC_RCNTRL_RMII                 0x00000100
+#define EMAC_RCNTRL_RMII_10T             0x00000200
+#define EMAC_RCNTRL_CRC_FWD             0x00004000
+
+#define EMAC_TCNTRL_GTS                  0x00000001
+#define EMAC_TCNTRL_HBC                  0x00000002
+#define EMAC_TCNTRL_FDEN                 0x00000004
+#define EMAC_TCNTRL_TFC_PAUSE            0x00000008
+#define EMAC_TCNTRL_RFC_PAUSE            0x00000010
+
+#define EMAC_ECNTRL_RESET                0x00000001      /* reset the EMAC */
+#define EMAC_ECNTRL_ETHER_EN             0x00000002      /* enable the EMAC */
+#define EMAC_ECNTRL_MAGIC_ENA           0x00000004
+#define EMAC_ECNTRL_SLEEP               0x00000008
+#define EMAC_ECNTRL_SPEED                0x00000020
+#define EMAC_ECNTRL_DBSWAP               0x00000100
+
+#define EMAC_X_WMRK_STRFWD               0x00000100
+
+#define EMAC_X_DES_ACTIVE_TDAR           0x01000000
+#define EMAC_R_DES_ACTIVE_RDAR           0x01000000
+
+#define EMAC_RX_SECTION_EMPTY_V                0x00010006
+/*
+ * The possible operating speeds of the MAC, currently supporting 10, 100 and
+ * 1000Mb modes.
+ */
+enum mac_speed {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS};
+
+/* MII-related definitions */
+#define EMAC_MII_DATA_ST         0x40000000      /* Start of frame delimiter */
+#define EMAC_MII_DATA_OP_RD      0x20000000      /* Perform a read operation */
+#define EMAC_MII_DATA_OP_CL45_RD 0x30000000      /* Perform a Clause 45 read */
+#define EMAC_MII_DATA_OP_WR      0x10000000      /* Perform a write operation */
+#define EMAC_MII_DATA_OP_CL45_WR 0x10000000      /* Perform a Clause 45 write */
+#define EMAC_MII_DATA_PA_MSK     0x0f800000      /* PHY Address field mask */
+#define EMAC_MII_DATA_RA_MSK     0x007c0000      /* PHY Register field mask */
+#define EMAC_MII_DATA_TA         0x00020000      /* Turnaround */
+#define EMAC_MII_DATA_DATAMSK    0x0000ffff      /* PHY data field */
+
+#define EMAC_MII_DATA_RA_SHIFT   18      /* MII Register address bits */
+#define EMAC_MII_DATA_RA_MASK   0x1F      /* MII Register address mask */
+#define EMAC_MII_DATA_PA_SHIFT   23      /* MII PHY address bits */
+#define EMAC_MII_DATA_PA_MASK    0x1F      /* MII PHY address mask */
+
+#define EMAC_MII_DATA_RA(v) (((v) & EMAC_MII_DATA_RA_MASK) << \
+                               EMAC_MII_DATA_RA_SHIFT)
+#define EMAC_MII_DATA_PA(v) (((v) & EMAC_MII_DATA_PA_MASK) << \
+                               EMAC_MII_DATA_PA_SHIFT)
+#define EMAC_MII_DATA(v)    ((v) & 0xffff)
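+
+/*
+ * For example, a Clause 22 read of register 'reg' on PHY address 'pa'
+ * (placeholder names) would be composed from the fields above as
+ *
+ *     EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD | EMAC_MII_DATA_TA |
+ *     EMAC_MII_DATA_PA(pa) | EMAC_MII_DATA_RA(reg)
+ *
+ * and written to EMAC_MII_DATA_REG to start the MDIO transaction.
+ */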
+
+#define EMAC_MII_SPEED_SHIFT   1
+#define EMAC_HOLDTIME_SHIFT    8
+#define EMAC_HOLDTIME_MASK     0x7
+#define EMAC_HOLDTIME(v)       (((v) & EMAC_HOLDTIME_MASK) << \
+                                       EMAC_HOLDTIME_SHIFT)
+
+/*
+ * The address organisation for the MAC device.  All addresses are split into
+ * two 32-bit register fields.  The first one (bottom) is the lower 32 bits of
+ * the address and the other field holds the high-order bits, which may be
+ * 16 bits in the case of MAC addresses, or 32 bits for the hash address.
+ * In terms of memory storage, the first item (bottom) is assumed to be at a
+ * lower address location than 'top', i.e. top should be at the address of
+ * 'bottom' + 4 bytes.
+ */
+struct pfe_mac_addr {
+       u32 bottom;     /* Lower 32-bits of address. */
+       u32 top;        /* Upper 32-bits of address. */
+};
+
+/*
+ * The following is the organisation of the address filters section of the MAC
+ * registers.  The Cadence MAC contains four possible specific address match
+ * addresses; if an incoming frame corresponds to any one of these four
+ * addresses then the frame will be copied to memory.
+ * It is not necessary for all four of the address match registers to be
+ * programmed; this is application dependent.
+ */
+struct spec_addr {
+       struct pfe_mac_addr one;        /* Specific address register 1. */
+       struct pfe_mac_addr two;        /* Specific address register 2. */
+       struct pfe_mac_addr three;      /* Specific address register 3. */
+       struct pfe_mac_addr four;       /* Specific address register 4. */
+};
+
+struct gemac_cfg {
+       u32 mode;
+       u32 speed;
+       u32 duplex;
+};
+
+/* EMAC Hash size */
+#define EMAC_HASH_REG_BITS       64
+
+#define EMAC_SPEC_ADDR_MAX     4
+
+#endif /* _EMAC_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _GPI_H_
+#define _GPI_H_
+
+#define GPI_VERSION    0x00
+#define GPI_CTRL       0x04
+#define GPI_RX_CONFIG  0x08
+#define GPI_HDR_SIZE   0x0c
+#define GPI_BUF_SIZE   0x10
+#define GPI_LMEM_ALLOC_ADDR    0x14
+#define GPI_LMEM_FREE_ADDR     0x18
+#define GPI_DDR_ALLOC_ADDR     0x1c
+#define GPI_DDR_FREE_ADDR      0x20
+#define GPI_CLASS_ADDR 0x24
+#define GPI_DRX_FIFO   0x28
+#define GPI_TRX_FIFO   0x2c
+#define GPI_INQ_PKTPTR 0x30
+#define GPI_DDR_DATA_OFFSET    0x34
+#define GPI_LMEM_DATA_OFFSET   0x38
+#define GPI_TMLF_TX    0x4c
+#define GPI_DTX_ASEQ   0x50
+#define GPI_FIFO_STATUS        0x54
+#define GPI_FIFO_DEBUG 0x58
+#define GPI_TX_PAUSE_TIME      0x5c
+#define GPI_LMEM_SEC_BUF_DATA_OFFSET   0x60
+#define GPI_DDR_SEC_BUF_DATA_OFFSET    0x64
+#define GPI_TOE_CHKSUM_EN      0x68
+#define GPI_OVERRUN_DROPCNT    0x6c
+#define GPI_CSR_MTIP_PAUSE_REG         0x74
+#define GPI_CSR_MTIP_PAUSE_QUANTUM     0x78
+#define GPI_CSR_RX_CNT                 0x7c
+#define GPI_CSR_TX_CNT                 0x80
+#define GPI_CSR_DEBUG1                 0x84
+#define GPI_CSR_DEBUG2                 0x88
+
+struct gpi_cfg {
+       u32 lmem_rtry_cnt;
+       u32 tmlf_txthres;
+       u32 aseq_len;
+       u32 mtip_pause_reg;
+};
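+
+/*
+ * Illustrative EGPI1 configuration assembled from the per-GPI defines
+ * below; the values actually programmed are set up by the driver's init
+ * code, so treat this as a sketch only:
+ *
+ *     struct gpi_cfg egpi1_cfg = {
+ *             .lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
+ *             .tmlf_txthres  = EGPI1_TMLF_TXTHRES,
+ *             .aseq_len      = EGPI1_ASEQ_LEN,
+ *     };
+ */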
+
+/* GPI common defines */
+#define GPI_LMEM_BUF_EN        0x1
+#define GPI_DDR_BUF_EN 0x1
+
+/* EGPI 1 defines */
+#define EGPI1_LMEM_RTRY_CNT    0x40
+#define EGPI1_TMLF_TXTHRES     0xBC
+#define EGPI1_ASEQ_LEN 0x50
+
+/* EGPI 2 defines */
+#define EGPI2_LMEM_RTRY_CNT    0x40
+#define EGPI2_TMLF_TXTHRES     0xBC
+#define EGPI2_ASEQ_LEN 0x40
+
+/* EGPI 3 defines */
+#define EGPI3_LMEM_RTRY_CNT    0x40
+#define EGPI3_TMLF_TXTHRES     0xBC
+#define EGPI3_ASEQ_LEN 0x40
+
+/* HGPI defines */
+#define HGPI_LMEM_RTRY_CNT     0x40
+#define HGPI_TMLF_TXTHRES      0xBC
+#define HGPI_ASEQ_LEN  0x40
+
+#define EGPI_PAUSE_TIME                0x000007D0
+#define EGPI_PAUSE_ENABLE      0x40000000
+#endif /* _GPI_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HIF_H_
+#define _HIF_H_
+
+/* @file hif.h.
+ * hif - PFE HIF block control and status registers.
+ * Mapped on CBUS and accessible from all PEs and the ARM.
+ */
+#define HIF_VERSION    (HIF_BASE_ADDR + 0x00)
+#define HIF_TX_CTRL    (HIF_BASE_ADDR + 0x04)
+#define HIF_TX_CURR_BD_ADDR    (HIF_BASE_ADDR + 0x08)
+#define HIF_TX_ALLOC   (HIF_BASE_ADDR + 0x0c)
+#define HIF_TX_BDP_ADDR        (HIF_BASE_ADDR + 0x10)
+#define HIF_TX_STATUS  (HIF_BASE_ADDR + 0x14)
+#define HIF_RX_CTRL    (HIF_BASE_ADDR + 0x20)
+#define HIF_RX_BDP_ADDR        (HIF_BASE_ADDR + 0x24)
+#define HIF_RX_STATUS  (HIF_BASE_ADDR + 0x30)
+#define HIF_INT_SRC    (HIF_BASE_ADDR + 0x34)
+#define HIF_INT_ENABLE (HIF_BASE_ADDR + 0x38)
+#define HIF_POLL_CTRL  (HIF_BASE_ADDR + 0x3c)
+#define HIF_RX_CURR_BD_ADDR    (HIF_BASE_ADDR + 0x40)
+#define HIF_RX_ALLOC   (HIF_BASE_ADDR + 0x44)
+#define HIF_TX_DMA_STATUS      (HIF_BASE_ADDR + 0x48)
+#define HIF_RX_DMA_STATUS      (HIF_BASE_ADDR + 0x4c)
+#define HIF_INT_COAL   (HIF_BASE_ADDR + 0x50)
+
+/* HIF_INT_SRC/ HIF_INT_ENABLE control bits */
+#define HIF_INT                BIT(0)
+#define HIF_RXBD_INT   BIT(1)
+#define HIF_RXPKT_INT  BIT(2)
+#define HIF_TXBD_INT   BIT(3)
+#define HIF_TXPKT_INT  BIT(4)
+
+/* HIF_TX_CTRL bits */
+#define HIF_CTRL_DMA_EN                        BIT(0)
+#define HIF_CTRL_BDP_POLL_CTRL_EN      BIT(1)
+#define HIF_CTRL_BDP_CH_START_WSTB     BIT(2)
+
+/* HIF_RX_STATUS bits */
+#define BDP_CSR_RX_DMA_ACTV     BIT(16)
+
+/* HIF_INT_ENABLE bits */
+#define HIF_INT_EN             BIT(0)
+#define HIF_RXBD_INT_EN                BIT(1)
+#define HIF_RXPKT_INT_EN       BIT(2)
+#define HIF_TXBD_INT_EN                BIT(3)
+#define HIF_TXPKT_INT_EN       BIT(4)
+
+/* HIF_POLL_CTRL bits */
+#define HIF_RX_POLL_CTRL_CYCLE 0x0400
+#define HIF_TX_POLL_CTRL_CYCLE 0x0400
+
+/* HIF_INT_COAL bits */
+#define HIF_INT_COAL_ENABLE    BIT(31)
+
+/* Buffer descriptor control bits */
+#define BD_CTRL_BUFLEN_MASK    0x3fff
+#define BD_BUF_LEN(x)  ((x) & BD_CTRL_BUFLEN_MASK)
+#define BD_CTRL_CBD_INT_EN     BIT(16)
+#define BD_CTRL_PKT_INT_EN     BIT(17)
+#define BD_CTRL_LIFM           BIT(18)
+#define BD_CTRL_LAST_BD                BIT(19)
+#define BD_CTRL_DIR            BIT(20)
+#define BD_CTRL_LMEM_CPY       BIT(21) /* Valid only for HIF_NOCPY */
+#define BD_CTRL_PKT_XFER       BIT(24)
+#define BD_CTRL_DESC_EN                BIT(31)
+#define BD_CTRL_PARSE_DISABLE  BIT(25)
+#define BD_CTRL_BRFETCH_DISABLE        BIT(26)
+#define BD_CTRL_RTFETCH_DISABLE        BIT(27)
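+
+/*
+ * Illustrative TX descriptor control word for a single-buffer packet of
+ * 'len' bytes ('len' is a placeholder):
+ *
+ *     BD_CTRL_DESC_EN | BD_CTRL_LIFM | BD_BUF_LEN(len)
+ *
+ * where BD_CTRL_LIFM marks the last fragment of the frame and
+ * BD_CTRL_DESC_EN hands the descriptor over to hardware.
+ */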
+
+/* Buffer descriptor status bits */
+#define BD_STATUS_CONN_ID(x)   ((x) & 0xffff)
+#define BD_STATUS_DIR_PROC_ID  BIT(16)
+#define BD_STATUS_CONN_ID_EN   BIT(17)
+#define BD_STATUS_PE2PROC_ID(x)        (((x) & 7) << 18)
+#define BD_STATUS_LE_DATA      BIT(21)
+#define BD_STATUS_CHKSUM_EN    BIT(22)
+
+/* HIF Buffer descriptor status bits */
+#define DIR_PROC_ID    BIT(16)
+#define PROC_ID(id)    ((id) << 18)
+
+#endif /* _HIF_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HIF_NOCPY_H_
+#define _HIF_NOCPY_H_
+
+#define HIF_NOCPY_VERSION      (HIF_NOCPY_BASE_ADDR + 0x00)
+#define HIF_NOCPY_TX_CTRL      (HIF_NOCPY_BASE_ADDR + 0x04)
+#define HIF_NOCPY_TX_CURR_BD_ADDR      (HIF_NOCPY_BASE_ADDR + 0x08)
+#define HIF_NOCPY_TX_ALLOC     (HIF_NOCPY_BASE_ADDR + 0x0c)
+#define HIF_NOCPY_TX_BDP_ADDR  (HIF_NOCPY_BASE_ADDR + 0x10)
+#define HIF_NOCPY_TX_STATUS    (HIF_NOCPY_BASE_ADDR + 0x14)
+#define HIF_NOCPY_RX_CTRL      (HIF_NOCPY_BASE_ADDR + 0x20)
+#define HIF_NOCPY_RX_BDP_ADDR  (HIF_NOCPY_BASE_ADDR + 0x24)
+#define HIF_NOCPY_RX_STATUS    (HIF_NOCPY_BASE_ADDR + 0x30)
+#define HIF_NOCPY_INT_SRC      (HIF_NOCPY_BASE_ADDR + 0x34)
+#define HIF_NOCPY_INT_ENABLE   (HIF_NOCPY_BASE_ADDR + 0x38)
+#define HIF_NOCPY_POLL_CTRL    (HIF_NOCPY_BASE_ADDR + 0x3c)
+#define HIF_NOCPY_RX_CURR_BD_ADDR      (HIF_NOCPY_BASE_ADDR + 0x40)
+#define HIF_NOCPY_RX_ALLOC     (HIF_NOCPY_BASE_ADDR + 0x44)
+#define HIF_NOCPY_TX_DMA_STATUS        (HIF_NOCPY_BASE_ADDR + 0x48)
+#define HIF_NOCPY_RX_DMA_STATUS        (HIF_NOCPY_BASE_ADDR + 0x4c)
+#define HIF_NOCPY_RX_INQ0_PKTPTR       (HIF_NOCPY_BASE_ADDR + 0x50)
+#define HIF_NOCPY_RX_INQ1_PKTPTR       (HIF_NOCPY_BASE_ADDR + 0x54)
+#define HIF_NOCPY_TX_PORT_NO   (HIF_NOCPY_BASE_ADDR + 0x60)
+#define HIF_NOCPY_LMEM_ALLOC_ADDR      (HIF_NOCPY_BASE_ADDR + 0x64)
+#define HIF_NOCPY_CLASS_ADDR   (HIF_NOCPY_BASE_ADDR + 0x68)
+#define HIF_NOCPY_TMU_PORT0_ADDR       (HIF_NOCPY_BASE_ADDR + 0x70)
+#define HIF_NOCPY_TMU_PORT1_ADDR       (HIF_NOCPY_BASE_ADDR + 0x74)
+#define HIF_NOCPY_TMU_PORT2_ADDR       (HIF_NOCPY_BASE_ADDR + 0x7c)
+#define HIF_NOCPY_TMU_PORT3_ADDR       (HIF_NOCPY_BASE_ADDR + 0x80)
+#define HIF_NOCPY_TMU_PORT4_ADDR       (HIF_NOCPY_BASE_ADDR + 0x84)
+#define HIF_NOCPY_INT_COAL     (HIF_NOCPY_BASE_ADDR + 0x90)
+
+#endif /* _HIF_NOCPY_H_ */
1272 --- /dev/null
1273 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
1274 @@ -0,0 +1,168 @@
1275 +/*
1276 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1277 + * Copyright 2017 NXP
1278 + *
1279 + * This program is free software; you can redistribute it and/or modify
1280 + * it under the terms of the GNU General Public License as published by
1281 + * the Free Software Foundation; either version 2 of the License, or
1282 + * (at your option) any later version.
1283 + *
1284 + * This program is distributed in the hope that it will be useful,
1285 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1286 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
1287 + * GNU General Public License for more details.
1288 + *
1289 + * You should have received a copy of the GNU General Public License
1290 + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
1291 + */
1292 +
1293 +#ifndef _TMU_CSR_H_
1294 +#define _TMU_CSR_H_
1295 +
1296 +#define TMU_VERSION    (TMU_CSR_BASE_ADDR + 0x000)
1297 +#define TMU_INQ_WATERMARK      (TMU_CSR_BASE_ADDR + 0x004)
1298 +#define TMU_PHY_INQ_PKTPTR     (TMU_CSR_BASE_ADDR + 0x008)
1299 +#define TMU_PHY_INQ_PKTINFO    (TMU_CSR_BASE_ADDR + 0x00c)
1300 +#define TMU_PHY_INQ_FIFO_CNT   (TMU_CSR_BASE_ADDR + 0x010)
1301 +#define TMU_SYS_GENERIC_CONTROL        (TMU_CSR_BASE_ADDR + 0x014)
1302 +#define TMU_SYS_GENERIC_STATUS (TMU_CSR_BASE_ADDR + 0x018)
1303 +#define TMU_SYS_GEN_CON0       (TMU_CSR_BASE_ADDR + 0x01c)
1304 +#define TMU_SYS_GEN_CON1       (TMU_CSR_BASE_ADDR + 0x020)
1305 +#define TMU_SYS_GEN_CON2       (TMU_CSR_BASE_ADDR + 0x024)
1306 +#define TMU_SYS_GEN_CON3       (TMU_CSR_BASE_ADDR + 0x028)
1307 +#define TMU_SYS_GEN_CON4       (TMU_CSR_BASE_ADDR + 0x02c)
1308 +#define TMU_TEQ_DISABLE_DROPCHK        (TMU_CSR_BASE_ADDR + 0x030)
1309 +#define TMU_TEQ_CTRL   (TMU_CSR_BASE_ADDR + 0x034)
1310 +#define TMU_TEQ_QCFG   (TMU_CSR_BASE_ADDR + 0x038)
1311 +#define TMU_TEQ_DROP_STAT      (TMU_CSR_BASE_ADDR + 0x03c)
1312 +#define TMU_TEQ_QAVG   (TMU_CSR_BASE_ADDR + 0x040)
1313 +#define TMU_TEQ_WREG_PROB      (TMU_CSR_BASE_ADDR + 0x044)
1314 +#define TMU_TEQ_TRANS_STAT     (TMU_CSR_BASE_ADDR + 0x048)
1315 +#define TMU_TEQ_HW_PROB_CFG0   (TMU_CSR_BASE_ADDR + 0x04c)
1316 +#define TMU_TEQ_HW_PROB_CFG1   (TMU_CSR_BASE_ADDR + 0x050)
1317 +#define TMU_TEQ_HW_PROB_CFG2   (TMU_CSR_BASE_ADDR + 0x054)
1318 +#define TMU_TEQ_HW_PROB_CFG3   (TMU_CSR_BASE_ADDR + 0x058)
1319 +#define TMU_TEQ_HW_PROB_CFG4   (TMU_CSR_BASE_ADDR + 0x05c)
1320 +#define TMU_TEQ_HW_PROB_CFG5   (TMU_CSR_BASE_ADDR + 0x060)
1321 +#define TMU_TEQ_HW_PROB_CFG6   (TMU_CSR_BASE_ADDR + 0x064)
1322 +#define TMU_TEQ_HW_PROB_CFG7   (TMU_CSR_BASE_ADDR + 0x068)
1323 +#define TMU_TEQ_HW_PROB_CFG8   (TMU_CSR_BASE_ADDR + 0x06c)
1324 +#define TMU_TEQ_HW_PROB_CFG9   (TMU_CSR_BASE_ADDR + 0x070)
1325 +#define TMU_TEQ_HW_PROB_CFG10  (TMU_CSR_BASE_ADDR + 0x074)
1326 +#define TMU_TEQ_HW_PROB_CFG11  (TMU_CSR_BASE_ADDR + 0x078)
1327 +#define TMU_TEQ_HW_PROB_CFG12  (TMU_CSR_BASE_ADDR + 0x07c)
1328 +#define TMU_TEQ_HW_PROB_CFG13  (TMU_CSR_BASE_ADDR + 0x080)
1329 +#define TMU_TEQ_HW_PROB_CFG14  (TMU_CSR_BASE_ADDR + 0x084)
1330 +#define TMU_TEQ_HW_PROB_CFG15  (TMU_CSR_BASE_ADDR + 0x088)
1331 +#define TMU_TEQ_HW_PROB_CFG16  (TMU_CSR_BASE_ADDR + 0x08c)
1332 +#define TMU_TEQ_HW_PROB_CFG17  (TMU_CSR_BASE_ADDR + 0x090)
1333 +#define TMU_TEQ_HW_PROB_CFG18  (TMU_CSR_BASE_ADDR + 0x094)
1334 +#define TMU_TEQ_HW_PROB_CFG19  (TMU_CSR_BASE_ADDR + 0x098)
1335 +#define TMU_TEQ_HW_PROB_CFG20  (TMU_CSR_BASE_ADDR + 0x09c)
1336 +#define TMU_TEQ_HW_PROB_CFG21  (TMU_CSR_BASE_ADDR + 0x0a0)
1337 +#define TMU_TEQ_HW_PROB_CFG22  (TMU_CSR_BASE_ADDR + 0x0a4)
1338 +#define TMU_TEQ_HW_PROB_CFG23  (TMU_CSR_BASE_ADDR + 0x0a8)
1339 +#define TMU_TEQ_HW_PROB_CFG24  (TMU_CSR_BASE_ADDR + 0x0ac)
1340 +#define TMU_TEQ_HW_PROB_CFG25  (TMU_CSR_BASE_ADDR + 0x0b0)
1341 +#define TMU_TDQ_IIFG_CFG       (TMU_CSR_BASE_ADDR + 0x0b4)
1342 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1343 + * This is a global Enable for all schedulers in PHY0
1344 + */
1345 +#define TMU_TDQ0_SCH_CTRL      (TMU_CSR_BASE_ADDR + 0x0b8)
1346 +
1347 +#define TMU_LLM_CTRL   (TMU_CSR_BASE_ADDR + 0x0bc)
1348 +#define TMU_LLM_BASE_ADDR      (TMU_CSR_BASE_ADDR + 0x0c0)
1349 +#define TMU_LLM_QUE_LEN        (TMU_CSR_BASE_ADDR + 0x0c4)
1350 +#define TMU_LLM_QUE_HEADPTR    (TMU_CSR_BASE_ADDR + 0x0c8)
1351 +#define TMU_LLM_QUE_TAILPTR    (TMU_CSR_BASE_ADDR + 0x0cc)
1352 +#define TMU_LLM_QUE_DROPCNT    (TMU_CSR_BASE_ADDR + 0x0d0)
1353 +#define TMU_INT_EN     (TMU_CSR_BASE_ADDR + 0x0d4)
1354 +#define TMU_INT_SRC    (TMU_CSR_BASE_ADDR + 0x0d8)
1355 +#define TMU_INQ_STAT   (TMU_CSR_BASE_ADDR + 0x0dc)
1356 +#define TMU_CTRL       (TMU_CSR_BASE_ADDR + 0x0e0)
1357 +
1358 +/* [31] Mem Access Command: 0 = internal memory read, 1 = internal memory
1359 + * write. [27:24] Byte enables of the internal memory access. [23:0] Address
1360 + * of the internal memory. This address is used to access both the PM and DM
1361 + * of all the PEs.
1362 + */
1363 +#define TMU_MEM_ACCESS_ADDR    (TMU_CSR_BASE_ADDR + 0x0e4)
1364 +
1365 +/* Internal Memory Access Write Data */
1366 +#define TMU_MEM_ACCESS_WDATA   (TMU_CSR_BASE_ADDR + 0x0e8)
1367 +/* Internal Memory Access Read Data. The commands are blocked
1368 + * at the mem_access only
1369 + */
1370 +#define TMU_MEM_ACCESS_RDATA   (TMU_CSR_BASE_ADDR + 0x0ec)
1371 +
1372 +/* [31:0] PHY0 in queue address (must be initialized with one of the
1373 + * xxx_INQ_PKTPTR cbus addresses)
1374 + */
1375 +#define TMU_PHY0_INQ_ADDR      (TMU_CSR_BASE_ADDR + 0x0f0)
1376 +/* [31:0] PHY1 in queue address (must be initialized with one of the
1377 + * xxx_INQ_PKTPTR cbus addresses)
1378 + */
1379 +#define TMU_PHY1_INQ_ADDR      (TMU_CSR_BASE_ADDR + 0x0f4)
1380 +/* [31:0] PHY2 in queue address (must be initialized with one of the
1381 + * xxx_INQ_PKTPTR cbus addresses)
1382 + */
1383 +#define TMU_PHY2_INQ_ADDR      (TMU_CSR_BASE_ADDR + 0x0f8)
1384 +/* [31:0] PHY3 in queue address (must be initialized with one of the
1385 + * xxx_INQ_PKTPTR cbus addresses)
1386 + */
1387 +#define TMU_PHY3_INQ_ADDR      (TMU_CSR_BASE_ADDR + 0x0fc)
1388 +#define TMU_BMU_INQ_ADDR       (TMU_CSR_BASE_ADDR + 0x100)
1389 +#define TMU_TX_CTRL    (TMU_CSR_BASE_ADDR + 0x104)
1390 +
1391 +#define TMU_BUS_ACCESS_WDATA   (TMU_CSR_BASE_ADDR + 0x108)
1392 +#define TMU_BUS_ACCESS (TMU_CSR_BASE_ADDR + 0x10c)
1393 +#define TMU_BUS_ACCESS_RDATA   (TMU_CSR_BASE_ADDR + 0x110)
1394 +
1395 +#define TMU_PE_SYS_CLK_RATIO   (TMU_CSR_BASE_ADDR + 0x114)
1396 +#define TMU_PE_STATUS  (TMU_CSR_BASE_ADDR + 0x118)
1397 +#define TMU_TEQ_MAX_THRESHOLD  (TMU_CSR_BASE_ADDR + 0x11c)
1398 +/* [31:0] PHY4 in queue address (must be initialized with one of the
1399 + * xxx_INQ_PKTPTR cbus addresses)
1400 + */
1401 +#define TMU_PHY4_INQ_ADDR      (TMU_CSR_BASE_ADDR + 0x134)
1402 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1403 + * This is a global Enable for all schedulers in PHY1
1404 + */
1405 +#define TMU_TDQ1_SCH_CTRL      (TMU_CSR_BASE_ADDR + 0x138)
1406 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1407 + * This is a global Enable for all schedulers in PHY2
1408 + */
1409 +#define TMU_TDQ2_SCH_CTRL      (TMU_CSR_BASE_ADDR + 0x13c)
1410 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1411 + * This is a global Enable for all schedulers in PHY3
1412 + */
1413 +#define TMU_TDQ3_SCH_CTRL      (TMU_CSR_BASE_ADDR + 0x140)
1414 +#define TMU_BMU_BUF_SIZE       (TMU_CSR_BASE_ADDR + 0x144)
1415 +/* [31:0] PHY5 in queue address (must be initialized with one of the
1416 + * xxx_INQ_PKTPTR cbus addresses)
1417 + */
1418 +#define TMU_PHY5_INQ_ADDR      (TMU_CSR_BASE_ADDR + 0x148)
1419 +
1420 +#define SW_RESET               BIT(0)  /* Global software reset */
1421 +#define INQ_RESET              BIT(2)
1422 +#define TEQ_RESET              BIT(3)
1423 +#define TDQ_RESET              BIT(4)
1424 +#define PE_RESET               BIT(5)
1425 +#define MEM_INIT               BIT(6)
1426 +#define MEM_INIT_DONE          BIT(7)
1427 +#define LLM_INIT               BIT(8)
1428 +#define LLM_INIT_DONE          BIT(9)
1429 +#define ECC_MEM_INIT_DONE      BIT(10)
1430 +
1431 +struct tmu_cfg {
1432 +       u32 pe_sys_clk_ratio;
1433 +       unsigned long llm_base_addr;
1434 +       u32 llm_queue_len;
1435 +};
1436 +
1437 +/* Not HW related; common defines for pfe_ctrl / pfe */
1438 +#define DEFAULT_MAX_QDEPTH     80
1439 +#define DEFAULT_Q0_QDEPTH      511 /* We keep one large queue for host tx QoS */
1440 +#define DEFAULT_TMU3_QDEPTH    127
1441 +
1442 +#endif /* _TMU_CSR_H_ */
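
The comment above TMU_MEM_ACCESS_ADDR documents the command-word layout. As a
worked example, a write command could be encoded as below; this is a sketch
following that documented layout, and the helper name is hypothetical.

/* Sketch only: encode a TMU internal-memory write command:
 * [31] 1 = write, [27:24] byte enables, [23:0] internal address.
 */
static u32 tmu_mem_write_cmd(u32 addr, u8 byte_en)
{
	return BIT(31) | ((u32)(byte_en & 0xf) << 24) | (addr & 0xffffff);
}
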
1443 --- /dev/null
1444 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
1445 @@ -0,0 +1,61 @@
1446 +/*
1447 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1448 + * Copyright 2017 NXP
1449 + *
1450 + * This program is free software; you can redistribute it and/or modify
1451 + * it under the terms of the GNU General Public License as published by
1452 + * the Free Software Foundation; either version 2 of the License, or
1453 + * (at your option) any later version.
1454 + *
1455 + * This program is distributed in the hope that it will be useful,
1456 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1457 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
1458 + * GNU General Public License for more details.
1459 + *
1460 + * You should have received a copy of the GNU General Public License
1461 + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
1462 + */
1463 +
1464 +#ifndef _UTIL_CSR_H_
1465 +#define _UTIL_CSR_H_
1466 +
1467 +#define UTIL_VERSION   (UTIL_CSR_BASE_ADDR + 0x000)
1468 +#define UTIL_TX_CTRL   (UTIL_CSR_BASE_ADDR + 0x004)
1469 +#define UTIL_INQ_PKTPTR        (UTIL_CSR_BASE_ADDR + 0x010)
1470 +
1471 +#define UTIL_HDR_SIZE  (UTIL_CSR_BASE_ADDR + 0x014)
1472 +
1473 +#define UTIL_PE0_QB_DM_ADDR0   (UTIL_CSR_BASE_ADDR + 0x020)
1474 +#define UTIL_PE0_QB_DM_ADDR1   (UTIL_CSR_BASE_ADDR + 0x024)
1475 +#define UTIL_PE0_RO_DM_ADDR0   (UTIL_CSR_BASE_ADDR + 0x060)
1476 +#define UTIL_PE0_RO_DM_ADDR1   (UTIL_CSR_BASE_ADDR + 0x064)
1477 +
1478 +#define UTIL_MEM_ACCESS_ADDR   (UTIL_CSR_BASE_ADDR + 0x100)
1479 +#define UTIL_MEM_ACCESS_WDATA  (UTIL_CSR_BASE_ADDR + 0x104)
1480 +#define UTIL_MEM_ACCESS_RDATA  (UTIL_CSR_BASE_ADDR + 0x108)
1481 +
1482 +#define UTIL_TM_INQ_ADDR       (UTIL_CSR_BASE_ADDR + 0x114)
1483 +#define UTIL_PE_STATUS (UTIL_CSR_BASE_ADDR + 0x118)
1484 +
1485 +#define UTIL_PE_SYS_CLK_RATIO  (UTIL_CSR_BASE_ADDR + 0x200)
1486 +#define UTIL_AFULL_THRES       (UTIL_CSR_BASE_ADDR + 0x204)
1487 +#define UTIL_GAP_BETWEEN_READS (UTIL_CSR_BASE_ADDR + 0x208)
1488 +#define UTIL_MAX_BUF_CNT       (UTIL_CSR_BASE_ADDR + 0x20c)
1489 +#define UTIL_TSQ_FIFO_THRES    (UTIL_CSR_BASE_ADDR + 0x210)
1490 +#define UTIL_TSQ_MAX_CNT       (UTIL_CSR_BASE_ADDR + 0x214)
1491 +#define UTIL_IRAM_DATA_0       (UTIL_CSR_BASE_ADDR + 0x218)
1492 +#define UTIL_IRAM_DATA_1       (UTIL_CSR_BASE_ADDR + 0x21c)
1493 +#define UTIL_IRAM_DATA_2       (UTIL_CSR_BASE_ADDR + 0x220)
1494 +#define UTIL_IRAM_DATA_3       (UTIL_CSR_BASE_ADDR + 0x224)
1495 +
1496 +#define UTIL_BUS_ACCESS_ADDR   (UTIL_CSR_BASE_ADDR + 0x228)
1497 +#define UTIL_BUS_ACCESS_WDATA  (UTIL_CSR_BASE_ADDR + 0x22c)
1498 +#define UTIL_BUS_ACCESS_RDATA  (UTIL_CSR_BASE_ADDR + 0x230)
1499 +
1500 +#define UTIL_INQ_AFULL_THRES   (UTIL_CSR_BASE_ADDR + 0x234)
1501 +
1502 +struct util_cfg {
1503 +       u32 pe_sys_clk_ratio;
1504 +};
1505 +
1506 +#endif /* _UTIL_CSR_H_ */
1507 --- /dev/null
1508 +++ b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
1509 @@ -0,0 +1,372 @@
1510 +/*
1511 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1512 + * Copyright 2017 NXP
1513 + *
1514 + * This program is free software; you can redistribute it and/or modify
1515 + * it under the terms of the GNU General Public License as published by
1516 + * the Free Software Foundation; either version 2 of the License, or
1517 + * (at your option) any later version.
1518 + *
1519 + * This program is distributed in the hope that it will be useful,
1520 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1521 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
1522 + * GNU General Public License for more details.
1523 + *
1524 + * You should have received a copy of the GNU General Public License
1525 + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
1526 + */
1527 +
1528 +#ifndef _PFE_H_
1529 +#define _PFE_H_
1530 +
1531 +#include "cbus.h"
1532 +
1533 +#define CLASS_DMEM_BASE_ADDR(i)        (0x00000000 | ((i) << 20))
1534 +/*
1535 + * Only valid for mem access register interface
1536 + */
1537 +#define CLASS_IMEM_BASE_ADDR(i)        (0x00000000 | ((i) << 20))
1538 +#define CLASS_DMEM_SIZE        0x00002000
1539 +#define CLASS_IMEM_SIZE        0x00008000
1540 +
1541 +#define TMU_DMEM_BASE_ADDR(i)  (0x00000000 + ((i) << 20))
1542 +/*
1543 + * Only valid for mem access register interface
1544 + */
1545 +#define TMU_IMEM_BASE_ADDR(i)  (0x00000000 + ((i) << 20))
1546 +#define TMU_DMEM_SIZE  0x00000800
1547 +#define TMU_IMEM_SIZE  0x00002000
1548 +
1549 +#define UTIL_DMEM_BASE_ADDR    0x00000000
1550 +#define UTIL_DMEM_SIZE 0x00002000
1551 +
1552 +#define PE_LMEM_BASE_ADDR      0xc3010000
1553 +#define PE_LMEM_SIZE   0x8000
1554 +#define PE_LMEM_END    (PE_LMEM_BASE_ADDR + PE_LMEM_SIZE)
1555 +
1556 +#define DMEM_BASE_ADDR 0x00000000
1557 +#define DMEM_SIZE      0x2000  /* TMU has less... */
1558 +#define DMEM_END       (DMEM_BASE_ADDR + DMEM_SIZE)
1559 +
1560 +#define PMEM_BASE_ADDR 0x00010000
1561 +#define PMEM_SIZE      0x8000  /* TMU has less... */
1562 +#define PMEM_END       (PMEM_BASE_ADDR + PMEM_SIZE)
1563 +
1564 +/* These check memory ranges from PE point of view/memory map */
1565 +#define IS_DMEM(addr, len)                             \
1566 +       ({ typeof(addr) addr_ = (addr);                 \
1567 +       ((unsigned long)(addr_) >= DMEM_BASE_ADDR) &&   \
1568 +       (((unsigned long)(addr_) + (len)) <= DMEM_END); })
1569 +
1570 +#define IS_PMEM(addr, len)                             \
1571 +       ({ typeof(addr) addr_ = (addr);                 \
1572 +       ((unsigned long)(addr_) >= PMEM_BASE_ADDR) &&   \
1573 +       (((unsigned long)(addr_) + (len)) <= PMEM_END); })
1574 +
1575 +#define IS_PE_LMEM(addr, len)                          \
1576 +       ({ typeof(addr) addr_ = (addr);                 \
1577 +       ((unsigned long)(addr_) >=                      \
1578 +       PE_LMEM_BASE_ADDR) &&                           \
1579 +       (((unsigned long)(addr_) +                      \
1580 +       (len)) <= PE_LMEM_END); })
1581 +
1582 +#define IS_PFE_LMEM(addr, len)                         \
1583 +       ({ typeof(addr) addr_ = (addr);                 \
1584 +       ((unsigned long)(addr_) >=                      \
1585 +       CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR)) &&            \
1586 +       (((unsigned long)(addr_) + (len)) <=            \
1587 +       CBUS_VIRT_TO_PFE(LMEM_END)); })
1588 +
1589 +#define __IS_PHYS_DDR(addr, len)                       \
1590 +       ({ typeof(addr) addr_ = (addr);                 \
1591 +       ((unsigned long)(addr_) >=                      \
1592 +       DDR_PHYS_BASE_ADDR) &&                          \
1593 +       (((unsigned long)(addr_) + (len)) <=            \
1594 +       DDR_PHYS_END); })
1595 +
1596 +#define IS_PHYS_DDR(addr, len) __IS_PHYS_DDR(DDR_PFE_TO_PHYS(addr), len)
1597 +
1598 +/*
1599 + * If using a run-time virtual address for the cbus base address, use this code
1600 + */
1601 +extern void *cbus_base_addr;
1602 +extern void *ddr_base_addr;
1603 +extern unsigned long ddr_phys_base_addr;
1604 +extern unsigned int ddr_size;
1605 +
1606 +#define CBUS_BASE_ADDR cbus_base_addr
1607 +#define DDR_PHYS_BASE_ADDR     ddr_phys_base_addr
1608 +#define DDR_BASE_ADDR  ddr_base_addr
1609 +#define DDR_SIZE       ddr_size
1610 +
1611 +#define DDR_PHYS_END   (DDR_PHYS_BASE_ADDR + DDR_SIZE)
1612 +
1613 +#define LS1012A_PFE_RESET_WA   /*
1614 +                                * PFE doesn't have a global reset, so
1615 +                                * re-init should take care of a few things
1616 +                                * to make PFE functional after reset
1617 +                                */
1618 +#define PFE_CBUS_PHYS_BASE_ADDR        0xc0000000      /* CBUS physical base address
1619 +                                                * as seen by PEs.
1620 +                                                */
1621 +/* CBUS physical base address as seen by PEs. */
1622 +#define PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE       0xc0000000
1623 +
1624 +#define DDR_PHYS_TO_PFE(p)     (((unsigned long int)(p)) & 0x7FFFFFFF)
1625 +#define DDR_PFE_TO_PHYS(p)     (((unsigned long int)(p)) | 0x80000000)
1626 +#define CBUS_PHYS_TO_PFE(p)    (((p) - PFE_CBUS_PHYS_BASE_ADDR) + \
1627 +                               PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE)
1628 +/* Translates to PFE address map */
1629 +
1630 +#define DDR_PHYS_TO_VIRT(p)    (((p) - DDR_PHYS_BASE_ADDR) + DDR_BASE_ADDR)
1631 +#define DDR_VIRT_TO_PHYS(v)    (((v) - DDR_BASE_ADDR) + DDR_PHYS_BASE_ADDR)
1632 +#define DDR_VIRT_TO_PFE(p)     (DDR_PHYS_TO_PFE(DDR_VIRT_TO_PHYS(p)))
1633 +
1634 +#define CBUS_VIRT_TO_PFE(v)    (((v) - CBUS_BASE_ADDR) + \
1635 +                               PFE_CBUS_PHYS_BASE_ADDR)
1636 +#define CBUS_PFE_TO_VIRT(p)    (((unsigned long int)(p) - \
1637 +                               PFE_CBUS_PHYS_BASE_ADDR) + CBUS_BASE_ADDR)
1638 +
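
The DDR macros above translate between the host physical view and the PE view
of DDR by toggling bit 31. A round-trip sketch, with an illustrative address
value:

/* Sketch only: DDR address round trip through the macros above. */
unsigned long phys = 0x83400000UL;          /* illustrative host physical address */
unsigned long pfe  = DDR_PHYS_TO_PFE(phys); /* 0x03400000, the PE view */
unsigned long back = DDR_PFE_TO_PHYS(pfe);  /* 0x83400000 again */
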
1639 +/* The part below is used by the QoS control driver on the host */
1640 +#define TMU_APB_BASE_ADDR       0xc1000000      /* TMU base address as seen
1641 +                                                * by PEs
1642 +                                                */
1643 +
1644 +enum {
1645 +       CLASS0_ID = 0,
1646 +       CLASS1_ID,
1647 +       CLASS2_ID,
1648 +       CLASS3_ID,
1649 +       CLASS4_ID,
1650 +       CLASS5_ID,
1651 +       TMU0_ID,
1652 +       TMU1_ID,
1653 +       TMU2_ID,
1654 +       TMU3_ID,
1655 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1656 +       UTIL_ID,
1657 +#endif
1658 +       MAX_PE
1659 +};
1660 +
1661 +#define CLASS_MASK     (BIT(CLASS0_ID) | BIT(CLASS1_ID) |\
1662 +                       BIT(CLASS2_ID) | BIT(CLASS3_ID) |\
1663 +                       BIT(CLASS4_ID) | BIT(CLASS5_ID))
1664 +#define CLASS_MAX_ID   CLASS5_ID
1665 +
1666 +#define TMU_MASK       (BIT(TMU0_ID) | BIT(TMU1_ID) |\
1667 +                       BIT(TMU3_ID))
1668 +
1669 +#define TMU_MAX_ID     TMU3_ID
1670 +
1671 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1672 +#define UTIL_MASK      BIT(UTIL_ID)
1673 +#endif
1674 +
1675 +struct pe_status {
1676 +       u32     cpu_state;
1677 +       u32     activity_counter;
1678 +       u32     rx;
1679 +       union {
1680 +       u32     tx;
1681 +       u32     tmu_qstatus;
1682 +       };
1683 +       u32     drop;
1684 +#if defined(CFG_PE_DEBUG)
1685 +       u32     debug_indicator;
1686 +       u32     debug[16];
1687 +#endif
1688 +} __aligned(16);
1689 +
1690 +struct pe_sync_mailbox {
1691 +       u32 stop;
1692 +       u32 stopped;
1693 +};
1694 +
1695 +/* Drop counter definitions */
1696 +
1697 +#define        CLASS_NUM_DROP_COUNTERS 13
1698 +#define        UTIL_NUM_DROP_COUNTERS  8
1699 +
1700 +/* PE information.
1701 + * Structure containing PE-specific information. It is used to create
1702 + * generic C functions common to all PEs.
1703 + * Before using the library functions this structure needs to be initialized
1704 + * with the different registers' virtual addresses
1705 + * (according to the ARM MMU mapping). The default initialization supports a
1706 + * virtual == physical mapping.
1707 + */
1708 +struct pe_info {
1709 +       u32 dmem_base_addr;     /* PE's dmem base address */
1710 +       u32 pmem_base_addr;     /* PE's pmem base address */
1711 +       u32 pmem_size;  /* PE's pmem size */
1712 +
1713 +       void *mem_access_wdata; /* PE's _MEM_ACCESS_WDATA register
1714 +                                * address
1715 +                                */
1716 +       void *mem_access_addr;  /* PE's _MEM_ACCESS_ADDR register
1717 +                                * address
1718 +                                */
1719 +       void *mem_access_rdata; /* PE's _MEM_ACCESS_RDATA register
1720 +                                * address
1721 +                                */
1722 +};
1723 +
1724 +void pe_lmem_read(u32 *dst, u32 len, u32 offset);
1725 +void pe_lmem_write(u32 *src, u32 len, u32 offset);
1726 +
1727 +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
1728 +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
1729 +
1730 +u32 pe_pmem_read(int id, u32 addr, u8 size);
1731 +
1732 +void pe_dmem_write(int id, u32 val, u32 addr, u8 size);
1733 +u32 pe_dmem_read(int id, u32 addr, u8 size);
1734 +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len);
1735 +void class_pe_lmem_memset(u32 dst, int val, unsigned int len);
1736 +void class_bus_write(u32 val, u32 addr, u8 size);
1737 +u32 class_bus_read(u32 addr, u8 size);
1738 +
1739 +#define class_bus_readl(addr)  class_bus_read(addr, 4)
1740 +#define class_bus_readw(addr)  class_bus_read(addr, 2)
1741 +#define class_bus_readb(addr)  class_bus_read(addr, 1)
1742 +
1743 +#define class_bus_writel(val, addr)    class_bus_write(val, addr, 4)
1744 +#define class_bus_writew(val, addr)    class_bus_write(val, addr, 2)
1745 +#define class_bus_writeb(val, addr)    class_bus_write(val, addr, 1)
1746 +
1747 +#define pe_dmem_readl(id, addr)        pe_dmem_read(id, addr, 4)
1748 +#define pe_dmem_readw(id, addr)        pe_dmem_read(id, addr, 2)
1749 +#define pe_dmem_readb(id, addr)        pe_dmem_read(id, addr, 1)
1750 +
1751 +#define pe_dmem_writel(id, val, addr)  pe_dmem_write(id, val, addr, 4)
1752 +#define pe_dmem_writew(id, val, addr)  pe_dmem_write(id, val, addr, 2)
1753 +#define pe_dmem_writeb(id, val, addr)  pe_dmem_write(id, val, addr, 1)
1754 +
1755 +/*int pe_load_elf_section(int id, const void *data, elf32_shdr *shdr); */
1756 +int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
1757 +                       struct device *dev);
1758 +
1759 +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
1760 +                 unsigned int ddr_size);
1761 +void bmu_init(void *base, struct BMU_CFG *cfg);
1762 +void bmu_reset(void *base);
1763 +void bmu_enable(void *base);
1764 +void bmu_disable(void *base);
1765 +void bmu_set_config(void *base, struct BMU_CFG *cfg);
1766 +
1767 +/*
1768 + * An enumerated type for loopback values. This can be one of three values:
1769 + * no loopback (normal operation), local loopback (through the internal
1770 + * loopback module of the MAC), or PHY loopback (through the external PHY).
1771 + */
1772 +#ifndef __MAC_LOOP_ENUM__
1773 +#define __MAC_LOOP_ENUM__
1774 +enum mac_loop {LB_NONE, LB_EXT, LB_LOCAL};
1775 +#endif
1776 +
1777 +void gemac_init(void *base, void *config);
1778 +void gemac_disable_rx_checksum_offload(void *base);
1779 +void gemac_enable_rx_checksum_offload(void *base);
1780 +void gemac_set_speed(void *base, enum mac_speed gem_speed);
1781 +void gemac_set_duplex(void *base, int duplex);
1782 +void gemac_set_mode(void *base, int mode);
1783 +void gemac_enable(void *base);
1784 +void gemac_tx_disable(void *base);
1785 +void gemac_tx_enable(void *base);
1786 +void gemac_disable(void *base);
1787 +void gemac_reset(void *base);
1788 +void gemac_set_address(void *base, struct spec_addr *addr);
1789 +struct spec_addr gemac_get_address(void *base);
1790 +void gemac_set_loop(void *base, enum mac_loop gem_loop);
1791 +void gemac_set_laddr1(void *base, struct pfe_mac_addr *address);
1792 +void gemac_set_laddr2(void *base, struct pfe_mac_addr *address);
1793 +void gemac_set_laddr3(void *base, struct pfe_mac_addr *address);
1794 +void gemac_set_laddr4(void *base, struct pfe_mac_addr *address);
1795 +void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
1796 +                     unsigned int entry_index);
1797 +void gemac_clear_laddr1(void *base);
1798 +void gemac_clear_laddr2(void *base);
1799 +void gemac_clear_laddr3(void *base);
1800 +void gemac_clear_laddr4(void *base);
1801 +void gemac_clear_laddrN(void *base, unsigned int entry_index);
1802 +struct pfe_mac_addr gemac_get_hash(void *base);
1803 +void gemac_set_hash(void *base, struct pfe_mac_addr *hash);
1804 +struct pfe_mac_addr gem_get_laddr1(void *base);
1805 +struct pfe_mac_addr gem_get_laddr2(void *base);
1806 +struct pfe_mac_addr gem_get_laddr3(void *base);
1807 +struct pfe_mac_addr gem_get_laddr4(void *base);
1808 +struct pfe_mac_addr gem_get_laddrN(void *base, unsigned int entry_index);
1809 +void gemac_set_config(void *base, struct gemac_cfg *cfg);
1810 +void gemac_allow_broadcast(void *base);
1811 +void gemac_no_broadcast(void *base);
1812 +void gemac_enable_1536_rx(void *base);
1813 +void gemac_disable_1536_rx(void *base);
1814 +void gemac_set_rx_max_fl(void *base, int mtu);
1815 +void gemac_enable_rx_jmb(void *base);
1816 +void gemac_disable_rx_jmb(void *base);
1817 +void gemac_enable_stacked_vlan(void *base);
1818 +void gemac_disable_stacked_vlan(void *base);
1819 +void gemac_enable_pause_rx(void *base);
1820 +void gemac_disable_pause_rx(void *base);
1821 +void gemac_enable_copy_all(void *base);
1822 +void gemac_disable_copy_all(void *base);
1823 +void gemac_set_bus_width(void *base, int width);
1824 +void gemac_set_wol(void *base, u32 wol_conf);
1825 +
1826 +void gpi_init(void *base, struct gpi_cfg *cfg);
1827 +void gpi_reset(void *base);
1828 +void gpi_enable(void *base);
1829 +void gpi_disable(void *base);
1830 +void gpi_set_config(void *base, struct gpi_cfg *cfg);
1831 +
1832 +void class_init(struct class_cfg *cfg);
1833 +void class_reset(void);
1834 +void class_enable(void);
1835 +void class_disable(void);
1836 +void class_set_config(struct class_cfg *cfg);
1837 +
1838 +void tmu_reset(void);
1839 +void tmu_init(struct tmu_cfg *cfg);
1840 +void tmu_enable(u32 pe_mask);
1841 +void tmu_disable(u32 pe_mask);
1842 +u32  tmu_qstatus(u32 if_id);
1843 +u32  tmu_pkts_processed(u32 if_id);
1844 +
1845 +void util_init(struct util_cfg *cfg);
1846 +void util_reset(void);
1847 +void util_enable(void);
1848 +void util_disable(void);
1849 +
1850 +void hif_init(void);
1851 +void hif_tx_enable(void);
1852 +void hif_tx_disable(void);
1853 +void hif_rx_enable(void);
1854 +void hif_rx_disable(void);
1855 +
1856 +/* Get chip revision level */
1859 +static inline unsigned int CHIP_REVISION(void)
1860 +{
1861 +       /* For LS1012A, always return 1 */
1862 +       return 1;
1863 +}
1864 +
1865 +/* Start HIF RX DMA */
1868 +static inline void hif_rx_dma_start(void)
1869 +{
1870 +       writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_RX_CTRL);
1871 +}
1872 +
1873 +/* Start HIF TX DMA */
1876 +static inline void hif_tx_dma_start(void)
1877 +{
1878 +       writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL);
1879 +}
1880 +
1881 +#endif /* _PFE_H_ */
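
The pe_dmem_* wrappers declared above are size-specific forms of the generic
accessors. A minimal usage sketch follows; the DMEM offset is an arbitrary
illustrative value.

/* Sketch only: read-modify-write one 32-bit word in CLASS0's DMEM. */
u32 val = pe_dmem_readl(CLASS0_ID, 0x100);
pe_dmem_writel(CLASS0_ID, val | BIT(0), 0x100);
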
1882 --- /dev/null
1883 +++ b/drivers/staging/fsl_ppfe/pfe_cdev.c
1884 @@ -0,0 +1,258 @@
1885 +// SPDX-License-Identifier: GPL-2.0+
1886 +/*
1887 + * Copyright 2018 NXP
1888 + */
1889 +
1890 +/* @pfe_cdev.c.
1891 + *  Dummy device representing the PFE userspace (US) interface.
1892 + *  - used for interacting with the kernel layer for link status
1893 + */
1894 +
1895 +#include <linux/eventfd.h>
1896 +#include <linux/irqreturn.h>
1897 +#include <linux/io.h>
1898 +#include <asm/irq.h>
1899 +
1900 +#include "pfe_cdev.h"
1901 +#include "pfe_mod.h"
1902 +
1903 +static int pfe_majno;
1904 +static struct class *pfe_char_class;
1905 +static struct device *pfe_char_dev;
1906 +struct eventfd_ctx *g_trigger;
1907 +
1908 +struct pfe_shared_info link_states[PFE_CDEV_ETH_COUNT];
1909 +
1910 +static int pfe_cdev_open(struct inode *inp, struct file *fp)
1911 +{
1912 +       pr_debug("PFE CDEV device opened.\n");
1913 +       return 0;
1914 +}
1915 +
1916 +static ssize_t pfe_cdev_read(struct file *fp, char *buf,
1917 +                            size_t len, loff_t *off)
1918 +{
1919 +       int ret = 0;
1920 +
1921 +       pr_info("PFE CDEV attempting to copy (%lu) bytes to user.\n",
1922 +               sizeof(link_states));
1923 +
1924 +       pr_debug("Dump link_state on screen before copy_to_user\n");
1925 +       for (; ret < PFE_CDEV_ETH_COUNT; ret++) {
1926 +               pr_debug("%u  %u", link_states[ret].phy_id,
1927 +                        link_states[ret].state);
1928 +               pr_debug("\n");
1929 +       }
1930 +
1931 +       /* Copy the whole link_states array to the user buffer */
1932 +       ret = copy_to_user(buf, &link_states, sizeof(link_states));
1933 +       if (ret != 0) {
1934 +               pr_err("Failed to send (%d) bytes of (%lu) requested.\n",
1935 +                      ret, len);
1936 +               return -EFAULT;
1937 +       }
1938 +
1939 +       /* offset set back to 0 as there is no contextual reading offset */
1940 +       *off = 0;
1941 +       pr_debug("Read of (%lu) bytes performed.\n", sizeof(link_states));
1942 +
1943 +       return sizeof(link_states);
1944 +}
1945 +
1946 +/**
1947 + * This function is for getting commands from user space through a non-IOCTL
1948 + * channel. It can be used to configure the device.
1949 + * TODO: To be filled in the future, if duplex communication with user
1950 + * space is required.
1951 + */
1952 +static ssize_t pfe_cdev_write(struct file *fp, const char *buf,
1953 +                             size_t len, loff_t *off)
1954 +{
1955 +       pr_info("PFE CDEV Write operation not supported!\n");
1956 +
1957 +       return -EFAULT;
1958 +}
1959 +
1960 +static int pfe_cdev_release(struct inode *inp, struct file *fp)
1961 +{
1962 +       if (g_trigger) {
1963 +               free_irq(pfe->hif_irq, g_trigger);
1964 +               eventfd_ctx_put(g_trigger);
1965 +               g_trigger = NULL;
1966 +       }
1967 +
1968 +       pr_info("PFE_CDEV: Device successfully closed\n");
1969 +       return 0;
1970 +}
1971 +
1972 +/*
1973 + * hif_us_isr-
1974 + * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
1975 + */
1976 +static irqreturn_t hif_us_isr(int irq, void *arg)
1977 +{
1978 +       struct eventfd_ctx *trigger = (struct eventfd_ctx *)arg;
1979 +       int int_status;
1980 +       int int_enable_mask;
1981 +
1982 +       /* Read HIF interrupt source register */
1983 +       int_status = readl_relaxed(HIF_INT_SRC);
1984 +       int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
1985 +
1986 +       if ((int_status & HIF_INT) == 0)
1987 +               return IRQ_NONE;
1988 +
1989 +       if (int_status & HIF_RXPKT_INT) {
1990 +               int_enable_mask &= ~(HIF_RXPKT_INT);
1991 +               /* Disable interrupts, they will be enabled after
1992 +                * they are serviced
1993 +                */
1994 +               writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
1995 +
1996 +               eventfd_signal(trigger, 1);
1997 +       }
1998 +
1999 +       return IRQ_HANDLED;
2000 +}
2001 +
2002 +#define PFE_INTR_COAL_USECS    100
2003 +static long pfe_cdev_ioctl(struct file *fp, unsigned int cmd,
2004 +                          unsigned long arg)
2005 +{
2006 +       int ret = -EFAULT;
2007 +       int __user *argp = (int __user *)arg;
2008 +
2009 +       pr_debug("PFE CDEV IOCTL Called with cmd=(%u)\n", cmd);
2010 +
2011 +       switch (cmd) {
2012 +       case PFE_CDEV_ETH0_STATE_GET:
2013 +               /* Return an unsigned int (link state) for ETH0 */
2014 +               *argp = link_states[0].state;
2015 +               pr_debug("Returning state=%d for ETH0\n", *argp);
2016 +               ret = 0;
2017 +               break;
2018 +       case PFE_CDEV_ETH1_STATE_GET:
2019 +               /* Return an unsigned int (link state) for ETH1 */
2020 +               *argp = link_states[1].state;
2021 +               pr_debug("Returning state=%d for ETH1\n", *argp);
2022 +               ret = 0;
2023 +               break;
2024 +       case PFE_CDEV_HIF_INTR_EN:
2025 +               /* Return success/failure */
2026 +               g_trigger = eventfd_ctx_fdget(*argp);
2027 +               if (IS_ERR(g_trigger))
2028 +                       return PTR_ERR(g_trigger);
2029 +               ret = request_irq(pfe->hif_irq, hif_us_isr, 0, "pfe_hif",
2030 +                                 g_trigger);
2031 +               if (ret) {
2032 +                       pr_err("%s: failed to get the hif IRQ = %d\n",
2033 +                              __func__, pfe->hif_irq);
2034 +                       eventfd_ctx_put(g_trigger);
2035 +                       g_trigger = NULL;
2036 +                       break;
2036 +               }
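+               /* sys_clk is in kHz (see pfe_ctrl.h), so sys_clk / 1000 gives
+                * clock cycles per microsecond; the coalescing value is
+                * therefore expressed in AXI clock cycles.
+                */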
2037 +               writel((PFE_INTR_COAL_USECS * (pfe->ctrl.sys_clk / 1000)) |
2038 +                       HIF_INT_COAL_ENABLE, HIF_INT_COAL);
2039 +
2040 +               pr_debug("request_irq for hif interrupt: %d\n", pfe->hif_irq);
2041 +               ret = 0;
2042 +               break;
2043 +       default:
2044 +               pr_info("Unsupported cmd (%d) for PFE CDEV.\n", cmd);
2045 +               break;
2046 +       }
2047 +
2048 +       return ret;
2049 +}
2050 +
2051 +static unsigned int pfe_cdev_poll(struct file *fp,
2052 +                                 struct poll_table_struct *wait)
2053 +{
2054 +       pr_info("PFE CDEV poll method not supported\n");
2055 +       return 0;
2056 +}
2057 +
2058 +static const struct file_operations pfe_cdev_fops = {
2059 +       .open = pfe_cdev_open,
2060 +       .read = pfe_cdev_read,
2061 +       .write = pfe_cdev_write,
2062 +       .release = pfe_cdev_release,
2063 +       .unlocked_ioctl = pfe_cdev_ioctl,
2064 +       .poll = pfe_cdev_poll,
2065 +};
2066 +
2067 +int pfe_cdev_init(void)
2068 +{
2069 +       int ret;
2070 +
2071 +       pr_debug("PFE CDEV initialization begin\n");
2072 +
2073 +       /* Register the major number for the device */
2074 +       pfe_majno = register_chrdev(0, PFE_CDEV_NAME, &pfe_cdev_fops);
2075 +       if (pfe_majno < 0) {
2076 +               pr_err("Unable to register PFE CDEV. PFE CDEV not available\n");
2077 +               ret = pfe_majno;
2078 +               goto cleanup;
2079 +       }
2080 +
2081 +       pr_debug("PFE CDEV assigned major number: %d\n", pfe_majno);
2082 +
2083 +       /* Register the class for the device */
2084 +       pfe_char_class = class_create(THIS_MODULE, PFE_CLASS_NAME);
2085 +       if (IS_ERR(pfe_char_class)) {
2086 +               pr_err(
2087 +               "Failed to init class for PFE CDEV. PFE CDEV not available.\n");
2088 +               goto cleanup;
2089 +       }
2090 +
2091 +       pr_debug("PFE CDEV Class created successfully.\n");
2092 +
2093 +       /* Create the device without any parent and without any callback data */
2094 +       pfe_char_dev = device_create(pfe_char_class, NULL,
2095 +                                    MKDEV(pfe_majno, 0), NULL,
2096 +                                    PFE_CDEV_NAME);
2097 +       if (IS_ERR(pfe_char_dev)) {
2098 +               pr_err("Unable to create PFE CDEV device. PFE CDEV not available.\n");
2099 +               ret = PTR_ERR(pfe_char_dev);
2100 +               goto cleanup;
2101 +       }
2102 +
2103 +       /* Information structure being shared with the userspace */
2104 +       memset(link_states, 0, sizeof(struct pfe_shared_info) *
2105 +                       PFE_CDEV_ETH_COUNT);
2106 +
2107 +       pr_info("PFE CDEV created: %s\n", PFE_CDEV_NAME);
2108 +
2109 +       ret = 0;
2110 +       return ret;
2111 +
2112 +cleanup:
2113 +       if (!IS_ERR(pfe_char_class))
2114 +               class_destroy(pfe_char_class);
2115 +
2116 +       if (pfe_majno > 0)
2117 +               unregister_chrdev(pfe_majno, PFE_CDEV_NAME);
2118 +
2119 +       ret = -EFAULT;
2120 +       return ret;
2121 +}
2122 +
2123 +void pfe_cdev_exit(void)
2124 +{
2125 +       if (!IS_ERR(pfe_char_dev))
2126 +               device_destroy(pfe_char_class, MKDEV(pfe_majno, 0));
2127 +
2128 +       if (!IS_ERR(pfe_char_class)) {
2129 +               class_unregister(pfe_char_class);
2130 +               class_destroy(pfe_char_class);
2131 +       }
2132 +
2133 +       if (pfe_majno > 0)
2134 +               unregister_chrdev(pfe_majno, PFE_CDEV_NAME);
2135 +
2136 +       /* reset the variables */
2137 +       pfe_majno = 0;
2138 +       pfe_char_class = NULL;
2139 +       pfe_char_dev = NULL;
2140 +
2141 +       pr_info("PFE CDEV Removed.\n");
2142 +}
2143 --- /dev/null
2144 +++ b/drivers/staging/fsl_ppfe/pfe_cdev.h
2145 @@ -0,0 +1,41 @@
2146 +/* SPDX-License-Identifier: GPL-2.0+ */
2147 +/*
2148 + * Copyright 2018 NXP
2149 + */
2150 +
2151 +#ifndef _PFE_CDEV_H_
2152 +#define _PFE_CDEV_H_
2153 +
2154 +#include <linux/init.h>
2155 +#include <linux/device.h>
2156 +#include <linux/err.h>
2157 +#include <linux/kernel.h>
2158 +#include <linux/fs.h>
2159 +#include <linux/uaccess.h>
2160 +#include <linux/poll.h>
2161 +
2162 +#define  PFE_CDEV_NAME "pfe_us_cdev"
2163 +#define  PFE_CLASS_NAME  "ppfe_us"
2164 +
2165 +/* Extracted from ls1012a_pfe_platform_data; there are 3 interfaces
2166 + * supported by the PFE driver. Should be updated if the number of eth
2167 + * devices changes.
2168 + */
2169 +#define PFE_CDEV_ETH_COUNT 3
2170 +
2171 +struct pfe_shared_info {
2172 +       uint32_t phy_id; /* Link phy ID */
2173 +       uint8_t state;  /* Either 0 or 1 */
2174 +};
2175 +
2176 +extern struct pfe_shared_info link_states[PFE_CDEV_ETH_COUNT];
2177 +
2178 +/* IOCTL Commands */
2179 +#define PFE_CDEV_ETH0_STATE_GET                _IOR('R', 0, int)
2180 +#define PFE_CDEV_ETH1_STATE_GET                _IOR('R', 1, int)
2181 +#define PFE_CDEV_HIF_INTR_EN           _IOWR('R', 2, int)
2182 +
2183 +int pfe_cdev_init(void);
2184 +void pfe_cdev_exit(void);
2185 +
2186 +#endif /* _PFE_CDEV_H_ */
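
From user space, the device above would be used roughly as follows. This is a
sketch under a few assumptions: the node shows up as /dev/pfe_us_cdev (derived
from PFE_CDEV_NAME; the actual path depends on the device manager), the ioctl
argument is a pointer to an int (matching how the kernel side dereferences it),
and the PFE_CDEV_* ioctl definitions above are available to the program.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>

/* Sketch only: query ETH0 link state and register an eventfd for HIF RX
 * interrupt notifications via the ioctls declared above.
 */
int pfe_cdev_example(void)
{
	int state = 0, efd, fd;

	fd = open("/dev/pfe_us_cdev", O_RDWR);	/* node path is an assumption */
	if (fd < 0)
		return -1;

	ioctl(fd, PFE_CDEV_ETH0_STATE_GET, &state);	/* 0 = down, 1 = up */

	efd = eventfd(0, 0);
	ioctl(fd, PFE_CDEV_HIF_INTR_EN, &efd);	/* eventfd signalled on HIF RX */

	close(fd);
	return state;
}
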
2187 --- /dev/null
2188 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c
2189 @@ -0,0 +1,226 @@
2190 +// SPDX-License-Identifier: GPL-2.0+
2191 +/*
2192 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2193 + * Copyright 2017 NXP
2194 + */
2195 +
2196 +#include <linux/kernel.h>
2197 +#include <linux/sched.h>
2198 +#include <linux/module.h>
2199 +#include <linux/list.h>
2200 +#include <linux/kthread.h>
2201 +
2202 +#include "pfe_mod.h"
2203 +#include "pfe_ctrl.h"
2204 +
2205 +#define TIMEOUT_MS     1000
2206 +
2207 +int relax(unsigned long end)
2208 +{
2209 +       if (time_after(jiffies, end)) {
2210 +               if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000))
2211 +                       return -1;
2212 +
2213 +               if (need_resched())
2214 +                       schedule();
2215 +       }
2216 +
2217 +       return 0;
2218 +}
2219 +
2220 +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl)
2221 +{
2222 +       int id;
2223 +
2224 +       mutex_lock(&ctrl->mutex);
2225 +
2226 +       for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
2227 +               pe_dmem_write(id, cpu_to_be32(0x1), CLASS_DM_RESUME, 4);
2228 +
2229 +       for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
2230 +               if (id == TMU2_ID)
2231 +                       continue;
2232 +               pe_dmem_write(id, cpu_to_be32(0x1), TMU_DM_RESUME, 4);
2233 +       }
2234 +
2235 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
2236 +       pe_dmem_write(UTIL_ID, cpu_to_be32(0x1), UTIL_DM_RESUME, 4);
2237 +#endif
2238 +       mutex_unlock(&ctrl->mutex);
2239 +}
2240 +
2241 +void pfe_ctrl_resume(struct pfe_ctrl *ctrl)
2242 +{
2243 +       int pe_mask = CLASS_MASK | TMU_MASK;
2244 +
2245 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
2246 +       pe_mask |= UTIL_MASK;
2247 +#endif
2248 +       mutex_lock(&ctrl->mutex);
2249 +       pe_start(&pfe->ctrl, pe_mask);
2250 +       mutex_unlock(&ctrl->mutex);
2251 +}
2252 +
2253 +/* PE sync stop.
2254 + * Stops packet processing for a list of PEs (specified using a bitmask).
2255 + * The caller must hold ctrl->mutex.
2256 + *
2257 + * @param ctrl         Control context
2258 + * @param pe_mask      Mask of PE ids to stop
2259 + *
2260 + */
2261 +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask)
2262 +{
2263 +       struct pe_sync_mailbox *mbox;
2264 +       int pe_stopped = 0;
2265 +       unsigned long end = jiffies + 2;
2266 +       int i;
2267 +
2268 +       pe_mask &= 0x2FF;  /* Exclude UTIL + TMU2 */
2269 +
2270 +       for (i = 0; i < MAX_PE; i++)
2271 +               if (pe_mask & (1 << i)) {
2272 +                       mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
2273 +
2274 +                       pe_dmem_write(i, cpu_to_be32(0x1), (unsigned
2275 +                                       long)&mbox->stop, 4);
2276 +               }
2277 +
2278 +       while (pe_stopped != pe_mask) {
2279 +               for (i = 0; i < MAX_PE; i++)
2280 +                       if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
2281 +                               mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
2282 +
2283 +                               if (pe_dmem_read(i, (unsigned
2284 +                                       long)&mbox->stopped, 4) &
2285 +                                       cpu_to_be32(0x1))
2286 +                                       pe_stopped |= (1 << i);
2287 +                       }
2288 +
2289 +               if (relax(end) < 0)
2290 +                       goto err;
2291 +       }
2292 +
2293 +       return 0;
2294 +
2295 +err:
2296 +       pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
2297 +
2298 +       for (i = 0; i < MAX_PE; i++)
2299 +               if (pe_mask & (1 << i)) {
2300 +                       mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
2301 +
2302 +                       pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
2303 +                                       long)&mbox->stop, 4);
2304 +       }
2305 +
2306 +       return -EIO;
2307 +}
2308 +
2309 +/* PE start.
2310 + * Starts packet processing for a list of PEs (specified using a bitmask).
2311 + * The caller must hold ctrl->mutex.
2312 + *
2313 + * @param ctrl         Control context
2314 + * @param pe_mask      Mask of PE ids to start
2315 + *
2316 + */
2317 +void pe_start(struct pfe_ctrl *ctrl, int pe_mask)
2318 +{
2319 +       struct pe_sync_mailbox *mbox;
2320 +       int i;
2321 +
2322 +       for (i = 0; i < MAX_PE; i++)
2323 +               if (pe_mask & (1 << i)) {
2324 +                       mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
2325 +
2326 +                       pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
2327 +                                       long)&mbox->stop, 4);
2328 +               }
2329 +}
2330 +
2331 +/* This function will ensure all PEs are put into the idle state */
2332 +int pe_reset_all(struct pfe_ctrl *ctrl)
2333 +{
2334 +       struct pe_sync_mailbox *mbox;
2335 +       int pe_stopped = 0;
2336 +       unsigned long end = jiffies + 2;
2337 +       int i;
2338 +       int pe_mask  = CLASS_MASK | TMU_MASK;
2339 +
2340 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
2341 +       pe_mask |= UTIL_MASK;
2342 +#endif
2343 +
2344 +       for (i = 0; i < MAX_PE; i++)
2345 +               if (pe_mask & (1 << i)) {
2346 +                       mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
2347 +
2348 +                       pe_dmem_write(i, cpu_to_be32(0x2), (unsigned
2349 +                                       long)&mbox->stop, 4);
2350 +               }
2351 +
2352 +       while (pe_stopped != pe_mask) {
2353 +               for (i = 0; i < MAX_PE; i++)
2354 +                       if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
2355 +                               mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
2356 +
2357 +                               if (pe_dmem_read(i, (unsigned long)
2358 +                                                       &mbox->stopped, 4) &
2359 +                                               cpu_to_be32(0x1))
2360 +                                       pe_stopped |= (1 << i);
2361 +                       }
2362 +
2363 +               if (relax(end) < 0)
2364 +                       goto err;
2365 +       }
2366 +
2367 +       return 0;
2368 +
2369 +err:
2370 +       pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
2371 +       return -EIO;
2372 +}
2373 +
2374 +int pfe_ctrl_init(struct pfe *pfe)
2375 +{
2376 +       struct pfe_ctrl *ctrl = &pfe->ctrl;
2377 +       int id;
2378 +
2379 +       pr_info("%s\n", __func__);
2380 +
2381 +       mutex_init(&ctrl->mutex);
2382 +       spin_lock_init(&ctrl->lock);
2383 +
2384 +       for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
2385 +               ctrl->sync_mailbox_baseaddr[id] = CLASS_DM_SYNC_MBOX;
2386 +               ctrl->msg_mailbox_baseaddr[id] = CLASS_DM_MSG_MBOX;
2387 +       }
2388 +
2389 +       for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
2390 +               if (id == TMU2_ID)
2391 +                       continue;
2392 +               ctrl->sync_mailbox_baseaddr[id] = TMU_DM_SYNC_MBOX;
2393 +               ctrl->msg_mailbox_baseaddr[id] = TMU_DM_MSG_MBOX;
2394 +       }
2395 +
2396 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
2397 +       ctrl->sync_mailbox_baseaddr[UTIL_ID] = UTIL_DM_SYNC_MBOX;
2398 +       ctrl->msg_mailbox_baseaddr[UTIL_ID] = UTIL_DM_MSG_MBOX;
2399 +#endif
2400 +
2401 +       ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR;
2402 +       ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr +
2403 +                                               ROUTE_TABLE_BASEADDR;
2404 +
2405 +       ctrl->dev = pfe->dev;
2406 +
2407 +       pr_info("%s finished\n", __func__);
2408 +
2409 +       return 0;
2410 +}
2411 +
2412 +void pfe_ctrl_exit(struct pfe *pfe)
2413 +{
2414 +       pr_info("%s\n", __func__);
2415 +}
2416 --- /dev/null
2417 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.h
2418 @@ -0,0 +1,100 @@
2419 +/* SPDX-License-Identifier: GPL-2.0+ */
2420 +/*
2421 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2422 + * Copyright 2017 NXP
2423 + */
2424 +
2425 +#ifndef _PFE_CTRL_H_
2426 +#define _PFE_CTRL_H_
2427 +
2428 +#include <linux/dmapool.h>
2429 +
2430 +#include "pfe_mod.h"
2431 +#include "pfe/pfe.h"
2432 +
2433 +#define DMA_BUF_SIZE_128       0x80    /* enough for 1 conntrack */
2434 +#define DMA_BUF_SIZE_256       0x100
2435 +/* enough for 2 conntracks, 1 bridge entry or 1 multicast entry */
2436 +#define DMA_BUF_SIZE_512       0x200
2437 +/* 512-byte DMA-allocated buffers used by the RTP relay feature */
2438 +#define DMA_BUF_MIN_ALIGNMENT  8
2439 +#define DMA_BUF_BOUNDARY       (4 * 1024)
2440 +/* bursts cannot cross a 4k boundary */
2441 +
2442 +#define CMD_TX_ENABLE  0x0501
2443 +#define CMD_TX_DISABLE 0x0502
2444 +
2445 +#define CMD_RX_LRO             0x0011
2446 +#define CMD_PKTCAP_ENABLE       0x0d01
2447 +#define CMD_QM_EXPT_RATE       0x020c
2448 +
2449 +#define CLASS_DM_SH_STATIC             (0x800)
2450 +#define CLASS_DM_CPU_TICKS             (CLASS_DM_SH_STATIC)
2451 +#define CLASS_DM_SYNC_MBOX             (0x808)
2452 +#define CLASS_DM_MSG_MBOX              (0x810)
2453 +#define CLASS_DM_DROP_CNTR             (0x820)
2454 +#define CLASS_DM_RESUME                        (0x854)
2455 +#define CLASS_DM_PESTATUS              (0x860)
2456 +
2457 +#define TMU_DM_SH_STATIC               (0x80)
2458 +#define TMU_DM_CPU_TICKS               (TMU_DM_SH_STATIC)
2459 +#define TMU_DM_SYNC_MBOX               (0x88)
2460 +#define TMU_DM_MSG_MBOX                        (0x90)
2461 +#define TMU_DM_RESUME                  (0xA0)
2462 +#define TMU_DM_PESTATUS                        (0xB0)
2463 +#define TMU_DM_CONTEXT                 (0x300)
2464 +#define TMU_DM_TX_TRANS                        (0x480)
2465 +
2466 +#define UTIL_DM_SH_STATIC              (0x0)
2467 +#define UTIL_DM_CPU_TICKS              (UTIL_DM_SH_STATIC)
2468 +#define UTIL_DM_SYNC_MBOX              (0x8)
2469 +#define UTIL_DM_MSG_MBOX               (0x10)
2470 +#define UTIL_DM_DROP_CNTR              (0x20)
2471 +#define UTIL_DM_RESUME                 (0x40)
2472 +#define UTIL_DM_PESTATUS               (0x50)
2473 +
2474 +struct pfe_ctrl {
2475 +       struct mutex mutex; /* to serialize pfe control access */
2476 +       spinlock_t lock;
2477 +
2478 +       void *dma_pool;
2479 +       void *dma_pool_512;
2480 +       void *dma_pool_128;
2481 +
2482 +       struct device *dev;
2483 +
2484 +       void *hash_array_baseaddr;              /*
2485 +                                                * Virtual base address of
2486 +                                                * the conntrack hash array
2487 +                                                */
2488 +       unsigned long hash_array_phys_baseaddr; /*
2489 +                                                * Physical base address of
2490 +                                                * the conntrack hash array
2491 +                                                */
2492 +
2493 +       int (*event_cb)(u16, u16, u16*);
2494 +
2495 +       unsigned long sync_mailbox_baseaddr[MAX_PE]; /*
2496 +                                                     * Sync mailbox PFE
2497 +                                                     * internal address,
2498 +                                                     * initialized
2499 +                                                     * when parsing elf images
2500 +                                                     */
2501 +       unsigned long msg_mailbox_baseaddr[MAX_PE]; /*
2502 +                                                    * Msg mailbox PFE internal
2503 +                                                    * address, initialized
2504 +                                                    * when parsing elf images
2505 +                                                    */
2506 +       unsigned int sys_clk;                   /* AXI clock value, in KHz */
2507 +};
2508 +
2509 +int pfe_ctrl_init(struct pfe *pfe);
2510 +void pfe_ctrl_exit(struct pfe *pfe);
2511 +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask);
2512 +void pe_start(struct pfe_ctrl *ctrl, int pe_mask);
2513 +int pe_reset_all(struct pfe_ctrl *ctrl);
2514 +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl);
2515 +void pfe_ctrl_resume(struct pfe_ctrl *ctrl);
2516 +int relax(unsigned long end);
2517 +
2518 +#endif /* _PFE_CTRL_H_ */
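
pe_sync_stop() and pe_start() must be called with ctrl->mutex held, as their
comments in pfe_ctrl.c state. A minimal sketch of the quiesce/restart pattern
for the class PEs; the wrapper name is hypothetical.

/* Sketch only: stop the class PEs, then restart them, holding ctrl->mutex
 * across the sequence as required by pe_sync_stop()/pe_start().
 */
static int pfe_class_quiesce_restart(struct pfe_ctrl *ctrl)
{
	int ret;

	mutex_lock(&ctrl->mutex);
	ret = pe_sync_stop(ctrl, CLASS_MASK);
	if (!ret)
		pe_start(ctrl, CLASS_MASK);
	mutex_unlock(&ctrl->mutex);

	return ret;
}
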
2519 --- /dev/null
2520 +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
2521 @@ -0,0 +1,99 @@
2522 +// SPDX-License-Identifier: GPL-2.0+
2523 +/*
2524 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2525 + * Copyright 2017 NXP
2526 + */
2527 +
2528 +#include <linux/module.h>
2529 +#include <linux/debugfs.h>
2530 +#include <linux/platform_device.h>
2531 +
2532 +#include "pfe_mod.h"
2533 +
2534 +static int dmem_show(struct seq_file *s, void *unused)
2535 +{
2536 +       u32 dmem_addr, val;
2537 +       int id = (long int)s->private;
2538 +       int i;
2539 +
2540 +       for (dmem_addr = 0; dmem_addr < CLASS_DMEM_SIZE; dmem_addr += 8 * 4) {
2541 +               seq_printf(s, "%04x:", dmem_addr);
2542 +
2543 +               for (i = 0; i < 8; i++) {
2544 +                       val = pe_dmem_read(id, dmem_addr + i * 4, 4);
2545 +                       seq_printf(s, " %02x %02x %02x %02x", val & 0xff,
2546 +                                  (val >> 8) & 0xff, (val >> 16) & 0xff,
2547 +                                  (val >> 24) & 0xff);
2548 +               }
2549 +
2550 +               seq_puts(s, "\n");
2551 +       }
2552 +
2553 +       return 0;
2554 +}
2555 +
2556 +static int dmem_open(struct inode *inode, struct file *file)
2557 +{
2558 +       return single_open(file, dmem_show, inode->i_private);
2559 +}
2560 +
2561 +static const struct file_operations dmem_fops = {
2562 +       .open           = dmem_open,
2563 +       .read           = seq_read,
2564 +       .llseek         = seq_lseek,
2565 +       .release        = single_release,
2566 +};
2567 +
2568 +int pfe_debugfs_init(struct pfe *pfe)
2569 +{
2570 +       struct dentry *d;
2571 +
2572 +       pr_info("%s\n", __func__);
2573 +
2574 +       pfe->dentry = debugfs_create_dir("pfe", NULL);
2575 +       if (IS_ERR_OR_NULL(pfe->dentry))
2576 +               goto err_dir;
2577 +
2578 +       d = debugfs_create_file("pe0_dmem", 0444, pfe->dentry, (void *)0,
2579 +                               &dmem_fops);
2580 +       if (IS_ERR_OR_NULL(d))
2581 +               goto err_pe;
2582 +
2583 +       d = debugfs_create_file("pe1_dmem", 0444, pfe->dentry, (void *)1,
2584 +                               &dmem_fops);
2585 +       if (IS_ERR_OR_NULL(d))
2586 +               goto err_pe;
2587 +
2588 +       d = debugfs_create_file("pe2_dmem", 0444, pfe->dentry, (void *)2,
2589 +                               &dmem_fops);
2590 +       if (IS_ERR_OR_NULL(d))
2591 +               goto err_pe;
2592 +
2593 +       d = debugfs_create_file("pe3_dmem", 0444, pfe->dentry, (void *)3,
2594 +                               &dmem_fops);
2595 +       if (IS_ERR_OR_NULL(d))
2596 +               goto err_pe;
2597 +
2598 +       d = debugfs_create_file("pe4_dmem", 0444, pfe->dentry, (void *)4,
2599 +                               &dmem_fops);
2600 +       if (IS_ERR_OR_NULL(d))
2601 +               goto err_pe;
2602 +
2603 +       d = debugfs_create_file("pe5_dmem", 0444, pfe->dentry, (void *)5,
2604 +                               &dmem_fops);
2605 +       if (IS_ERR_OR_NULL(d))
2606 +               goto err_pe;
2607 +
2608 +       return 0;
2609 +
2610 +err_pe:
2611 +       debugfs_remove_recursive(pfe->dentry);
2612 +
2613 +err_dir:
2614 +       return -1;
2615 +}
2616 +
2617 +void pfe_debugfs_exit(struct pfe *pfe)
2618 +{
2619 +       debugfs_remove_recursive(pfe->dentry);
2620 +}
2621 --- /dev/null
2622 +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.h
2623 @@ -0,0 +1,13 @@
2624 +/* SPDX-License-Identifier: GPL-2.0+ */
2625 +/*
2626 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2627 + * Copyright 2017 NXP
2628 + */
2629 +
2630 +#ifndef _PFE_DEBUGFS_H_
2631 +#define _PFE_DEBUGFS_H_
2632 +
2633 +int pfe_debugfs_init(struct pfe *pfe);
2634 +void pfe_debugfs_exit(struct pfe *pfe);
2635 +
2636 +#endif /* _PFE_DEBUGFS_H_ */
2637 --- /dev/null
2638 +++ b/drivers/staging/fsl_ppfe/pfe_eth.c
2639 @@ -0,0 +1,2554 @@
2640 +// SPDX-License-Identifier: GPL-2.0+
2641 +/*
2642 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2643 + * Copyright 2017 NXP
2644 + */
2645 +
2646 +/* @pfe_eth.c.
2647 + *  Ethernet driver to handle the exception path for PFE.
2648 + *  - uses HIF functions to send/receive packets.
2649 + *  - uses ctrl functions to start/stop interfaces.
2650 + *  - uses direct register accesses to control phy operation.
2651 + */
2652 +#include <linux/version.h>
2653 +#include <linux/kernel.h>
2654 +#include <linux/interrupt.h>
2655 +#include <linux/dma-mapping.h>
2656 +#include <linux/dmapool.h>
2657 +#include <linux/netdevice.h>
2658 +#include <linux/etherdevice.h>
2659 +#include <linux/ethtool.h>
2660 +#include <linux/mii.h>
2661 +#include <linux/phy.h>
2662 +#include <linux/timer.h>
2663 +#include <linux/hrtimer.h>
2664 +#include <linux/platform_device.h>
2665 +
2666 +#include <net/ip.h>
2667 +#include <net/sock.h>
2668 +
2669 +#include <linux/of.h>
2670 +#include <linux/of_mdio.h>
2671 +
2672 +#include <linux/io.h>
2673 +#include <asm/irq.h>
2674 +#include <linux/delay.h>
2675 +#include <linux/regmap.h>
2676 +#include <linux/i2c.h>
2677 +#include <linux/fsl/guts.h>
2678 +
2679 +#if defined(CONFIG_NF_CONNTRACK_MARK)
2680 +#include <net/netfilter/nf_conntrack.h>
2681 +#endif
2682 +
2683 +#include "pfe_mod.h"
2684 +#include "pfe_eth.h"
2685 +#include "pfe_cdev.h"
2686 +
2687 +#define LS1012A_REV_1_0                0x87040010
2688 +
2689 +bool pfe_use_old_dts_phy;
2690 +bool pfe_errata_a010897;
2691 +
2692 +static void *cbus_emac_base[3];
2693 +static void *cbus_gpi_base[3];
2694 +
2695 +/* Forward Declaration */
2696 +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
2697 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv);
2698 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num,
2699 +                              int from_tx, int n_desc);
2700 +
2701 +/* MDIO registers */
2702 +#define MDIO_SGMII_CR                  0x00
2703 +#define MDIO_SGMII_SR                  0x01
2704 +#define MDIO_SGMII_DEV_ABIL_SGMII      0x04
2705 +#define MDIO_SGMII_LINK_TMR_L          0x12
2706 +#define MDIO_SGMII_LINK_TMR_H          0x13
2707 +#define MDIO_SGMII_IF_MODE             0x14
2708 +
2709 +/* SGMII Control defines */
2710 +#define SGMII_CR_RST                   0x8000
2711 +#define SGMII_CR_AN_EN                 0x1000
2712 +#define SGMII_CR_RESTART_AN            0x0200
2713 +#define SGMII_CR_FD                    0x0100
2714 +#define SGMII_CR_SPEED_SEL1_1G         0x0040
2715 +#define SGMII_CR_DEF_VAL               (SGMII_CR_AN_EN | SGMII_CR_FD | \
2716 +                                        SGMII_CR_SPEED_SEL1_1G)
2717 +
2718 +/* SGMII IF Mode */
2719 +#define SGMII_DUPLEX_HALF              0x10
2720 +#define SGMII_SPEED_10MBPS             0x00
2721 +#define SGMII_SPEED_100MBPS            0x04
2722 +#define SGMII_SPEED_1GBPS              0x08
2723 +#define SGMII_USE_SGMII_AN             0x02
2724 +#define SGMII_EN                       0x01
2725 +
2726 +/* SGMII Device Ability for SGMII */
2727 +#define SGMII_DEV_ABIL_ACK             0x4000
2728 +#define SGMII_DEV_ABIL_EEE_CLK_STP_EN  0x0100
2729 +#define SGMII_DEV_ABIL_SGMII           0x0001
2730 +
2731 +unsigned int gemac_regs[] = {
2732 +       0x0004, /* Interrupt event */
2733 +       0x0008, /* Interrupt mask */
2734 +       0x0024, /* Ethernet control */
2735 +       0x0064, /* MIB Control/Status */
2736 +       0x0084, /* Receive control/status */
2737 +       0x00C4, /* Transmit control */
2738 +       0x00E4, /* Physical address low */
2739 +       0x00E8, /* Physical address high */
2740 +       0x0144, /* Transmit FIFO Watermark and Store and Forward Control*/
2741 +       0x0190, /* Receive FIFO Section Full Threshold */
2742 +       0x01A0, /* Transmit FIFO Section Empty Threshold */
2743 +       0x01B0, /* Frame Truncation Length */
2744 +};
2745 +
2746 +/********************************************************************/
2747 +/*                   SYSFS INTERFACE                               */
2748 +/********************************************************************/
2749 +
2750 +#ifdef PFE_ETH_NAPI_STATS
2751 +/*
2752 + * pfe_eth_show_napi_stats
2753 + */
2754 +static ssize_t pfe_eth_show_napi_stats(struct device *dev,
2755 +                                      struct device_attribute *attr,
2756 +                                      char *buf)
2757 +{
2758 +       struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2759 +       ssize_t len = 0;
2760 +
2761 +       len += sprintf(buf + len, "sched:  %u\n",
2762 +                       priv->napi_counters[NAPI_SCHED_COUNT]);
2763 +       len += sprintf(buf + len, "poll:   %u\n",
2764 +                       priv->napi_counters[NAPI_POLL_COUNT]);
2765 +       len += sprintf(buf + len, "packet: %u\n",
2766 +                       priv->napi_counters[NAPI_PACKET_COUNT]);
2767 +       len += sprintf(buf + len, "budget: %u\n",
2768 +                       priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
2769 +       len += sprintf(buf + len, "desc:   %u\n",
2770 +                       priv->napi_counters[NAPI_DESC_COUNT]);
2771 +
2772 +       return len;
2773 +}
2774 +
2775 +/*
2776 + * pfe_eth_set_napi_stats
2777 + */
2778 +static ssize_t pfe_eth_set_napi_stats(struct device *dev,
2779 +                                     struct device_attribute *attr,
2780 +                                     const char *buf, size_t count)
2781 +{
2782 +       struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2783 +
2784 +       memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
2785 +
2786 +       return count;
2787 +}
2788 +#endif
2789 +#ifdef PFE_ETH_TX_STATS
2790 +/* pfe_eth_show_tx_stats
2791 + *
2792 + */
2793 +static ssize_t pfe_eth_show_tx_stats(struct device *dev,
2794 +                                    struct device_attribute *attr,
2795 +                                    char *buf)
2796 +{
2797 +       struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2798 +       ssize_t len = 0;
2799 +       int i;
2800 +
2801 +       len += sprintf(buf + len, "TX queues stats:\n");
2802 +
2803 +       for (i = 0; i < emac_txq_cnt; i++) {
2804 +               struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2805 +                                                                       i);
2806 +
2807 +               len += sprintf(buf + len, "\n");
2808 +               __netif_tx_lock_bh(tx_queue);
2809 +
2810 +               hif_tx_lock(&pfe->hif);
2811 +               len += sprintf(buf + len,
2812 +                               "Queue %2d :  credits               = %10d\n"
2813 +                               , i, hif_lib_tx_credit_avail(pfe, priv->id, i));
2814 +               len += sprintf(buf + len,
2815 +                                "            tx packets            = %10d\n"
2816 +                               ,  pfe->tmu_credit.tx_packets[priv->id][i]);
2817 +               hif_tx_unlock(&pfe->hif);
2818 +
2819 +               /* Don't output additional stats if the queue was never used */
2820 +               if (!pfe->tmu_credit.tx_packets[priv->id][i])
2821 +                       goto skip;
2822 +
2823 +               len += sprintf(buf + len,
2824 +                                "            clean_fail            = %10d\n"
2825 +                               , priv->clean_fail[i]);
2826 +               len += sprintf(buf + len,
2827 +                                "            stop_queue            = %10d\n"
2828 +                               , priv->stop_queue_total[i]);
2829 +               len += sprintf(buf + len,
2830 +                                "            stop_queue_hif        = %10d\n"
2831 +                               , priv->stop_queue_hif[i]);
2832 +               len += sprintf(buf + len,
2833 +                               "            stop_queue_hif_client = %10d\n"
2834 +                               , priv->stop_queue_hif_client[i]);
2835 +               len += sprintf(buf + len,
2836 +                                "            stop_queue_credit     = %10d\n"
2837 +                               , priv->stop_queue_credit[i]);
2838 +skip:
2839 +               __netif_tx_unlock_bh(tx_queue);
2840 +       }
2841 +       return len;
2842 +}
2843 +
2844 +/* pfe_eth_set_tx_stats
2845 + *
2846 + */
2847 +static ssize_t pfe_eth_set_tx_stats(struct device *dev,
2848 +                                   struct device_attribute *attr,
2849 +                                   const char *buf, size_t count)
2850 +{
2851 +       struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2852 +       int i;
2853 +
2854 +       for (i = 0; i < emac_txq_cnt; i++) {
2855 +               struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2856 +                                                                       i);
2857 +
2858 +               __netif_tx_lock_bh(tx_queue);
2859 +               priv->clean_fail[i] = 0;
2860 +               priv->stop_queue_total[i] = 0;
2861 +               priv->stop_queue_hif[i] = 0;
2862 +               priv->stop_queue_hif_client[i] = 0;
2863 +               priv->stop_queue_credit[i] = 0;
2864 +               __netif_tx_unlock_bh(tx_queue);
2865 +       }
2866 +
2867 +       return count;
2868 +}
2869 +#endif
2870 +/* pfe_eth_show_txavail
2871 + *
2872 + */
2873 +static ssize_t pfe_eth_show_txavail(struct device *dev,
2874 +                                   struct device_attribute *attr,
2875 +                                   char *buf)
2876 +{
2877 +       struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2878 +       ssize_t len = 0;
2879 +       int i;
2880 +
2881 +       for (i = 0; i < emac_txq_cnt; i++) {
2882 +               struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2883 +                                                                       i);
2884 +
2885 +               __netif_tx_lock_bh(tx_queue);
2886 +
2887 +               len += sprintf(buf + len, "%d",
2888 +                               hif_lib_tx_avail(&priv->client, i));
2889 +
2890 +               __netif_tx_unlock_bh(tx_queue);
2891 +
2892 +               if (i == (emac_txq_cnt - 1))
2893 +                       len += sprintf(buf + len, "\n");
2894 +               else
2895 +                       len += sprintf(buf + len, " ");
2896 +       }
2897 +
2898 +       return len;
2899 +}
2900 +
2901 +/* pfe_eth_show_default_priority
2902 + *
2903 + */
2904 +static ssize_t pfe_eth_show_default_priority(struct device *dev,
2905 +                                            struct device_attribute *attr,
2906 +                                               char *buf)
2907 +{
2908 +       struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2909 +       unsigned long flags;
2910 +       int rc;
2911 +
2912 +       spin_lock_irqsave(&priv->lock, flags);
2913 +       rc = sprintf(buf, "%d\n", priv->default_priority);
2914 +       spin_unlock_irqrestore(&priv->lock, flags);
2915 +
2916 +       return rc;
2917 +}
2918 +
2919 +/* pfe_eth_set_default_priority
2920 + *
2921 + */
2922 +
2923 +static ssize_t pfe_eth_set_default_priority(struct device *dev,
2924 +                                           struct device_attribute *attr,
2925 +                                           const char *buf, size_t count)
2926 +{
2927 +       struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2928 +       unsigned long flags;
2929 +       unsigned long priority;
2930 +
2931 +       /* kstrtoul() returns 0 or a negative errno; the parsed value
2932 +        * comes back through its output pointer, not its return value.
2933 +        */
2934 +       if (kstrtoul(buf, 0, &priority))
2935 +               return -EINVAL;
2936 +
2937 +       spin_lock_irqsave(&priv->lock, flags);
2938 +       priv->default_priority = priority;
2939 +       spin_unlock_irqrestore(&priv->lock, flags);
2940 +
2941 +       return count;
2935 +}
2936 +
2937 +static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
2938 +static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority,
2939 +                       pfe_eth_set_default_priority);
2940 +
2941 +#ifdef PFE_ETH_NAPI_STATS
2942 +static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats,
2943 +                       pfe_eth_set_napi_stats);
2944 +#endif
2945 +
2946 +#ifdef PFE_ETH_TX_STATS
2947 +static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats,
2948 +                       pfe_eth_set_tx_stats);
2949 +#endif
2950 +
2951 +/*
2952 + * pfe_eth_sysfs_init
2953 + *
2954 + */
2955 +static int pfe_eth_sysfs_init(struct net_device *ndev)
2956 +{
2957 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2958 +       int err;
2959 +
2960 +       /* Initialize the default values */
2961 +
2962 +       /*
2963 +        * By default, packets without conntrack will use this default low
2964 +        * priority queue
2965 +        */
2966 +       priv->default_priority = 0;
2967 +
2968 +       /* Create our sysfs files */
2969 +       err = device_create_file(&ndev->dev, &dev_attr_default_priority);
2970 +       if (err) {
2971 +               netdev_err(ndev,
2972 +                          "failed to create default_priority sysfs file\n");
2973 +               goto err_priority;
2974 +       }
2975 +
2976 +       err = device_create_file(&ndev->dev, &dev_attr_txavail);
2977 +       if (err) {
2978 +               netdev_err(ndev,
2979 +                          "failed to create txavail sysfs file\n");
2980 +               goto err_txavail;
2981 +       }
2982 +
2983 +#ifdef PFE_ETH_NAPI_STATS
2984 +       err = device_create_file(&ndev->dev, &dev_attr_napi_stats);
2985 +       if (err) {
2986 +               netdev_err(ndev, "failed to create napi stats sysfs files\n");
2987 +               goto err_napi;
2988 +       }
2989 +#endif
2990 +
2991 +#ifdef PFE_ETH_TX_STATS
2992 +       err = device_create_file(&ndev->dev, &dev_attr_tx_stats);
2993 +       if (err) {
2994 +               netdev_err(ndev, "failed to create tx stats sysfs files\n");
2995 +               goto err_tx;
2996 +       }
2997 +#endif
2998 +
2999 +       return 0;
3000 +
3001 +#ifdef PFE_ETH_TX_STATS
3002 +err_tx:
3003 +#endif
3004 +#ifdef PFE_ETH_NAPI_STATS
3005 +       device_remove_file(&ndev->dev, &dev_attr_napi_stats);
3006 +
3007 +err_napi:
3008 +#endif
3009 +       device_remove_file(&ndev->dev, &dev_attr_txavail);
3010 +
3011 +err_txavail:
3012 +       device_remove_file(&ndev->dev, &dev_attr_default_priority);
3013 +
3014 +err_priority:
3015 +       return -1;
3016 +}
3017 +
3018 +/* pfe_eth_sysfs_exit
3019 + *
3020 + */
3021 +void pfe_eth_sysfs_exit(struct net_device *ndev)
3022 +{
3023 +#ifdef PFE_ETH_TX_STATS
3024 +       device_remove_file(&ndev->dev, &dev_attr_tx_stats);
3025 +#endif
3026 +
3027 +#ifdef PFE_ETH_NAPI_STATS
3028 +       device_remove_file(&ndev->dev, &dev_attr_napi_stats);
3029 +#endif
3030 +       device_remove_file(&ndev->dev, &dev_attr_txavail);
3031 +       device_remove_file(&ndev->dev, &dev_attr_default_priority);
3032 +}
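+
+/*
+ * Example usage of the sysfs attributes above (interface name is just
+ * an example):
+ *
+ *   # send unmarked packets to TX queue 2 by default
+ *   echo 2 > /sys/class/net/eth0/default_priority
+ *
+ *   # show the TX credits currently available per queue
+ *   cat /sys/class/net/eth0/txavail
+ */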
3033 +
3034 +/*************************************************************************/
3035 +/*             ETHTOOL INTERFACE                                        */
3036 +/*************************************************************************/
3037 +
3038 +/*MTIP GEMAC */
3039 +static const struct fec_stat {
3040 +       char name[ETH_GSTRING_LEN];
3041 +       u16 offset;
3042 +} fec_stats[] = {
3043 +       /* RMON TX */
3044 +       { "tx_dropped", RMON_T_DROP },
3045 +       { "tx_packets", RMON_T_PACKETS },
3046 +       { "tx_broadcast", RMON_T_BC_PKT },
3047 +       { "tx_multicast", RMON_T_MC_PKT },
3048 +       { "tx_crc_errors", RMON_T_CRC_ALIGN },
3049 +       { "tx_undersize", RMON_T_UNDERSIZE },
3050 +       { "tx_oversize", RMON_T_OVERSIZE },
3051 +       { "tx_fragment", RMON_T_FRAG },
3052 +       { "tx_jabber", RMON_T_JAB },
3053 +       { "tx_collision", RMON_T_COL },
3054 +       { "tx_64byte", RMON_T_P64 },
3055 +       { "tx_65to127byte", RMON_T_P65TO127 },
3056 +       { "tx_128to255byte", RMON_T_P128TO255 },
3057 +       { "tx_256to511byte", RMON_T_P256TO511 },
3058 +       { "tx_512to1023byte", RMON_T_P512TO1023 },
3059 +       { "tx_1024to2047byte", RMON_T_P1024TO2047 },
3060 +       { "tx_GTE2048byte", RMON_T_P_GTE2048 },
3061 +       { "tx_octets", RMON_T_OCTETS },
3062 +
3063 +       /* IEEE TX */
3064 +       { "IEEE_tx_drop", IEEE_T_DROP },
3065 +       { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
3066 +       { "IEEE_tx_1col", IEEE_T_1COL },
3067 +       { "IEEE_tx_mcol", IEEE_T_MCOL },
3068 +       { "IEEE_tx_def", IEEE_T_DEF },
3069 +       { "IEEE_tx_lcol", IEEE_T_LCOL },
3070 +       { "IEEE_tx_excol", IEEE_T_EXCOL },
3071 +       { "IEEE_tx_macerr", IEEE_T_MACERR },
3072 +       { "IEEE_tx_cserr", IEEE_T_CSERR },
3073 +       { "IEEE_tx_sqe", IEEE_T_SQE },
3074 +       { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
3075 +       { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
3076 +
3077 +       /* RMON RX */
3078 +       { "rx_packets", RMON_R_PACKETS },
3079 +       { "rx_broadcast", RMON_R_BC_PKT },
3080 +       { "rx_multicast", RMON_R_MC_PKT },
3081 +       { "rx_crc_errors", RMON_R_CRC_ALIGN },
3082 +       { "rx_undersize", RMON_R_UNDERSIZE },
3083 +       { "rx_oversize", RMON_R_OVERSIZE },
3084 +       { "rx_fragment", RMON_R_FRAG },
3085 +       { "rx_jabber", RMON_R_JAB },
3086 +       { "rx_64byte", RMON_R_P64 },
3087 +       { "rx_65to127byte", RMON_R_P65TO127 },
3088 +       { "rx_128to255byte", RMON_R_P128TO255 },
3089 +       { "rx_256to511byte", RMON_R_P256TO511 },
3090 +       { "rx_512to1023byte", RMON_R_P512TO1023 },
3091 +       { "rx_1024to2047byte", RMON_R_P1024TO2047 },
3092 +       { "rx_GTE2048byte", RMON_R_P_GTE2048 },
3093 +       { "rx_octets", RMON_R_OCTETS },
3094 +
3095 +       /* IEEE RX */
3096 +       { "IEEE_rx_drop", IEEE_R_DROP },
3097 +       { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
3098 +       { "IEEE_rx_crc", IEEE_R_CRC },
3099 +       { "IEEE_rx_align", IEEE_R_ALIGN },
3100 +       { "IEEE_rx_macerr", IEEE_R_MACERR },
3101 +       { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
3102 +       { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
3103 +};
3104 +
3105 +static void pfe_eth_fill_stats(struct net_device *ndev, struct ethtool_stats
3106 +                               *stats, u64 *data)
3107 +{
3108 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3109 +       int i;
3110 +
3111 +       for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
3112 +               data[i] = readl(priv->EMAC_baseaddr + fec_stats[i].offset);
3113 +}
3114 +
3115 +static void pfe_eth_gstrings(struct net_device *netdev,
3116 +                            u32 stringset, u8 *data)
3117 +{
3118 +       int i;
3119 +
3120 +       switch (stringset) {
3121 +       case ETH_SS_STATS:
3122 +               for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
3123 +                       memcpy(data + i * ETH_GSTRING_LEN,
3124 +                              fec_stats[i].name, ETH_GSTRING_LEN);
3125 +               break;
3126 +       }
3127 +}
3128 +
3129 +static int pfe_eth_stats_count(struct net_device *ndev, int sset)
3130 +{
3131 +       switch (sset) {
3132 +       case ETH_SS_STATS:
3133 +               return ARRAY_SIZE(fec_stats);
3134 +       default:
3135 +               return -EOPNOTSUPP;
3136 +       }
3137 +}
3138 +
3139 +/*
3140 + * pfe_eth_gemac_reglen - Return the length of the register structure.
3141 + *
3142 + */
3143 +static int pfe_eth_gemac_reglen(struct net_device *ndev)
3144 +{
3145 +       pr_info("%s()\n", __func__);
3146 +       /* .get_regs_len must return the dump size in bytes: one u32 is
3147 +        * written per entry of gemac_regs[] by pfe_eth_gemac_get_regs().
3148 +        */
3149 +       return sizeof(gemac_regs);
3150 +}
3148 +
3149 +/*
3150 + * pfe_eth_gemac_get_regs - Return the gemac register structure.
3151 + *
3152 + */
3153 +static void  pfe_eth_gemac_get_regs(struct net_device *ndev, struct ethtool_regs
3154 +                                       *regs, void *regbuf)
3155 +{
3156 +       int i;
3157 +
3158 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3159 +       u32 *buf = (u32 *)regbuf;
3160 +
3161 +       pr_info("%s()\n", __func__);
3162 +       for (i = 0; i < sizeof(gemac_regs) / sizeof(u32); i++)
3163 +               buf[i] = readl(priv->EMAC_baseaddr + gemac_regs[i]);
3164 +}
3165 +
3166 +/*
3167 + * pfe_eth_set_wol - Set the magic packet option, in WoL register.
3168 + *
3169 + */
3170 +static int pfe_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
3171 +{
3172 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3173 +
3174 +       if (wol->wolopts & ~WAKE_MAGIC)
3175 +               return -EOPNOTSUPP;
3176 +
3177 +       /* for MTIP we store wol->wolopts */
3178 +       priv->wol = wol->wolopts;
3179 +
3180 +       device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
3181 +
3182 +       return 0;
3183 +}
3184 +
3185 +/*
3186 + *
3187 + * pfe_eth_get_wol - Get the WoL options.
3188 + *
3189 + */
3190 +static void pfe_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo
3191 +                               *wol)
3192 +{
3193 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3194 +
3195 +       wol->supported = WAKE_MAGIC;
3196 +       wol->wolopts = 0;
3197 +
3198 +       if (priv->wol & WAKE_MAGIC)
3199 +               wol->wolopts = WAKE_MAGIC;
3200 +
3201 +       memset(&wol->sopass, 0, sizeof(wol->sopass));
3202 +}
3203 +
3204 +/*
3205 + * pfe_eth_get_drvinfo -  Fills in the drvinfo structure with some basic info
3206 + *
3207 + */
3208 +static void pfe_eth_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo
3209 +                               *drvinfo)
3210 +{
3211 +       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
3212 +       strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
3213 +       strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
3214 +       strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
3215 +}
3216 +
3217 +/*
3218 + * pfe_eth_set_settings - Used to send commands to PHY.
3219 + *
3220 + */
3221 +static int pfe_eth_set_settings(struct net_device *ndev,
3222 +                               const struct ethtool_link_ksettings *cmd)
3223 +{
3224 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3225 +       struct phy_device *phydev = priv->phydev;
3226 +
3227 +       if (!phydev)
3228 +               return -ENODEV;
3229 +
3230 +       return phy_ethtool_ksettings_set(phydev, cmd);
3231 +}
3232 +
3233 +/*
3234 + * pfe_eth_get_settings - Return the current settings in the
3235 + * ethtool_link_ksettings structure.
3236 + *
3237 + */
3238 +static int pfe_eth_get_settings(struct net_device *ndev,
3239 +                               struct ethtool_link_ksettings *cmd)
3240 +{
3241 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3242 +       struct phy_device *phydev = priv->phydev;
3243 +
3244 +       if (!phydev)
3245 +               return -ENODEV;
3246 +
3247 +       phy_ethtool_ksettings_get(phydev, cmd);
3248 +
3249 +       return 0;
3250 +}
3251 +
3252 +/*
3253 + * pfe_eth_get_msglevel - Gets the debug message mask.
3254 + *
3255 + */
3256 +static uint32_t pfe_eth_get_msglevel(struct net_device *ndev)
3257 +{
3258 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3259 +
3260 +       return priv->msg_enable;
3261 +}
3262 +
3263 +/*
3264 + * pfe_eth_set_msglevel - Sets the debug message mask.
3265 + *
3266 + */
3267 +static void pfe_eth_set_msglevel(struct net_device *ndev, uint32_t data)
3268 +{
3269 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3270 +
3271 +       priv->msg_enable = data;
3272 +}
3273 +
3274 +#define HIF_RX_COAL_MAX_CLKS           (~(1U << 31))
3275 +#define HIF_RX_COAL_CLKS_PER_USEC      (pfe->ctrl.sys_clk / 1000)
3276 +#define HIF_RX_COAL_MAX_USECS          (HIF_RX_COAL_MAX_CLKS   / \
3277 +                                               HIF_RX_COAL_CLKS_PER_USEC)
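+
+/*
+ * Worked example, assuming pfe->ctrl.sys_clk holds the clock in kHz
+ * (e.g. 250000 for 250 MHz): HIF_RX_COAL_CLKS_PER_USEC is then 250,
+ * so "rx-usecs 100" programs 100 * 250 = 25000 clocks (plus the enable
+ * bit) into HIF_INT_COAL, and HIF_RX_COAL_MAX_USECS is
+ * 0x7fffffff / 250, roughly 8.6 seconds.
+ */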
3278 +
3279 +/*
3280 + * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer.
3281 + *
3282 + */
3283 +static int pfe_eth_set_coalesce(struct net_device *ndev,
3284 +                               struct ethtool_coalesce *ec)
3285 +{
3286 +       if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS)
3287 +               return -EINVAL;
3288 +
3289 +       if (!ec->rx_coalesce_usecs) {
3290 +               writel(0, HIF_INT_COAL);
3291 +               return 0;
3292 +       }
3293 +
3294 +       writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) |
3295 +                       HIF_INT_COAL_ENABLE, HIF_INT_COAL);
3296 +
3297 +       return 0;
3298 +}
3299 +
3300 +/*
3301 + * pfe_eth_get_coalesce - Gets rx interrupt coalescing timer value.
3302 + *
3303 + */
3304 +static int pfe_eth_get_coalesce(struct net_device *ndev,
3305 +                               struct ethtool_coalesce *ec)
3306 +{
3307 +       int reg_val = readl(HIF_INT_COAL);
3308 +
3309 +       if (reg_val & HIF_INT_COAL_ENABLE)
3310 +               ec->rx_coalesce_usecs = (reg_val & HIF_RX_COAL_MAX_CLKS) /
3311 +                                               HIF_RX_COAL_CLKS_PER_USEC;
3312 +       else
3313 +               ec->rx_coalesce_usecs = 0;
3314 +
3315 +       return 0;
3316 +}
3317 +
3318 +/*
3319 + * pfe_eth_set_pauseparam - Sets pause parameters
3320 + *
3321 + */
3322 +static int pfe_eth_set_pauseparam(struct net_device *ndev,
3323 +                                 struct ethtool_pauseparam *epause)
3324 +{
3325 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3326 +
3327 +       if (epause->tx_pause != epause->rx_pause) {
3328 +               netdev_info(ndev,
3329 +                           "hardware can only enable/disable tx and rx pause together\n");
3330 +               return -EINVAL;
3331 +       }
3332 +
3333 +       priv->pause_flag = 0;
3334 +       priv->pause_flag |= epause->rx_pause ? PFE_PAUSE_FLAG_ENABLE : 0;
3335 +       priv->pause_flag |= epause->autoneg ? PFE_PAUSE_FLAG_AUTONEG : 0;
3336 +
3337 +       if (epause->rx_pause || epause->autoneg) {
3338 +               gemac_enable_pause_rx(priv->EMAC_baseaddr);
3339 +               writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) |
3340 +                                       EGPI_PAUSE_ENABLE),
3341 +                               priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
3342 +               if (priv->phydev) {
3343 +                       priv->phydev->supported |= ADVERTISED_Pause |
3344 +                                                       ADVERTISED_Asym_Pause;
3345 +                       priv->phydev->advertising |= ADVERTISED_Pause |
3346 +                                                       ADVERTISED_Asym_Pause;
3347 +               }
3348 +       } else {
3349 +               gemac_disable_pause_rx(priv->EMAC_baseaddr);
3350 +               writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) &
3351 +                                       ~EGPI_PAUSE_ENABLE),
3352 +                               priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
3353 +               if (priv->phydev) {
3354 +                       priv->phydev->supported &= ~(ADVERTISED_Pause |
3355 +                                                       ADVERTISED_Asym_Pause);
3356 +                       priv->phydev->advertising &= ~(ADVERTISED_Pause |
3357 +                                                       ADVERTISED_Asym_Pause);
3358 +               }
3359 +       }
3360 +
3361 +       return 0;
3362 +}
3363 +
3364 +/*
3365 + * pfe_eth_get_pauseparam - Gets pause parameters
3366 + *
3367 + */
3368 +static void pfe_eth_get_pauseparam(struct net_device *ndev,
3369 +                                  struct ethtool_pauseparam *epause)
3370 +{
3371 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3372 +
3373 +       epause->autoneg = (priv->pause_flag & PFE_PAUSE_FLAG_AUTONEG) != 0;
3374 +       epause->tx_pause = (priv->pause_flag & PFE_PAUSE_FLAG_ENABLE) != 0;
3375 +       epause->rx_pause = epause->tx_pause;
3376 +}
3377 +
3378 +/*
3379 + * pfe_eth_get_hash
3380 + */
3381 +#define PFE_HASH_BITS  6               /* #bits in hash */
3382 +#define CRC32_POLY     0xEDB88320
3383 +
3384 +static int pfe_eth_get_hash(u8 *addr)
3385 +{
3386 +       unsigned int i, bit, data, crc, hash;
3387 +
3388 +       /* calculate crc32 value of mac address */
3389 +       crc = 0xffffffff;
3390 +
3391 +       for (i = 0; i < 6; i++) {
3392 +               data = addr[i];
3393 +               for (bit = 0; bit < 8; bit++, data >>= 1) {
3394 +                       crc = (crc >> 1) ^
3395 +                               (((crc ^ data) & 1) ? CRC32_POLY : 0);
3396 +               }
3397 +       }
3398 +
3399 +       /*
3400 +        * only upper 6 bits (PFE_HASH_BITS) are used
3401 +        * which point to specific bit in the hash registers
3402 +        */
3403 +       hash = (crc >> (32 - PFE_HASH_BITS)) & 0x3f;
3404 +
3405 +       return hash;
3406 +}
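+
+/*
+ * Sketch (not compiled) of how such a 6-bit hash is typically consumed:
+ * one bit is set in a pair of 32-bit group-address hash registers. The
+ * EXAMPLE_* offsets are placeholders, not real GEMAC registers.
+ */
+#if 0
+static void example_hash_to_filter(void __iomem *base, u8 *addr)
+{
+       int hash = pfe_eth_get_hash(addr);
+
+       if (hash >= 32)
+               writel(readl(base + EXAMPLE_HASH_HI) | BIT(hash - 32),
+                      base + EXAMPLE_HASH_HI);
+       else
+               writel(readl(base + EXAMPLE_HASH_LO) | BIT(hash),
+                      base + EXAMPLE_HASH_LO);
+}
+#endif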
3407 +
3408 +const struct ethtool_ops pfe_ethtool_ops = {
3409 +       .get_drvinfo = pfe_eth_get_drvinfo,
3410 +       .get_regs_len = pfe_eth_gemac_reglen,
3411 +       .get_regs = pfe_eth_gemac_get_regs,
3412 +       .get_link = ethtool_op_get_link,
3413 +       .get_wol  = pfe_eth_get_wol,
3414 +       .set_wol  = pfe_eth_set_wol,
3415 +       .set_pauseparam = pfe_eth_set_pauseparam,
3416 +       .get_pauseparam = pfe_eth_get_pauseparam,
3417 +       .get_strings = pfe_eth_gstrings,
3418 +       .get_sset_count = pfe_eth_stats_count,
3419 +       .get_ethtool_stats = pfe_eth_fill_stats,
3420 +       .get_msglevel = pfe_eth_get_msglevel,
3421 +       .set_msglevel = pfe_eth_set_msglevel,
3422 +       .set_coalesce = pfe_eth_set_coalesce,
3423 +       .get_coalesce = pfe_eth_get_coalesce,
3424 +       .get_link_ksettings = pfe_eth_get_settings,
3425 +       .set_link_ksettings = pfe_eth_set_settings,
3426 +};
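+
+/*
+ * Example of how these ops surface to userspace (interface name is an
+ * example):
+ *
+ *   ethtool -d eth0               # get_regs: dump of gemac_regs[]
+ *   ethtool -S eth0               # get_ethtool_stats: fec_stats[]
+ *   ethtool -C eth0 rx-usecs 100  # set_coalesce: HIF_INT_COAL timer
+ */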
3427 +
3428 +/* pfe_eth_mdio_reset
3429 + */
3430 +int pfe_eth_mdio_reset(struct mii_bus *bus)
3431 +{
3432 +       struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
3433 +       u32 phy_speed;
3434 +
3435 +
3436 +       mutex_lock(&bus->mdio_lock);
3437 +
3438 +       /*
3439 +        * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
3440 +        * Set the MII (MDC) clock to at most 2.5 MHz.
3441 +        * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
3442 +        * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.
3443 +        */
3444 +       phy_speed = (DIV_ROUND_UP((pfe->ctrl.sys_clk * 1000), 4000000)
3445 +                    << EMAC_MII_SPEED_SHIFT);
3446 +       phy_speed |= EMAC_HOLDTIME(0x5);
3447 +       __raw_writel(phy_speed, priv->mdio_base + EMAC_MII_CTRL_REG);
3448 +
3449 +       mutex_unlock(&bus->mdio_lock);
3450 +
3451 +       return 0;
3452 +}
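+
+/*
+ * Worked example, assuming pfe->ctrl.sys_clk is in kHz (250000 for a
+ * 250 MHz reference): DIV_ROUND_UP(250000000, 4000000) = 63, so with
+ * the FEC formula above MDC = 250 MHz / (63 * 2), about 1.98 MHz,
+ * safely below the 2.5 MHz IEEE 802.3 limit.
+ */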
3453 +
3454 +/* pfe_eth_mdio_timeout
3455 + *
3456 + */
3457 +static int pfe_eth_mdio_timeout(struct pfe_mdio_priv_s *priv, int timeout)
3458 +{
3459 +       while (!(__raw_readl(priv->mdio_base + EMAC_IEVENT_REG) &
3460 +                       EMAC_IEVENT_MII)) {
3461 +               if (timeout-- <= 0)
3462 +                       return -1;
3463 +               usleep_range(10, 20);
3464 +       }
3465 +       __raw_writel(EMAC_IEVENT_MII, priv->mdio_base + EMAC_IEVENT_REG);
3466 +       return 0;
3467 +}
3468 +
3469 +static int pfe_eth_mdio_mux(u8 muxval)
3470 +{
3471 +       struct i2c_adapter *a;
3472 +       struct i2c_msg msg;
3473 +       unsigned char buf[2];
3474 +       int ret;
3475 +
3476 +       a = i2c_get_adapter(0);
3477 +       if (!a)
3478 +               return -ENODEV;
3479 +
3480 +       /* write the mux selection to register 0x54 of the device at i2c address 0x66 */
3481 +       buf[0] = 0x54; /* reg number */
3482 +       buf[1] = (muxval << 6) | 0x3; /* data */
3483 +       msg.addr = 0x66;
3484 +       msg.buf = buf;
3485 +       msg.len = 2;
3486 +       msg.flags = 0;
3487 +       ret = i2c_transfer(a, &msg, 1);
3488 +       i2c_put_adapter(a);
3489 +       if (ret != 1)
3490 +               return -ENODEV;
3491 +       return 0;
3492 +}
3493 +
3494 +static int pfe_eth_mdio_write_addr(struct mii_bus *bus, int mii_id,
3495 +                                  int dev_addr, int regnum)
3496 +{
3497 +       struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
3498 +
3499 +       __raw_writel(EMAC_MII_DATA_PA(mii_id) |
3500 +                    EMAC_MII_DATA_RA(dev_addr) |
3501 +                    EMAC_MII_DATA_TA | EMAC_MII_DATA(regnum),
3502 +                    priv->mdio_base + EMAC_MII_DATA_REG);
3503 +
3504 +       if (pfe_eth_mdio_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3505 +               dev_err(&bus->dev, "phy MDIO address write timeout\n");
3506 +               return -1;
3507 +       }
3508 +
3509 +       return 0;
3510 +}
3511 +
3512 +static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
3513 +                             u16 value)
3514 +{
3515 +       struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
3516 +
3517 +       /* To access external PHYs on the QDS board, the mux needs to be configured */
3518 +       if ((mii_id) && (pfe->mdio_muxval[mii_id]))
3519 +               pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
3520 +
3521 +       if (regnum & MII_ADDR_C45) {
3522 +               pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
3523 +                                       regnum & 0xffff);
3524 +               __raw_writel(EMAC_MII_DATA_OP_CL45_WR |
3525 +                            EMAC_MII_DATA_PA(mii_id) |
3526 +                            EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
3527 +                            EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
3528 +                            priv->mdio_base + EMAC_MII_DATA_REG);
3529 +       } else {
3530 +               /* start a write op */
3531 +               __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR |
3532 +                            EMAC_MII_DATA_PA(mii_id) |
3533 +                            EMAC_MII_DATA_RA(regnum) |
3534 +                            EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
3535 +                            priv->mdio_base + EMAC_MII_DATA_REG);
3536 +       }
3537 +
3538 +       if (pfe_eth_mdio_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3539 +               dev_err(&bus->dev, "%s: phy MDIO write timeout\n", __func__);
3540 +               return -1;
3541 +       }
3542 +       return 0;
3543 +}
3544 +
3545 +static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
3546 +{
3547 +       struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
3548 +       u16 value = 0;
3549 +
3550 +       /* To access external PHYs on the QDS board, the mux needs to be configured */
3551 +       if ((mii_id) && (pfe->mdio_muxval[mii_id]))
3552 +               pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
3553 +
3554 +       if (regnum & MII_ADDR_C45) {
3555 +               pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
3556 +                                       regnum & 0xffff);
3557 +               __raw_writel(EMAC_MII_DATA_OP_CL45_RD |
3558 +                            EMAC_MII_DATA_PA(mii_id) |
3559 +                            EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
3560 +                            EMAC_MII_DATA_TA,
3561 +                            priv->mdio_base + EMAC_MII_DATA_REG);
3562 +       } else {
3563 +               /* start a read op */
3564 +               __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
3565 +                            EMAC_MII_DATA_PA(mii_id) |
3566 +                            EMAC_MII_DATA_RA(regnum) |
3567 +                            EMAC_MII_DATA_TA, priv->mdio_base +
3568 +                            EMAC_MII_DATA_REG);
3569 +       }
3570 +
3571 +       if (pfe_eth_mdio_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3572 +               dev_err(&bus->dev, "%s: phy MDIO read timeout\n", __func__);
3573 +               return -1;
3574 +       }
3575 +
3576 +       value = EMAC_MII_DATA(__raw_readl(priv->mdio_base +
3577 +                                               EMAC_MII_DATA_REG));
3578 +       return value;
3579 +}
3580 +
3581 +static int pfe_eth_mdio_init(struct pfe *pfe,
3582 +                            struct ls1012a_pfe_platform_data *pfe_info,
3583 +                            int ii)
3584 +{
3585 +       struct pfe_mdio_priv_s *priv = NULL;
3586 +       struct ls1012a_mdio_platform_data *mdio_info;
3587 +       struct mii_bus *bus;
3588 +       struct device_node *mdio_node;
3589 +       int rc = 0;
3590 +
3591 +       mdio_info = (struct ls1012a_mdio_platform_data *)
3592 +                                       pfe_info->ls1012a_mdio_pdata;
3593 +       mdio_info->id = ii;
3594 +
3595 +       bus = mdiobus_alloc_size(sizeof(struct pfe_mdio_priv_s));
3596 +       if (!bus) {
3597 +               pr_err("mdiobus_alloc() failed\n");
3598 +               rc = -ENOMEM;
3599 +               goto err_mdioalloc;
3600 +       }
3601 +
3602 +       bus->name = "ls1012a MDIO Bus";
3603 +       snprintf(bus->id, MII_BUS_ID_SIZE, "ls1012a-%x", mdio_info->id);
3604 +
3605 +       bus->read = &pfe_eth_mdio_read;
3606 +       bus->write = &pfe_eth_mdio_write;
3607 +       bus->reset = &pfe_eth_mdio_reset;
3608 +       bus->parent = pfe->dev;
3609 +       bus->phy_mask = mdio_info->phy_mask;
3610 +       bus->irq[0] = mdio_info->irq[0];
3611 +       priv = bus->priv;
3612 +       priv->mdio_base = cbus_emac_base[ii];
3613 +
3614 +       priv->mdc_div = mdio_info->mdc_div;
3615 +       if (!priv->mdc_div)
3616 +               priv->mdc_div = 64;
3617 +       dev_info(bus->parent, "%s: mdc_div: %d, phy_mask: %x\n",
3618 +                __func__, priv->mdc_div, bus->phy_mask);
3619 +
3620 +       mdio_node = of_get_child_by_name(pfe->dev->of_node, "mdio");
3621 +       if ((mdio_info->id == 0) && mdio_node) {
3622 +               rc = of_mdiobus_register(bus, mdio_node);
3623 +               of_node_put(mdio_node);
3624 +       } else {
3625 +               rc = mdiobus_register(bus);
3626 +       }
3627 +
3628 +       if (rc) {
3629 +               dev_err(bus->parent, "mdiobus_register(%s) failed\n",
3630 +                       bus->name);
3631 +               goto err_mdioregister;
3632 +       }
3633 +
3634 +       priv->mii_bus = bus;
3635 +       pfe->mdio.mdio_priv[ii] = priv;
3636 +
3637 +       pfe_eth_mdio_reset(bus);
3638 +
3639 +       return 0;
3640 +
3641 +err_mdioregister:
3642 +       mdiobus_free(bus);
3643 +err_mdioalloc:
3644 +       return rc;
3645 +}
3646 +
3647 +/* pfe_eth_mdio_exit
3648 + */
3649 +static void pfe_eth_mdio_exit(struct pfe *pfe,
3650 +                             int ii)
3651 +{
3652 +       struct pfe_mdio_priv_s *mdio_priv = pfe->mdio.mdio_priv[ii];
3653 +       struct mii_bus *bus = mdio_priv->mii_bus;
3654 +
3655 +       if (!bus)
3656 +               return;
3657 +       mdiobus_unregister(bus);
3658 +       mdiobus_free(bus);
3659 +}
3660 +
3661 +/* pfe_get_phydev_speed
3662 + */
3663 +static int pfe_get_phydev_speed(struct phy_device *phydev)
3664 +{
3665 +       switch (phydev->speed) {
3666 +       case 10:
3667 +               return SPEED_10M;
3668 +       case 100:
3669 +               return SPEED_100M;
3670 +       case 1000:
3671 +       default:
3672 +               return SPEED_1000M;
3673 +       }
3674 +}
3675 +
3676 +/* pfe_set_rgmii_speed
3677 + */
3678 +#define RGMIIPCR       0x434
3679 +/* RGMIIPCR bit definitions*/
3680 +#define SCFG_RGMIIPCR_EN_AUTO           (0x00000008)
3681 +#define SCFG_RGMIIPCR_SETSP_1000M       (0x00000004)
3682 +#define SCFG_RGMIIPCR_SETSP_100M        (0x00000000)
3683 +#define SCFG_RGMIIPCR_SETSP_10M         (0x00000002)
3684 +#define SCFG_RGMIIPCR_SETFD             (0x00000001)
3685 +
3686 +#define MDIOSELCR      0x484
3687 +#define MDIOSEL_SERDES 0x0
3688 +#define MDIOSEL_EXTPHY  0x80000000
3689 +
3690 +static void pfe_set_rgmii_speed(struct phy_device *phydev)
3691 +{
3692 +       u32 rgmii_pcr;
3693 +
3694 +       regmap_read(pfe->scfg, RGMIIPCR, &rgmii_pcr);
3695 +       rgmii_pcr  &= ~(SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETSP_10M);
3696 +
3697 +       switch (phydev->speed) {
3698 +       case 10:
3699 +               rgmii_pcr |= SCFG_RGMIIPCR_SETSP_10M;
3700 +               break;
3701 +       case 1000:
3702 +               rgmii_pcr |= SCFG_RGMIIPCR_SETSP_1000M;
3703 +               break;
3704 +       case 100:
3705 +       default:
3706 +               /* Default is 100M */
3707 +               break;
3708 +       }
3709 +       regmap_write(pfe->scfg, RGMIIPCR, rgmii_pcr);
3710 +}
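+
+/*
+ * Example of the RGMIIPCR encoding: auto speed detection combined with
+ * a forced 1 Gbps full-duplex setting would be SCFG_RGMIIPCR_EN_AUTO |
+ * SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETFD = 0x0000000d.
+ */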
3711 +
3712 +/* pfe_get_phydev_duplex
3713 + */
3714 +static int pfe_get_phydev_duplex(struct phy_device *phydev)
3715 +{
3716 +       /*return (phydev->duplex == DUPLEX_HALF) ? DUP_HALF:DUP_FULL ; */
3717 +       return DUPLEX_FULL;
3718 +}
3719 +
3720 +/* pfe_eth_adjust_link
3721 + */
3722 +static void pfe_eth_adjust_link(struct net_device *ndev)
3723 +{
3724 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3725 +       unsigned long flags;
3726 +       struct phy_device *phydev = priv->phydev;
3727 +       int new_state = 0;
3728 +
3729 +       netif_info(priv, drv, ndev, "%s\n", __func__);
3730 +
3731 +       spin_lock_irqsave(&priv->lock, flags);
3732 +
3733 +       if (phydev->link) {
3734 +               /*
3735 +                * Now we make sure that we can be in full duplex mode.
3736 +                * If not, we operate in half-duplex mode.
3737 +                */
3738 +               if (phydev->duplex != priv->oldduplex) {
3739 +                       new_state = 1;
3740 +                       gemac_set_duplex(priv->EMAC_baseaddr,
3741 +                                        pfe_get_phydev_duplex(phydev));
3742 +                       priv->oldduplex = phydev->duplex;
3743 +               }
3744 +
3745 +               if (phydev->speed != priv->oldspeed) {
3746 +                       new_state = 1;
3747 +                       gemac_set_speed(priv->EMAC_baseaddr,
3748 +                                       pfe_get_phydev_speed(phydev));
3749 +                       if (priv->einfo->mii_config ==
3750 +                                       PHY_INTERFACE_MODE_RGMII_TXID)
3751 +                               pfe_set_rgmii_speed(phydev);
3752 +                       priv->oldspeed = phydev->speed;
3753 +               }
3754 +
3755 +               if (!priv->oldlink) {
3756 +                       new_state = 1;
3757 +                       priv->oldlink = 1;
3758 +               }
3759 +
3760 +       } else if (priv->oldlink) {
3761 +               new_state = 1;
3762 +               priv->oldlink = 0;
3763 +               priv->oldspeed = 0;
3764 +               priv->oldduplex = -1;
3765 +       }
3766 +
3767 +       if (new_state && netif_msg_link(priv))
3768 +               phy_print_status(phydev);
3769 +
3770 +       spin_unlock_irqrestore(&priv->lock, flags);
3771 +
3772 +       /* Now, report the link state to the cdev.
3773 +        * XXX: Is locking required here (uniprocessor arch), or should
3774 +        * this move under the spinlock above?
3775 +        */
3776 +       if (us && priv->einfo->gem_id < PFE_CDEV_ETH_COUNT) {
3777 +               pr_debug("Changing link state from (%u) to (%u) for ID=(%u)\n",
3778 +                        link_states[priv->einfo->gem_id].state,
3779 +                        phydev->link,
3780 +                        priv->einfo->gem_id);
3781 +               link_states[priv->einfo->gem_id].phy_id = priv->einfo->gem_id;
3782 +               link_states[priv->einfo->gem_id].state = phydev->link;
3783 +       }
3784 +}
3785 +
3786 +/* pfe_phy_exit
3787 + */
3788 +static void pfe_phy_exit(struct net_device *ndev)
3789 +{
3790 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3791 +
3792 +       netif_info(priv, drv, ndev, "%s\n", __func__);
3793 +
3794 +       phy_disconnect(priv->phydev);
3795 +       priv->phydev = NULL;
3796 +}
3797 +
3798 +/* pfe_eth_stop
3799 + */
3800 +static void pfe_eth_stop(struct net_device *ndev, int wake)
3801 +{
3802 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3803 +
3804 +       netif_info(priv, drv, ndev, "%s\n", __func__);
3805 +
3806 +       if (wake) {
3807 +               gemac_tx_disable(priv->EMAC_baseaddr);
3808 +       } else {
3809 +               gemac_disable(priv->EMAC_baseaddr);
3810 +               gpi_disable(priv->GPI_baseaddr);
3811 +
3812 +               if (priv->phydev)
3813 +                       phy_stop(priv->phydev);
3814 +       }
3815 +}
3816 +
3817 +/* pfe_eth_start
3818 + */
3819 +static int pfe_eth_start(struct pfe_eth_priv_s *priv)
3820 +{
3821 +       netif_info(priv, drv, priv->ndev, "%s\n", __func__);
3822 +
3823 +       if (priv->phydev)
3824 +               phy_start(priv->phydev);
3825 +
3826 +       gpi_enable(priv->GPI_baseaddr);
3827 +       gemac_enable(priv->EMAC_baseaddr);
3828 +
3829 +       return 0;
3830 +}
3831 +
3832 +/*
3833 + * Configure on chip serdes through mdio
3834 + */
3835 +static void ls1012a_configure_serdes(struct net_device *ndev)
3836 +{
3837 +       struct pfe_eth_priv_s *eth_priv = netdev_priv(ndev);
3838 +       struct pfe_mdio_priv_s *mdio_priv = pfe->mdio.mdio_priv[eth_priv->id];
3839 +       int sgmii_2500 = 0;
3840 +       struct mii_bus *bus = mdio_priv->mii_bus;
3841 +       u16 value = 0;
3842 +
3843 +       if (eth_priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII)
3844 +               sgmii_2500 = 1;
3845 +
3846 +       netif_info(eth_priv, drv, ndev, "%s\n", __func__);
3847 +       /* PCS configuration done with corresponding GEMAC */
3848 +
3849 +       pfe_eth_mdio_read(bus, 0, MDIO_SGMII_CR);
3850 +       pfe_eth_mdio_read(bus, 0, MDIO_SGMII_SR);
3851 +
3852 +       pfe_eth_mdio_write(bus, 0, MDIO_SGMII_CR, SGMII_CR_RST);
3853 +
3854 +       if (sgmii_2500) {
3855 +               pfe_eth_mdio_write(bus, 0, MDIO_SGMII_IF_MODE, SGMII_SPEED_1GBPS
3856 +                                                              | SGMII_EN);
3857 +               pfe_eth_mdio_write(bus, 0, MDIO_SGMII_DEV_ABIL_SGMII,
3858 +                                  SGMII_DEV_ABIL_ACK | SGMII_DEV_ABIL_SGMII);
3859 +               pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_L, 0xa120);
3860 +               pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_H, 0x7);
3861 +               /* Autonegotiation needs to be disabled for 2.5G SGMII mode */
3862 +               value = SGMII_CR_FD | SGMII_CR_SPEED_SEL1_1G;
3863 +               pfe_eth_mdio_write(bus, 0, MDIO_SGMII_CR, value);
3864 +       } else {
3865 +               pfe_eth_mdio_write(bus, 0, MDIO_SGMII_IF_MODE,
3866 +                                  SGMII_SPEED_1GBPS
3867 +                                  | SGMII_USE_SGMII_AN
3868 +                                  | SGMII_EN);
3869 +               pfe_eth_mdio_write(bus, 0, MDIO_SGMII_DEV_ABIL_SGMII,
3870 +                                  SGMII_DEV_ABIL_EEE_CLK_STP_EN
3871 +                                  | 0xa0
3872 +                                  | SGMII_DEV_ABIL_SGMII);
3873 +               pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_L, 0x400);
3874 +               pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_H, 0x0);
3875 +               value = SGMII_CR_AN_EN | SGMII_CR_FD | SGMII_CR_SPEED_SEL1_1G;
3876 +               pfe_eth_mdio_write(bus, 0, MDIO_SGMII_CR, value);
3877 +       }
3878 +}
3879 +
3880 +/*
3881 + * pfe_phy_init
3882 + *
3883 + */
3884 +static int pfe_phy_init(struct net_device *ndev)
3885 +{
3886 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3887 +       struct phy_device *phydev;
3888 +       char phy_id[MII_BUS_ID_SIZE + 3];
3889 +       char bus_id[MII_BUS_ID_SIZE];
3890 +       phy_interface_t interface;
3891 +
3892 +       priv->oldlink = 0;
3893 +       priv->oldspeed = 0;
3894 +       priv->oldduplex = -1;
3895 +
3896 +       snprintf(bus_id, MII_BUS_ID_SIZE, "ls1012a-%d", 0);
3897 +       snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
3898 +                priv->einfo->phy_id);
3899 +       netif_info(priv, drv, ndev, "%s: %s\n", __func__, phy_id);
3900 +       interface = priv->einfo->mii_config;
3901 +       if ((interface == PHY_INTERFACE_MODE_SGMII) ||
3902 +           (interface == PHY_INTERFACE_MODE_2500SGMII)) {
3903 +               /*Configure SGMII PCS */
3904 +               if (pfe->scfg) {
3905 +                       /* Config MDIO from serdes */
3906 +                       regmap_write(pfe->scfg, MDIOSELCR, MDIOSEL_SERDES);
3907 +               }
3908 +               ls1012a_configure_serdes(ndev);
3909 +       }
3910 +
3911 +       if (pfe->scfg) {
3912 +               /*Config MDIO from PAD */
3913 +               regmap_write(pfe->scfg, MDIOSELCR, MDIOSEL_EXTPHY);
3914 +       }
3915 +
3916 +
3919 +       pr_info("%s interface %x\n", __func__, interface);
3920 +
3921 +       if (priv->phy_node) {
3922 +               phydev = of_phy_connect(ndev, priv->phy_node,
3923 +                                       pfe_eth_adjust_link, 0,
3924 +                                       priv->einfo->mii_config);
3925 +               if (!(phydev)) {
3926 +                       netdev_err(ndev, "Unable to connect to phy\n");
3927 +                       return -ENODEV;
3928 +               }
3929 +
3930 +       } else {
3931 +               phydev = phy_connect(ndev, phy_id,
3932 +                                    &pfe_eth_adjust_link, interface);
3933 +               if (IS_ERR(phydev)) {
3934 +                       netdev_err(ndev, "Unable to connect to phy\n");
3935 +                       return PTR_ERR(phydev);
3936 +               }
3937 +       }
3938 +
3939 +       priv->phydev = phydev;
3940 +       phydev->irq = PHY_POLL;
3941 +
3942 +       return 0;
3943 +}
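+
+/*
+ * Example: with PHY_ID_FMT ("%s:%02x"), bus_id "ls1012a-0" and
+ * phy_id 4, the phy_connect() fallback above looks up the device
+ * "ls1012a-0:04" on the MDIO bus registered in pfe_eth_mdio_init().
+ */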
3944 +
3945 +/* pfe_gemac_init
3946 + */
3947 +static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
3948 +{
3949 +       struct gemac_cfg cfg;
3950 +
3951 +       netif_info(priv, ifup, priv->ndev, "%s\n", __func__);
3952 +
3953 +       cfg.speed = SPEED_1000M;
3954 +       cfg.duplex = DUPLEX_FULL;
3955 +
3956 +       gemac_set_config(priv->EMAC_baseaddr, &cfg);
3957 +       gemac_allow_broadcast(priv->EMAC_baseaddr);
3958 +       gemac_enable_1536_rx(priv->EMAC_baseaddr);
3959 +       gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
3960 +       gemac_enable_pause_rx(priv->EMAC_baseaddr);
3961 +       gemac_set_bus_width(priv->EMAC_baseaddr, 64);
3962 +
3963 +       /*GEM will perform checksum verifications*/
3964 +       if (priv->ndev->features & NETIF_F_RXCSUM)
3965 +               gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
3966 +       else
3967 +               gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
3968 +
3969 +       return 0;
3970 +}
3971 +
3972 +/* pfe_eth_event_handler
3973 + *
3974 + * HIF client callback: RX indications on qno 0, 1 and 2 schedule the
3975 + * high, low and lro NAPI contexts respectively; TXDONE triggers a TX
3976 + * queue flush.
3977 + */
3974 +static int pfe_eth_event_handler(void *data, int event, int qno)
3975 +{
3976 +       struct pfe_eth_priv_s *priv = data;
3977 +
3978 +       switch (event) {
3979 +       case EVENT_RX_PKT_IND:
3980 +
3981 +               if (qno == 0) {
3982 +                       if (napi_schedule_prep(&priv->high_napi)) {
3983 +                               netif_info(priv, intr, priv->ndev,
3984 +                                          "%s: schedule high prio poll\n"
3985 +                                          , __func__);
3986 +
3987 +#ifdef PFE_ETH_NAPI_STATS
3988 +                               priv->napi_counters[NAPI_SCHED_COUNT]++;
3989 +#endif
3990 +
3991 +                               __napi_schedule(&priv->high_napi);
3992 +                       }
3993 +               } else if (qno == 1) {
3994 +                       if (napi_schedule_prep(&priv->low_napi)) {
3995 +                               netif_info(priv, intr, priv->ndev,
3996 +                                          "%s: schedule low prio poll\n"
3997 +                                          , __func__);
3998 +
3999 +#ifdef PFE_ETH_NAPI_STATS
4000 +                               priv->napi_counters[NAPI_SCHED_COUNT]++;
4001 +#endif
4002 +                               __napi_schedule(&priv->low_napi);
4003 +                       }
4004 +               } else if (qno == 2) {
4005 +                       if (napi_schedule_prep(&priv->lro_napi)) {
4006 +                               netif_info(priv, intr, priv->ndev,
4007 +                                          "%s: schedule lro prio poll\n"
4008 +                                          , __func__);
4009 +
4010 +#ifdef PFE_ETH_NAPI_STATS
4011 +                               priv->napi_counters[NAPI_SCHED_COUNT]++;
4012 +#endif
4013 +                               __napi_schedule(&priv->lro_napi);
4014 +                       }
4015 +               }
4016 +
4017 +               break;
4018 +
4019 +       case EVENT_TXDONE_IND:
4020 +               pfe_eth_flush_tx(priv);
4021 +               hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
4022 +               break;
4023 +       case EVENT_HIGH_RX_WM:
4024 +       default:
4025 +               break;
4026 +       }
4027 +
4028 +       return 0;
4029 +}
4030 +
4031 +static int pfe_eth_change_mtu(struct net_device *ndev, int new_mtu)
4032 +{
4033 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4034 +
4035 +       ndev->mtu = new_mtu;
4036 +       new_mtu += ETH_HLEN + ETH_FCS_LEN;
4037 +       gemac_set_rx_max_fl(priv->EMAC_baseaddr, new_mtu);
4038 +
4039 +       return 0;
4040 +}
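+
+/*
+ * Example: for the default MTU of 1500, the GEMAC receive max frame
+ * length is programmed to 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN)
+ * = 1518 bytes.
+ */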
4041 +
4042 +/* pfe_eth_open
4043 + */
4044 +static int pfe_eth_open(struct net_device *ndev)
4045 +{
4046 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4047 +       struct hif_client_s *client;
4048 +       int rc;
4049 +
4050 +       netif_info(priv, ifup, ndev, "%s\n", __func__);
4051 +
4052 +       /* Register client driver with HIF */
4053 +       client = &priv->client;
4054 +       memset(client, 0, sizeof(*client));
4055 +       client->id = PFE_CL_GEM0 + priv->id;
4056 +       client->tx_qn = emac_txq_cnt;
4057 +       client->rx_qn = EMAC_RXQ_CNT;
4058 +       client->priv = priv;
4059 +       client->pfe = priv->pfe;
4060 +       client->event_handler = pfe_eth_event_handler;
4061 +
4062 +       client->tx_qsize = EMAC_TXQ_DEPTH;
4063 +       client->rx_qsize = EMAC_RXQ_DEPTH;
4064 +
4065 +       rc = hif_lib_client_register(client);
4066 +       if (rc) {
4067 +               netdev_err(ndev, "%s: hif_lib_client_register(%d) failed\n",
4068 +                          __func__, client->id);
4069 +               goto err0;
4070 +       }
4071 +
4072 +       netif_info(priv, drv, ndev, "%s: registered client: %p\n", __func__,
4073 +                  client);
4074 +
4075 +       pfe_gemac_init(priv);
4076 +
4077 +       if (!is_valid_ether_addr(ndev->dev_addr)) {
4078 +               netdev_err(ndev, "%s: invalid MAC address\n", __func__);
4079 +               rc = -EADDRNOTAVAIL;
4080 +               goto err1;
4081 +       }
4082 +
4083 +       gemac_set_laddrN(priv->EMAC_baseaddr,
4084 +                        (struct pfe_mac_addr *)ndev->dev_addr, 1);
4085 +
4086 +       napi_enable(&priv->high_napi);
4087 +       napi_enable(&priv->low_napi);
4088 +       napi_enable(&priv->lro_napi);
4089 +
4090 +       rc = pfe_eth_start(priv);
4091 +
4092 +       netif_tx_wake_all_queues(ndev);
4093 +
4094 +       return rc;
4095 +
4096 +err1:
4097 +       hif_lib_client_unregister(&priv->client);
4098 +
4099 +err0:
4100 +       return rc;
4101 +}
4102 +
4103 +/*
4104 + *  pfe_eth_shutdown
4105 + */
4106 +int pfe_eth_shutdown(struct net_device *ndev, int wake)
4107 +{
4108 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4109 +       int i, qstatus;
4110 +       unsigned long next_poll = jiffies + 1, end = jiffies +
4111 +                               (TX_POLL_TIMEOUT_MS * HZ) / 1000;
4112 +       int tx_pkts, prv_tx_pkts;
4113 +
4114 +       netif_info(priv, ifdown, ndev, "%s\n", __func__);
4115 +
4116 +       for (i = 0; i < emac_txq_cnt; i++)
4117 +               hrtimer_cancel(&priv->fast_tx_timeout[i].timer);
4118 +
4119 +       netif_tx_stop_all_queues(ndev);
4120 +
4121 +       do {
4122 +               tx_pkts = 0;
4123 +               pfe_eth_flush_tx(priv);
4124 +
4125 +               for (i = 0; i < emac_txq_cnt; i++)
4126 +                       tx_pkts += hif_lib_tx_pending(&priv->client, i);
4127 +
4128 +               if (tx_pkts) {
4129 +                       /* Don't wait forever; break if we cross the max timeout */
4130 +                       if (time_after(jiffies, end)) {
4131 +                               pr_err(
4132 +                                       "(%s)Tx is not complete after %dmsec\n",
4133 +                                       ndev->name, TX_POLL_TIMEOUT_MS);
4134 +                               break;
4135 +                       }
4136 +
4137 +                       pr_info("%s : (%s) Waiting for tx packets to free. Pending tx pkts = %d.\n"
4138 +                               , __func__, ndev->name, tx_pkts);
4139 +                       if (need_resched())
4140 +                               schedule();
4141 +               }
4142 +
4143 +       } while (tx_pkts);
4144 +
4145 +       end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
4146 +
4147 +       prv_tx_pkts = tmu_pkts_processed(priv->id);
4148 +       /*
4149 +        * Wait till the TMU transmits all pending packets:
4150 +        * poll tmu_qstatus and the packets processed by the TMU once
4151 +        * per jiffy (10 ms at HZ=100). Consider the TMU busy if its
4152 +        * queue is pending or it has processed packets since the last
4153 +        * poll.
4154 +        */
4154 +       while (1) {
4155 +               if (time_after(jiffies, next_poll)) {
4156 +                       tx_pkts = tmu_pkts_processed(priv->id);
4157 +                       qstatus = tmu_qstatus(priv->id) & 0x7ffff;
4158 +
4159 +                       if (!qstatus && (tx_pkts == prv_tx_pkts))
4160 +                               break;
4161 +                       /* Don't wait forever, break if we cross max
4162 +                        * timeout(TX_POLL_TIMEOUT_MS)
4163 +                        */
4164 +                       if (time_after(jiffies, end)) {
4165 +                               pr_err("TMU%d is busy after %dmsec\n",
4166 +                                      priv->id, TX_POLL_TIMEOUT_MS);
4167 +                               break;
4168 +                       }
4169 +                       prv_tx_pkts = tx_pkts;
4170 +                       next_poll++;
4171 +               }
4172 +               if (need_resched())
4173 +                       schedule();
4174 +       }
4175 +       /* Wait for some more time to complete transmitting packet if any */
4176 +       next_poll = jiffies + 1;
4177 +       while (1) {
4178 +               if (time_after(jiffies, next_poll))
4179 +                       break;
4180 +               if (need_resched())
4181 +                       schedule();
4182 +       }
4183 +
4184 +       pfe_eth_stop(ndev, wake);
4185 +
4186 +       napi_disable(&priv->lro_napi);
4187 +       napi_disable(&priv->low_napi);
4188 +       napi_disable(&priv->high_napi);
4189 +
4190 +       hif_lib_client_unregister(&priv->client);
4191 +
4192 +       return 0;
4193 +}
4194 +
4195 +/* pfe_eth_close
4196 + * ndo_stop: shut the interface down with wake-on-lan disabled.
4197 + */
4198 +static int pfe_eth_close(struct net_device *ndev)
4199 +{
4200 +       pfe_eth_shutdown(ndev, 0);
4201 +
4202 +       return 0;
4203 +}
4204 +
4205 +/* pfe_eth_suspend
4206 + *
4207 + * return value: 1 if the netdevice is configured to wake up the system,
4208 + *               0 otherwise
4209 + */
4210 +int pfe_eth_suspend(struct net_device *ndev)
4211 +{
4212 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4213 +       int retval = 0;
4214 +
4215 +       if (priv->wol) {
4216 +               gemac_set_wol(priv->EMAC_baseaddr, priv->wol);
4217 +               retval = 1;
4218 +       }
4219 +       pfe_eth_shutdown(ndev, priv->wol);
4220 +
4221 +       return retval;
4222 +}
4223 +
4224 +/* pfe_eth_resume
4225 + * Disarm wake-on-lan, re-enable the MAC transmitter and reopen the device.
4226 + */
4227 +int pfe_eth_resume(struct net_device *ndev)
4228 +{
4229 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4230 +
4231 +       if (priv->wol)
4232 +               gemac_set_wol(priv->EMAC_baseaddr, 0);
4233 +       gemac_tx_enable(priv->EMAC_baseaddr);
4234 +
4235 +       return pfe_eth_open(ndev);
4236 +}
4237 +
4238 +/* pfe_eth_get_queuenum
+ * Select the TX queue for a packet from its conntrack or skb mark.
4239 + */
4240 +static int pfe_eth_get_queuenum(struct pfe_eth_priv_s *priv,
4241 +                                struct sk_buff *skb)
4242 +{
4243 +       int queuenum = 0;
4244 +       unsigned long flags;
4245 +
4246 +       /*
4247 +        * Get the fast-path queue number: use the conntrack mark (if
4248 +        * conntrack exists), then the packet mark (if any), then fall
4249 +        * back to the default priority.
4250 +        */
4251 +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
4252 +       if (skb->_nfct) {
4253 +               enum ip_conntrack_info cinfo;
4254 +               struct nf_conn *ct;
4255 +
4256 +               ct = nf_ct_get(skb, &cinfo);
4257 +
4258 +               if (ct) {
4259 +                       u32 connmark;
4260 +
4261 +                       connmark = ct->mark;
4262 +
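+                       /*
+                        * Assumption (from the check below): when bit 31 of
+                        * the conntrack mark is set, the upper 16 bits carry
+                        * the mark for the second interface, so shift it
+                        * down before masking out the queue number.
+                        */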
4263 +                       if ((connmark & 0x80000000) && priv->id != 0)
4264 +                               connmark >>= 16;
4265 +
4266 +                       queuenum = connmark & EMAC_QUEUENUM_MASK;
4267 +               }
4268 +       } else { /* continued after #endif ... */
4269 +#endif
4270 +               if (skb->mark) {
4271 +                       queuenum = skb->mark & EMAC_QUEUENUM_MASK;
4272 +               } else {
4273 +                       spin_lock_irqsave(&priv->lock, flags);
4274 +                       queuenum = priv->default_priority & EMAC_QUEUENUM_MASK;
4275 +                       spin_unlock_irqrestore(&priv->lock, flags);
4276 +               }
4277 +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
4278 +       }
4279 +#endif
4280 +       return queuenum;
4281 +}
4282 +
4283 +/* pfe_eth_might_stop_tx
4284 + * Stop the queue and arm the fast-recovery timer when TX resources run out.
4285 + */
4286 +static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum,
4287 +                                struct netdev_queue *tx_queue,
4288 +                                unsigned int n_desc,
4289 +                                unsigned int n_segs)
4290 +{
4291 +       ktime_t kt;
4292 +       int tried = 0;
4293 +
4294 +try_again:
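+       /*
+        * A packet needs three resources: free HIF descriptors, room in the
+        * client TX queue and TX credits for this EMAC queue. If any is
+        * missing, refresh the credit estimate once and retry before
+        * actually stopping the queue.
+        */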
4295 +       if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc) ||
4296 +       (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) ||
4297 +       (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) {
4298 +               if (!tried) {
4299 +                       __hif_lib_update_credit(&priv->client, queuenum);
4300 +                       tried = 1;
4301 +                       goto try_again;
4302 +               }
4303 +#ifdef PFE_ETH_TX_STATS
4304 +               if (__hif_tx_avail(&pfe->hif) < n_desc) {
4305 +                       priv->stop_queue_hif[queuenum]++;
4306 +               } else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) {
4307 +                       priv->stop_queue_hif_client[queuenum]++;
4308 +               } else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) <
4309 +                       n_segs) {
4310 +                       priv->stop_queue_credit[queuenum]++;
4311 +               }
4312 +               priv->stop_queue_total[queuenum]++;
4313 +#endif
4314 +               netif_tx_stop_queue(tx_queue);
4315 +
4316 +               kt = ktime_set(0, LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS *
4317 +                               NSEC_PER_MSEC);
4318 +               hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt,
4319 +                             HRTIMER_MODE_REL);
4320 +               return -1;
4321 +       } else {
4322 +               return 0;
4323 +       }
4324 +}
4325 +
4326 +#define SA_MAX_OP 2
4327 +/* pfe_hif_send_packet
4328 + *
4329 + * At this level, if TX fails, we drop the packet
4330 + */
4331 +static void pfe_hif_send_packet(struct sk_buff *skb,
4332 +                                struct pfe_eth_priv_s *priv, int queuenum)
4333 +{
4334 +       struct skb_shared_info *sh = skb_shinfo(skb);
4335 +       unsigned int nr_frags;
4336 +       u32 ctrl = 0;
4337 +
4338 +       netif_info(priv, tx_queued, priv->ndev, "%s\n", __func__);
4339 +
4340 +       if (skb_is_gso(skb)) {
+               /* GSO is not supported here; free the skb so the drop
+                * doesn't leak it
+                */
+               kfree_skb(skb);
4341 +               priv->stats.tx_dropped++;
4342 +               return;
4343 +       }
4344 +
4345 +       if (skb->ip_summed == CHECKSUM_PARTIAL)
4346 +               ctrl = HIF_CTRL_TX_CHECKSUM;
4347 +
4348 +       nr_frags = sh->nr_frags;
4349 +
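+       /*
+        * A scattered skb goes out as a chain of HIF buffers: the linear
+        * head is flagged FIRST, the frags follow, and the last frag
+        * carries LAST | DATA_VALID so the completion path frees the skb
+        * exactly once.
+        */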
4350 +       if (nr_frags) {
4351 +               skb_frag_t *f;
4352 +               int i;
4353 +
4354 +               __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
4355 +                                  skb_headlen(skb), ctrl, HIF_FIRST_BUFFER,
4356 +                                  skb);
4357 +
4358 +               for (i = 0; i < nr_frags - 1; i++) {
4359 +                       f = &sh->frags[i];
4360 +                       __hif_lib_xmit_pkt(&priv->client, queuenum,
4361 +                                          skb_frag_address(f),
4362 +                                          skb_frag_size(f),
4363 +                                          0x0, 0x0, skb);
4364 +               }
4365 +
4366 +               f = &sh->frags[i];
4367 +
4368 +               __hif_lib_xmit_pkt(&priv->client, queuenum,
4369 +                                  skb_frag_address(f), skb_frag_size(f),
4370 +                                  0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
4371 +                                  skb);
4372 +
4373 +               netif_info(priv, tx_queued, priv->ndev,
4374 +                          "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n",
4375 +                          __func__, skb, nr_frags, skb->len);
4376 +       } else {
4377 +               __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
4378 +                                  skb->len, ctrl, HIF_FIRST_BUFFER |
4379 +                                  HIF_LAST_BUFFER | HIF_DATA_VALID,
4380 +                                  skb);
4381 +               netif_info(priv, tx_queued, priv->ndev,
4382 +                          "%s: pkt sent successfully skb:%p len:%d\n",
4383 +                          __func__, skb, skb->len);
4384 +       }
4385 +       hif_tx_dma_start();
4386 +       priv->stats.tx_packets++;
4387 +       priv->stats.tx_bytes += skb->len;
4388 +       hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1);
4389 +}
4390 +
4391 +/* pfe_eth_flush_txQ
+ * Reclaim completed TX descriptors (and their skbs) for one queue.
4392 + */
4393 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num,
4394 +                              int from_tx, int n_desc)
4395 +{
4396 +       struct sk_buff *skb;
4397 +       struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
4398 +                                                               tx_q_num);
4399 +       unsigned int flags;
4400 +
4401 +       netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
4402 +
4403 +       if (!from_tx)
4404 +               __netif_tx_lock_bh(tx_queue);
4405 +
4406 +       /* Clean HIF and client queue */
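+       /*
+        * Only the completion that carries HIF_DATA_VALID owns the skb
+        * reference (it was set on the last buffer of the packet), so the
+        * skb is freed exactly once per packet.
+        */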
4407 +       while ((skb = hif_lib_tx_get_next_complete(&priv->client,
4408 +                                                  tx_q_num, &flags,
4409 +                                                  HIF_TX_DESC_NT))) {
4410 +               if (flags & HIF_DATA_VALID)
4411 +                       dev_kfree_skb_any(skb);
4412 +       }
4413 +       if (!from_tx)
4414 +               __netif_tx_unlock_bh(tx_queue);
4415 +}
4416 +
4417 +/* pfe_eth_flush_tx
+ * Reclaim completed TX work on all queues and refresh their TX credits.
4418 + */
4419 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
4420 +{
4421 +       int ii;
4422 +
4423 +       netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
4424 +
4425 +       for (ii = 0; ii < emac_txq_cnt; ii++) {
4426 +               pfe_eth_flush_txQ(priv, ii, 0, 0);
4427 +               __hif_lib_update_credit(&priv->client, ii);
4428 +       }
4429 +}
4430 +
4431 +void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc,
4432 +                        unsigned int *n_segs)
4433 +{
4434 +       struct skb_shared_info *sh = skb_shinfo(skb);
4435 +
4436 +       /* Scattered data: one descriptor for the head plus one per frag */
4437 +       if (sh->nr_frags) {
4438 +               *n_desc = sh->nr_frags + 1;
4439 +               *n_segs = 1;
4440 +       } else {
4441 +               /* Regular case: a single linear buffer */
4442 +               *n_desc = 1;
4443 +               *n_segs = 1;
4444 +       }
4445 +}
4446 +
4447 +/* pfe_eth_send_packet
+ * ndo_start_xmit: queue a packet to the HIF for transmission.
4448 + */
4449 +static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *ndev)
4450 +{
4451 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4452 +       int tx_q_num = skb_get_queue_mapping(skb);
4453 +       int n_desc, n_segs;
4454 +       struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
4455 +                                                               tx_q_num);
4456 +
4457 +       netif_info(priv, tx_queued, ndev, "%s\n", __func__);
4458 +
4459 +       if (!skb_is_gso(skb) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ +
4460 +                       sizeof(unsigned long)))) {
4461 +               netif_warn(priv, tx_err, priv->ndev, "%s: copying skb\n",
4462 +                          __func__);
4463 +
4464 +               if (pskb_expand_head(skb, PFE_PKT_HEADER_SZ +
4465 +                                    sizeof(unsigned long), 0, GFP_ATOMIC)) {
4466 +                       /* No need to re-transmit, no way to recover */
4467 +                       kfree_skb(skb);
4468 +                       priv->stats.tx_dropped++;
4469 +                       return NETDEV_TX_OK;
4470 +               }
4471 +       }
4472 +
4473 +       pfe_tx_get_req_desc(skb, &n_desc, &n_segs);
4474 +
4475 +       hif_tx_lock(&pfe->hif);
4476 +       if (unlikely(pfe_eth_might_stop_tx(priv, tx_q_num, tx_queue, n_desc,
4477 +                                          n_segs))) {
4478 +#ifdef PFE_ETH_TX_STATS
4479 +               if (priv->was_stopped[tx_q_num]) {
4480 +                       priv->clean_fail[tx_q_num]++;
4481 +                       priv->was_stopped[tx_q_num] = 0;
4482 +               }
4483 +#endif
4484 +               hif_tx_unlock(&pfe->hif);
4485 +               return NETDEV_TX_BUSY;
4486 +       }
4487 +
4488 +       pfe_hif_send_packet(skb, priv, tx_q_num);
4489 +
4490 +       hif_tx_unlock(&pfe->hif);
4491 +
4492 +       tx_queue->trans_start = jiffies;
4493 +
4494 +#ifdef PFE_ETH_TX_STATS
4495 +       priv->was_stopped[tx_q_num] = 0;
4496 +#endif
4497 +
4498 +       return NETDEV_TX_OK;
4499 +}
4500 +
4501 +/* pfe_eth_select_queue
4502 + * ndo_select_queue: map the packet to a TX queue via its mark.
4503 + */
4504 +static u16 pfe_eth_select_queue(struct net_device *ndev, struct sk_buff *skb,
4505 +                               void *accel_priv,
4506 +                               select_queue_fallback_t fallback)
4507 +{
4508 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4509 +
4510 +       return pfe_eth_get_queuenum(priv, skb);
4511 +}
4512 +
4513 +/* pfe_eth_get_stats
4514 + */
4515 +static struct net_device_stats *pfe_eth_get_stats(struct net_device *ndev)
4516 +{
4517 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4518 +
4519 +       netif_info(priv, drv, ndev, "%s\n", __func__);
4520 +
4521 +       return &priv->stats;
4522 +}
4523 +
4524 +/* pfe_eth_set_mac_address
4525 + */
4526 +static int pfe_eth_set_mac_address(struct net_device *ndev, void *addr)
4527 +{
4528 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4529 +       struct sockaddr *sa = addr;
4530 +
4531 +       netif_info(priv, drv, ndev, "%s\n", __func__);
4532 +
4533 +       if (!is_valid_ether_addr(sa->sa_data))
4534 +               return -EADDRNOTAVAIL;
4535 +
4536 +       memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
4537 +
4538 +       gemac_set_laddrN(priv->EMAC_baseaddr,
4539 +                        (struct pfe_mac_addr *)ndev->dev_addr, 1);
4540 +
4541 +       return 0;
4542 +}
4543 +
4544 +/* pfe_eth_enet_addr_byte_mac
+ * Pack a 6-byte MAC address into the two GEMAC address register words,
+ * e.g. 00:04:9f:01:02:03 -> bottom 0x019f0400, top 0x00000302.
4545 + */
4546 +int pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
4547 +                              struct pfe_mac_addr *enet_addr)
4548 +{
4549 +       if (!enet_byte_addr || !enet_addr) {
4550 +               return -1;
4551 +       } else {
4553 +               enet_addr->bottom = enet_byte_addr[0] |
4554 +                       (enet_byte_addr[1] << 8) |
4555 +                       (enet_byte_addr[2] << 16) |
4556 +                       (enet_byte_addr[3] << 24);
4557 +               enet_addr->top = enet_byte_addr[4] |
4558 +                       (enet_byte_addr[5] << 8);
4559 +               return 0;
4560 +       }
4561 +}
4562 +
4563 +/* pfe_eth_set_multi
+ * ndo_set_rx_mode: program promiscuous mode, broadcast acceptance and the
+ * unicast/multicast filters (specific-address registers plus hash).
4564 + */
4565 +static void pfe_eth_set_multi(struct net_device *ndev)
4566 +{
4567 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4568 +       struct pfe_mac_addr    hash_addr; /* hash register structure */
4569 +       /* specific mac address register structure */
4570 +       struct pfe_mac_addr    spec_addr;
4571 +       int             result; /* index into hash register to set */
4572 +       int             uc_count = 0;
4573 +       struct netdev_hw_addr *ha;
4574 +
4575 +       if (ndev->flags & IFF_PROMISC) {
4576 +               netif_info(priv, drv, ndev, "entering promiscuous mode\n");
4577 +
4578 +               priv->promisc = 1;
4579 +               gemac_enable_copy_all(priv->EMAC_baseaddr);
4580 +       } else {
4581 +               priv->promisc = 0;
4582 +               gemac_disable_copy_all(priv->EMAC_baseaddr);
4583 +       }
4584 +
4585 +       /* Enable broadcast frame reception if required. */
4586 +       if (ndev->flags & IFF_BROADCAST) {
4587 +               gemac_allow_broadcast(priv->EMAC_baseaddr);
4588 +       } else {
4589 +               netif_info(priv, drv, ndev,
4590 +                          "disabling broadcast frame reception\n");
4591 +
4592 +               gemac_no_broadcast(priv->EMAC_baseaddr);
4593 +       }
4594 +
4595 +       if (ndev->flags & IFF_ALLMULTI) {
4596 +               /* Set the hash to rx all multicast frames */
4597 +               hash_addr.bottom = 0xFFFFFFFF;
4598 +               hash_addr.top = 0xFFFFFFFF;
4599 +               gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
4600 +               netdev_for_each_uc_addr(ha, ndev) {
4601 +                       if (uc_count >= MAX_UC_SPEC_ADDR_REG)
4602 +                               break;
4603 +                       pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr);
4604 +                       gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr,
4605 +                                        uc_count + 2);
4606 +                       uc_count++;
4607 +               }
4608 +       } else if ((netdev_mc_count(ndev) > 0)  || (netdev_uc_count(ndev))) {
4609 +               u8 *addr;
4610 +
4611 +               hash_addr.bottom = 0;
4612 +               hash_addr.top = 0;
4613 +
4614 +               netdev_for_each_mc_addr(ha, ndev) {
4615 +                       addr = ha->addr;
4616 +
4617 +                       netif_info(priv, drv, ndev,
4618 +                                  "adding multicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
4619 +                                  addr[0], addr[1], addr[2],
4620 +                                  addr[3], addr[4], addr[5]);
4621 +
4622 +                       result = pfe_eth_get_hash(addr);
4623 +
4624 +                       if (result < EMAC_HASH_REG_BITS) {
4625 +                               if (result < 32)
4626 +                                       hash_addr.bottom |= (1 << result);
4627 +                               else
4628 +                                       hash_addr.top |= (1 << (result - 32));
4629 +                       } else {
4630 +                               break;
4631 +                       }
4632 +               }
4633 +
4634 +               uc_count = -1;
4635 +               netdev_for_each_uc_addr(ha, ndev) {
4636 +                       addr = ha->addr;
4637 +
4638 +                       if (++uc_count < MAX_UC_SPEC_ADDR_REG)   {
4639 +                               netdev_info(ndev,
4640 +                                           "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
4641 +                                           addr[0], addr[1], addr[2],
4642 +                                           addr[3], addr[4], addr[5]);
4643 +                               pfe_eth_enet_addr_byte_mac(addr, &spec_addr);
4644 +                               gemac_set_laddrN(priv->EMAC_baseaddr,
4645 +                                                &spec_addr, uc_count + 2);
4646 +                       } else {
4647 +                               netif_info(priv, drv, ndev,
4648 +                                          "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n",
4649 +                                          addr[0], addr[1], addr[2],
4650 +                                          addr[3], addr[4], addr[5]);
4651 +
4652 +                               result = pfe_eth_get_hash(addr);
4653 +                               if (result >= EMAC_HASH_REG_BITS) {
4654 +                                       break;
4655 +                               } else {
4657 +                                       if (result < 32)
4658 +                                               hash_addr.bottom |= (1 <<
4659 +                                                               result);
4660 +                                       else
4661 +                                               hash_addr.top |= (1 <<
4662 +                                                               (result - 32));
4663 +                               }
4664 +                       }
4665 +               }
4666 +
4667 +               gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
4668 +       }
4669 +
4670 +       if (netdev_uc_count(ndev) < MAX_UC_SPEC_ADDR_REG) {
4671 +               /*
4672 +                * Clear any specific-address HW registers that are no
4673 +                * longer in use
4674 +                */
4675 +               for (uc_count = netdev_uc_count(ndev); uc_count <
4676 +                       MAX_UC_SPEC_ADDR_REG; uc_count++)
4677 +                       gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2);
4678 +       }
4679 +
4680 +       if (ndev->flags & IFF_LOOPBACK)
4681 +               gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL);
4682 +}
4683 +
4684 +/* pfe_eth_set_features
4685 + */
4686 +static int pfe_eth_set_features(struct net_device *ndev,
4687 +                                netdev_features_t features)
4688 +{
4689 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4690 +       int rc = 0;
4691 +
4692 +       if (features & NETIF_F_RXCSUM)
4693 +               gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
4694 +       else
4695 +               gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
4696 +       return rc;
4697 +}
4698 +
4699 +/* pfe_eth_fast_tx_timeout
+ * hrtimer callback: re-wake a TX queue shortly after it was stopped for
+ * lack of TX resources.
4700 + */
4701 +static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer)
4702 +{
4703 +       struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer,
4704 +                                       struct pfe_eth_fast_timer, timer);
4705 +       struct pfe_eth_priv_s *priv = container_of(fast_tx_timeout->base,
4706 +                                       struct pfe_eth_priv_s,
4707 +                                       fast_tx_timeout);
4709 +       struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
4710 +                                               fast_tx_timeout->queuenum);
4711 +
4712 +       if (netif_tx_queue_stopped(tx_queue)) {
4713 +#ifdef PFE_ETH_TX_STATS
4714 +               priv->was_stopped[fast_tx_timeout->queuenum] = 1;
4715 +#endif
4716 +               netif_tx_wake_queue(tx_queue);
4717 +       }
4718 +
4719 +       return HRTIMER_NORESTART;
4720 +}
4721 +
4722 +/* pfe_eth_fast_tx_timeout_init
4723 + */
4724 +static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv)
4725 +{
4726 +       int i;
4727 +
4728 +       for (i = 0; i < emac_txq_cnt; i++) {
4729 +               priv->fast_tx_timeout[i].queuenum = i;
4730 +               hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC,
4731 +                            HRTIMER_MODE_REL);
4732 +               priv->fast_tx_timeout[i].timer.function =
4733 +                               pfe_eth_fast_tx_timeout;
4734 +               priv->fast_tx_timeout[i].base = priv->fast_tx_timeout;
4735 +       }
4736 +}
4737 +
4738 +static struct sk_buff *pfe_eth_rx_skb(struct net_device *ndev,
4739 +                                     struct pfe_eth_priv_s *priv,
4740 +                                     unsigned int qno)
4741 +{
4742 +       void *buf_addr;
4743 +       unsigned int rx_ctrl;
4744 +       unsigned int desc_ctrl = 0;
4745 +       struct hif_ipsec_hdr *ipsec_hdr = NULL;
4746 +       struct sk_buff *skb;
4747 +       struct sk_buff *skb_frag, *skb_frag_last = NULL;
4748 +       int length = 0, offset;
4749 +
4750 +       skb = priv->skb_inflight[qno];
4751 +
4752 +       if (skb) {
4753 +               skb_frag_last = skb_shinfo(skb)->frag_list;
4754 +               if (skb_frag_last) {
4755 +                       while (skb_frag_last->next)
4756 +                               skb_frag_last = skb_frag_last->next;
4757 +               }
4758 +       }
4759 +
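+       /*
+        * Reassemble one packet from HIF buffers: the FIRST buffer becomes
+        * the head skb and later buffers are chained on its frag_list until
+        * LAST is seen. A partially received packet is parked in
+        * skb_inflight[] and resumed on the next poll.
+        */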
4760 +       while (!(desc_ctrl & CL_DESC_LAST)) {
4761 +               buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length,
4762 +                                              &offset, &rx_ctrl, &desc_ctrl,
4763 +                                              (void **)&ipsec_hdr);
4764 +               if (!buf_addr)
4765 +                       goto incomplete;
4766 +
4767 +#ifdef PFE_ETH_NAPI_STATS
4768 +               priv->napi_counters[NAPI_DESC_COUNT]++;
4769 +#endif
4770 +
4771 +               /* First frag */
4772 +               if (desc_ctrl & CL_DESC_FIRST) {
4773 +                       skb = build_skb(buf_addr, 0);
4774 +                       if (unlikely(!skb))
4775 +                               goto pkt_drop;
4776 +
4777 +                       skb_reserve(skb, offset);
4778 +                       skb_put(skb, length);
4779 +                       skb->dev = ndev;
4780 +
4781 +                       if ((ndev->features & NETIF_F_RXCSUM) && (rx_ctrl &
4782 +                                       HIF_CTRL_RX_CHECKSUMMED))
4783 +                               skb->ip_summed = CHECKSUM_UNNECESSARY;
4784 +                       else
4785 +                               skb_checksum_none_assert(skb);
4786 +
4787 +               } else {
4788 +                       /* Next frags */
4789 +                       if (unlikely(!skb)) {
4790 +                               pr_err("%s: NULL skb_inflight\n",
4791 +                                      __func__);
4792 +                               goto pkt_drop;
4793 +                       }
4794 +
4795 +                       skb_frag = build_skb(buf_addr, 0);
4796 +
4797 +                       if (unlikely(!skb_frag)) {
4798 +                               kfree(buf_addr);
4799 +                               goto pkt_drop;
4800 +                       }
4801 +
4802 +                       skb_reserve(skb_frag, offset);
4803 +                       skb_put(skb_frag, length);
4804 +
4805 +                       skb_frag->dev = ndev;
4806 +
4807 +                       if (skb_shinfo(skb)->frag_list)
4808 +                               skb_frag_last->next = skb_frag;
4809 +                       else
4810 +                               skb_shinfo(skb)->frag_list = skb_frag;
4811 +
4812 +                       skb->truesize += skb_frag->truesize;
4813 +                       skb->data_len += length;
4814 +                       skb->len += length;
4815 +                       skb_frag_last = skb_frag;
4816 +               }
4817 +       }
4818 +
4819 +       priv->skb_inflight[qno] = NULL;
4820 +       return skb;
4821 +
4822 +incomplete:
4823 +       priv->skb_inflight[qno] = skb;
4824 +       return NULL;
4825 +
4826 +pkt_drop:
4827 +       priv->skb_inflight[qno] = NULL;
4828 +
4829 +       if (skb)
4830 +               kfree_skb(skb);
4831 +       else
4832 +               kfree(buf_addr);
4833 +
4834 +       priv->stats.rx_errors++;
4835 +
4836 +       return NULL;
4837 +}
4838 +
4839 +/* pfe_eth_poll
+ * Common NAPI poll: receive up to budget packets from one HIF RX queue.
4840 + */
4841 +static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi,
4842 +                       unsigned int qno, int budget)
4843 +{
4844 +       struct net_device *ndev = priv->ndev;
4845 +       struct sk_buff *skb;
4846 +       int work_done = 0;
4847 +       unsigned int len;
4848 +
4849 +       netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4850 +
4851 +#ifdef PFE_ETH_NAPI_STATS
4852 +       priv->napi_counters[NAPI_POLL_COUNT]++;
4853 +#endif
4854 +
4855 +       do {
4856 +               skb = pfe_eth_rx_skb(ndev, priv, qno);
4857 +
4858 +               if (!skb)
4859 +                       break;
4860 +
4861 +               len = skb->len;
4862 +
4863 +               /* Packet will be processed */
4864 +               skb->protocol = eth_type_trans(skb, ndev);
4865 +
4866 +               netif_receive_skb(skb);
4867 +
4868 +               priv->stats.rx_packets++;
4869 +               priv->stats.rx_bytes += len;
4870 +
4871 +               work_done++;
4872 +
4873 +#ifdef PFE_ETH_NAPI_STATS
4874 +               priv->napi_counters[NAPI_PACKET_COUNT]++;
4875 +#endif
4876 +
4877 +       } while (work_done < budget);
4878 +
4879 +       /*
4880 +        * If less than the full budget of Rx work was done, exit polling
4881 +        * mode. No netif_running(dev) check is required here, as it is
4882 +        * done in net/core/dev.c.
4883 +        */
4884 +       if (work_done < budget) {
4885 +               napi_complete(napi);
4886 +
4887 +               hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND,
4888 +                                           qno);
4889 +       }
4890 +#ifdef PFE_ETH_NAPI_STATS
4891 +       else
4892 +               priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
4893 +#endif
4894 +
4895 +       return work_done;
4896 +}
4897 +
4898 +/*
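+/*
+ * The three RX queues map to dedicated NAPI contexts: queue 0 is serviced
+ * by high_napi, queue 1 by low_napi and queue 2 by lro_napi, as wired up
+ * in the pollers below.
+ */
+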
4899 + * pfe_eth_lro_poll
4900 + */
4901 +static int pfe_eth_lro_poll(struct napi_struct *napi, int budget)
4902 +{
4903 +       struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4904 +                                                       lro_napi);
4905 +
4906 +       netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4907 +
4908 +       return pfe_eth_poll(priv, napi, 2, budget);
4909 +}
4910 +
4911 +/* pfe_eth_low_poll
4912 + */
4913 +static int pfe_eth_low_poll(struct napi_struct *napi, int budget)
4914 +{
4915 +       struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4916 +                                                       low_napi);
4917 +
4918 +       netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4919 +
4920 +       return pfe_eth_poll(priv, napi, 1, budget);
4921 +}
4922 +
4923 +/* pfe_eth_high_poll
4924 + */
4925 +static int pfe_eth_high_poll(struct napi_struct *napi, int budget)
4926 +{
4927 +       struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4928 +                                                       high_napi);
4929 +
4930 +       netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4931 +
4932 +       return pfe_eth_poll(priv, napi, 0, budget);
4933 +}
4934 +
4935 +static const struct net_device_ops pfe_netdev_ops = {
4936 +       .ndo_open = pfe_eth_open,
4937 +       .ndo_stop = pfe_eth_close,
4938 +       .ndo_start_xmit = pfe_eth_send_packet,
4939 +       .ndo_select_queue = pfe_eth_select_queue,
4940 +       .ndo_set_rx_mode = pfe_eth_set_multi,
4941 +       .ndo_set_mac_address = pfe_eth_set_mac_address,
4942 +       .ndo_validate_addr = eth_validate_addr,
4943 +       .ndo_change_mtu = pfe_eth_change_mtu,
4944 +       .ndo_get_stats = pfe_eth_get_stats,
4945 +       .ndo_set_features = pfe_eth_set_features,
4946 +};
4947 +
4948 +/* pfe_eth_init_one
+ * Allocate, configure and register the net_device for one GEMAC.
4949 + */
4950 +static int pfe_eth_init_one(struct pfe *pfe,
4951 +                           struct ls1012a_pfe_platform_data *pfe_info,
4952 +                           int id)
4953 +{
4954 +       struct net_device *ndev = NULL;
4955 +       struct pfe_eth_priv_s *priv = NULL;
4956 +       struct ls1012a_eth_platform_data *einfo;
4957 +       int err;
4958 +
4959 +       einfo = (struct ls1012a_eth_platform_data *)
4960 +                               pfe_info->ls1012a_eth_pdata;
4961 +
4962 +       /* einfo should never be NULL, but there is no harm in checking */
4963 +       if (!einfo) {
4964 +               pr_err("%s: pfe missing additional gemacs platform data\n",
4965 +                      __func__);
4967 +               err = -ENODEV;
4968 +               goto err0;
4969 +       }
4970 +
4971 +       if (us)
4972 +               emac_txq_cnt = EMAC_TXQ_CNT;
4973 +       /* Create an ethernet device instance */
4974 +       ndev = alloc_etherdev_mq(sizeof(*priv), emac_txq_cnt);
4975 +
4976 +       if (!ndev) {
4977 +               pr_err("%s: gemac %d device allocation failed\n",
4978 +                      __func__, einfo[id].gem_id);
4979 +               err = -ENOMEM;
4980 +               goto err0;
4981 +       }
4982 +
4983 +       priv = netdev_priv(ndev);
4984 +       priv->ndev = ndev;
4985 +       priv->id = einfo[id].gem_id;
4986 +       priv->pfe = pfe;
4987 +       priv->phy_node = einfo[id].phy_node;
4988 +
4989 +       SET_NETDEV_DEV(priv->ndev, priv->pfe->dev);
4990 +
4991 +       pfe->eth.eth_priv[id] = priv;
4992 +
4993 +       /* Set the info in the priv to the current info */
4994 +       priv->einfo = &einfo[id];
4995 +       priv->EMAC_baseaddr = cbus_emac_base[id];
4996 +       priv->GPI_baseaddr = cbus_gpi_base[id];
4997 +
4998 +       spin_lock_init(&priv->lock);
4999 +
5000 +       pfe_eth_fast_tx_timeout_init(priv);
5001 +
5002 +       /* Copy the station address into the dev structure */
5003 +       memcpy(ndev->dev_addr, einfo[id].mac_addr, ETH_ALEN);
5004 +
5005 +       if (us)
5006 +               goto phy_init;
5007 +
5008 +       ndev->mtu = 1500;
5009 +
5010 +       /* Set MTU limits */
5011 +       ndev->min_mtu = ETH_MIN_MTU;
5012 +
5013 +/*
5014 + * Jumbo frames are not supported on LS1012A rev 1.0 (erratum A-010897),
5015 + * so the max MTU must be restricted to the supported frame length.
5016 + */
5017 +       if (pfe_errata_a010897)
5018 +               ndev->max_mtu = JUMBO_FRAME_SIZE_V1 - ETH_HLEN - ETH_FCS_LEN;
5019 +       else
5020 +               ndev->max_mtu = JUMBO_FRAME_SIZE_V2 - ETH_HLEN - ETH_FCS_LEN;
5021 +
5022 +       /* Enable after checksum offload is validated */
5023 +       ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
5024 +               NETIF_F_IPV6_CSUM | NETIF_F_SG;
5025 +
5026 +       /* enabled by default */
5027 +       ndev->features = ndev->hw_features;
5028 +
5029 +       priv->usr_features = ndev->features;
5030 +
5031 +       ndev->netdev_ops = &pfe_netdev_ops;
5032 +
5033 +       ndev->ethtool_ops = &pfe_ethtool_ops;
5034 +
5035 +       /* Enable basic messages by default */
5036 +       priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK |
5037 +                               NETIF_MSG_PROBE;
5038 +
5039 +       netif_napi_add(ndev, &priv->low_napi, pfe_eth_low_poll,
5040 +                      HIF_RX_POLL_WEIGHT - 16);
5041 +       netif_napi_add(ndev, &priv->high_napi, pfe_eth_high_poll,
5042 +                      HIF_RX_POLL_WEIGHT - 16);
5043 +       netif_napi_add(ndev, &priv->lro_napi, pfe_eth_lro_poll,
5044 +                      HIF_RX_POLL_WEIGHT - 16);
5045 +
5046 +       err = register_netdev(ndev);
5047 +       if (err) {
5048 +               netdev_err(ndev, "register_netdev() failed\n");
5049 +               goto err1;
5050 +       }
5051 +
5052 +       if ((!(pfe_use_old_dts_phy) && !(priv->phy_node)) ||
5053 +           ((pfe_use_old_dts_phy) &&
5054 +             (priv->einfo->phy_flags & GEMAC_NO_PHY))) {
5055 +               pr_info("%s: No PHY or fixed-link\n", __func__);
5056 +               goto skip_phy_init;
5057 +       }
5058 +
5059 +phy_init:
5060 +       device_init_wakeup(&ndev->dev, WAKE_MAGIC);
5061 +
5062 +       err = pfe_phy_init(ndev);
5063 +       if (err) {
5064 +               netdev_err(ndev, "%s: pfe_phy_init() failed\n",
5065 +                          __func__);
5066 +               goto err2;
5067 +       }
5068 +
5069 +       if (us) {
5070 +               if (priv->phydev)
5071 +                       phy_start(priv->phydev);
5072 +               return 0;
5073 +       }
5074 +
5075 +       netif_carrier_on(ndev);
5076 +
5077 +skip_phy_init:
5078 +       /* Create all the sysfs files */
5079 +       if (pfe_eth_sysfs_init(ndev))
5080 +               goto err3;
5081 +
5082 +       netif_info(priv, probe, ndev, "%s: created interface, baseaddr: %p\n",
5083 +                  __func__, priv->EMAC_baseaddr);
5084 +
5085 +       return 0;
5086 +
5087 +err3:
5088 +       pfe_phy_exit(priv->ndev);
5089 +err2:
5090 +       if (us)
5091 +               goto err1;
5092 +       unregister_netdev(ndev);
5093 +err1:
5094 +       free_netdev(priv->ndev);
5095 +err0:
5096 +       return err;
5097 +}
5098 +
5099 +/* pfe_eth_init
+ * Bring up the MDIO buses and create one net_device per supported GEMAC.
5100 + */
5101 +int pfe_eth_init(struct pfe *pfe)
5102 +{
5103 +       int ii = 0;
5104 +       int err;
5105 +       struct ls1012a_pfe_platform_data *pfe_info;
5106 +
5107 +       pr_info("%s\n", __func__);
5108 +
5109 +       cbus_emac_base[0] = EMAC1_BASE_ADDR;
5110 +       cbus_emac_base[1] = EMAC2_BASE_ADDR;
5111 +
5112 +       cbus_gpi_base[0] = EGPI1_BASE_ADDR;
5113 +       cbus_gpi_base[1] = EGPI2_BASE_ADDR;
5114 +
5115 +       pfe_info = (struct ls1012a_pfe_platform_data *)
5116 +                                       pfe->dev->platform_data;
5117 +       if (!pfe_info) {
5118 +               pr_err("%s: pfe missing additional platform data\n", __func__);
5119 +               err = -ENODEV;
5120 +               goto err_pdata;
5121 +       }
5122 +
5123 +       for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
5124 +               err = pfe_eth_mdio_init(pfe, pfe_info, ii);
5125 +               if (err) {
5126 +                       pr_err("%s: pfe_eth_mdio_init() failed\n", __func__);
5127 +                       goto err_mdio_init;
5128 +               }
5129 +       }
5130 +
5131 +       pfe_errata_a010897 = (fsl_guts_get_svr() == LS1012A_REV_1_0);
5135 +
5136 +       for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
5137 +               err = pfe_eth_init_one(pfe, pfe_info, ii);
5138 +               if (err)
5139 +                       goto err_eth_init;
5140 +       }
5141 +
5142 +       return 0;
5143 +
5144 +err_eth_init:
5145 +       while (ii--) {
5146 +               pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
5147 +               pfe_eth_mdio_exit(pfe, ii);
5148 +       }
5149 +
5150 +err_mdio_init:
5151 +err_pdata:
5152 +       return err;
5153 +}
5154 +
5155 +/* pfe_eth_exit_one
+ * Tear down one GEMAC net_device: sysfs, PHY, unregister and free.
5156 + */
5157 +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv)
5158 +{
5159 +       netif_info(priv, probe, priv->ndev, "%s\n", __func__);
5160 +
5161 +       if (!us)
5162 +               pfe_eth_sysfs_exit(priv->ndev);
5163 +
5164 +       if ((!(pfe_use_old_dts_phy) && !(priv->phy_node)) ||
5165 +           ((pfe_use_old_dts_phy) &&
5166 +             (priv->einfo->phy_flags & GEMAC_NO_PHY))) {
5167 +               pr_info("%s: No PHY or fixed-link\n", __func__);
5168 +               goto skip_phy_exit;
5169 +       }
5170 +
5171 +       pfe_phy_exit(priv->ndev);
5172 +
5173 +skip_phy_exit:
5174 +       if (!us)
5175 +               unregister_netdev(priv->ndev);
5176 +
5177 +       free_netdev(priv->ndev);
5178 +}
5179 +
5180 +/* pfe_eth_exit
5181 + */
5182 +void pfe_eth_exit(struct pfe *pfe)
5183 +{
5184 +       int ii;
5185 +
5186 +       pr_info("%s\n", __func__);
5187 +
5188 +       for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
5189 +               pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
5190 +
5191 +       for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
5192 +               pfe_eth_mdio_exit(pfe, ii);
5193 +}
5194 --- /dev/null
5195 +++ b/drivers/staging/fsl_ppfe/pfe_eth.h
5196 @@ -0,0 +1,175 @@
5197 +/* SPDX-License-Identifier: GPL-2.0+ */
5198 +/*
5199 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5200 + * Copyright 2017 NXP
5201 + */
5202 +
5203 +#ifndef _PFE_ETH_H_
5204 +#define _PFE_ETH_H_
5205 +#include <linux/kernel.h>
5206 +#include <linux/netdevice.h>
5207 +#include <linux/etherdevice.h>
5208 +#include <linux/ethtool.h>
5209 +#include <linux/mii.h>
5210 +#include <linux/phy.h>
5211 +#include <linux/clk.h>
5212 +#include <linux/interrupt.h>
5213 +#include <linux/time.h>
5214 +
5215 +#define PFE_ETH_NAPI_STATS
5216 +#define PFE_ETH_TX_STATS
5217 +
5218 +#define PFE_ETH_FRAGS_MAX (65536 / HIF_RX_PKT_MIN_SIZE)
5219 +#define LRO_LEN_COUNT_MAX      32
5220 +#define LRO_NB_COUNT_MAX       32
5221 +
5222 +#define PFE_PAUSE_FLAG_ENABLE          1
5223 +#define PFE_PAUSE_FLAG_AUTONEG         2
5224 +
5225 +/* GEMAC configured by SW */
5226 +/* GEMAC configured by phy lines (not for MII/GMII) */
5227 +
5228 +#define GEMAC_SW_FULL_DUPLEX    BIT(9)
5229 +#define GEMAC_SW_SPEED_10M      (0 << 12)
5230 +#define GEMAC_SW_SPEED_100M     BIT(12)
5231 +#define GEMAC_SW_SPEED_1G       (2 << 12)
5232 +
5233 +#define GEMAC_NO_PHY            BIT(0)
5234 +
5235 +struct ls1012a_eth_platform_data {
5236 +       /* board specific information */
5237 +       u32 mii_config;
5238 +       u32 phy_flags;
5239 +       u32 gem_id;
5240 +       u32 phy_id;
5241 +       u32 mdio_muxval;
5242 +       u8 mac_addr[ETH_ALEN];
5243 +       struct device_node      *phy_node;
5244 +};
5245 +
5246 +struct ls1012a_mdio_platform_data {
5247 +       int id;
5248 +       int irq[32];
5249 +       u32 phy_mask;
5250 +       int mdc_div;
5251 +};
5252 +
5253 +struct ls1012a_pfe_platform_data {
5254 +       struct ls1012a_eth_platform_data ls1012a_eth_pdata[3];
5255 +       struct ls1012a_mdio_platform_data ls1012a_mdio_pdata[3];
5256 +};
5257 +
5258 +#define NUM_GEMAC_SUPPORT      2
5259 +#define DRV_NAME               "pfe-eth"
5260 +#define DRV_VERSION            "1.0"
5261 +
5262 +#define LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS    3
5263 +#define TX_POLL_TIMEOUT_MS     1000
5264 +
5265 +#define EMAC_TXQ_CNT   16
5266 +#define EMAC_TXQ_DEPTH (HIF_TX_DESC_NT)
5267 +
5268 +#define JUMBO_FRAME_SIZE_V1    1900
5269 +#define JUMBO_FRAME_SIZE_V2    10258
5270 +/*
5271 + * Client Tx queue threshold, for txQ flush condition.
5272 + * It must be smaller than the queue size (in case we ever change it in the
5273 + * future).
5274 + */
5275 +#define HIF_CL_TX_FLUSH_MARK   32
5276 +
5277 +/*
5278 + * Max number of TX resources (HIF descriptors or skbs) that will be released
5279 + * in a single go during batch recycling.
5280 + * Should be lower than the flush mark so the SW can provide the HW with a
5281 + * continuous stream of packets instead of bursts.
5282 + */
5283 +#define TX_FREE_MAX_COUNT 16
5284 +#define EMAC_RXQ_CNT   3
5285 +#define EMAC_RXQ_DEPTH HIF_RX_DESC_NT
5286 +/* make sure clients can receive a full burst of packets */
5287 +#define EMAC_RMON_TXBYTES_POS  0x00
5288 +#define EMAC_RMON_RXBYTES_POS  0x14
5289 +
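+/* Queue mask; assumes emac_txq_cnt is a power of two (EMAC_TXQ_CNT is 16) */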
5290 +#define EMAC_QUEUENUM_MASK      (emac_txq_cnt - 1)
5291 +#define EMAC_MDIO_TIMEOUT      1000
5292 +#define MAX_UC_SPEC_ADDR_REG 31
5293 +
5294 +struct pfe_eth_fast_timer {
5295 +       int queuenum;
5296 +       struct hrtimer timer;
5297 +       void *base;
5298 +};
5299 +
5300 +struct  pfe_eth_priv_s {
5301 +       struct pfe              *pfe;
5302 +       struct hif_client_s     client;
5303 +       struct napi_struct      lro_napi;
5304 +       struct napi_struct      low_napi;
5305 +       struct napi_struct      high_napi;
5306 +       int                     low_tmu_q;
5307 +       int                     high_tmu_q;
5308 +       struct net_device_stats stats;
5309 +       struct net_device       *ndev;
5310 +       int                     id;
5311 +       int                     promisc;
5312 +       unsigned int            msg_enable;
5313 +       unsigned int            usr_features;
5314 +
5315 +       spinlock_t              lock; /* protect member variables */
5316 +       unsigned int            event_status;
5317 +       int                     irq;
5318 +       void                    *EMAC_baseaddr;
5319 +       void                    *GPI_baseaddr;
5320 +       /* PHY stuff */
5321 +       struct phy_device       *phydev;
5322 +       int                     oldspeed;
5323 +       int                     oldduplex;
5324 +       int                     oldlink;
5325 +       struct device_node      *phy_node;
5326 +       struct clk              *gemtx_clk;
5327 +       int                     wol;
5328 +       int                     pause_flag;
5329 +
5330 +       int                     default_priority;
5331 +       struct pfe_eth_fast_timer fast_tx_timeout[EMAC_TXQ_CNT];
5332 +
5333 +       struct ls1012a_eth_platform_data *einfo;
5334 +       struct sk_buff *skb_inflight[EMAC_RXQ_CNT + 6];
5335 +
5336 +#ifdef PFE_ETH_TX_STATS
5337 +       unsigned int stop_queue_total[EMAC_TXQ_CNT];
5338 +       unsigned int stop_queue_hif[EMAC_TXQ_CNT];
5339 +       unsigned int stop_queue_hif_client[EMAC_TXQ_CNT];
5340 +       unsigned int stop_queue_credit[EMAC_TXQ_CNT];
5341 +       unsigned int clean_fail[EMAC_TXQ_CNT];
5342 +       unsigned int was_stopped[EMAC_TXQ_CNT];
5343 +#endif
5344 +
5345 +#ifdef PFE_ETH_NAPI_STATS
5346 +       unsigned int napi_counters[NAPI_MAX_COUNT];
5347 +#endif
5348 +       unsigned int frags_inflight[EMAC_RXQ_CNT + 6];
5349 +};
5350 +
5351 +struct pfe_eth {
5352 +       struct pfe_eth_priv_s *eth_priv[3];
5353 +};
5354 +
5355 +struct pfe_mdio_priv_s {
5356 +       void __iomem *mdio_base;
5357 +       int                     mdc_div;
5358 +       struct mii_bus          *mii_bus;
5359 +};
5360 +
5361 +struct pfe_mdio {
5362 +       struct pfe_mdio_priv_s *mdio_priv[3];
5363 +};
5364 +
5365 +int pfe_eth_init(struct pfe *pfe);
5366 +void pfe_eth_exit(struct pfe *pfe);
5367 +int pfe_eth_suspend(struct net_device *dev);
5368 +int pfe_eth_resume(struct net_device *dev);
5369 +int pfe_eth_mdio_reset(struct mii_bus *bus);
5370 +
5371 +#endif /* _PFE_ETH_H_ */
5372 --- /dev/null
5373 +++ b/drivers/staging/fsl_ppfe/pfe_firmware.c
5374 @@ -0,0 +1,302 @@
5375 +// SPDX-License-Identifier: GPL-2.0+
5376 +/*
5377 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5378 + * Copyright 2017 NXP
5379 + */
5380 +
5381 +/*
5382 + * @file
5383 + * Contains all the functions to handle parsing and loading of PE firmware
5384 + * files.
5385 + */
5386 +#include <linux/firmware.h>
5387 +
5388 +#include "pfe_mod.h"
5389 +#include "pfe_firmware.h"
5390 +#include "pfe/pfe.h"
5391 +
5392 +static struct elf32_shdr *get_elf_section_header(const struct firmware *fw,
5393 +                                                const char *section)
5394 +{
5395 +       struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
5396 +       struct elf32_shdr *shdr;
5397 +       struct elf32_shdr *shdr_shstr;
5398 +       Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff);
5399 +       Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize);
5400 +       Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum);
5401 +       Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx);
5402 +       Elf32_Off shstr_offset;
5403 +       Elf32_Word sh_name;
5404 +       const char *name;
5405 +       int i;
5406 +
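+       /*
+        * All ELF header fields are big-endian (the PE cores are
+        * big-endian; pfe_load_elf checks ELFDATA2MSB), hence the
+        * be32/be16 conversions throughout.
+        */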
5407 +       /* Section header strings */
5408 +       shdr_shstr = (struct elf32_shdr *)(fw->data + e_shoff + e_shstrndx *
5409 +                                       e_shentsize);
5410 +       shstr_offset = be32_to_cpu(shdr_shstr->sh_offset);
5411 +
5412 +       for (i = 0; i < e_shnum; i++) {
5413 +               shdr = (struct elf32_shdr *)(fw->data + e_shoff
5414 +                                            + i * e_shentsize);
5415 +
5416 +               sh_name = be32_to_cpu(shdr->sh_name);
5417 +
5418 +               name = (const char *)(fw->data + shstr_offset + sh_name);
5419 +
5420 +               if (!strcmp(name, section))
5421 +                       return shdr;
5422 +       }
5423 +
5424 +       pr_err("%s: didn't find section %s\n", __func__, section);
5425 +
5426 +       return NULL;
5427 +}
5428 +
5429 +#if defined(CFG_DIAGS)
5430 +static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info
5431 +                               *diags_info)
5432 +{
5433 +       struct elf32_shdr *shdr;
5434 +       unsigned long offset, size;
5435 +
5436 +       shdr = get_elf_section_header(fw, ".pfe_diags_str");
5437 +       if (shdr) {
5438 +               offset = be32_to_cpu(shdr->sh_offset);
5439 +               size = be32_to_cpu(shdr->sh_size);
5440 +               diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr);
5441 +               diags_info->diags_str_size = size;
5442 +               diags_info->diags_str_array = kmalloc(size, GFP_KERNEL);
+               if (!diags_info->diags_str_array)
+                       return -1;
5443 +               memcpy(diags_info->diags_str_array, fw->data + offset, size);
5444 +
5445 +               return 0;
5446 +       } else {
5447 +               return -1;
5448 +       }
5449 +}
5450 +#endif
5451 +
5452 +static void pfe_check_version_info(const struct firmware *fw)
5453 +{
5455 +       static char *version;
5456 +
5457 +       struct elf32_shdr *shdr = get_elf_section_header(fw, ".version");
5458 +
5459 +       if (shdr) {
5460 +               if (!version) {
5461 +                       /*
5462 +                        * this is the first fw we load, use its version
5463 +                        * string as reference (whatever it is)
5464 +                        */
5465 +                       version = (char *)(fw->data +
5466 +                                       be32_to_cpu(shdr->sh_offset));
5467 +
5468 +                       pr_info("PFE binary version: %s\n", version);
5469 +               } else {
5470 +                       /*
5471 +                        * at least one firmware has already been loaded,
5472 +                        * so the version check can run now
5473 +                        */
5474 +                       if (strcmp(version, (char *)(fw->data +
5475 +                               be32_to_cpu(shdr->sh_offset)))) {
5476 +                               pr_info(
5477 +                               "WARNING: PFE firmware binaries from incompatible version\n");
5478 +                               "WARNING: PFE firmware binaries are from incompatible versions\n");
5479 +               }
5480 +       } else {
5481 +               /*
5482 +                * version cannot be verified, a potential issue that should
5483 +                * be reported
5484 +                */
5485 +               pr_info(
5486 +                       "WARNING: PFE firmware version could not be verified\n");
5487 +       }
5488 +}
5489 +
5490 +/* PFE elf firmware loader.
5491 + * Loads an elf firmware image into a list of PE's (specified using a bitmask)
5492 + *
5493 + * @param pe_mask      Mask of PE id's to load firmware to
5494 + * @param fw           Pointer to the firmware image
5495 + *
5496 + * @return             0 on success, a negative value on error
5497 + *
5498 + */
5499 +int pfe_load_elf(int pe_mask, const struct firmware *fw, struct pfe *pfe)
5500 +{
5501 +       struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
5502 +       Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum);
5503 +       struct elf32_shdr *shdr = (struct elf32_shdr *)(fw->data +
5504 +                                       be32_to_cpu(elf_hdr->e_shoff));
5505 +       int id, section;
5506 +       int rc;
5507 +
5508 +       pr_info("%s\n", __func__);
5509 +
5510 +       /* Some sanity checks */
5511 +       if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG)) {
5512 +               pr_err("%s: incorrect elf magic number\n", __func__);
5513 +               return -EINVAL;
5514 +       }
5515 +
5516 +       if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
5517 +               pr_err("%s: incorrect elf class(%x)\n", __func__,
5518 +                      elf_hdr->e_ident[EI_CLASS]);
5519 +               return -EINVAL;
5520 +       }
5521 +
5522 +       if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB) {
5523 +               pr_err("%s: incorrect elf data(%x)\n", __func__,
5524 +                      elf_hdr->e_ident[EI_DATA]);
5525 +               return -EINVAL;
5526 +       }
5527 +
5528 +       if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC) {
5529 +               pr_err("%s: incorrect elf file type(%x)\n", __func__,
5530 +                      be16_to_cpu(elf_hdr->e_type));
5531 +               return -EINVAL;
5532 +       }
5533 +
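+       /*
+        * Only sections that occupy memory at run time (ALLOC, WRITE or
+        * EXECINSTR) are loaded; symbol and debug sections are skipped.
+        */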
5534 +       for (section = 0; section < sections; section++, shdr++) {
5535 +               if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC |
5536 +                       SHF_EXECINSTR)))
5537 +                       continue;
5538 +
5539 +               for (id = 0; id < MAX_PE; id++)
5540 +                       if (pe_mask & (1 << id)) {
5541 +                               rc = pe_load_elf_section(id, fw->data, shdr,
5542 +                                                        pfe->dev);
5543 +                               if (rc < 0)
5544 +                                       goto err;
5545 +                       }
5546 +       }
5547 +
5548 +       pfe_check_version_info(fw);
5549 +
5550 +       return 0;
5551 +
5552 +err:
5553 +       return rc;
5554 +}
5555 +
5556 +/* PFE firmware initialization.
5557 + * Loads different firmware files from filesystem.
5558 + * Initializes PE IMEM/DMEM and UTIL-PE DDR
5559 + * Initializes control path symbol addresses (by looking them up in the elf
5560 + * firmware files)
5561 + * Takes PEs out of reset
5562 + *
5563 + * @return     0 on success, a negative value on error
5564 + *
5565 + */
5566 +int pfe_firmware_init(struct pfe *pfe)
5567 +{
5568 +       const struct firmware *class_fw, *tmu_fw;
5569 +       int rc = 0;
5570 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5571 +       const char *util_fw_name;
5572 +       const struct firmware *util_fw;
5573 +#endif
5574 +
5575 +       pr_info("%s\n", __func__);
5576 +
5577 +       if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
5578 +               pr_err("%s: request firmware %s failed\n", __func__,
5579 +                      CLASS_FIRMWARE_FILENAME);
5580 +               rc = -ETIMEDOUT;
5581 +               goto err0;
5582 +       }
5583 +
5584 +       if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) {
5585 +               pr_err("%s: request firmware %s failed\n", __func__,
5586 +                      TMU_FIRMWARE_FILENAME);
5587 +               rc = -ETIMEDOUT;
5588 +               goto err1;
5589 +       }
5590 +
5591 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5592 +       util_fw_name = UTIL_FIRMWARE_FILENAME;
5593 +
5594 +       if (request_firmware(&util_fw, util_fw_name, pfe->dev)) {
5595 +               pr_err("%s: request firmware %s failed\n", __func__,
5596 +                      util_fw_name);
5597 +               rc = -ETIMEDOUT;
5598 +               goto err2;
5599 +       }
5600 +#endif
5601 +       rc = pfe_load_elf(CLASS_MASK, class_fw, pfe);
5602 +       if (rc < 0) {
5603 +               pr_err("%s: class firmware load failed\n", __func__);
5604 +               goto err3;
5605 +       }
5606 +
5607 +#if defined(CFG_DIAGS)
5608 +       rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info);
5609 +       if (rc < 0) {
5610 +               pr_warn("PFE diags won't be available for class PEs\n");
5612 +               rc = 0;
5613 +       }
5614 +#endif
5615 +
5616 +       rc = pfe_load_elf(TMU_MASK, tmu_fw, pfe);
5617 +       if (rc < 0) {
5618 +               pr_err("%s: tmu firmware load failed\n", __func__);
5619 +               goto err3;
5620 +       }
5621 +
5622 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5623 +       rc = pfe_load_elf(UTIL_MASK, util_fw, pfe);
5624 +       if (rc < 0) {
5625 +               pr_err("%s: util firmware load failed\n", __func__);
5626 +               goto err3;
5627 +       }
5628 +
5629 +#if defined(CFG_DIAGS)
5630 +       rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info);
5631 +       if (rc < 0) {
5632 +               pr_warn("PFE diags won't be available for util PE\n");
5634 +               rc = 0;
5635 +       }
5636 +#endif
5637 +
5638 +       util_enable();
5639 +#endif
5640 +
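+       /* All images are loaded; bring the TMU and class PEs out of reset */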
5641 +       tmu_enable(0xf);
5642 +       class_enable();
5643 +
5644 +err3:
5645 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5646 +       release_firmware(util_fw);
5647 +
5648 +err2:
5649 +#endif
5650 +       release_firmware(tmu_fw);
5651 +
5652 +err1:
5653 +       release_firmware(class_fw);
5654 +
5655 +err0:
5656 +       return rc;
5657 +}
5658 +
5659 +/* PFE firmware cleanup
5660 + * Puts the PEs back in reset
5661 + */
5664 +void pfe_firmware_exit(struct pfe *pfe)
5665 +{
5666 +       pr_info("%s\n", __func__);
5667 +
5668 +       if (pe_reset_all(&pfe->ctrl) != 0)
5669 +               pr_err("Error: Failed to stop PEs, PFE reload may not work correctly\n");
5670 +
5671 +       class_disable();
5672 +       tmu_disable(0xf);
5673 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5674 +       util_disable();
5675 +#endif
5676 +}
5677 --- /dev/null
5678 +++ b/drivers/staging/fsl_ppfe/pfe_firmware.h
5679 @@ -0,0 +1,20 @@
5680 +/* SPDX-License-Identifier: GPL-2.0+ */
5681 +/*
5682 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5683 + * Copyright 2017 NXP
5684 + */
5685 +
5686 +#ifndef _PFE_FIRMWARE_H_
5687 +#define _PFE_FIRMWARE_H_
5688 +
5689 +#define CLASS_FIRMWARE_FILENAME                "ppfe_class_ls1012a.elf"
5690 +#define TMU_FIRMWARE_FILENAME          "ppfe_tmu_ls1012a.elf"
5691 +
5692 +#define PFE_FW_CHECK_PASS              0
5693 +#define PFE_FW_CHECK_FAIL              1
5694 +#define NUM_PFE_FW             3
5695 +
5696 +int pfe_firmware_init(struct pfe *pfe);
5697 +void pfe_firmware_exit(struct pfe *pfe);
5698 +
5699 +#endif /* _PFE_FIRMWARE_H_ */
5700 --- /dev/null
5701 +++ b/drivers/staging/fsl_ppfe/pfe_hal.c
5702 @@ -0,0 +1,1516 @@
5703 +// SPDX-License-Identifier: GPL-2.0+
5704 +/*
5705 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5706 + * Copyright 2017 NXP
5707 + */
5708 +
5709 +#include "pfe_mod.h"
5710 +#include "pfe/pfe.h"
5711 +
5712 +/* A-010897: Jumbo frame is not supported */
5713 +extern bool pfe_errata_a010897;
5714 +
5715 +#define PFE_RCR_MAX_FL_MASK    0xC000FFFF
5716 +
5717 +void *cbus_base_addr;
5718 +void *ddr_base_addr;
5719 +unsigned long ddr_phys_base_addr;
5720 +unsigned int ddr_size;
5721 +
5722 +static struct pe_info pe[MAX_PE];
5723 +
5724 +/* Initializes the PFE library.
5725 + * Must be called before using any of the library functions.
5726 + *
5727 + * @param[in] cbus_base                CBUS virtual base address (as mapped in
5728 + * the host CPU address space)
5729 + * @param[in] ddr_base         PFE DDR range virtual base address (as
5730 + * mapped in the host CPU address space)
5731 + * @param[in] ddr_phys_base    PFE DDR range physical base address (as
5732 + * mapped in platform)
5733 + * @param[in] size             PFE DDR range size (as defined by the host
5734 + * software)
5735 + */
5736 +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
5737 +                 unsigned int size)
5738 +{
5739 +       cbus_base_addr = cbus_base;
5740 +       ddr_base_addr = ddr_base;
5741 +       ddr_phys_base_addr = ddr_phys_base;
5742 +       ddr_size = size;
5743 +
5744 +       pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
5745 +       pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
5746 +       pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
5747 +       pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5748 +       pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5749 +       pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5750 +
5751 +       pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
5752 +       pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
5753 +       pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
5754 +       pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5755 +       pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5756 +       pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5757 +
5758 +       pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
5759 +       pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
5760 +       pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
5761 +       pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5762 +       pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5763 +       pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5764 +
5765 +       pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
5766 +       pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
5767 +       pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
5768 +       pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5769 +       pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5770 +       pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5771 +
5772 +       pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
5773 +       pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
5774 +       pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
5775 +       pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5776 +       pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5777 +       pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5778 +
5779 +       pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
5780 +       pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
5781 +       pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
5782 +       pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5783 +       pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5784 +       pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5785 +
5786 +       pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
5787 +       pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
5788 +       pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
5789 +       pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5790 +       pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5791 +       pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5792 +
5793 +       pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
5794 +       pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
5795 +       pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
5796 +       pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5797 +       pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5798 +       pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5799 +
5800 +       pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
5801 +       pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
5802 +       pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
5803 +       pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5804 +       pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5805 +       pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5806 +
5807 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5808 +       pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
5809 +       pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
5810 +       pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
5811 +       pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
5812 +#endif
5813 +}
5814 +
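+/*
+ * Illustrative sketch (not compiled): how a probe path could hand the
+ * mapped CBUS/DDR windows to pfe_lib_init(). The 16MB CBUS aperture and
+ * the physical addresses taken as parameters are assumptions for the
+ * example, not values taken from this patch.
+ */
+#if 0
+static int example_lib_init(phys_addr_t cbus_phys, phys_addr_t ddr_phys,
+			    unsigned int ddr_size)
+{
+	/* Map the apertures first (normally from platform resources) */
+	void *cbus = ioremap(cbus_phys, 16 * SZ_1M);	/* assumed size */
+	void *ddr = ioremap(ddr_phys, ddr_size);
+
+	if (!cbus || !ddr)
+		return -ENOMEM;
+
+	/* All later pe_xxx/class_xxx/tmu_xxx helpers use these globals */
+	pfe_lib_init(cbus, ddr, ddr_phys, ddr_size);
+	return 0;
+}
+#endif
+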
5815 +/* Writes a buffer to PE internal memory from the host
5816 + * through indirect access registers.
5817 + *
5818 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
5819 + * ..., UTIL_ID)
5820 + * @param[in] mem_access_addr  PE internal memory destination address
5821 + * (must be 32bit aligned)
5822 + * @param[in] src              Buffer source address
5823 + * @param[in] len              Number of bytes to copy
5824 + */
5825 +void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src, unsigned
5826 +int len)
5827 +{
5828 +       u32 offset = 0, val, addr;
5829 +       unsigned int len32 = len >> 2;
5830 +       int i;
5831 +
5832 +       addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
5833 +               PE_MEM_ACCESS_BYTE_ENABLE(0, 4);
5834 +
5835 +       for (i = 0; i < len32; i++, offset += 4, src += 4) {
5836 +               val = *(u32 *)src;
5837 +               writel(cpu_to_be32(val), pe[id].mem_access_wdata);
5838 +               writel(addr + offset, pe[id].mem_access_addr);
5839 +       }
5840 +
5841 +       len = (len & 0x3);
5842 +       if (len) {
5843 +               val = 0;
5844 +
5845 +               addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
5846 +                       PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;
5847 +
5848 +               for (i = 0; i < len; i++, src++)
5849 +                       val |= (*(u8 *)src) << (8 * i);
5850 +
5851 +               writel(cpu_to_be32(val), pe[id].mem_access_wdata);
5852 +               writel(addr, pe[id].mem_access_addr);
5853 +       }
5854 +}
5855 +
5856 +/* Writes a buffer to PE internal data memory (DMEM) from the host
5857 + * through indirect access registers.
5858 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
5859 + * ..., UTIL_ID)
5860 + * @param[in] dst              DMEM destination address (must be 32bit
5861 + * aligned)
5862 + * @param[in] src              Buffer source address
5863 + * @param[in] len              Number of bytes to copy
5864 + */
5865 +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
5866 +{
5867 +       pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst |
5868 +                               PE_MEM_ACCESS_DMEM, src, len);
5869 +}
5870 +
5871 +/* Writes a buffer to PE internal program memory (PMEM) from the host
5872 + * through indirect access registers.
5873 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
5874 + * ..., TMU3_ID)
5875 + * @param[in] dst              PMEM destination address (must be 32bit
5876 + * aligned)
5877 + * @param[in] src              Buffer source address
5878 + * @param[in] len              Number of bytes to copy
5879 + */
5880 +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
5881 +{
5882 +       pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
5883 +                               - 1)) | PE_MEM_ACCESS_IMEM, src, len);
5884 +}
5885 +
5886 +/* Reads PE internal program memory (IMEM) from the host
5887 + * through indirect access registers.
5888 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
5889 + * ..., TMU3_ID)
5890 + * @param[in] addr             PMEM read address (must be aligned on size)
5891 + * @param[in] size             Number of bytes to read (maximum 4, must not
5892 + * cross 32bit boundaries)
5893 + * @return                     the data read (in PE endianness, i.e BE).
5894 + */
5895 +u32 pe_pmem_read(int id, u32 addr, u8 size)
5896 +{
5897 +       u32 offset = addr & 0x3;
5898 +       u32 mask = 0xffffffff >> ((4 - size) << 3);
5899 +       u32 val;
5900 +
5901 +       addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
5902 +               | PE_MEM_ACCESS_IMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5903 +
5904 +       writel(addr, pe[id].mem_access_addr);
5905 +       val = be32_to_cpu(readl(pe[id].mem_access_rdata));
5906 +
5907 +       return (val >> (offset << 3)) & mask;
5908 +}
5909 +
5910 +/* Writes PE internal data memory (DMEM) from the host
5911 + * through indirect access registers.
5912 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
5913 + * ..., UTIL_ID)
5914 + * @param[in] addr             DMEM write address (must be aligned on size)
5915 + * @param[in] val              Value to write (in PE endianness, i.e BE)
5916 + * @param[in] size             Number of bytes to write (maximum 4, must not
5917 + * cross 32bit boundaries)
5918 + */
5919 +void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
5920 +{
5921 +       u32 offset = addr & 0x3;
5922 +
5923 +       addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
5924 +               PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5925 +
5926 +       /* Indirect access interface is byte swapping data being written */
5927 +       writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
5928 +       writel(addr, pe[id].mem_access_addr);
5929 +}
5930 +
5931 +/* Reads PE internal data memory (DMEM) from the host
5932 + * through indirect access registers.
5933 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
5934 + * ..., UTIL_ID)
5935 + * @param[in] addr             DMEM read address (must be aligned on size)
5936 + * @param[in] size             Number of bytes to read (maximum 4, must not
5937 + * cross 32bit boundaries)
5938 + * @return                     the data read (in PE endianness, i.e BE).
5939 + */
5940 +u32 pe_dmem_read(int id, u32 addr, u8 size)
5941 +{
5942 +       u32 offset = addr & 0x3;
5943 +       u32 mask = 0xffffffff >> ((4 - size) << 3);
5944 +       u32 val;
5945 +
5946 +       addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_DMEM |
5947 +                       PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5948 +
5949 +       writel(addr, pe[id].mem_access_addr);
5950 +
5951 +       /* Indirect access interface is byte swapping data being read */
5952 +       val = be32_to_cpu(readl(pe[id].mem_access_rdata));
5953 +
5954 +       return (val >> (offset << 3)) & mask;
5955 +}
5956 +
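+/*
+ * Illustrative sketch (not compiled): a host-side read-modify-write of
+ * one 32bit DMEM word using the helpers above. EXAMPLE_DMEM_OFF is a
+ * made-up offset, not a real firmware symbol.
+ */
+#if 0
+#define EXAMPLE_DMEM_OFF	0x20	/* hypothetical, 32bit aligned */
+
+static void example_dmem_set_bits(int id, u32 bits)
+{
+	/* The helpers exchange data in PE (big) endianness, convert both ways */
+	u32 val = be32_to_cpu(pe_dmem_read(id, EXAMPLE_DMEM_OFF, 4));
+
+	val |= bits;
+	pe_dmem_write(id, cpu_to_be32(val), EXAMPLE_DMEM_OFF, 4);
+}
+#endif
+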
5957 +/* Writes to CLASS internal bus peripherals (ccu, pe-lem) from the host
5958 + * through indirect access registers.
5959 + *
5960 + * @param[in]  val     value to write
5961 + * @param[in]  addr    Address to write to (must be aligned on size)
5962 + * @param[in]  size    Number of bytes to write (1, 2 or 4)
5963 + *
5964 + */
5965 +void class_bus_write(u32 val, u32 addr, u8 size)
5966 +{
5967 +       u32 offset = addr & 0x3;
5968 +
5969 +       writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
5970 +
5971 +       addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
5972 +                       (size << 24);
5973 +
5974 +       writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
5975 +       writel(addr, CLASS_BUS_ACCESS_ADDR);
5976 +}
5977 +
5978 +/* Reads from CLASS internal bus peripherals (ccu, pe-lem) from the host
5979 + * through indirect access registers.
5980 + * @param[in] addr     Address to read from (must be aligned on size)
5981 + * @param[in] size     Number of bytes to read (1, 2 or 4)
5982 + * @return             the read data
5983 + *
5984 + */
5985 +u32 class_bus_read(u32 addr, u8 size)
5986 +{
5987 +       u32 offset = addr & 0x3;
5988 +       u32 mask = 0xffffffff >> ((4 - size) << 3);
5989 +       u32 val;
5990 +
5991 +       writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
5992 +
5993 +       addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);
5994 +
5995 +       writel(addr, CLASS_BUS_ACCESS_ADDR);
5996 +       val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));
5997 +
5998 +       return (val >> (offset << 3)) & mask;
5999 +}
6000 +
6001 +/* Writes data to the cluster memory (PE_LMEM)
6002 + * @param[in] dst      PE LMEM destination address (must be 32bit aligned)
6003 + * @param[in] src      Buffer source address
6004 + * @param[in] len      Number of bytes to copy
6005 + */
6006 +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len)
6007 +{
6008 +       u32 len32 = len >> 2;
6009 +       int i;
6010 +
6011 +       for (i = 0; i < len32; i++, src += 4, dst += 4)
6012 +               class_bus_write(*(u32 *)src, dst, 4);
6013 +
6014 +       if (len & 0x2) {
6015 +               class_bus_write(*(u16 *)src, dst, 2);
6016 +               src += 2;
6017 +               dst += 2;
6018 +       }
6019 +
6020 +       if (len & 0x1) {
6021 +               class_bus_write(*(u8 *)src, dst, 1);
6022 +               src++;
6023 +               dst++;
6024 +       }
6025 +}
6026 +
6027 +/* Writes value to the cluster memory (PE_LMEM)
6028 + * @param[in] dst      PE LMEM destination address (must be 32bit aligned)
6029 + * @param[in] val      Value to write
6030 + * @param[in] len      Number of bytes to write
6031 + */
6032 +void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
6033 +{
6034 +       u32 len32 = len >> 2;
6035 +       int i;
6036 +
6037 +       val = val | (val << 8) | (val << 16) | (val << 24);
6038 +
6039 +       for (i = 0; i < len32; i++, dst += 4)
6040 +               class_bus_write(val, dst, 4);
6041 +
6042 +       if (len & 0x2) {
6043 +               class_bus_write(val, dst, 2);
6044 +               dst += 2;
6045 +       }
6046 +
6047 +       if (len & 0x1) {
6048 +               class_bus_write(val, dst, 1);
6049 +               dst++;
6050 +       }
6051 +}
6052 +
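+/*
+ * Illustrative sketch (not compiled): clearing and seeding a block in the
+ * cluster memory with the two helpers above. Offset, size and layout are
+ * made up for the example; real offsets come from the class PE firmware
+ * memory map.
+ */
+#if 0
+static void example_seed_pe_lmem(void)
+{
+	const u32 seed[4] = { 0x1, 0x2, 0x3, 0x4 };	/* hypothetical */
+
+	class_pe_lmem_memset(0x840, 0, 64);		/* hypothetical */
+	class_pe_lmem_memcpy_to32(0x840, seed, sizeof(seed));
+}
+#endif
+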
6053 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
6054 +
6055 +/* Writes UTIL program memory (DDR) from the host.
6056 + *
6057 + * @param[in] val      Value to write (in PE endianness, i.e BE)
6058 + * @param[in] addr     Address to write (virtual, must be aligned on size)
6059 + * @param[in] size             Number of bytes to write (2 or 4)
6060 + */
6061 +static void util_pmem_write(u32 val, void *addr, u8 size)
6062 +{
6063 +       void *addr64 = (void *)((unsigned long)addr & ~0x7);
6064 +       unsigned long off = 8 - ((unsigned long)addr & 0x7) - size;
6065 +
6066 +       /*
6067 +        * IMEM should be loaded as a 64bit swapped value in a 64bit aligned
6068 +        * location
6069 +        */
6070 +       if (size == 4)
6071 +               writel(be32_to_cpu(val), addr64 + off);
6072 +       else
6073 +               writew(be16_to_cpu((u16)val), addr64 + off);
6074 +}
6075 +
6076 +/* Writes a buffer to UTIL program memory (DDR) from the host.
6077 + *
6078 + * @param[in] dst      Address to write (virtual, must be at least 16bit
6079 + * aligned)
6080 + * @param[in] src      Buffer to write (in PE endianness, i.e BE, must have
6081 + * same alignment as dst)
6082 + * @param[in] len      Number of bytes to write (must be at least 16bit
6083 + * aligned)
6084 + */
6085 +static void util_pmem_memcpy(void *dst, const void *src, unsigned int len)
6086 +{
6087 +       unsigned int len32;
6088 +       int i;
6089 +
6090 +       if ((unsigned long)src & 0x2) {
6091 +               util_pmem_write(*(u16 *)src, dst, 2);
6092 +               src += 2;
6093 +               dst += 2;
6094 +               len -= 2;
6095 +       }
6096 +
6097 +       len32 = len >> 2;
6098 +
6099 +       for (i = 0; i < len32; i++, dst += 4, src += 4)
6100 +               util_pmem_write(*(u32 *)src, dst, 4);
6101 +
6102 +       if (len & 0x2)
6103 +               util_pmem_write(*(u16 *)src, dst, len & 0x2);
6104 +}
6105 +#endif
6106 +
6107 +/* Loads an elf section into pmem
6108 + * Code needs to be at least 16bit aligned and only PROGBITS sections are
6109 + * supported
6110 + *
6111 + * @param[in] id       PE identification (CLASS0_ID, ..., TMU0_ID, ...,
6112 + * TMU3_ID)
6113 + * @param[in] data     pointer to the elf firmware
6114 + * @param[in] shdr     pointer to the elf section header
6115 + *
6116 + */
6117 +static int pe_load_pmem_section(int id, const void *data,
6118 +                               struct elf32_shdr *shdr)
6119 +{
6120 +       u32 offset = be32_to_cpu(shdr->sh_offset);
6121 +       u32 addr = be32_to_cpu(shdr->sh_addr);
6122 +       u32 size = be32_to_cpu(shdr->sh_size);
6123 +       u32 type = be32_to_cpu(shdr->sh_type);
6124 +
6125 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
6126 +       if (id == UTIL_ID) {
6127 +               pr_err("%s: unsupported pmem section for UTIL\n",
6128 +                      __func__);
6129 +               return -EINVAL;
6130 +       }
6131 +#endif
6132 +
6133 +       if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
6134 +               pr_err(
6135 +                       "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
6136 +                       , __func__, addr, (unsigned long)data + offset);
6137 +
6138 +               return -EINVAL;
6139 +       }
6140 +
6141 +       if (addr & 0x1) {
6142 +               pr_err("%s: load address(%x) is not 16bit aligned\n",
6143 +                      __func__, addr);
6144 +               return -EINVAL;
6145 +       }
6146 +
6147 +       if (size & 0x1) {
6148 +               pr_err("%s: load size(%x) is not 16bit aligned\n",
6149 +                      __func__, size);
6150 +               return -EINVAL;
6151 +       }
6152 +
6153 +       switch (type) {
6154 +       case SHT_PROGBITS:
6155 +               pe_pmem_memcpy_to32(id, addr, data + offset, size);
6156 +
6157 +               break;
6158 +
6159 +       default:
6160 +               pr_err("%s: unsupported section type(%x)\n", __func__,
6161 +                      type);
6162 +               return -EINVAL;
6163 +       }
6164 +
6165 +       return 0;
6166 +}
6167 +
6168 +/* Loads an elf section into dmem
6169 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
6170 + * initialized to 0
6171 + *
6172 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
6173 + * ..., UTIL_ID)
6174 + * @param[in] data             pointer to the elf firmware
6175 + * @param[in] shdr             pointer to the elf section header
6176 + *
6177 + */
6178 +static int pe_load_dmem_section(int id, const void *data,
6179 +                               struct elf32_shdr *shdr)
6180 +{
6181 +       u32 offset = be32_to_cpu(shdr->sh_offset);
6182 +       u32 addr = be32_to_cpu(shdr->sh_addr);
6183 +       u32 size = be32_to_cpu(shdr->sh_size);
6184 +       u32 type = be32_to_cpu(shdr->sh_type);
6185 +       u32 size32 = size >> 2;
6186 +       int i;
6187 +
6188 +       if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
6189 +               pr_err(
6190 +                       "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
6191 +                       __func__, addr, (unsigned long)data + offset);
6192 +
6193 +               return -EINVAL;
6194 +       }
6195 +
6196 +       if (addr & 0x3) {
6197 +               pr_err("%s: load address(%x) is not 32bit aligned\n",
6198 +                      __func__, addr);
6199 +               return -EINVAL;
6200 +       }
6201 +
6202 +       switch (type) {
6203 +       case SHT_PROGBITS:
6204 +               pe_dmem_memcpy_to32(id, addr, data + offset, size);
6205 +               break;
6206 +
6207 +       case SHT_NOBITS:
6208 +               for (i = 0; i < size32; i++, addr += 4)
6209 +                       pe_dmem_write(id, 0, addr, 4);
6210 +
6211 +               if (size & 0x3)
6212 +                       pe_dmem_write(id, 0, addr, size & 0x3);
6213 +
6214 +               break;
6215 +
6216 +       default:
6217 +               pr_err("%s: unsupported section type(%x)\n", __func__,
6218 +                      type);
6219 +               return -EINVAL;
6220 +       }
6221 +
6222 +       return 0;
6223 +}
6224 +
6225 +/* Loads an elf section into DDR
6226 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
6227 + * initialized to 0
6228 + *
6229 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
6230 + * ..., UTIL_ID)
6231 + * @param[in] data             pointer to the elf firmware
6232 + * @param[in] shdr             pointer to the elf section header
6233 + *
6234 + */
6235 +static int pe_load_ddr_section(int id, const void *data,
6236 +                              struct elf32_shdr *shdr,
6237 +                              struct device *dev) {
6238 +       u32 offset = be32_to_cpu(shdr->sh_offset);
6239 +       u32 addr = be32_to_cpu(shdr->sh_addr);
6240 +       u32 size = be32_to_cpu(shdr->sh_size);
6241 +       u32 type = be32_to_cpu(shdr->sh_type);
6242 +       u32 flags = be32_to_cpu(shdr->sh_flags);
6243 +
6244 +       switch (type) {
6245 +       case SHT_PROGBITS:
6246 +               if (flags & SHF_EXECINSTR) {
6247 +                       if (id <= CLASS_MAX_ID) {
6248 +                               /* DO the loading only once in DDR */
6249 +                               if (id == CLASS0_ID) {
6250 +                                       pr_err(
6251 +                                               "%s: load address(%x) and elf file address(%lx) rcvd\n",
6252 +                                               __func__, addr,
6253 +                                               (unsigned long)data + offset);
6254 +                                       if (((unsigned long)(data + offset)
6255 +                                               & 0x3) != (addr & 0x3)) {
6256 +                                               pr_err(
6257 +                                                       "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
6258 +                                                       , __func__, addr,
6259 +                                               (unsigned long)data + offset);
6260 +
6261 +                                               return -EINVAL;
6262 +                                       }
6263 +
6264 +                                       if (addr & 0x1) {
6265 +                                               pr_err(
6266 +                                                       "%s: load address(%x) is not 16bit aligned\n"
6267 +                                                       , __func__, addr);
6268 +                                               return -EINVAL;
6269 +                                       }
6270 +
6271 +                                       if (size & 0x1) {
6272 +                                               pr_err(
6273 +                                                       "%s: load length(%x) is not 16bit aligned\n"
6274 +                                                       , __func__, size);
6275 +                                               return -EINVAL;
6276 +                                       }
6277 +                                       memcpy(DDR_PHYS_TO_VIRT(
6278 +                                               DDR_PFE_TO_PHYS(addr)),
6279 +                                               data + offset, size);
6280 +                               }
6281 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
6282 +                       } else if (id == UTIL_ID) {
6283 +                               if (((unsigned long)(data + offset) & 0x3)
6284 +                                       != (addr & 0x3)) {
6285 +                                       pr_err(
6286 +                                               "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
6287 +                                               , __func__, addr,
6288 +                                               (unsigned long)data + offset);
6289 +
6290 +                                       return -EINVAL;
6291 +                               }
6292 +
6293 +                               if (addr & 0x1) {
6294 +                                       pr_err(
6295 +                                               "%s: load address(%x) is not 16bit aligned\n"
6296 +                                               , __func__, addr);
6297 +                                       return -EINVAL;
6298 +                               }
6299 +
6300 +                               if (size & 0x1) {
6301 +                                       pr_err(
6302 +                                               "%s: load length(%x) is not 16bit aligned\n"
6303 +                                               , __func__, size);
6304 +                                       return -EINVAL;
6305 +                               }
6306 +
6307 +                               util_pmem_memcpy(DDR_PHYS_TO_VIRT(
6308 +                                                       DDR_PFE_TO_PHYS(addr)),
6309 +                                                       data + offset, size);
6310 +                       }
6311 +#endif
6312 +                       } else {
6313 +                               pr_err(
6314 +                                       "%s: unsupported ddr section type(%x) for PE(%d)\n"
6315 +                                               , __func__, type, id);
6316 +                               return -EINVAL;
6317 +                       }
6318 +
6319 +               } else {
6320 +                       memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data
6321 +                               + offset, size);
6322 +               }
6323 +
6324 +               break;
6325 +
6326 +       case SHT_NOBITS:
6327 +               memset(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), 0, size);
6328 +
6329 +               break;
6330 +
6331 +       default:
6332 +               pr_err("%s: unsupported section type(%x)\n", __func__,
6333 +                      type);
6334 +               return -EINVAL;
6335 +       }
6336 +
6337 +       return 0;
6338 +}
6339 +
6340 +/* Loads an elf section into pe lmem
6341 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
6342 + * initialized to 0
6343 + *
6344 + * @param[in] id               PE identification (CLASS0_ID,..., CLASS5_ID)
6345 + * @param[in] data             pointer to the elf firmware
6346 + * @param[in] shdr             pointer to the elf section header
6347 + *
6348 + */
6349 +static int pe_load_pe_lmem_section(int id, const void *data,
6350 +                                  struct elf32_shdr *shdr)
6351 +{
6352 +       u32 offset = be32_to_cpu(shdr->sh_offset);
6353 +       u32 addr = be32_to_cpu(shdr->sh_addr);
6354 +       u32 size = be32_to_cpu(shdr->sh_size);
6355 +       u32 type = be32_to_cpu(shdr->sh_type);
6356 +
6357 +       if (id > CLASS_MAX_ID) {
6358 +               pr_err(
6359 +                       "%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
6360 +                        __func__, type, id);
6361 +               return -EINVAL;
6362 +       }
6363 +
6364 +       if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
6365 +               pr_err(
6366 +                       "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
6367 +                       __func__, addr, (unsigned long)data + offset);
6368 +
6369 +               return -EINVAL;
6370 +       }
6371 +
6372 +       if (addr & 0x3) {
6373 +               pr_err("%s: load address(%x) is not 32bit aligned\n",
6374 +                      __func__, addr);
6375 +               return -EINVAL;
6376 +       }
6377 +
6378 +       switch (type) {
6379 +       case SHT_PROGBITS:
6380 +               class_pe_lmem_memcpy_to32(addr, data + offset, size);
6381 +               break;
6382 +
6383 +       case SHT_NOBITS:
6384 +               class_pe_lmem_memset(addr, 0, size);
6385 +               break;
6386 +
6387 +       default:
6388 +               pr_err("%s: unsupported section type(%x)\n", __func__,
6389 +                      type);
6390 +               return -EINVAL;
6391 +       }
6392 +
6393 +       return 0;
6394 +}
6395 +
6396 +/* Loads an elf section into a PE
6397 + * For now only supports loading a section to dmem (all PEs), pmem (class and
6398 + * tmu PEs),
6399 + * or DDR (util PE code)
6400 + *
6401 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
6402 + * ..., UTIL_ID)
6403 + * @param[in] data             pointer to the elf firmware
6404 + * @param[in] shdr             pointer to the elf section header
6405 + *
6406 + */
6407 +int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
6408 +                       struct device *dev) {
6409 +       u32 addr = be32_to_cpu(shdr->sh_addr);
6410 +       u32 size = be32_to_cpu(shdr->sh_size);
6411 +
6412 +       if (IS_DMEM(addr, size))
6413 +               return pe_load_dmem_section(id, data, shdr);
6414 +       else if (IS_PMEM(addr, size))
6415 +               return pe_load_pmem_section(id, data, shdr);
6416 +       else if (IS_PFE_LMEM(addr, size))
6417 +               return 0;
6418 +       else if (IS_PHYS_DDR(addr, size))
6419 +               return pe_load_ddr_section(id, data, shdr, dev);
6420 +       else if (IS_PE_LMEM(addr, size))
6421 +               return pe_load_pe_lmem_section(id, data, shdr);
6422 +
6423 +       pr_err("%s: unsupported memory range(%x)\n", __func__,
6424 +              addr);
6425 +       return 0;
6426 +}
6427 +
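+/*
+ * Illustrative sketch (not compiled) of the loader flow: walk the section
+ * headers of the big-endian PE ELF image and feed each one to
+ * pe_load_elf_section(). This restates what pfe_load_elf() in
+ * pfe_firmware.c does in simplified form; the SHF_ALLOC filter is an
+ * assumption of the sketch.
+ */
+#if 0
+static int example_load_all_sections(int id, const struct firmware *fw,
+				     struct device *dev)
+{
+	struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data;
+	struct elf32_shdr *shdr = (struct elf32_shdr *)(fw->data +
+						be32_to_cpu(ehdr->e_shoff));
+	int i, rc;
+
+	for (i = 0; i < be16_to_cpu(ehdr->e_shnum); i++, shdr++) {
+		/* Skip sections that occupy no memory at run time */
+		if (!(be32_to_cpu(shdr->sh_flags) & SHF_ALLOC))
+			continue;
+
+		rc = pe_load_elf_section(id, fw->data, shdr, dev);
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+#endif
+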
6428 +/**************************** BMU ***************************/
6429 +
6430 +/* Initializes a BMU block.
6431 + * @param[in] base     BMU block base address
6432 + * @param[in] cfg      BMU configuration
6433 + */
6434 +void bmu_init(void *base, struct BMU_CFG *cfg)
6435 +{
6436 +       bmu_disable(base);
6437 +
6438 +       bmu_set_config(base, cfg);
6439 +
6440 +       bmu_reset(base);
6441 +}
6442 +
6443 +/* Resets a BMU block.
6444 + * @param[in] base     BMU block base address
6445 + */
6446 +void bmu_reset(void *base)
6447 +{
6448 +       writel(CORE_SW_RESET, base + BMU_CTRL);
6449 +
6450 +       /* Wait for self clear */
6451 +       while (readl(base + BMU_CTRL) & CORE_SW_RESET)
6452 +               ;
6453 +}
6454 +
6455 +/* Enables a BMU block.
6456 + * @param[in] base     BMU block base address
6457 + */
6458 +void bmu_enable(void *base)
6459 +{
6460 +       writel(CORE_ENABLE, base + BMU_CTRL);
6461 +}
6462 +
6463 +/* Disables a BMU block.
6464 + * @param[in] base     BMU block base address
6465 + */
6466 +void bmu_disable(void *base)
6467 +{
6468 +       writel(CORE_DISABLE, base + BMU_CTRL);
6469 +}
6470 +
6471 +/* Sets the configuration of a BMU block.
6472 + * @param[in] base     BMU block base address
6473 + * @param[in] cfg      BMU configuration
6474 + */
6475 +void bmu_set_config(void *base, struct BMU_CFG *cfg)
6476 +{
6477 +       writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
6478 +       writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
6479 +       writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);
6480 +
6481 +       /* Interrupts are never used */
6482 +       writel(cfg->low_watermark, base + BMU_LOW_WATERMARK);
6483 +       writel(cfg->high_watermark, base + BMU_HIGH_WATERMARK);
6484 +       writel(0x0, base + BMU_INT_ENABLE);
6485 +}
6486 +
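+/*
+ * Illustrative sketch (not compiled): bringing up BMU1 for LMEM buffers
+ * with the helpers above. Buffer count, size and watermarks are
+ * placeholder numbers, not the values the driver really programs (those
+ * live in pfe_hw.c).
+ */
+#if 0
+static void example_bmu1_setup(void)
+{
+	struct BMU_CFG cfg = {
+		.baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR),	/* assumed */
+		.count = 256,			/* hypothetical */
+		.size = 128,			/* hypothetical */
+		.low_watermark = 10,		/* hypothetical */
+		.high_watermark = 15,		/* hypothetical */
+	};
+
+	bmu_init(BMU1_BASE_ADDR, &cfg);
+	bmu_enable(BMU1_BASE_ADDR);
+}
+#endif
+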
6487 +/**************************** MTIP GEMAC ***************************/
6488 +
6489 +/* Enable Rx Checksum Engine. With this enabled, frames with bad IP,
6490 + *   TCP or UDP checksums are discarded
6491 + *
6492 + * @param[in] base     GEMAC base address.
6493 + */
6494 +void gemac_enable_rx_checksum_offload(void *base)
6495 +{
6496 +       /* No configuration found to do this */
6497 +}
6498 +
6499 +/* Disable Rx Checksum Engine.
6500 + *
6501 + * @param[in] base     GEMAC base address.
6502 + */
6503 +void gemac_disable_rx_checksum_offload(void *base)
6504 +{
6505 +       /* No configuration found to do this */
6506 +}
6507 +
6508 +/* GEMAC set speed.
6509 + * @param[in] base     GEMAC base address
6510 + * @param[in] speed    GEMAC speed (10, 100 or 1000 Mbps)
6511 + */
6512 +void gemac_set_speed(void *base, enum mac_speed gem_speed)
6513 +{
6514 +       u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
6515 +       u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;
6516 +
6517 +       switch (gem_speed) {
6518 +       case SPEED_10M:
6519 +                       rcr |= EMAC_RCNTRL_RMII_10T;
6520 +                       break;
6521 +
6522 +       case SPEED_1000M:
6523 +                       ecr |= EMAC_ECNTRL_SPEED;
6524 +                       break;
6525 +
6526 +       case SPEED_100M:
6527 +       default:
6528 +                       /*It is in 100M mode */
6529 +                       break;
6530 +       }
6531 +       writel(ecr, (base + EMAC_ECNTRL_REG));
6532 +       writel(rcr, (base + EMAC_RCNTRL_REG));
6533 +}
6534 +
6535 +/* GEMAC set duplex.
6536 + * @param[in] base     GEMAC base address
6537 + * @param[in] duplex   GEMAC duplex mode (Full, Half)
6538 + */
6539 +void gemac_set_duplex(void *base, int duplex)
6540 +{
6541 +       if (duplex == DUPLEX_HALF) {
6542 +               writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base
6543 +                       + EMAC_TCNTRL_REG);
6544 +               writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base
6545 +                       + EMAC_RCNTRL_REG));
6546 +       } else{
6547 +               writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base
6548 +                       + EMAC_TCNTRL_REG);
6549 +               writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base
6550 +                       + EMAC_RCNTRL_REG));
6551 +       }
6552 +}
6553 +
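+/*
+ * Illustrative sketch (not compiled): how a phylib adjust_link callback
+ * might push the negotiated parameters into the MAC with the two helpers
+ * above. The SPEED_xxx mapping is an assumption of the example.
+ */
+#if 0
+static void example_adjust_link(void *base, struct phy_device *phydev)
+{
+	if (phydev->speed == SPEED_1000)
+		gemac_set_speed(base, SPEED_1000M);
+	else if (phydev->speed == SPEED_100)
+		gemac_set_speed(base, SPEED_100M);
+	else
+		gemac_set_speed(base, SPEED_10M);
+
+	/* phylib already reports DUPLEX_HALF/DUPLEX_FULL */
+	gemac_set_duplex(base, phydev->duplex);
+}
+#endif
+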
6554 +/* GEMAC set mode.
6555 + * @param[in] base     GEMAC base address
6556 + * @param[in] mode     GEMAC operation mode (MII, RMII, RGMII, SGMII)
6557 + */
6558 +void gemac_set_mode(void *base, int mode)
6559 +{
6560 +       u32 val = readl(base + EMAC_RCNTRL_REG);
6561 +
6562 +       /* Remove loopback */
6563 +       val &= ~EMAC_RCNTRL_LOOP;
6564 +
6565 +       /* Enable flow control and MII mode and terminate received CRC */
6566 +       val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE | EMAC_RCNTRL_CRC_FWD);
6567 +
6568 +       writel(val, base + EMAC_RCNTRL_REG);
6569 +}
6570 +
6571 +/* GEMAC enable function.
6572 + * @param[in] base     GEMAC base address
6573 + */
6574 +void gemac_enable(void *base)
6575 +{
6576 +       writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base +
6577 +               EMAC_ECNTRL_REG);
6578 +}
6579 +
6580 +/* GEMAC disable function.
6581 + * @param[in] base     GEMAC base address
6582 + */
6583 +void gemac_disable(void *base)
6584 +{
6585 +       writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base +
6586 +               EMAC_ECNTRL_REG);
6587 +}
6588 +
6589 +/* GEMAC TX disable function.
6590 + * @param[in] base     GEMAC base address
6591 + */
6592 +void gemac_tx_disable(void *base)
6593 +{
6594 +       writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base +
6595 +               EMAC_TCNTRL_REG);
6596 +}
6597 +
6598 +void gemac_tx_enable(void *base)
6599 +{
6600 +       writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base +
6601 +                       EMAC_TCNTRL_REG);
6602 +}
6603 +
6604 +/* Sets the hash register of the MAC.
6605 + * This register is used for matching unicast and multicast frames.
6606 + *
6607 + * @param[in] base     GEMAC base address.
6608 + * @param[in] hash     64-bit hash to be configured.
6609 + */
6610 +void gemac_set_hash(void *base, struct pfe_mac_addr *hash)
6611 +{
6612 +       writel(hash->bottom,  base + EMAC_GALR);
6613 +       writel(hash->top, base + EMAC_GAUR);
6614 +}
6615 +
6616 +void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
6617 +                     unsigned int entry_index)
6618 +{
6619 +       if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
6620 +               return;
6621 +
6622 +       entry_index = entry_index - 1;
6623 +       if (entry_index < 1) {
6624 +               writel(htonl(address->bottom),  base + EMAC_PHY_ADDR_LOW);
6625 +               writel((htonl(address->top) | 0x8808), base +
6626 +                       EMAC_PHY_ADDR_HIGH);
6627 +       } else {
6628 +               writel(htonl(address->bottom),  base + ((entry_index - 1) * 8)
6629 +                       + EMAC_SMAC_0_0);
6630 +               writel((htonl(address->top) | 0x8808), base + ((entry_index -
6631 +                       1) * 8) + EMAC_SMAC_0_1);
6632 +       }
6633 +}
6634 +
6635 +void gemac_clear_laddrN(void *base, unsigned int entry_index)
6636 +{
6637 +       if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
6638 +               return;
6639 +
6640 +       entry_index = entry_index - 1;
6641 +       if (entry_index < 1) {
6642 +               writel(0, base + EMAC_PHY_ADDR_LOW);
6643 +               writel(0, base + EMAC_PHY_ADDR_HIGH);
6644 +       } else {
6645 +               writel(0,  base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
6646 +               writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
6647 +       }
6648 +}
6649 +
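+/*
+ * Illustrative sketch (not compiled): packing a 6-byte MAC address into
+ * struct pfe_mac_addr for gemac_set_laddrN(). The bottom/top byte split
+ * is an assumption inferred from the htonl() usage above.
+ */
+#if 0
+static void example_set_station_addr(void *base, const u8 *mac)
+{
+	struct pfe_mac_addr addr = { 0 };
+
+	memcpy(&addr.bottom, mac, 4);		/* bytes 0..3 */
+	memcpy(&addr.top, mac + 4, 2);		/* bytes 4..5 */
+
+	gemac_set_laddrN(base, &addr, 1);	/* entry 1: station address */
+}
+#endif
+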
6650 +/* Set the loopback mode of the MAC.  This can be either no loopback for
6651 + * normal operation, local loopback through MAC internal loopback module or PHY
6652 + *   loopback for external loopback through a PHY.  This asserts the external
6653 + * loop pin.
6654 + *
6655 + * @param[in] base     GEMAC base address.
6656 + * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC
6657 + * Loopback,
6658 + *                     LB_EXT - PHY Loopback.
6659 + */
6660 +void gemac_set_loop(void *base, enum mac_loop gem_loop)
6661 +{
6662 +       pr_info("%s()\n", __func__);
6663 +       writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base +
6664 +               EMAC_RCNTRL_REG));
6665 +}
6666 +
6667 +/* GEMAC copy all frames (promiscuous mode)
6668 + * @param[in] base     GEMAC base address
6669 + */
6670 +void gemac_enable_copy_all(void *base)
6671 +{
6672 +       writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base +
6673 +               EMAC_RCNTRL_REG));
6674 +}
6675 +
6676 +/* GEMAC stop copying all frames (exit promiscuous mode)
6677 + * @param[in] base     GEMAC base address
6678 + */
6679 +void gemac_disable_copy_all(void *base)
6680 +{
6681 +       writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base +
6682 +               EMAC_RCNTRL_REG));
6683 +}
6684 +
6685 +/* GEMAC allow broadcast function.
6686 + * @param[in] base     GEMAC base address
6687 + */
6688 +void gemac_allow_broadcast(void *base)
6689 +{
6690 +       writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base +
6691 +               EMAC_RCNTRL_REG);
6692 +}
6693 +
6694 +/* GEMAC no broadcast function.
6695 + * @param[in] base     GEMAC base address
6696 + */
6697 +void gemac_no_broadcast(void *base)
6698 +{
6699 +       writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base +
6700 +               EMAC_RCNTRL_REG);
6701 +}
6702 +
6703 +/* GEMAC enable 1536 rx function.
6704 + * @param[in]  base    GEMAC base address
6705 + */
6706 +void gemac_enable_1536_rx(void *base)
6707 +{
6708 +       /* Set 1536 as Maximum frame length */
6709 +       writel((readl(base + EMAC_RCNTRL_REG) & PFE_RCR_MAX_FL_MASK)
6710 +               | (1536 << 16), base +  EMAC_RCNTRL_REG);
6711 +}
6712 +
6713 +/* GEMAC set rx Max frame length.
6714 + * @param[in]  base    GEMAC base address
6715 + * @param[in]  mtu     new mtu
6716 + */
6717 +void gemac_set_rx_max_fl(void *base, int mtu)
6718 +{
6719 +       /* Set mtu as Maximum frame length */
6720 +       writel((readl(base + EMAC_RCNTRL_REG) & PFE_RCR_MAX_FL_MASK)
6721 +               | (mtu << 16), base + EMAC_RCNTRL_REG);
6722 +}
6723 +
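+/*
+ * Illustrative sketch (not compiled): the register above holds a maximum
+ * frame length, so an ndo_change_mtu handler adds the L2 overhead before
+ * calling gemac_set_rx_max_fl(). The overhead arithmetic is an assumption
+ * of the example.
+ */
+#if 0
+static void example_change_mtu(void *base, int new_mtu)
+{
+	/* Ethernet header + one VLAN tag + FCS on top of the L3 MTU */
+	int max_fl = new_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
+
+	gemac_set_rx_max_fl(base, max_fl);
+}
+#endif
+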
6724 +/* GEMAC enable stacked vlan function.
6725 + * @param[in]  base    GEMAC base address
6726 + */
6727 +void gemac_enable_stacked_vlan(void *base)
6728 +{
6729 +       /* MTIP doesn't support stacked vlan */
6730 +}
6731 +
6732 +/* GEMAC enable pause rx function.
6733 + * @param[in] base     GEMAC base address
6734 + */
6735 +void gemac_enable_pause_rx(void *base)
6736 +{
6737 +       writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE,
6738 +              base + EMAC_RCNTRL_REG);
6739 +}
6740 +
6741 +/* GEMAC disable pause rx function.
6742 + * @param[in] base     GEMAC base address
6743 + */
6744 +void gemac_disable_pause_rx(void *base)
6745 +{
6746 +       writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE,
6747 +              base + EMAC_RCNTRL_REG);
6748 +}
6749 +
6750 +/* GEMAC enable pause tx function.
6751 + * @param[in] base GEMAC base address
6752 + */
6753 +void gemac_enable_pause_tx(void *base)
6754 +{
6755 +       writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY);
6756 +}
6757 +
6758 +/* GEMAC disable pause tx function.
6759 + * @param[in] base GEMAC base address
6760 + */
6761 +void gemac_disable_pause_tx(void *base)
6762 +{
6763 +       writel(0x0, base + EMAC_RX_SECTION_EMPTY);
6764 +}
6765 +
6766 +/* GEMAC wol configuration
6767 + * @param[in] base     GEMAC base address
6768 + * @param[in] wol_conf WoL register configuration
6769 + */
6770 +void gemac_set_wol(void *base, u32 wol_conf)
6771 +{
6772 +       u32  val = readl(base + EMAC_ECNTRL_REG);
6773 +
6774 +       if (wol_conf)
6775 +               val |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
6776 +       else
6777 +               val &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
6778 +       writel(val, base + EMAC_ECNTRL_REG);
6779 +}
6780 +
6781 +/* Sets Gemac bus width to 64bit
6782 + * @param[in] base       GEMAC base address
6783 + * @param[in] width     gemac bus width to be set, possible values are 32/64/128
6784 + */
6785 +void gemac_set_bus_width(void *base, int width)
6786 +{
6787 +}
6788 +
6789 +/* Sets Gemac configuration.
6790 + * @param[in] base     GEMAC base address
6791 + * @param[in] cfg      GEMAC configuration
6792 + */
6793 +void gemac_set_config(void *base, struct gemac_cfg *cfg)
6794 +{
6795 +       /*GEMAC config taken from VLSI */
6796 +       writel(0x00000004, base + EMAC_TFWR_STR_FWD);
6797 +       writel(0x00000005, base + EMAC_RX_SECTION_FULL);
6798 +
6799 +       if (pfe_errata_a010897)
6800 +               writel(0x0000076c, base + EMAC_TRUNC_FL);
6801 +       else
6802 +               writel(0x00003fff, base + EMAC_TRUNC_FL);
6803 +
6804 +       writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
6805 +       writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);
6806 +
6807 +       gemac_set_mode(base, cfg->mode);
6808 +
6809 +       gemac_set_speed(base, cfg->speed);
6810 +
6811 +       gemac_set_duplex(base, cfg->duplex);
6812 +}
6813 +
6814 +/**************************** GPI ***************************/
6815 +
6816 +/* Initializes a GPI block.
6817 + * @param[in] base     GPI base address
6818 + * @param[in] cfg      GPI configuration
6819 + */
6820 +void gpi_init(void *base, struct gpi_cfg *cfg)
6821 +{
6822 +       gpi_reset(base);
6823 +
6824 +       gpi_disable(base);
6825 +
6826 +       gpi_set_config(base, cfg);
6827 +}
6828 +
6829 +/* Resets a GPI block.
6830 + * @param[in] base     GPI base address
6831 + */
6832 +void gpi_reset(void *base)
6833 +{
6834 +       writel(CORE_SW_RESET, base + GPI_CTRL);
6835 +}
6836 +
6837 +/* Enables a GPI block.
6838 + * @param[in] base     GPI base address
6839 + */
6840 +void gpi_enable(void *base)
6841 +{
6842 +       writel(CORE_ENABLE, base + GPI_CTRL);
6843 +}
6844 +
6845 +/* Disables a GPI block.
6846 + * @param[in] base     GPI base address
6847 + */
6848 +void gpi_disable(void *base)
6849 +{
6850 +       writel(CORE_DISABLE, base + GPI_CTRL);
6851 +}
6852 +
6853 +/* Sets the configuration of a GPI block.
6854 + * @param[in] base     GPI base address
6855 + * @param[in] cfg      GPI configuration
6856 + */
6857 +void gpi_set_config(void *base, struct gpi_cfg *cfg)
6858 +{
6859 +       writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL),       base
6860 +               + GPI_LMEM_ALLOC_ADDR);
6861 +       writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL),        base
6862 +               + GPI_LMEM_FREE_ADDR);
6863 +       writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL),       base
6864 +               + GPI_DDR_ALLOC_ADDR);
6865 +       writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),        base
6866 +               + GPI_DDR_FREE_ADDR);
6867 +       writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
6868 +       writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
6869 +       writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
6870 +       writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
6871 +       writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
6872 +       writel((DDR_HDR_SIZE << 16) |   LMEM_HDR_SIZE,  base + GPI_HDR_SIZE);
6873 +       writel((DDR_BUF_SIZE << 16) |   LMEM_BUF_SIZE,  base + GPI_BUF_SIZE);
6874 +
6875 +       writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
6876 +               GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
6877 +       writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
6878 +       writel(cfg->aseq_len,   base + GPI_DTX_ASEQ);
6879 +       writel(1, base + GPI_TOE_CHKSUM_EN);
6880 +
6881 +       if (cfg->mtip_pause_reg) {
6882 +               writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG);
6883 +               writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME);
6884 +       }
6885 +}
6886 +
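+/*
+ * Illustrative sketch (not compiled): bringing up an egress GPI. The
+ * retry count, threshold and sequence length are placeholder numbers; the
+ * real values are chosen in pfe_hw.c. mtip_pause_reg is left at 0 so the
+ * TX pause path above stays unconfigured.
+ */
+#if 0
+static void example_egpi1_setup(void)
+{
+	struct gpi_cfg cfg = {
+		.lmem_rtry_cnt = 0x40,	/* hypothetical */
+		.tmlf_txthres = 0xbc,	/* hypothetical */
+		.aseq_len = 0x40,	/* hypothetical */
+		.mtip_pause_reg = 0,	/* skip pause configuration */
+	};
+
+	gpi_init(EGPI1_BASE_ADDR, &cfg);
+	gpi_enable(EGPI1_BASE_ADDR);
+}
+#endif
+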
6887 +/**************************** CLASSIFIER ***************************/
6888 +
6889 +/* Initializes CLASSIFIER block.
6890 + * @param[in] cfg      CLASSIFIER configuration
6891 + */
6892 +void class_init(struct class_cfg *cfg)
6893 +{
6894 +       class_reset();
6895 +
6896 +       class_disable();
6897 +
6898 +       class_set_config(cfg);
6899 +}
6900 +
6901 +/* Resets CLASSIFIER block.
6902 + *
6903 + */
6904 +void class_reset(void)
6905 +{
6906 +       writel(CORE_SW_RESET, CLASS_TX_CTRL);
6907 +}
6908 +
6909 +/* Enables all CLASS-PE's cores.
6910 + *
6911 + */
6912 +void class_enable(void)
6913 +{
6914 +       writel(CORE_ENABLE, CLASS_TX_CTRL);
6915 +}
6916 +
6917 +/* Disables all CLASS-PE's cores.
6918 + *
6919 + */
6920 +void class_disable(void)
6921 +{
6922 +       writel(CORE_DISABLE, CLASS_TX_CTRL);
6923 +}
6924 +
6925 +/*
6926 + * Sets the configuration of the CLASSIFIER block.
6927 + * @param[in] cfg      CLASSIFIER configuration
6928 + */
6929 +void class_set_config(struct class_cfg *cfg)
6930 +{
6931 +       u32 val;
6932 +
6933 +       /* Initialize route table */
6934 +       if (!cfg->resume)
6935 +               memset(DDR_PHYS_TO_VIRT(cfg->route_table_baseaddr), 0, (1 <<
6936 +               cfg->route_table_hash_bits) * CLASS_ROUTE_SIZE);
6937 +
6938 +#if !defined(LS1012A_PFE_RESET_WA)
6939 +       writel(cfg->pe_sys_clk_ratio,   CLASS_PE_SYS_CLK_RATIO);
6940 +#endif
6941 +
6942 +       writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE,    CLASS_HDR_SIZE);
6943 +       writel(LMEM_BUF_SIZE,                           CLASS_LMEM_BUF_SIZE);
6944 +       writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
6945 +               CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
6946 +               CLASS_ROUTE_HASH_ENTRY_SIZE);
6947 +       writel(HIF_PKT_CLASS_EN | HIF_PKT_OFFSET(sizeof(struct hif_hdr)),
6948 +              CLASS_HIF_PARSE);
6949 +
6950 +       val = HASH_CRC_PORT_IP | QB2BUS_LE;
6951 +
6952 +#if defined(CONFIG_IP_ALIGNED)
6953 +       val |= IP_ALIGNED;
6954 +#endif
6955 +
6956 +       /*
6957 +        *  Class PE packet steering will only work if TOE mode, bridge fetch or
6958 +        * route fetch are enabled (see class/qb_fet.v). Route fetch would
6959 +        * trigger additional memory copies (likely from DDR because of hash
6960 +        * table size, which cannot be reduced because PE software still
6961 +        * relies on hash value computed in HW), so when not in TOE mode we
6962 +        * simply enable HW bridge fetch even though we don't use it.
6963 +        */
6964 +       if (cfg->toe_mode)
6965 +               val |= CLASS_TOE;
6966 +       else
6967 +               val |= HW_BRIDGE_FETCH;
6968 +
6969 +       writel(val, CLASS_ROUTE_MULTI);
6970 +
6971 +       writel(DDR_PHYS_TO_PFE(cfg->route_table_baseaddr),
6972 +              CLASS_ROUTE_TABLE_BASE);
6973 +       writel(CLASS_PE0_RO_DM_ADDR0_VAL,               CLASS_PE0_RO_DM_ADDR0);
6974 +       writel(CLASS_PE0_RO_DM_ADDR1_VAL,               CLASS_PE0_RO_DM_ADDR1);
6975 +       writel(CLASS_PE0_QB_DM_ADDR0_VAL,               CLASS_PE0_QB_DM_ADDR0);
6976 +       writel(CLASS_PE0_QB_DM_ADDR1_VAL,               CLASS_PE0_QB_DM_ADDR1);
6977 +       writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR),    CLASS_TM_INQ_ADDR);
6978 +
6979 +       writel(23, CLASS_AFULL_THRES);
6980 +       writel(23, CLASS_TSQ_FIFO_THRES);
6981 +
6982 +       writel(24, CLASS_MAX_BUF_CNT);
6983 +       writel(24, CLASS_TSQ_MAX_CNT);
6984 +}
6985 +
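+/*
+ * Illustrative sketch (not compiled): a minimal classifier bring-up
+ * around class_init()/class_enable(). The hash size, clock ratio and the
+ * idea that the route table was carved out of the PFE DDR region
+ * beforehand are assumptions of the example.
+ */
+#if 0
+static void example_class_setup(unsigned long route_table_phys)
+{
+	struct class_cfg cfg = {
+		.resume = 0,
+		.toe_mode = 0,			/* use HW bridge fetch */
+		.route_table_baseaddr = route_table_phys,
+		.route_table_hash_bits = 15,	/* hypothetical */
+		.pe_sys_clk_ratio = 1,		/* hypothetical ratio */
+	};
+
+	class_init(&cfg);
+	class_enable();
+}
+#endif
+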
6986 +/**************************** TMU ***************************/
6987 +
6988 +void tmu_reset(void)
6989 +{
6990 +       writel(SW_RESET, TMU_CTRL);
6991 +}
6992 +
6993 +/* Initializes TMU block.
6994 + * @param[in] cfg      TMU configuration
6995 + */
6996 +void tmu_init(struct tmu_cfg *cfg)
6997 +{
6998 +       int q, phyno;
6999 +
7000 +       tmu_disable(0xF);
7001 +       mdelay(10);
7002 +
7003 +#if !defined(LS1012A_PFE_RESET_WA)
7004 +       /* keep in soft reset */
7005 +       writel(SW_RESET, TMU_CTRL);
7006 +#endif
7007 +       writel(0x3, TMU_SYS_GENERIC_CONTROL);
7008 +       writel(750, TMU_INQ_WATERMARK);
7009 +       writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR +
7010 +               GPI_INQ_PKTPTR),        TMU_PHY0_INQ_ADDR);
7011 +       writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR +
7012 +               GPI_INQ_PKTPTR),        TMU_PHY1_INQ_ADDR);
7013 +       writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR +
7014 +               GPI_INQ_PKTPTR),        TMU_PHY3_INQ_ADDR);
7015 +       writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
7016 +       writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
7017 +       writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
7018 +              TMU_BMU_INQ_ADDR);
7019 +
7020 +       writel(0x3FF,   TMU_TDQ0_SCH_CTRL);     /*
7021 +                                                * enabling all 10
7022 +                                                * schedulers [9:0] of each TDQ
7023 +                                                */
7024 +       writel(0x3FF,   TMU_TDQ1_SCH_CTRL);
7025 +       writel(0x3FF,   TMU_TDQ3_SCH_CTRL);
7026 +
7027 +#if !defined(LS1012A_PFE_RESET_WA)
7028 +       writel(cfg->pe_sys_clk_ratio,   TMU_PE_SYS_CLK_RATIO);
7029 +#endif
7030 +
7031 +#if !defined(LS1012A_PFE_RESET_WA)
7032 +       writel(DDR_PHYS_TO_PFE(cfg->llm_base_addr),     TMU_LLM_BASE_ADDR);
7033 +       /* Extra packet pointers will be stored from this address onwards */
7034 +
7035 +       writel(cfg->llm_queue_len,      TMU_LLM_QUE_LEN);
7036 +       writel(5,                       TMU_TDQ_IIFG_CFG);
7037 +       writel(DDR_BUF_SIZE,            TMU_BMU_BUF_SIZE);
7038 +
7039 +       writel(0x0,                     TMU_CTRL);
7040 +
7041 +       /* MEM init */
7042 +       pr_info("%s: mem init\n", __func__);
7043 +       writel(MEM_INIT,        TMU_CTRL);
7044 +
7045 +       while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
7046 +               ;
7047 +
7048 +       /* LLM init */
7049 +       pr_info("%s: lmem init\n", __func__);
7050 +       writel(LLM_INIT,        TMU_CTRL);
7051 +
7052 +       while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
7053 +               ;
7054 +#endif
7055 +       /* set up each queue for tail drop */
7056 +       for (phyno = 0; phyno < 4; phyno++) {
7057 +               if (phyno == 2)
7058 +                       continue;
7059 +               for (q = 0; q < 16; q++) {
7060 +                       u32 qdepth;
7061 +
7062 +                       writel((phyno << 8) | q, TMU_TEQ_CTRL);
7063 +                       writel(1 << 22, TMU_TEQ_QCFG); /*Enable tail drop */
7064 +
7065 +                       if (phyno == 3)
7066 +                               qdepth = DEFAULT_TMU3_QDEPTH;
7067 +                       else
7068 +                               qdepth = (q == 0) ? DEFAULT_Q0_QDEPTH :
7069 +                                               DEFAULT_MAX_QDEPTH;
7070 +
7071 +                       /* LOG: 68855 */
7072 +                       /*
7073 +                        * The following is a workaround for the reordered
7074 +                        * packet and BMU2 buffer leakage issue.
7075 +                        */
7076 +                       if (CHIP_REVISION() == 0)
7077 +                               qdepth = 31;
7078 +
7079 +                       writel(qdepth << 18, TMU_TEQ_HW_PROB_CFG2);
7080 +                       writel(qdepth >> 14, TMU_TEQ_HW_PROB_CFG3);
7081 +               }
7082 +       }
7083 +
7084 +#ifdef CFG_LRO
7085 +       /* Set TMU-3 queue 5 (LRO) in no-drop mode */
7086 +       writel((3 << 8) | TMU_QUEUE_LRO, TMU_TEQ_CTRL);
7087 +       writel(0, TMU_TEQ_QCFG);
7088 +#endif
7089 +
7090 +       writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
7091 +
7092 +       writel(0x0, TMU_CTRL);
7093 +}
7094 +
7095 +/* Enables TMU-PE cores.
7096 + * @param[in] pe_mask  TMU PE mask
7097 + */
7098 +void tmu_enable(u32 pe_mask)
7099 +{
7100 +       writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
7101 +}
7102 +
7103 +/* Disables TMU cores.
7104 + * @param[in] pe_mask  TMU PE mask
7105 + */
7106 +void tmu_disable(u32 pe_mask)
7107 +{
7108 +       writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
7109 +}
7110 +
7111 +/* Returns the TMU queue status
7112 + * @param[in] if_id    gem interface id or TMU index
7113 + * @return             bit mask of busy queues, zero means all
7114 + * queues are empty
7115 + */
7116 +u32 tmu_qstatus(u32 if_id)
7117 +{
7118 +       return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
7119 +               offsetof(struct pe_status, tmu_qstatus), 4));
7120 +}
7121 +
7122 +u32 tmu_pkts_processed(u32 if_id)
7123 +{
7124 +       return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
7125 +               offsetof(struct pe_status, rx), 4));
7126 +}
7127 +
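+/*
+ * Illustrative sketch (not compiled): draining a TMU before disabling it,
+ * using the queue status word exposed above. The 100ms timeout policy is
+ * an assumption of the example.
+ */
+#if 0
+static int example_tmu_drain(u32 if_id)
+{
+	int retries = 100;	/* hypothetical bound */
+
+	/* Non-zero bits mean at least one queue still holds packets */
+	while (tmu_qstatus(if_id) && --retries)
+		mdelay(1);
+
+	if (!retries)
+		return -ETIMEDOUT;
+
+	tmu_disable(1 << if_id);	/* assumes if_id maps 1:1 to TMU PE */
+	return 0;
+}
+#endif
+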
7128 +/**************************** UTIL ***************************/
7129 +
7130 +/* Resets UTIL block.
7131 + */
7132 +void util_reset(void)
7133 +{
7134 +       writel(CORE_SW_RESET, UTIL_TX_CTRL);
7135 +}
7136 +
7137 +/* Initializes UTIL block.
7138 + * @param[in] cfg      UTIL configuration
7139 + */
7140 +void util_init(struct util_cfg *cfg)
7141 +{
7142 +       writel(cfg->pe_sys_clk_ratio,   UTIL_PE_SYS_CLK_RATIO);
7143 +}
7144 +
7145 +/* Enables UTIL-PE core.
7146 + *
7147 + */
7148 +void util_enable(void)
7149 +{
7150 +       writel(CORE_ENABLE, UTIL_TX_CTRL);
7151 +}
7152 +
7153 +/* Disables UTIL-PE core.
7154 + *
7155 + */
7156 +void util_disable(void)
7157 +{
7158 +       writel(CORE_DISABLE, UTIL_TX_CTRL);
7159 +}
7160 +
7161 +/**************************** HIF ***************************/
7162 +/* Initializes HIF copy block.
7163 + *
7164 + */
7165 +void hif_init(void)
7166 +{
7167 +       /*Initialize HIF registers*/
7168 +       writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE,
7169 +              HIF_POLL_CTRL);
7170 +}
7171 +
7172 +/* Enable hif tx DMA and interrupt
7173 + *
7174 + */
7175 +void hif_tx_enable(void)
7176 +{
7177 +       writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
7178 +       writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN),
7179 +              HIF_INT_ENABLE);
7180 +}
7181 +
7182 +/* Disable hif tx DMA and interrupt
7183 + *
7184 + */
7185 +void hif_tx_disable(void)
7186 +{
7187 +       u32     hif_int;
7188 +
7189 +       writel(0, HIF_TX_CTRL);
7190 +
7191 +       hif_int = readl(HIF_INT_ENABLE);
7192 +       hif_int &= HIF_TXPKT_INT_EN;
7193 +       writel(hif_int, HIF_INT_ENABLE);
7194 +}
7195 +
7196 +/* Enable hif rx DMA and interrupt
7197 + *
7198 + */
7199 +void hif_rx_enable(void)
7200 +{
7201 +       hif_rx_dma_start();
7202 +       writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN),
7203 +              HIF_INT_ENABLE);
7204 +}
7205 +
7206 +/* Disable hif rx DMA and interrupt
7207 + *
7208 + */
7209 +void hif_rx_disable(void)
7210 +{
7211 +       u32     hif_int;
7212 +
7213 +       writel(0, HIF_RX_CTRL);
7214 +
7215 +       hif_int = readl(HIF_INT_ENABLE);
7216 +       hif_int &= ~HIF_RXPKT_INT_EN;
7217 +       writel(hif_int, HIF_INT_ENABLE);
7218 +}
7219 --- /dev/null
7220 +++ b/drivers/staging/fsl_ppfe/pfe_hif.c
7221 @@ -0,0 +1,1060 @@
7222 +// SPDX-License-Identifier: GPL-2.0+
7223 +/*
7224 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
7225 + * Copyright 2017 NXP
7226 + */
7227 +
7228 +#include <linux/kernel.h>
7229 +#include <linux/interrupt.h>
7230 +#include <linux/dma-mapping.h>
7231 +#include <linux/dmapool.h>
7232 +#include <linux/sched.h>
7233 +#include <linux/module.h>
7234 +#include <linux/list.h>
7235 +#include <linux/kthread.h>
7236 +#include <linux/slab.h>
7237 +
7238 +#include <linux/io.h>
7239 +#include <asm/irq.h>
7240 +
7241 +#include "pfe_mod.h"
7242 +
7243 +#define HIF_INT_MASK   (HIF_INT | HIF_RXPKT_INT | HIF_TXPKT_INT)
7244 +
7245 +unsigned char napi_first_batch;
7246 +
7247 +static void pfe_tx_do_cleanup(unsigned long data);
7248 +
7249 +static int pfe_hif_alloc_descr(struct pfe_hif *hif)
7250 +{
7251 +       void *addr;
7252 +       dma_addr_t dma_addr;
7253 +       int err = 0;
7254 +
7255 +       pr_info("%s\n", __func__);
7256 +       addr = dma_alloc_coherent(pfe->dev,
7257 +                                 HIF_RX_DESC_NT * sizeof(struct hif_desc) +
7258 +                                 HIF_TX_DESC_NT * sizeof(struct hif_desc),
7259 +                                 &dma_addr, GFP_KERNEL);
7260 +
7261 +       if (!addr) {
7262 +               pr_err("%s: Could not allocate buffer descriptors!\n"
7263 +                       , __func__);
7264 +               err = -ENOMEM;
7265 +               goto err0;
7266 +       }
7267 +
7268 +       hif->descr_baseaddr_p = dma_addr;
7269 +       hif->descr_baseaddr_v = addr;
7270 +       hif->rx_ring_size = HIF_RX_DESC_NT;
7271 +       hif->tx_ring_size = HIF_TX_DESC_NT;
7272 +
7273 +       return 0;
7274 +
7275 +err0:
7276 +       return err;
7277 +}
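+
+/* Note: the Rx and Tx rings share the single coherent allocation made
+ * above; pfe_hif_init_buffers() later carves it into rx_base (the first
+ * HIF_RX_DESC_NT descriptors) and tx_base (the remaining HIF_TX_DESC_NT).
+ */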
7278 +
7279 +#if defined(LS1012A_PFE_RESET_WA)
7280 +static void pfe_hif_disable_rx_desc(struct pfe_hif *hif)
7281 +{
7282 +       int ii;
7283 +       struct hif_desc *desc = hif->rx_base;
7284 +
7285 +       /*Mark all descriptors as LAST_BD */
7286 +       for (ii = 0; ii < hif->rx_ring_size; ii++) {
7287 +               desc->ctrl |= BD_CTRL_LAST_BD;
7288 +               desc++;
7289 +       }
7290 +}
7291 +
7292 +struct class_rx_hdr_t {
7293 +       u32     next_ptr;       /* ptr to the start of the first DDR buffer */
7294 +       u16     length;         /* total packet length */
7295 +       u16     phyno;          /* input physical port number */
7296 +       u32     status;         /* gemac status bits */
7297 +       u32     status2;        /* reserved for software usage */
7298 +};
7299 +
7300 +/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
7301 + * except overflow
7302 + */
7303 +#define STATUS_BAD_FRAME_ERR            BIT(16)
7304 +#define STATUS_LENGTH_ERR               BIT(17)
7305 +#define STATUS_CRC_ERR                  BIT(18)
7306 +#define STATUS_TOO_SHORT_ERR            BIT(19)
7307 +#define STATUS_TOO_LONG_ERR             BIT(20)
7308 +#define STATUS_CODE_ERR                 BIT(21)
7309 +#define STATUS_MC_HASH_MATCH            BIT(22)
7310 +#define STATUS_CUMULATIVE_ARC_HIT       BIT(23)
7311 +#define STATUS_UNICAST_HASH_MATCH       BIT(24)
7312 +#define STATUS_IP_CHECKSUM_CORRECT      BIT(25)
7313 +#define STATUS_TCP_CHECKSUM_CORRECT     BIT(26)
7314 +#define STATUS_UDP_CHECKSUM_CORRECT     BIT(27)
7315 +#define STATUS_OVERFLOW_ERR             BIT(28) /* GPI error */
7316 +#define MIN_PKT_SIZE                   64
7317 +
7318 +static inline void copy_to_lmem(u32 *dst, u32 *src, int len)
7319 +{
7320 +       int i;
7321 +
7322 +       for (i = 0; i < len; i += sizeof(u32))  {
7323 +               *dst = htonl(*src);
7324 +               dst++; src++;
7325 +       }
7326 +}
7327 +
7328 +static void send_dummy_pkt_to_hif(void)
7329 +{
7330 +       void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
7331 +       u32 physaddr;
7332 +       struct class_rx_hdr_t local_hdr;
7333 +       static u32 dummy_pkt[] =  {
7334 +               0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
7335 +               0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
7336 +               0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
7337 +               0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
7338 +
7339 +       ddr_ptr = (void *)((u64)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL));
7340 +       if (!ddr_ptr)
7341 +               return;
7342 +
7343 +       lmem_ptr = (void *)((u64)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL));
7344 +       if (!lmem_ptr)
7345 +               return;
7346 +
7347 +       pr_info("Sending a dummy pkt to HIF %p %p\n", ddr_ptr, lmem_ptr);
7348 +       physaddr = (u32)DDR_VIRT_TO_PFE(ddr_ptr);
7349 +
7350 +       lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long int)lmem_ptr);
7351 +
7352 +       local_hdr.phyno = htons(0); /* RX_PHY_0 */
7353 +       local_hdr.length = htons(MIN_PKT_SIZE);
7354 +
7355 +       local_hdr.next_ptr = htonl((u32)physaddr);
7356 +       /*Mark checksums as correct */
7357 +       local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
7358 +                               STATUS_UDP_CHECKSUM_CORRECT |
7359 +                               STATUS_TCP_CHECKSUM_CORRECT |
7360 +                               STATUS_UNICAST_HASH_MATCH |
7361 +                               STATUS_CUMULATIVE_ARC_HIT));
7362 +       copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
7363 +                    sizeof(local_hdr));
7364 +
7365 +       copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
7366 +                    0x40);
7367 +
7368 +       writel((unsigned long int)lmem_ptr, CLASS_INQ_PKTPTR);
7369 +}
7370 +
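+/* Part of the LS1012A reset workaround: mark every Rx BD as LAST_BD, then
+ * keep injecting dummy packets through the CLASS inqueue until the Rx BDP
+ * DMA goes idle (at most 10 attempts).
+ */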
7371 +void pfe_hif_rx_idle(struct pfe_hif *hif)
7372 +{
7373 +       int hif_stop_loop = 10;
7374 +       u32 rx_status;
7375 +
7376 +       pfe_hif_disable_rx_desc(hif);
7377 +       pr_info("Bringing hif to idle state...");
7378 +       writel(0, HIF_INT_ENABLE);
7379 +       /*If HIF Rx BDP is busy send a dummy packet */
7380 +       do {
7381 +               rx_status = readl(HIF_RX_STATUS);
7382 +               if (rx_status & BDP_CSR_RX_DMA_ACTV)
7383 +                       send_dummy_pkt_to_hif();
7384 +
7385 +               usleep_range(100, 150);
7386 +       } while (--hif_stop_loop);
7387 +
7388 +       if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
7389 +               pr_info("Failed\n");
7390 +       else
7391 +               pr_info("Done\n");
7392 +}
7393 +#endif
7394 +
7395 +static void pfe_hif_free_descr(struct pfe_hif *hif)
7396 +{
7397 +       pr_info("%s\n", __func__);
7398 +
7399 +       dma_free_coherent(pfe->dev,
7400 +                         hif->rx_ring_size * sizeof(struct hif_desc) +
7401 +                         hif->tx_ring_size * sizeof(struct hif_desc),
7402 +                         hif->descr_baseaddr_v, hif->descr_baseaddr_p);
7403 +}
7404 +
7405 +void pfe_hif_desc_dump(struct pfe_hif *hif)
7406 +{
7407 +       struct hif_desc *desc;
7408 +       unsigned long desc_p;
7409 +       int ii = 0;
7410 +
7411 +       pr_info("%s\n", __func__);
7412 +
7413 +       desc = hif->rx_base;
7414 +       desc_p = (u32)((u64)desc - (u64)hif->descr_baseaddr_v +
7415 +                       hif->descr_baseaddr_p);
7416 +
7417 +       pr_info("HIF Rx desc base %p physical %x\n", desc, (u32)desc_p);
7418 +       for (ii = 0; ii < hif->rx_ring_size; ii++) {
7419 +               pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
7420 +                       readl(&desc->status), readl(&desc->ctrl),
7421 +                       readl(&desc->data), readl(&desc->next));
7422 +               desc++;
7423 +       }
7424 +
7425 +       desc = hif->tx_base;
7426 +       desc_p = ((u64)desc - (u64)hif->descr_baseaddr_v +
7427 +                       hif->descr_baseaddr_p);
7428 +
7429 +       pr_info("HIF Tx desc base %p physical %x\n", desc, (u32)desc_p);
7430 +       for (ii = 0; ii < hif->tx_ring_size; ii++) {
7431 +               pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
7432 +                       readl(&desc->status), readl(&desc->ctrl),
7433 +                       readl(&desc->data), readl(&desc->next));
7434 +               desc++;
7435 +       }
7436 +}
7437 +
7438 +/* pfe_hif_release_buffers */
7439 +static void pfe_hif_release_buffers(struct pfe_hif *hif)
7440 +{
7441 +       struct hif_desc *desc;
7442 +       int i = 0;
7443 +
7444 +       hif->rx_base = hif->descr_baseaddr_v;
7445 +
7446 +       pr_info("%s\n", __func__);
7447 +
7448 +       /*Free Rx buffers */
7449 +       desc = hif->rx_base;
7450 +       for (i = 0; i < hif->rx_ring_size; i++) {
7451 +               if (readl(&desc->data)) {
7452 +                       if ((i < hif->shm->rx_buf_pool_cnt) &&
7453 +                           (!hif->shm->rx_buf_pool[i])) {
7458 +                               dma_unmap_single(hif->dev,
7459 +                                                DDR_PFE_TO_PHYS(
7460 +                                                readl(&desc->data)),
7461 +                                                hif->rx_buf_len[i],
7462 +                                                DMA_FROM_DEVICE);
7463 +                               hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i];
7464 +                       } else {
7465 +                               pr_err("%s: buffer pool already full\n"
7466 +                                       , __func__);
7467 +                       }
7468 +               }
7469 +
7470 +               writel(0, &desc->data);
7471 +               writel(0, &desc->status);
7472 +               writel(0, &desc->ctrl);
7473 +               desc++;
7474 +       }
7475 +}
7476 +
7477 +/*
7478 + * pfe_hif_init_buffers
7479 + * This function initializes the HIF Rx/Tx ring descriptors and
7480 + * fills the Rx ring with buffers.
7481 + */
7482 +static int pfe_hif_init_buffers(struct pfe_hif *hif)
7483 +{
7484 +       struct hif_desc *desc, *first_desc_p;
7485 +       u32 data;
7486 +       int i = 0;
7487 +
7488 +       pr_info("%s\n", __func__);
7489 +
7490 +       /* Check that enough Rx buffers are available in the shared memory */
7491 +       if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
7492 +               return -ENOMEM;
7493 +
7494 +       hif->rx_base = hif->descr_baseaddr_v;
7495 +       memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
7496 +
7497 +       /*Initialize Rx descriptors */
7498 +       desc = hif->rx_base;
7499 +       first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
7500 +
7501 +       for (i = 0; i < hif->rx_ring_size; i++) {
7502 +               /* Initialize Rx buffers from the shared memory */
7503 +
7504 +               data = (u32)dma_map_single(hif->dev, hif->shm->rx_buf_pool[i],
7505 +                               pfe_pkt_size, DMA_FROM_DEVICE);
7506 +               hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i];
7507 +               hif->rx_buf_len[i] = pfe_pkt_size;
7508 +               hif->shm->rx_buf_pool[i] = NULL;
7509 +
7510 +               if (likely(dma_mapping_error(hif->dev, data) == 0)) {
7511 +                       writel(DDR_PHYS_TO_PFE(data), &desc->data);
7512 +               } else {
7513 +                       pr_err("%s : low on mem\n",  __func__);
7514 +
7515 +                       goto err;
7516 +               }
7517 +
7518 +               writel(0, &desc->status);
7519 +
7520 +               /*
7521 +                * Ensure everything else is written to DDR before
7522 +                * writing bd->ctrl
7523 +                */
7524 +               wmb();
7525 +
7526 +               writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
7527 +                       | BD_CTRL_DIR | BD_CTRL_DESC_EN
7528 +                       | BD_BUF_LEN(pfe_pkt_size)), &desc->ctrl);
7529 +
7530 +               /* Chain descriptors */
7531 +               writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
7532 +               desc++;
7533 +       }
7534 +
7535 +       /* Overwrite last descriptor to chain it to first one */
7536 +       desc--;
7537 +       writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
7538 +
7539 +       hif->rxtoclean_index = 0;
7540 +
7541 +       /*Initialize Rx buffer descriptor ring base address */
7542 +       writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
7543 +
7544 +       hif->tx_base = hif->rx_base + hif->rx_ring_size;
7545 +       first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
7546 +                               hif->rx_ring_size;
7547 +       memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
7548 +
7549 +       /*Initialize tx descriptors */
7550 +       desc = hif->tx_base;
7551 +
7552 +       for (i = 0; i < hif->tx_ring_size; i++) {
7553 +               /* Chain descriptors */
7554 +               writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
7555 +               writel(0, &desc->ctrl);
7556 +               desc++;
7557 +       }
7558 +
7559 +       /* Overwrite last descriptor to chain it to first one */
7560 +       desc--;
7561 +       writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
7562 +       hif->txavail = hif->tx_ring_size;
7563 +       hif->txtosend = 0;
7564 +       hif->txtoclean = 0;
7565 +       hif->txtoflush = 0;
7566 +
7567 +       /*Initialize Tx buffer descriptor ring base address */
7568 +       writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
7569 +
7570 +       return 0;
7571 +
7572 +err:
7573 +       pfe_hif_release_buffers(hif);
7574 +       return -ENOMEM;
7575 +}
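+
+/* All ring and queue indices in this driver wrap with (idx + 1) &
+ * (size - 1), which is only correct for power-of-two sizes: HIF_RX_DESC_NT
+ * (256) and HIF_TX_DESC_NT (2048) satisfy this, and the client queue sizes
+ * are assumed to as well.
+ */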
7576 +
7577 +/*
7578 + * pfe_hif_client_register
7579 + *
7580 + * This function is used to register a client driver with the HIF driver.
7581 + *
7582 + * Return value:
7583 + * 0 - on successful registration
7584 + */
7585 +static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
7586 +                                  struct hif_client_shm *client_shm)
7587 +{
7588 +       struct hif_client *client = &hif->client[client_id];
7589 +       u32 i, cnt;
7590 +       struct rx_queue_desc *rx_qbase;
7591 +       struct tx_queue_desc *tx_qbase;
7592 +       struct hif_rx_queue *rx_queue;
7593 +       struct hif_tx_queue *tx_queue;
7594 +       int err = 0;
7595 +
7596 +       pr_info("%s\n", __func__);
7597 +
7598 +       spin_lock_bh(&hif->tx_lock);
7599 +
7600 +       if (test_bit(client_id, &hif->shm->g_client_status[0])) {
7601 +               pr_err("%s: client %d already registered\n",
7602 +                      __func__, client_id);
7603 +               err = -1;
7604 +               goto unlock;
7605 +       }
7606 +
7607 +       memset(client, 0, sizeof(struct hif_client));
7608 +
7609 +       /* Initialize client Rx queues baseaddr, size */
7610 +
7611 +       cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
7612 +       /* Check if the client is requesting more queues than supported */
7613 +       if (cnt > HIF_CLIENT_QUEUES_MAX)
7614 +               cnt = HIF_CLIENT_QUEUES_MAX;
7615 +
7616 +       client->rx_qn = cnt;
7617 +       rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
7618 +       for (i = 0; i < cnt; i++) {
7619 +               rx_queue = &client->rx_q[i];
7620 +               rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
7621 +               rx_queue->size = client_shm->rx_qsize;
7622 +               rx_queue->write_idx = 0;
7623 +       }
7624 +
7625 +       /* Initialize client Tx queues baseaddr, size */
7626 +       cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
7627 +
7628 +       /* Check if the client is requesting more queues than supported */
7629 +       if (cnt > HIF_CLIENT_QUEUES_MAX)
7630 +               cnt = HIF_CLIENT_QUEUES_MAX;
7631 +
7632 +       client->tx_qn = cnt;
7633 +       tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
7634 +       for (i = 0; i < cnt; i++) {
7635 +               tx_queue = &client->tx_q[i];
7636 +               tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
7637 +               tx_queue->size = client_shm->tx_qsize;
7638 +               tx_queue->ack_idx = 0;
7639 +       }
7640 +
7641 +       set_bit(client_id, &hif->shm->g_client_status[0]);
7642 +
7643 +unlock:
7644 +       spin_unlock_bh(&hif->tx_lock);
7645 +
7646 +       return err;
7647 +}
7648 +
7649 +/*
7650 + * pfe_hif_client_unregister
7651 + *
7652 + * This function is used to unregister a client from the HIF driver.
7653 + *
7654 + */
7655 +static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
7656 +{
7657 +       pr_info("%s\n", __func__);
7658 +
7659 +       /*
7660 +        * Mark client as no longer available (which prevents further packet
7661 +        * receive for this client)
7662 +        */
7663 +       spin_lock_bh(&hif->tx_lock);
7664 +
7665 +       if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
7666 +               pr_err("%s: client %d not registered\n", __func__,
7667 +                      client_id);
7668 +
7669 +               spin_unlock_bh(&hif->tx_lock);
7670 +               return;
7671 +       }
7672 +
7673 +       clear_bit(client_id, &hif->shm->g_client_status[0]);
7674 +
7675 +       spin_unlock_bh(&hif->tx_lock);
7676 +}
7677 +
7678 +/*
7679 + * client_put_rxpacket-
7680 + * This function puts the Rx pkt in the given client Rx queue.
7681 + * It actually swaps the Rx pkt into the client Rx descriptor buffer
7682 + * and returns the free buffer from it.
7683 + *
7684 + * A NULL return means the client Rx queue is full and the
7685 + * packet couldn't be sent to the client queue.
7686 + */
7687 +static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len,
7688 +                                u32 flags, u32 client_ctrl, u32 *rem_len)
7689 +{
7690 +       void *free_pkt = NULL;
7691 +       struct rx_queue_desc *desc = queue->base + queue->write_idx;
7692 +
7693 +       if (readl(&desc->ctrl) & CL_DESC_OWN) {
7694 +               if (page_mode) {
7695 +                       int rem_page_size = PAGE_SIZE -
7696 +                                       PRESENT_OFST_IN_PAGE(pkt);
7697 +                       int cur_pkt_size = ROUND_MIN_RX_SIZE(len +
7698 +                                       pfe_pkt_headroom);
7699 +                       *rem_len = (rem_page_size - cur_pkt_size);
7700 +                       if (*rem_len) {
7701 +                               free_pkt = pkt + cur_pkt_size;
7702 +                               get_page(virt_to_page(free_pkt));
7703 +                       } else {
7704 +                               free_pkt = (void
7705 +                               *)__get_free_page(GFP_ATOMIC | GFP_DMA_PFE);
7706 +                               *rem_len = pfe_pkt_size;
7707 +                       }
7708 +               } else {
7709 +                       free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC |
7710 +                                       GFP_DMA_PFE);
7711 +                       *rem_len = PFE_BUF_SIZE - pfe_pkt_headroom;
7712 +               }
7713 +
7714 +               if (free_pkt) {
7715 +                       desc->data = pkt;
7716 +                       desc->client_ctrl = client_ctrl;
7717 +                       /*
7718 +                        * Ensure everything else is written to DDR before
7719 +                        * writing bd->ctrl
7720 +                        */
7721 +                       smp_wmb();
7722 +                       writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
7723 +                       queue->write_idx = (queue->write_idx + 1)
7724 +                                           & (queue->size - 1);
7725 +
7726 +                       free_pkt += pfe_pkt_headroom;
7727 +               }
7728 +       }
7729 +
7730 +       return free_pkt;
7731 +}
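+
+/* Replenishment strategy used above: in page mode the replacement buffer
+ * is carved from the remainder of the current page (taking an extra page
+ * reference per sub-buffer) and a fresh page is allocated only once the
+ * page is exhausted; in non-page mode a PFE_BUF_SIZE kmalloc() buffer is
+ * used instead.
+ */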
7732 +
7733 +/*
7734 + * pfe_hif_rx_process-
7735 + * This function does the pfe hif rx queue processing: it dequeues packets
7736 + * from the Rx ring and hands them to the corresponding client queue.
7737 + */
7738 +static int pfe_hif_rx_process(struct pfe_hif *hif, int budget)
7739 +{
7740 +       struct hif_desc *desc;
7741 +       struct hif_hdr *pkt_hdr;
7742 +       struct __hif_hdr hif_hdr;
7743 +       void *free_buf;
7744 +       int rtc, len, rx_processed = 0;
7745 +       struct __hif_desc local_desc;
7746 +       int flags;
7747 +       unsigned int desc_p;
7748 +       unsigned int buf_size = 0;
7749 +
7750 +       spin_lock_bh(&hif->lock);
7751 +
7752 +       rtc = hif->rxtoclean_index;
7753 +
7754 +       while (rx_processed < budget) {
7755 +               desc = hif->rx_base + rtc;
7756 +
7757 +               __memcpy12(&local_desc, desc);
7758 +
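+               /* If the BD is still owned by hardware, ack the pending Rx
+                * interrupt and re-read it below; only if BD_CTRL_DESC_EN is
+                * still set do we conclude there is really nothing to do.
+                */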
7759 +               /* ACK pending Rx interrupt */
7760 +               if (local_desc.ctrl & BD_CTRL_DESC_EN) {
7761 +                       writel(HIF_INT | HIF_RXPKT_INT, HIF_INT_SRC);
7762 +
7763 +                       if (rx_processed == 0) {
7764 +                               if (napi_first_batch == 1) {
7765 +                                       desc_p = hif->descr_baseaddr_p +
7766 +                                       ((unsigned long int)(desc) -
7767 +                                       (unsigned long
7768 +                                       int)hif->descr_baseaddr_v);
7769 +                                       napi_first_batch = 0;
7770 +                               }
7771 +                       }
7772 +
7773 +                       __memcpy12(&local_desc, desc);
7774 +
7775 +                       if (local_desc.ctrl & BD_CTRL_DESC_EN)
7776 +                               break;
7777 +               }
7778 +
7779 +               napi_first_batch = 0;
7780 +
7781 +#ifdef HIF_NAPI_STATS
7782 +               hif->napi_counters[NAPI_DESC_COUNT]++;
7783 +#endif
7784 +               len = BD_BUF_LEN(local_desc.ctrl);
7785 +               /*
7786 +                * dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
7787 +                * hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
7788 +                */
7789 +               dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
7790 +                                hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
7791 +
7792 +               pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc];
7793 +
7794 +               /* Track last HIF header received */
7795 +               if (!hif->started) {
7796 +                       hif->started = 1;
7797 +
7798 +                       __memcpy8(&hif_hdr, pkt_hdr);
7799 +
7800 +                       hif->qno = hif_hdr.hdr.q_num;
7801 +                       hif->client_id = hif_hdr.hdr.client_id;
7802 +                       hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
7803 +                                               hif_hdr.hdr.client_ctrl;
7804 +                       flags = CL_DESC_FIRST;
7805 +
7806 +               } else {
7807 +                       flags = 0;
7808 +               }
7809 +
7810 +               if (local_desc.ctrl & BD_CTRL_LIFM)
7811 +                       flags |= CL_DESC_LAST;
7812 +
7813 +               /* Check the client id is valid and still registered */
7814 +               if ((hif->client_id >= HIF_CLIENTS_MAX) ||
7815 +                   !(test_bit(hif->client_id,
7816 +                       &hif->shm->g_client_status[0]))) {
7817 +                       printk_ratelimited("%s: packet with invalid client id %d q_num %d\n",
7818 +                                          __func__,
7819 +                                          hif->client_id,
7820 +                                          hif->qno);
7821 +
7822 +                       free_buf = pkt_hdr;
7823 +
7824 +                       goto pkt_drop;
7825 +               }
7826 +
7827 +               /* Check for a valid queue number */
7828 +               if (hif->client[hif->client_id].rx_qn <= hif->qno) {
7829 +                       pr_info("%s: packet with invalid queue: %d\n"
7830 +                               , __func__, hif->qno);
7831 +                       hif->qno = 0;
7832 +               }
7833 +
7834 +               free_buf =
7835 +               client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
7836 +                                   (void *)pkt_hdr, len, flags,
7837 +                       hif->client_ctrl, &buf_size);
7838 +
7839 +               hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND,
7840 +                                       hif->qno);
7841 +
7842 +               if (unlikely(!free_buf)) {
7843 +#ifdef HIF_NAPI_STATS
7844 +                       hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++;
7845 +#endif
7846 +                       /*
7847 +                        * If we want to stay in polling mode to retry
7848 +                        * later, we must tell napi that we consumed the
7849 +                        * full budget, or we will hit a livelock: the core
7850 +                        * code keeps this napi instance at the head of the
7851 +                        * list and none of the other instances get to run.
7853 +                        */
7854 +                       rx_processed = budget;
7855 +
7856 +                       if (flags & CL_DESC_FIRST)
7857 +                               hif->started = 0;
7858 +
7859 +                       break;
7860 +               }
7861 +
7862 +pkt_drop:
7863 +               /*Fill free buffer in the descriptor */
7864 +               hif->rx_buf_addr[rtc] = free_buf;
7865 +               hif->rx_buf_len[rtc] = min(pfe_pkt_size, buf_size);
7866 +               writel((DDR_PHYS_TO_PFE
7867 +                       ((u32)dma_map_single(hif->dev,
7868 +                       free_buf, hif->rx_buf_len[rtc], DMA_FROM_DEVICE))),
7869 +                       &desc->data);
7870 +               /*
7871 +                * Ensure everything else is written to DDR before
7872 +                * writing bd->ctrl
7873 +                */
7874 +               wmb();
7875 +               writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
7876 +                       BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
7877 +                       &desc->ctrl);
7878 +
7879 +               rtc = (rtc + 1) & (hif->rx_ring_size - 1);
7880 +
7881 +               if (local_desc.ctrl & BD_CTRL_LIFM) {
7882 +                       if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) {
7883 +                               rx_processed++;
7884 +
7885 +#ifdef HIF_NAPI_STATS
7886 +                               hif->napi_counters[NAPI_PACKET_COUNT]++;
7887 +#endif
7888 +                       }
7889 +                       hif->started = 0;
7890 +               }
7891 +       }
7892 +
7893 +       hif->rxtoclean_index = rtc;
7894 +       spin_unlock_bh(&hif->lock);
7895 +
7896 +       /* we made some progress, re-start rx dma in case it stopped */
7897 +       hif_rx_dma_start();
7898 +
7899 +       return rx_processed;
7900 +}
7901 +
7902 +/*
7903 + * client_ack_txpacket-
7904 + * This function acks the Tx packet in the given client Tx queue by
7905 + * resetting the ownership bit in the descriptor.
7906 + */
7907 +static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
7908 +                              unsigned int q_no)
7909 +{
7910 +       struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
7911 +       struct tx_queue_desc *desc = queue->base + queue->ack_idx;
7912 +
7913 +       if (readl(&desc->ctrl) & CL_DESC_OWN) {
7914 +               writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
7915 +               queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);
7916 +
7917 +               return 0;
7918 +
7919 +       } else {
7920 +               /*This should not happen */
7921 +               pr_err("%s: %d %d %d %d %d %p %d\n", __func__,
7922 +                      hif->txtosend, hif->txtoclean, hif->txavail,
7923 +                       client_id, q_no, queue, queue->ack_idx);
7924 +               WARN(1, "%s: doesn't own this descriptor", __func__);
7925 +               return 1;
7926 +       }
7927 +}
7928 +
7929 +void __hif_tx_done_process(struct pfe_hif *hif, int count)
7930 +{
7931 +       struct hif_desc *desc;
7932 +       struct hif_desc_sw *desc_sw;
7933 +       int ttc, tx_avl;
7934 +       int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
7935 +
7936 +       ttc = hif->txtoclean;
7937 +       tx_avl = hif->txavail;
7938 +
7939 +       while ((tx_avl < hif->tx_ring_size) && count--) {
7940 +               desc = hif->tx_base + ttc;
7941 +
7942 +               if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
7943 +                       break;
7944 +
7945 +               desc_sw = &hif->tx_sw_queue[ttc];
7946 +
7947 +               if (desc_sw->data) {
7952 +                       dma_unmap_single(hif->dev, desc_sw->data,
7953 +                                        desc_sw->len, DMA_TO_DEVICE);
7954 +               }
7955 +
7956 +               if (desc_sw->client_id >= HIF_CLIENTS_MAX)
7957 +                       pr_err("Invalid cl id %d\n", desc_sw->client_id);
7958 +
7959 +               pkts_done[desc_sw->client_id]++;
7960 +
7961 +               client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
7962 +
7963 +               ttc = (ttc + 1) & (hif->tx_ring_size - 1);
7964 +               tx_avl++;
7965 +       }
7966 +
7967 +       if (pkts_done[0])
7968 +               hif_lib_indicate_client(0, EVENT_TXDONE_IND, 0);
7969 +       if (pkts_done[1])
7970 +               hif_lib_indicate_client(1, EVENT_TXDONE_IND, 0);
7971 +
7972 +       hif->txtoclean = ttc;
7973 +       hif->txavail = tx_avl;
7974 +
7975 +       if (!count) {
7976 +               tasklet_schedule(&hif->tx_cleanup_tasklet);
7977 +       } else {
7978 +               /*Enable Tx done interrupt */
7979 +               writel(readl_relaxed(HIF_INT_ENABLE) | HIF_TXPKT_INT,
7980 +                      HIF_INT_ENABLE);
7981 +       }
7982 +}
7983 +
7984 +static void pfe_tx_do_cleanup(unsigned long data)
7985 +{
7986 +       struct pfe_hif *hif = (struct pfe_hif *)data;
7987 +
7988 +       writel(HIF_INT | HIF_TXPKT_INT, HIF_INT_SRC);
7989 +
7990 +       hif_tx_done_process(hif, 64);
7991 +}
7992 +
7993 +/*
7994 + * __hif_xmit_pkt -
7995 + * This function puts one packet in the HIF Tx queue
7996 + */
7997 +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
7998 +                       q_no, void *data, u32 len, unsigned int flags)
7999 +{
8000 +       struct hif_desc *desc;
8001 +       struct hif_desc_sw *desc_sw;
8002 +
8003 +       desc = hif->tx_base + hif->txtosend;
8004 +       desc_sw = &hif->tx_sw_queue[hif->txtosend];
8005 +
8006 +       desc_sw->len = len;
8007 +       desc_sw->client_id = client_id;
8008 +       desc_sw->q_no = q_no;
8009 +       desc_sw->flags = flags;
8010 +
8011 +       if (flags & HIF_DONT_DMA_MAP) {
8012 +               desc_sw->data = 0;
8013 +               writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
8014 +       } else {
8015 +               desc_sw->data = dma_map_single(hif->dev, data, len,
8016 +                                               DMA_TO_DEVICE);
8017 +               writel((u32)DDR_PHYS_TO_PFE(desc_sw->data), &desc->data);
8018 +       }
8019 +
8020 +       hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
8021 +       hif->txavail--;
8022 +
8023 +       if (!((flags & HIF_DATA_VALID) && (flags & HIF_LAST_BUFFER)))
8025 +               goto skip_tx;
8026 +
8027 +       /*
8028 +        * Ensure everything else is written to DDR before
8029 +        * writing bd->ctrl
8030 +        */
8031 +       wmb();
8032 +
8033 +       do {
8034 +               desc_sw = &hif->tx_sw_queue[hif->txtoflush];
8035 +               desc = hif->tx_base + hif->txtoflush;
8036 +
8037 +               if (desc_sw->flags & HIF_LAST_BUFFER) {
8038 +                       writel((BD_CTRL_LIFM |
8039 +                              BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
8040 +                              | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
8041 +                               BD_CTRL_PKT_INT_EN | BD_BUF_LEN(desc_sw->len)),
8042 +                               &desc->ctrl);
8043 +               } else {
8044 +                       writel((BD_CTRL_DESC_EN |
8045 +                               BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
8046 +               }
8047 +               hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
8048 +       } while (hif->txtoflush != hif->txtosend);
8051 +
8052 +skip_tx:
8053 +       return;
8054 +}
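+
+/* BD ctrl writes are deferred until a buffer flagged HIF_DATA_VALID |
+ * HIF_LAST_BUFFER is queued; the flush loop above then enables every
+ * descriptor of the packet in order, so the DMA never sees a partially
+ * queued multi-buffer frame.
+ */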
8055 +
8056 +static irqreturn_t wol_isr(int irq, void *dev_id)
8057 +{
8058 +       pr_info("WoL\n");
8059 +       gemac_set_wol(EMAC1_BASE_ADDR, 0);
8060 +       gemac_set_wol(EMAC2_BASE_ADDR, 0);
8061 +       return IRQ_HANDLED;
8062 +}
8063 +
8064 +/*
8065 + * hif_isr-
8066 + * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
8067 + */
8068 +static irqreturn_t hif_isr(int irq, void *dev_id)
8069 +{
8070 +       struct pfe_hif *hif = (struct pfe_hif *)dev_id;
8071 +       int int_status;
8072 +       int int_enable_mask;
8073 +
8074 +       /*Read hif interrupt source register */
8075 +       int_status = readl_relaxed(HIF_INT_SRC);
8076 +       int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
8077 +
8078 +       if ((int_status & HIF_INT) == 0)
8079 +               return IRQ_NONE;
8080 +
8081 +       int_status &= ~(HIF_INT);
8082 +
8083 +       if (int_status & HIF_RXPKT_INT) {
8084 +               int_status &= ~(HIF_RXPKT_INT);
8085 +               int_enable_mask &= ~(HIF_RXPKT_INT);
8086 +
8087 +               napi_first_batch = 1;
8088 +
8089 +               if (napi_schedule_prep(&hif->napi)) {
8090 +#ifdef HIF_NAPI_STATS
8091 +                       hif->napi_counters[NAPI_SCHED_COUNT]++;
8092 +#endif
8093 +                       __napi_schedule(&hif->napi);
8094 +               }
8095 +       }
8096 +
8097 +       if (int_status & HIF_TXPKT_INT) {
8098 +               int_status &= ~(HIF_TXPKT_INT);
8099 +               int_enable_mask &= ~(HIF_TXPKT_INT);
8100 +               /*Schedule tx cleanup tasklet */
8101 +               tasklet_schedule(&hif->tx_cleanup_tasklet);
8102 +       }
8103 +
8104 +       /*Disable interrupts, they will be enabled after they are serviced */
8105 +       writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
8106 +
8107 +       if (int_status) {
8108 +               pr_info("%s : Invalid interrupt : %d\n", __func__,
8109 +                       int_status);
8110 +               writel(int_status, HIF_INT_SRC);
8111 +       }
8112 +
8113 +       return IRQ_HANDLED;
8114 +}
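+
+/* Interrupt scheme: the ISR acks HIF_INT, masks the Rx/Tx packet sources
+ * it is about to service and defers the work to NAPI (Rx) or the cleanup
+ * tasklet (Tx); each of those re-enables its interrupt source once it has
+ * drained the ring.
+ */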
8115 +
8116 +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2)
8117 +{
8118 +       unsigned int client_id = data1;
8119 +
8120 +       if (client_id >= HIF_CLIENTS_MAX) {
8121 +               pr_err("%s: client id %d out of bounds\n", __func__,
8122 +                      client_id);
8123 +               return;
8124 +       }
8125 +
8126 +       switch (req) {
8127 +       case REQUEST_CL_REGISTER:
8128 +               /* Request to register a client */
8129 +               pr_info("%s: register client_id %d\n",
8130 +                       __func__, client_id);
8131 +               pfe_hif_client_register(hif, client_id, (struct
8132 +                       hif_client_shm *)&hif->shm->client[client_id]);
8133 +               break;
8134 +
8135 +       case REQUEST_CL_UNREGISTER:
8136 +               pr_info("%s: unregister client_id %d\n",
8137 +                       __func__, client_id);
8138 +
8139 +               /* Request to unregister a client */
8140 +               pfe_hif_client_unregister(hif, client_id);
8141 +
8142 +               break;
8143 +
8144 +       default:
8145 +               pr_err("%s: unsupported request %d\n",
8146 +                      __func__, req);
8147 +               break;
8148 +       }
8149 +
8150 +       /*
8151 +        * Process client Tx queues
8152 +        * Currently we do not check for pending Tx packets
8153 +        */
8154 +}
8155 +
8156 +/*
8157 + * pfe_hif_rx_poll
8158 + *  This function is the NAPI poll function that processes the HIF Rx queue.
8159 + */
8160 +static int pfe_hif_rx_poll(struct napi_struct *napi, int budget)
8161 +{
8162 +       struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi);
8163 +       int work_done;
8164 +
8165 +#ifdef HIF_NAPI_STATS
8166 +       hif->napi_counters[NAPI_POLL_COUNT]++;
8167 +#endif
8168 +
8169 +       work_done = pfe_hif_rx_process(hif, budget);
8170 +
8171 +       if (work_done < budget) {
8172 +               napi_complete(napi);
8173 +               writel(readl_relaxed(HIF_INT_ENABLE) | HIF_RXPKT_INT,
8174 +                      HIF_INT_ENABLE);
8175 +       }
8176 +#ifdef HIF_NAPI_STATS
8177 +       else
8178 +               hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
8179 +#endif
8180 +
8181 +       return work_done;
8182 +}
8183 +
8184 +/*
8185 + * pfe_hif_init
8186 + * This function initializes the base addresses, IRQ, etc.
8187 + */
8188 +int pfe_hif_init(struct pfe *pfe)
8189 +{
8190 +       struct pfe_hif *hif = &pfe->hif;
8191 +       int err;
8192 +
8193 +       pr_info("%s\n", __func__);
8194 +
8195 +       hif->dev = pfe->dev;
8196 +       hif->irq = pfe->hif_irq;
8197 +
8198 +       err = pfe_hif_alloc_descr(hif);
8199 +       if (err)
8200 +               goto err0;
8201 +
8202 +       if (pfe_hif_init_buffers(hif)) {
8203 +               pr_err("%s: Could not initialize buffer descriptors\n"
8204 +                       , __func__);
8205 +               err = -ENOMEM;
8206 +               goto err1;
8207 +       }
8208 +
8209 +       /* Initialize NAPI for Rx processing */
8210 +       init_dummy_netdev(&hif->dummy_dev);
8211 +       netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll,
8212 +                      HIF_RX_POLL_WEIGHT);
8213 +       napi_enable(&hif->napi);
8214 +
8215 +       spin_lock_init(&hif->tx_lock);
8216 +       spin_lock_init(&hif->lock);
8217 +
8218 +       hif_init();
8219 +       hif_rx_enable();
8220 +       hif_tx_enable();
8221 +
8222 +       /* Disable tx done interrupt */
8223 +       writel(HIF_INT_MASK, HIF_INT_ENABLE);
8224 +
8225 +       gpi_enable(HGPI_BASE_ADDR);
8226 +
8227 +       err = request_irq(hif->irq, hif_isr, 0, "pfe_hif", hif);
8228 +       if (err) {
8229 +               pr_err("%s: failed to get the hif IRQ = %d\n",
8230 +                      __func__, hif->irq);
8231 +               goto err1;
8232 +       }
8233 +
8234 +       err = request_irq(pfe->wol_irq, wol_isr, 0, "pfe_wol", pfe);
8235 +       if (err) {
8236 +               pr_err("%s: failed to get the wol IRQ = %d\n",
8237 +                      __func__, pfe->wol_irq);
8238 +               goto err1;
8239 +       }
8240 +
8241 +       tasklet_init(&hif->tx_cleanup_tasklet,
8242 +                    (void(*)(unsigned long))pfe_tx_do_cleanup,
8243 +                    (unsigned long)hif);
8244 +
8245 +       return 0;
8246 +err1:
8247 +       pfe_hif_free_descr(hif);
8248 +err0:
8249 +       return err;
8250 +}
8251 +
8252 +/* pfe_hif_exit- */
8253 +void pfe_hif_exit(struct pfe *pfe)
8254 +{
8255 +       struct pfe_hif *hif = &pfe->hif;
8256 +
8257 +       pr_info("%s\n", __func__);
8258 +
8259 +       tasklet_kill(&hif->tx_cleanup_tasklet);
8260 +
8261 +       spin_lock_bh(&hif->lock);
8262 +       /* Make sure all clients are disabled */
8263 +       hif->shm->g_client_status[0] = 0;
8264 +       hif->shm->g_client_status[1] = 0;
8265 +
8266 +       spin_unlock_bh(&hif->lock);
8267 +
8268 +       /*Disable Rx/Tx */
8269 +       gpi_disable(HGPI_BASE_ADDR);
8270 +       hif_rx_disable();
8271 +       hif_tx_disable();
8272 +
8273 +       napi_disable(&hif->napi);
8274 +       netif_napi_del(&hif->napi);
8275 +
8276 +       free_irq(pfe->wol_irq, pfe);
8277 +       free_irq(hif->irq, hif);
8278 +
8279 +       pfe_hif_release_buffers(hif);
8280 +       pfe_hif_free_descr(hif);
8281 +}
8282 --- /dev/null
8283 +++ b/drivers/staging/fsl_ppfe/pfe_hif.h
8284 @@ -0,0 +1,200 @@
8285 +/* SPDX-License-Identifier: GPL-2.0+ */
8286 +/*
8287 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8288 + * Copyright 2017 NXP
8289 + */
8290 +
8291 +#ifndef _PFE_HIF_H_
8292 +#define _PFE_HIF_H_
8293 +
8294 +#include <linux/netdevice.h>
8295 +#include <linux/interrupt.h>
8296 +
8297 +#define HIF_NAPI_STATS
8298 +
8299 +#define HIF_CLIENT_QUEUES_MAX  16
8300 +#define HIF_RX_POLL_WEIGHT     64
8301 +
8302 +#define HIF_RX_PKT_MIN_SIZE 0x800 /* 2KB */
8303 +#define HIF_RX_PKT_MIN_SIZE_MASK ~(HIF_RX_PKT_MIN_SIZE - 1)
8304 +#define ROUND_MIN_RX_SIZE(_sz) (((_sz) + (HIF_RX_PKT_MIN_SIZE - 1)) \
8305 +                                       & HIF_RX_PKT_MIN_SIZE_MASK)
8306 +#define PRESENT_OFST_IN_PAGE(_buf) (((unsigned long int)(_buf) & (PAGE_SIZE \
8307 +                                       - 1)) & HIF_RX_PKT_MIN_SIZE_MASK)
8308 +
8309 +enum {
8310 +       NAPI_SCHED_COUNT = 0,
8311 +       NAPI_POLL_COUNT,
8312 +       NAPI_PACKET_COUNT,
8313 +       NAPI_DESC_COUNT,
8314 +       NAPI_FULL_BUDGET_COUNT,
8315 +       NAPI_CLIENT_FULL_COUNT,
8316 +       NAPI_MAX_COUNT
8317 +};
8318 +
8319 +/*
8320 + * HIF_TX_DESC_NT should always be greater than 4;
8321 + * otherwise HIF_TX_POLL_MARK will become zero.
8322 + */
8323 +#define HIF_RX_DESC_NT         256
8324 +#define HIF_TX_DESC_NT         2048
8325 +
8326 +#define HIF_FIRST_BUFFER       BIT(0)
8327 +#define HIF_LAST_BUFFER                BIT(1)
8328 +#define HIF_DONT_DMA_MAP       BIT(2)
8329 +#define HIF_DATA_VALID         BIT(3)
8330 +#define HIF_TSO                        BIT(4)
8331 +
8332 +enum {
8333 +       PFE_CL_GEM0 = 0,
8334 +       PFE_CL_GEM1,
8335 +       HIF_CLIENTS_MAX
8336 +};
8337 +
8338 +/*structure to store client queue info */
8339 +struct hif_rx_queue {
8340 +       struct rx_queue_desc *base;
8341 +       u32     size;
8342 +       u32     write_idx;
8343 +};
8344 +
8345 +struct hif_tx_queue {
8346 +       struct tx_queue_desc *base;
8347 +       u32     size;
8348 +       u32     ack_idx;
8349 +};
8350 +
8351 +/*Structure to store the client info */
8352 +struct hif_client {
8353 +       int     rx_qn;
8354 +       struct hif_rx_queue     rx_q[HIF_CLIENT_QUEUES_MAX];
8355 +       int     tx_qn;
8356 +       struct hif_tx_queue     tx_q[HIF_CLIENT_QUEUES_MAX];
8357 +};
8358 +
8359 +/*HIF hardware buffer descriptor */
8360 +struct hif_desc {
8361 +       u32 ctrl;
8362 +       u32 status;
8363 +       u32 data;
8364 +       u32 next;
8365 +};
8366 +
8367 +struct __hif_desc {
8368 +       u32 ctrl;
8369 +       u32 status;
8370 +       u32 data;
8371 +};
8372 +
8373 +struct hif_desc_sw {
8374 +       dma_addr_t data;
8375 +       u16 len;
8376 +       u8 client_id;
8377 +       u8 q_no;
8378 +       u16 flags;
8379 +};
8380 +
8381 +struct hif_hdr {
8382 +       u8 client_id;
8383 +       u8 q_num;
8384 +       u16 client_ctrl;
8385 +       u16 client_ctrl1;
8386 +};
8387 +
8388 +struct __hif_hdr {
8389 +       union {
8390 +               struct hif_hdr hdr;
8391 +               u32 word[2];
8392 +       };
8393 +};
8394 +
8395 +struct hif_ipsec_hdr {
8396 +       u16     sa_handle[2];
8397 +} __packed;
8398 +
8399 +/*  HIF_CTRL_TX... defines */
8400 +#define HIF_CTRL_TX_CHECKSUM           BIT(2)
8401 +
8402 +/*  HIF_CTRL_RX... defines */
8403 +#define HIF_CTRL_RX_OFFSET_OFST         (24)
8404 +#define HIF_CTRL_RX_CHECKSUMMED                BIT(2)
8405 +#define HIF_CTRL_RX_CONTINUED          BIT(1)
8406 +
8407 +struct pfe_hif {
8408 +       /* To store registered clients in hif layer */
8409 +       struct hif_client client[HIF_CLIENTS_MAX];
8410 +       struct hif_shm *shm;
8411 +       int     irq;
8412 +
8413 +       void    *descr_baseaddr_v;
8414 +       unsigned long   descr_baseaddr_p;
8415 +
8416 +       struct hif_desc *rx_base;
8417 +       u32     rx_ring_size;
8418 +       u32     rxtoclean_index;
8419 +       void    *rx_buf_addr[HIF_RX_DESC_NT];
8420 +       int     rx_buf_len[HIF_RX_DESC_NT];
8421 +       unsigned int qno;
8422 +       unsigned int client_id;
8423 +       unsigned int client_ctrl;
8424 +       unsigned int started;
8425 +
8426 +       struct hif_desc *tx_base;
8427 +       u32     tx_ring_size;
8428 +       u32     txtosend;
8429 +       u32     txtoclean;
8430 +       u32     txavail;
8431 +       u32     txtoflush;
8432 +       struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT];
8433 +
8434 +/* tx_lock synchronizes hif packet tx as well as pfe_hif structure access */
8435 +       spinlock_t tx_lock;
8436 +/* lock synchronizes hif rx queue processing */
8437 +       spinlock_t lock;
8438 +       struct net_device       dummy_dev;
8439 +       struct napi_struct      napi;
8440 +       struct device *dev;
8441 +
8442 +#ifdef HIF_NAPI_STATS
8443 +       unsigned int napi_counters[NAPI_MAX_COUNT];
8444 +#endif
8445 +       struct tasklet_struct   tx_cleanup_tasklet;
8446 +};
8447 +
8448 +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
8449 +                       q_no, void *data, u32 len, unsigned int flags);
8450 +int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no,
8451 +                void *data, unsigned int len);
8452 +void __hif_tx_done_process(struct pfe_hif *hif, int count);
8453 +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
8454 +                               data2);
8455 +int pfe_hif_init(struct pfe *pfe);
8456 +void pfe_hif_exit(struct pfe *pfe);
8457 +void pfe_hif_rx_idle(struct pfe_hif *hif);
8458 +static inline void hif_tx_done_process(struct pfe_hif *hif, int count)
8459 +{
8460 +       spin_lock_bh(&hif->tx_lock);
8461 +       __hif_tx_done_process(hif, count);
8462 +       spin_unlock_bh(&hif->tx_lock);
8463 +}
8464 +
8465 +static inline void hif_tx_lock(struct pfe_hif *hif)
8466 +{
8467 +       spin_lock_bh(&hif->tx_lock);
8468 +}
8469 +
8470 +static inline void hif_tx_unlock(struct pfe_hif *hif)
8471 +{
8472 +       spin_unlock_bh(&hif->tx_lock);
8473 +}
8474 +
8475 +static inline int __hif_tx_avail(struct pfe_hif *hif)
8476 +{
8477 +       return hif->txavail;
8478 +}
8479 +
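+/* Fixed-size copy helpers for the 8-byte HIF header and the 12-byte
+ * ctrl/status/data prefix of a hardware BD; constant-length memcpy() lets
+ * the compiler inline these.
+ */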
8480 +#define __memcpy8(dst, src)            memcpy(dst, src, 8)
8481 +#define __memcpy12(dst, src)           memcpy(dst, src, 12)
8482 +#define __memcpy(dst, src, len)                memcpy(dst, src, len)
8483 +
8484 +#endif /* _PFE_HIF_H_ */
8485 --- /dev/null
8486 +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
8487 @@ -0,0 +1,628 @@
8488 +// SPDX-License-Identifier: GPL-2.0+
8489 +/*
8490 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8491 + * Copyright 2017 NXP
8492 + */
8493 +
8494 +#include <linux/version.h>
8495 +#include <linux/kernel.h>
8496 +#include <linux/slab.h>
8497 +#include <linux/interrupt.h>
8498 +#include <linux/workqueue.h>
8499 +#include <linux/dma-mapping.h>
8500 +#include <linux/dmapool.h>
8501 +#include <linux/sched.h>
8502 +#include <linux/skbuff.h>
8503 +#include <linux/moduleparam.h>
8504 +#include <linux/cpu.h>
8505 +
8506 +#include "pfe_mod.h"
8507 +#include "pfe_hif.h"
8508 +#include "pfe_hif_lib.h"
8509 +
8510 +unsigned int lro_mode;
8511 +unsigned int page_mode;
8512 +unsigned int tx_qos = 1;
8513 +module_param(tx_qos, uint, 0444);
8514 +MODULE_PARM_DESC(tx_qos, "0: disable,\n"
8515 +                        "1: enable (default), guarantees no packet drop at the TMU level\n");
8516 +unsigned int pfe_pkt_size;
8517 +unsigned int pfe_pkt_headroom;
8518 +unsigned int emac_txq_cnt;
8519 +
8520 +/*
8521 + * @pfe_hif_lib.c.
8522 + * Common functions used by HIF client drivers
8523 + */
8524 +
8525 +/*HIF shared memory Global variable */
8526 +struct hif_shm ghif_shm;
8527 +
8528 +/* Cleanup the HIF shared memory, release HIF rx_buffer_pool.
8529 + * This function should be called after pfe_hif_exit
8530 + *
8531 + * @param[in] hif_shm          Shared memory address location in DDR
8532 + */
8533 +static void pfe_hif_shm_clean(struct hif_shm *hif_shm)
8534 +{
8535 +       int i;
8536 +       void *pkt;
8537 +
8538 +       for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
8539 +               pkt = hif_shm->rx_buf_pool[i];
8540 +               if (pkt) {
8541 +                       hif_shm->rx_buf_pool[i] = NULL;
8542 +                       pkt -= pfe_pkt_headroom;
8543 +
8544 +                       if (page_mode)
8545 +                               put_page(virt_to_page(pkt));
8546 +                       else
8547 +                               kfree(pkt);
8548 +               }
8549 +       }
8550 +}
8551 +
8552 +/* Initialize shared memory used between HIF driver and clients,
8553 + * allocate rx_buffer_pool required for HIF Rx descriptors.
8554 + * This function should be called before initializing HIF driver.
8555 + *
8556 + * @param[in] hif_shm          Shared memory address location in DDR
8557 + * @return                     0 - on success, <0 on failure to initialize
8558 + */
8559 +static int pfe_hif_shm_init(struct hif_shm *hif_shm)
8560 +{
8561 +       int i;
8562 +       void *pkt;
8563 +
8564 +       memset(hif_shm, 0, sizeof(struct hif_shm));
8565 +       hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
8566 +
8567 +       for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
8568 +               if (page_mode) {
8569 +                       pkt = (void *)__get_free_page(GFP_KERNEL |
8570 +                               GFP_DMA_PFE);
8571 +               } else {
8572 +                       pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);
8573 +               }
8574 +
8575 +               if (pkt)
8576 +                       hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom;
8577 +               else
8578 +                       goto err0;
8579 +       }
8580 +
8581 +       return 0;
8582 +
8583 +err0:
8584 +       pr_err("%s Low memory\n", __func__);
8585 +       pfe_hif_shm_clean(hif_shm);
8586 +       return -ENOMEM;
8587 +}
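+
+/* Pool entries are stored with pfe_pkt_headroom already added;
+ * pfe_hif_shm_clean() subtracts it again before freeing, so the two
+ * functions must stay in sync.
+ */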
8588 +
8589 +/*This function sends an indication to the HIF driver
8590 + *
8591 + * @param[in] hif      hif context
8592 + */
8593 +static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
8594 +                                       data2)
8595 +{
8596 +       hif_process_client_req(hif, req, data1, data2);
8597 +}
8598 +
8599 +void hif_lib_indicate_client(int client_id, int event_type, int qno)
8600 +{
8601 +       struct hif_client_s *client = pfe->hif_client[client_id];
8602 +
8603 +       if (!client || (event_type >= HIF_EVENT_MAX) || (qno >=
8604 +               HIF_CLIENT_QUEUES_MAX))
8605 +               return;
8606 +
8607 +       if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
8608 +               client->event_handler(client->priv, event_type, qno);
8609 +}
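+
+/* The test_and_set_bit() above ensures the client handler fires only once
+ * per queue/event pair until the bit is cleared again, presumably when the
+ * client re-arms the event via hif_lib_event_handler_start().
+ */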
8610 +
8611 +/*This function releases the Rx queue descriptor memory and pre-filled buffers
8612 + *
8613 + * @param[in] client   hif_client context
8614 + */
8615 +static void hif_lib_client_release_rx_buffers(struct hif_client_s *client)
8616 +{
8617 +       struct rx_queue_desc *desc;
8618 +       int qno, ii;
8619 +       void *buf;
8620 +
8621 +       for (qno = 0; qno < client->rx_qn; qno++) {
8622 +               desc = client->rx_q[qno].base;
8623 +
8624 +               for (ii = 0; ii < client->rx_q[qno].size; ii++) {
8625 +                       buf = (void *)desc->data;
8626 +                       if (buf) {
8627 +                               buf -= pfe_pkt_headroom;
8628 +
8629 +                               if (page_mode)
8630 +                                       free_page((unsigned long)buf);
8631 +                               else
8632 +                                       kfree(buf);
8633 +
8634 +                               desc->ctrl = 0;
8635 +                       }
8636 +
8637 +                       desc++;
8638 +               }
8639 +       }
8640 +
8641 +       kfree(client->rx_qbase);
8642 +}
8643 +
8644 +/*This function allocates memory for the rxq descriptors and pre-fills the
8645 + * rx queues with buffers.
8646 + * @param[in] client   client context
8647 + * @param[in] q_size   size of the rxQ; all queues are of the same size
8648 + */
8649 +static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int
8650 +                                               q_size)
8651 +{
8652 +       struct rx_queue_desc *desc;
8653 +       struct hif_client_rx_queue *queue;
8654 +       int ii, qno;
8655 +
8656 +       /*Allocate memory for the client queues */
8657 +       client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct
8658 +                               rx_queue_desc), GFP_KERNEL);
8659 +       if (!client->rx_qbase)
8660 +               goto err;
8661 +
8662 +       for (qno = 0; qno < client->rx_qn; qno++) {
8663 +               queue = &client->rx_q[qno];
8664 +
8665 +               queue->base = client->rx_qbase + qno * q_size * sizeof(struct
8666 +                               rx_queue_desc);
8667 +               queue->size = q_size;
8668 +               queue->read_idx = 0;
8669 +               queue->write_idx = 0;
8670 +
8671 +               pr_debug("rx queue: %d, base: %p, size: %d\n", qno,
8672 +                        queue->base, queue->size);
8673 +       }
8674 +
8675 +       for (qno = 0; qno < client->rx_qn; qno++) {
8676 +               queue = &client->rx_q[qno];
8677 +               desc = queue->base;
8678 +
8679 +               for (ii = 0; ii < queue->size; ii++) {
8680 +                       desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) |
8681 +                                       CL_DESC_OWN;
8682 +                       desc++;
8683 +               }
8684 +       }
8685 +
8686 +       return 0;
8687 +
8688 +err:
8689 +       return 1;
8690 +}
8691 +
8693 +static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
8694 +{
8695 +       pr_debug("%s\n", __func__);
8696 +
8697 +       /*
8698 +        * Check if there are any pending packets. The client must flush the
8699 +        * tx queues before unregistering, by calling
8700 +        * hif_lib_tx_get_next_complete().
8701 +        *
8702 +        * HIF no longer calls us since we are no longer registered
8703 +        */
8704 +       if (queue->tx_pending)
8705 +               pr_err("%s: pending transmit packets\n", __func__);
8706 +}
8707 +
8708 +static void hif_lib_client_release_tx_buffers(struct hif_client_s *client)
8709 +{
8710 +       int qno;
8711 +
8712 +       pr_debug("%s\n", __func__);
8713 +
8714 +       for (qno = 0; qno < client->tx_qn; qno++)
8715 +               hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
8716 +
8717 +       kfree(client->tx_qbase);
8718 +}
8719 +
8720 +static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
8721 +                                               q_size)
8722 +{
8723 +       struct hif_client_tx_queue *queue;
8724 +       int qno;
8725 +
8726 +       client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct
8727 +                                       tx_queue_desc), GFP_KERNEL);
8728 +       if (!client->tx_qbase)
8729 +               return 1;
8730 +
8731 +       for (qno = 0; qno < client->tx_qn; qno++) {
8732 +               queue = &client->tx_q[qno];
8733 +
8734 +               queue->base = client->tx_qbase + qno * q_size * sizeof(struct
8735 +                               tx_queue_desc);
8736 +               queue->size = q_size;
8737 +               queue->read_idx = 0;
8738 +               queue->write_idx = 0;
8739 +               queue->tx_pending = 0;
8740 +               queue->nocpy_flag = 0;
8741 +               queue->prev_tmu_tx_pkts = 0;
8742 +               queue->done_tmu_tx_pkts = 0;
8743 +
8744 +               pr_debug("tx queue: %d, base: %p, size: %d\n", qno,
8745 +                        queue->base, queue->size);
8746 +       }
8747 +
8748 +       return 0;
8749 +}
8750 +
8751 +static int hif_lib_event_dummy(void *priv, int event_type, int qno)
8752 +{
8753 +       return 0;
8754 +}
8755 +
8756 +int hif_lib_client_register(struct hif_client_s *client)
8757 +{
8758 +       struct hif_shm *hif_shm;
8759 +       struct hif_client_shm *client_shm;
8760 +       int err, i;
8762 +
8763 +       pr_debug("%s\n", __func__);
8764 +
8765 +       /*Allocate memory before spin_lock*/
8766 +       if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
8767 +               err = -ENOMEM;
8768 +               goto err_rx;
8769 +       }
8770 +
8771 +       if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
8772 +               err = -ENOMEM;
8773 +               goto err_tx;
8774 +       }
8775 +
8776 +       spin_lock_bh(&pfe->hif.lock);
8777 +       if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) ||
8778 +           (pfe->hif_client[client->id])) {
8779 +               err = -EINVAL;
8780 +               goto err;
8781 +       }
8782 +
8783 +       hif_shm = client->pfe->hif.shm;
8784 +
8785 +       if (!client->event_handler)
8786 +               client->event_handler = hif_lib_event_dummy;
8787 +
8788 +       /*Initialize client specific shared memory */
8789 +       client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
8790 +       client_shm->rx_qbase = (unsigned long int)client->rx_qbase;
8791 +       client_shm->rx_qsize = client->rx_qsize;
8792 +       client_shm->tx_qbase = (unsigned long int)client->tx_qbase;
8793 +       client_shm->tx_qsize = client->tx_qsize;
8794 +       client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
8795 +                               (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
8796 +       /* spin_lock_init(&client->rx_lock); */
8797 +
8798 +       for (i = 0; i < HIF_EVENT_MAX; i++) {
8799 +               client->queue_mask[i] = 0;  /*
8800 +                                            * By default all events are
8801 +                                            * unmasked
8802 +                                            */
8803 +       }
8804 +
8805 +       /*Indicate to HIF driver*/
8806 +       hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0);
8807 +
8808 +       pr_debug("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n",
8809 +                __func__, client, client->id, client->tx_qsize,
8810 +                client->rx_qsize);
8811 +
8812 +       client->cpu_id = -1;
8813 +
8814 +       pfe->hif_client[client->id] = client;
8815 +       spin_unlock_bh(&pfe->hif.lock);
8816 +
8817 +       return 0;
8818 +
8819 +err:
8820 +       spin_unlock_bh(&pfe->hif.lock);
8821 +       hif_lib_client_release_tx_buffers(client);
8822 +
8823 +err_tx:
8824 +       hif_lib_client_release_rx_buffers(client);
8825 +
8826 +err_rx:
8827 +       return err;
8828 +}
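+
+/*
+ * Illustrative registration sketch (names and sizes are hypothetical):
+ *
+ *     struct hif_client_s cl = {
+ *             .pfe = pfe,
+ *             .id = 0,                          (must be < HIF_CLIENTS_MAX)
+ *             .tx_qn = 1, .rx_qn = 1,
+ *             .tx_qsize = 128, .rx_qsize = 128, (power-of-two depths)
+ *             .event_handler = my_event_handler,
+ *     };
+ *     if (hif_lib_client_register(&cl))
+ *             pr_err("hif client registration failed\n");
+ */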
8829 +
8830 +int hif_lib_client_unregister(struct hif_client_s *client)
8831 +{
8832 +       struct pfe *pfe = client->pfe;
8833 +       u32 client_id = client->id;
8834 +
8835 +       pr_info("%s : client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n",
8836 +               __func__, client, client->id, client->tx_qsize,
8837 +               client->rx_qsize);
8839 +
8840 +       spin_lock_bh(&pfe->hif.lock);
8841 +       hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
8842 +
8843 +       hif_lib_client_release_tx_buffers(client);
8844 +       hif_lib_client_release_rx_buffers(client);
8845 +       pfe->hif_client[client_id] = NULL;
8846 +       spin_unlock_bh(&pfe->hif.lock);
8847 +
8848 +       return 0;
8849 +}
8850 +
8851 +int hif_lib_event_handler_start(struct hif_client_s *client, int event,
8852 +                               int qno)
8853 +{
8854 +       struct hif_client_rx_queue *queue;
8855 +       struct rx_queue_desc *desc;
8856 +
8857 +       if ((event >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX)) {
8858 +               pr_debug("%s: Unsupported event : %d  queue number : %d\n",
8859 +                        __func__, event, qno);
8860 +               return -1;
8861 +       }
+
+       /* Index the rx queue only after qno has been validated */
+       queue = &client->rx_q[qno];
+       desc = queue->base + queue->read_idx;
8862 +
8863 +       test_and_clear_bit(qno, &client->queue_mask[event]);
8864 +
8865 +       switch (event) {
8866 +       case EVENT_RX_PKT_IND:
8867 +               if (!(desc->ctrl & CL_DESC_OWN))
8868 +                       hif_lib_indicate_client(client->id,
8869 +                                               EVENT_RX_PKT_IND, qno);
8870 +               break;
8871 +
8872 +       case EVENT_HIGH_RX_WM:
8873 +       case EVENT_TXDONE_IND:
8874 +       default:
8875 +               break;
8876 +       }
8877 +
8878 +       return 0;
8879 +}
8880 +
8881 +/*
8882 + * This function gets one packet from the specified client queue.
8883 + * It also refills the rx descriptor for reuse by the HIF driver.
8884 + */
8885 +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
8886 +                               *ofst, unsigned int *rx_ctrl,
8887 +                               unsigned int *desc_ctrl, void **priv_data)
8888 +{
8889 +       struct hif_client_rx_queue *queue = &client->rx_q[qno];
8890 +       struct rx_queue_desc *desc;
8891 +       void *pkt = NULL;
8892 +
8893 +       /*
8894 +        * The following lock protects the rx queue against concurrent access
8895 +        * from hif_lib_event_handler_start.
8896 +        * In general the lock is not required, because hif_lib_xmit_pkt and
8897 +        * hif_lib_event_handler_start are called from the napi poll loop,
8898 +        * which is not re-entrant. But if some client uses them differently,
8899 +        * the lock is required.
8900 +        */
8901 +       /*spin_lock_irqsave(&client->rx_lock, flags); */
8902 +       desc = queue->base + queue->read_idx;
8903 +       if (!(desc->ctrl & CL_DESC_OWN)) {
8904 +               pkt = desc->data - pfe_pkt_headroom;
8905 +
8906 +               *rx_ctrl = desc->client_ctrl;
8907 +               *desc_ctrl = desc->ctrl;
8908 +
8909 +               if (desc->ctrl & CL_DESC_FIRST) {
8910 +                       u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
8911 +
8912 +                       if (size) {
8913 +                               size += PFE_PARSE_INFO_SIZE;
8914 +                               *len = CL_DESC_BUF_LEN(desc->ctrl) -
8915 +                                               PFE_PKT_HEADER_SZ - size;
8916 +                               *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ
8917 +                                                               + size;
8918 +                               *priv_data = desc->data + PFE_PKT_HEADER_SZ;
8919 +                       } else {
8920 +                               *len = CL_DESC_BUF_LEN(desc->ctrl) -
8921 +                                      PFE_PKT_HEADER_SZ - PFE_PARSE_INFO_SIZE;
8922 +                               *ofst = pfe_pkt_headroom
8923 +                                       + PFE_PKT_HEADER_SZ
8924 +                                       + PFE_PARSE_INFO_SIZE;
8925 +                               *priv_data = NULL;
8926 +                       }
8927 +
8928 +               } else {
8929 +                       *len = CL_DESC_BUF_LEN(desc->ctrl);
8930 +                       *ofst = pfe_pkt_headroom;
8931 +               }
8932 +
8933 +               /*
8934 +                * Needed so we don't free a buffer/page
8935 +                * twice on module_exit
8936 +                */
8937 +               desc->data = NULL;
8938 +
8939 +               /*
8940 +                * Ensure everything else is written to DDR before
8941 +                * writing bd->ctrl
8942 +                */
8943 +               smp_wmb();
8944 +
8945 +               desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
8946 +               queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
8947 +       }
8948 +
8949 +       /*spin_unlock_irqrestore(&client->rx_lock, flags); */
8950 +       return pkt;
8951 +}
8952 +
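+/*
+ * Write the first word of the HIF header. On a little-endian CPU the
+ * packed layout is: byte 0 client_id, byte 1 qno, bytes 2-3 client_ctrl.
+ */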
8953 +static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
8954 +                                       client_id, unsigned int qno,
8955 +                                       u32 client_ctrl)
8956 +{
8957 +       /* Optimize the write since the destination may be non-cacheable */
8958 +       if (!((unsigned long)pkt_hdr & 0x3)) {
8959 +               ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
8960 +                                       client_id;
8961 +       } else {
8962 +               ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
8963 +               ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
8964 +       }
8965 +}
8966 +
8967 +/*This function puts the given packet in the specific client queue */
8968 +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
8969 +                               *data, unsigned int len, u32 client_ctrl,
8970 +                               unsigned int flags, void *client_data)
8971 +{
8972 +       struct hif_client_tx_queue *queue = &client->tx_q[qno];
8973 +       struct tx_queue_desc *desc = queue->base + queue->write_idx;
8974 +
8975 +       /* First buffer */
8976 +       if (flags & HIF_FIRST_BUFFER) {
8977 +               data -= sizeof(struct hif_hdr);
8978 +               len += sizeof(struct hif_hdr);
8979 +
8980 +               hif_hdr_write(data, client->id, qno, client_ctrl);
8981 +       }
8982 +
8983 +       desc->data = client_data;
8984 +       desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
8985 +
8986 +       __hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags);
8987 +
8988 +       queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);
8989 +       queue->tx_pending++;
8990 +       queue->jiffies_last_packet = jiffies;
8991 +}
8992 +
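+/*
+ * Reclaim one completed tx descriptor from the queue. In nocpy mode the
+ * TMU transmit counter is read back from PE DMEM and the delta since the
+ * previous read (with 32-bit wraparound handled) bounds how many buffers
+ * may be reclaimed.
+ */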
8993 +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
8994 +                                  unsigned int *flags, int count)
8995 +{
8996 +       struct hif_client_tx_queue *queue = &client->tx_q[qno];
8997 +       struct tx_queue_desc *desc = queue->base + queue->read_idx;
8998 +
8999 +       pr_debug("%s: qno : %d rd_indx: %d pending:%d\n", __func__, qno,
9000 +                queue->read_idx, queue->tx_pending);
9001 +
9002 +       if (!queue->tx_pending)
9003 +               return NULL;
9004 +
9005 +       if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
9006 +               u32 tmu_tx_pkts = be32_to_cpu(pe_dmem_read(TMU0_ID +
9007 +                       client->id, TMU_DM_TX_TRANS, 4));
9008 +
9009 +               if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
9010 +                       queue->done_tmu_tx_pkts = UINT_MAX -
9011 +                               queue->prev_tmu_tx_pkts + tmu_tx_pkts;
9012 +               else
9013 +                       queue->done_tmu_tx_pkts = tmu_tx_pkts -
9014 +                                               queue->prev_tmu_tx_pkts;
9015 +
9016 +               queue->prev_tmu_tx_pkts  = tmu_tx_pkts;
9017 +
9018 +               if (!queue->done_tmu_tx_pkts)
9019 +                       return NULL;
9020 +       }
9021 +
9022 +       if (desc->ctrl & CL_DESC_OWN)
9023 +               return NULL;
9024 +
9025 +       queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
9026 +       queue->tx_pending--;
9027 +
9028 +       *flags = CL_DESC_GET_FLAGS(desc->ctrl);
9029 +
9030 +       if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
9031 +               queue->done_tmu_tx_pkts--;
9032 +
9033 +       return desc->data;
9034 +}
9035 +
9036 +static void hif_lib_tmu_credit_init(struct pfe *pfe)
9037 +{
9038 +       int i, q;
9039 +
9040 +       for (i = 0; i < NUM_GEMAC_SUPPORT; i++)
9041 +               for (q = 0; q < emac_txq_cnt; q++) {
9042 +                       pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ?
9043 +                                       DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
9044 +                       pfe->tmu_credit.tx_credit[i][q] =
9045 +                                       pfe->tmu_credit.tx_credit_max[i][q];
9046 +               }
9047 +}
9048 +
9049 +/* __hif_lib_update_credit
9050 + *
+ * Refresh the available tx credit for a queue: the credit is the maximum
+ * queue depth minus the packets still in flight, i.e. packets submitted
+ * by the host minus packets already transmitted by the TMU (with 32-bit
+ * counter wraparound handled).
+ *
9051 + * @param[in] client   hif client context
9052 + * @param[in] queue    queue number, matching the TMU queue
9053 + */
9054 +void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue)
9055 +{
9056 +       unsigned int tmu_tx_packets, tmp;
9057 +
9058 +       if (tx_qos) {
9059 +               tmu_tx_packets = be32_to_cpu(pe_dmem_read(TMU0_ID +
9060 +                       client->id, (TMU_DM_TX_TRANS + (queue * 4)), 4));
9061 +
9062 +               /* tx_packets counter overflowed */
9063 +               if (tmu_tx_packets >
9064 +                   pfe->tmu_credit.tx_packets[client->id][queue]) {
9065 +                       tmp = UINT_MAX - tmu_tx_packets +
9066 +                       pfe->tmu_credit.tx_packets[client->id][queue];
9067 +
9068 +                       pfe->tmu_credit.tx_credit[client->id][queue] =
9069 +                       pfe->tmu_credit.tx_credit_max[client->id][queue] - tmp;
9070 +               } else {
9071 +               /* TMU tx <= pfe_eth tx: normal case, or both counters
9072 +                * overflowed since last time
9073 +                */
9074 +                       pfe->tmu_credit.tx_credit[client->id][queue] =
9075 +                       pfe->tmu_credit.tx_credit_max[client->id][queue] -
9076 +                       (pfe->tmu_credit.tx_packets[client->id][queue] -
9077 +                       tmu_tx_packets);
9078 +               }
9079 +       }
9080 +}
9081 +
9082 +int pfe_hif_lib_init(struct pfe *pfe)
9083 +{
9084 +       int rc;
9085 +
9086 +       pr_info("%s\n", __func__);
9087 +
9088 +       if (lro_mode) {
9089 +               page_mode = 1;
9090 +               pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE);
9091 +               pfe_pkt_headroom = 0;
9092 +       } else {
9093 +               page_mode = 0;
9094 +               pfe_pkt_size = PFE_PKT_SIZE;
9095 +               pfe_pkt_headroom = PFE_PKT_HEADROOM;
9096 +       }
9097 +
9098 +       if (tx_qos)
9099 +               emac_txq_cnt = EMAC_TXQ_CNT / 2;
9100 +       else
9101 +               emac_txq_cnt = EMAC_TXQ_CNT;
9102 +
9103 +       hif_lib_tmu_credit_init(pfe);
9104 +       pfe->hif.shm = &ghif_shm;
9105 +       rc = pfe_hif_shm_init(pfe->hif.shm);
9106 +
9107 +       return rc;
9108 +}
9109 +
9110 +void pfe_hif_lib_exit(struct pfe *pfe)
9111 +{
9112 +       pr_info("%s\n", __func__);
9113 +
9114 +       pfe_hif_shm_clean(pfe->hif.shm);
9115 +}
9116 --- /dev/null
9117 +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
9118 @@ -0,0 +1,229 @@
9119 +/* SPDX-License-Identifier: GPL-2.0+ */
9120 +/*
9121 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9122 + * Copyright 2017 NXP
9123 + */
9124 +
9125 +#ifndef _PFE_HIF_LIB_H_
9126 +#define _PFE_HIF_LIB_H_
9127 +
9128 +#include "pfe_hif.h"
9129 +
9130 +#define HIF_CL_REQ_TIMEOUT     10
9131 +#define GFP_DMA_PFE 0
9132 +#define PFE_PARSE_INFO_SIZE    16
9133 +
9134 +enum {
9135 +       REQUEST_CL_REGISTER = 0,
9136 +       REQUEST_CL_UNREGISTER,
9137 +       HIF_REQUEST_MAX
9138 +};
9139 +
9140 +enum {
9141 +       /* Event to indicate that the client rx queue reached its watermark
+        * level
+        */
9142 +       EVENT_HIGH_RX_WM = 0,
9143 +       /* Event to indicate that a packet was received for the client */
9144 +       EVENT_RX_PKT_IND,
9145 +       /* Event to indicate that packet tx is done for the client */
9146 +       EVENT_TXDONE_IND,
9147 +       HIF_EVENT_MAX
9148 +};
9149 +
9150 +
9151 +/*structure to store client queue info */
9153 +struct hif_client_rx_queue {
9154 +       struct rx_queue_desc *base;
9155 +       u32     size;
9156 +       u32     read_idx;
9157 +       u32     write_idx;
9158 +};
9159 +
9160 +struct hif_client_tx_queue {
9161 +       struct tx_queue_desc *base;
9162 +       u32     size;
9163 +       u32     read_idx;
9164 +       u32     write_idx;
9165 +       u32     tx_pending;
9166 +       unsigned long jiffies_last_packet;
9167 +       u32     nocpy_flag;
9168 +       u32     prev_tmu_tx_pkts;
9169 +       u32     done_tmu_tx_pkts;
9170 +};
9171 +
9172 +struct hif_client_s {
9173 +       int     id;
9174 +       int     tx_qn;
9175 +       int     rx_qn;
9176 +       void    *rx_qbase;
9177 +       void    *tx_qbase;
9178 +       int     tx_qsize;
9179 +       int     rx_qsize;
9180 +       int     cpu_id;
9181 +       struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
9182 +       struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
9183 +       int (*event_handler)(void *priv, int event, int data);
9184 +       unsigned long queue_mask[HIF_EVENT_MAX];
9185 +       struct pfe *pfe;
9186 +       void *priv;
9187 +};
9188 +
9189 +/*
9190 + * Client specific shared memory
9191 + * It contains number of Rx/Tx queues, base addresses and queue sizes
9192 + */
9193 +struct hif_client_shm {
9194 +       u32 ctrl; /*0-7: number of Rx queues, 8-15: number of tx queues */
9195 +       unsigned long rx_qbase; /*Rx queue base address */
9196 +       u32 rx_qsize; /*each Rx queue size, all Rx queues are of same size */
9197 +       unsigned long tx_qbase; /* Tx queue base address */
9198 +       u32 tx_qsize; /*each Tx queue size, all Tx queues are of same size */
9199 +};
9200 +
9201 +/*Client shared memory ctrl bit description */
9202 +#define CLIENT_CTRL_RX_Q_CNT_OFST      0
9203 +#define CLIENT_CTRL_TX_Q_CNT_OFST      8
9204 +#define CLIENT_CTRL_RX_Q_CNT(ctrl)     (((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) \
9205 +                                               & 0xFF)
9206 +#define CLIENT_CTRL_TX_Q_CNT(ctrl)     (((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) \
9207 +                                               & 0xFF)
9208 +
9209 +/*
9210 + * Shared memory used to communicate between HIF driver and host/client drivers
9211 + * Before starting the hif driver, rx_buf_pool and rx_buf_pool_cnt should be
9212 + * initialized with host buffers and buffers count in the pool.
9213 + * rx_buf_pool_cnt should be >= HIF_RX_DESC_NT.
9214 + *
9215 + */
9216 +struct hif_shm {
9217 +       u32 rx_buf_pool_cnt; /*Number of rx buffers available*/
9218 +       /*Rx buffers required to initialize HIF rx descriptors */
9219 +       void *rx_buf_pool[HIF_RX_DESC_NT];
9220 +       unsigned long g_client_status[2]; /*Global client status bit mask */
9221 +       /* Client specific shared memory */
9222 +       struct hif_client_shm client[HIF_CLIENTS_MAX];
9223 +};
9224 +
9225 +#define CL_DESC_OWN    BIT(31)
9226 +/* This sets ownership to the HIF driver */
9227 +#define CL_DESC_LAST   BIT(30)
9228 +/* This indicates the last packet of a multi-buffer frame */
9229 +#define CL_DESC_FIRST  BIT(29)
9230 +/* This indicates the first packet of a multi-buffer frame */
9231 +
9232 +#define CL_DESC_BUF_LEN(x)             ((x) & 0xFFFF)
9233 +#define CL_DESC_FLAGS(x)               (((x) & 0xF) << 16)
9234 +#define CL_DESC_GET_FLAGS(x)           (((x) >> 16) & 0xF)
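+
+/*
+ * Descriptor ctrl word layout implied by the macros above:
+ *
+ *   bits:  31   30    29    28..20   19..16   15..0
+ *         OWN  LAST  FIRST  unused    flags   buffer length
+ */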
9235 +
9236 +struct rx_queue_desc {
9237 +       void *data;
9238 +       u32     ctrl; /* 0-15bit len, 16-19bit flags, 29-30bit first/last, 31bit owner */
9239 +       u32     client_ctrl;
9240 +};
9241 +
9242 +struct tx_queue_desc {
9243 +       void *data;
9244 +       u32     ctrl; /* 0-15bit len, 16-19bit flags, 29-30bit first/last, 31bit owner */
9245 +};
9246 +
9247 +/* HIF Rx is not working properly for 2-byte aligned buffers and
9248 + * ip_header should be 4-byte aligned for better performance.
9249 + * "ip_header = 64 + 6(hif_header) + 14 (MAC Header)" will be 4byte aligned.
9250 + */
9251 +#define PFE_PKT_HEADER_SZ      sizeof(struct hif_hdr)
9252 +/* must be big enough for headroom, pkt size and skb shared info */
9253 +#define PFE_BUF_SIZE           2048
9254 +#define PFE_PKT_HEADROOM       128
9255 +
9256 +#define SKB_SHARED_INFO_SIZE   SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
9257 +#define PFE_PKT_SIZE           (PFE_BUF_SIZE - PFE_PKT_HEADROOM \
9258 +                                - SKB_SHARED_INFO_SIZE)
9259 +#define MAX_L2_HDR_SIZE                14      /* Not correct for VLAN/PPPoE */
9260 +#define MAX_L3_HDR_SIZE                20      /* Not correct for IPv6 */
9261 +#define MAX_L4_HDR_SIZE                60      /* TCP with maximum options */
9262 +#define MAX_HDR_SIZE           (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE \
9263 +                                + MAX_L4_HDR_SIZE)
9264 +/* Used in page mode to clamp packet size to the maximum supported by the hif
9265 + * hw interface (<16KiB)
9266 + */
9267 +#define MAX_PFE_PKT_SIZE       16380UL
9268 +
9269 +extern unsigned int pfe_pkt_size;
9270 +extern unsigned int pfe_pkt_headroom;
9271 +extern unsigned int page_mode;
9272 +extern unsigned int lro_mode;
9273 +extern unsigned int tx_qos;
9274 +extern unsigned int emac_txq_cnt;
9275 +
9276 +int pfe_hif_lib_init(struct pfe *pfe);
9277 +void pfe_hif_lib_exit(struct pfe *pfe);
9278 +int hif_lib_client_register(struct hif_client_s *client);
9279 +int hif_lib_client_unregister(struct  hif_client_s *client);
9280 +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
9281 +                               *data, unsigned int len, u32 client_ctrl,
9282 +                               unsigned int flags, void *client_data);
9283 +int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data,
9284 +                    unsigned int len, u32 client_ctrl, void *client_data);
9285 +void hif_lib_indicate_client(int cl_id, int event, int data);
9286 +int hif_lib_event_handler_start(struct hif_client_s *client, int event, int
9287 +                                       data);
9288 +int hif_lib_tmu_queue_start(struct hif_client_s *client, int qno);
9289 +int hif_lib_tmu_queue_stop(struct hif_client_s *client, int qno);
9290 +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
9291 +                                  unsigned int *flags, int count);
9292 +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
9293 +                               *ofst, unsigned int *rx_ctrl,
9294 +                               unsigned int *desc_ctrl, void **priv_data);
9295 +void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue);
9296 +void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id);
9297 +void hif_lib_set_tx_queue_nocpy(struct hif_client_s *client, int qno, int
9298 +                                       enable);
9299 +static inline int hif_lib_tx_avail(struct hif_client_s *client, unsigned int
9300 +                                       qno)
9301 +{
9302 +       struct hif_client_tx_queue *queue = &client->tx_q[qno];
9303 +
9304 +       return (queue->size - queue->tx_pending);
9305 +}
9306 +
9307 +static inline int hif_lib_get_tx_wr_index(struct hif_client_s *client, unsigned
9308 +                                               int qno)
9309 +{
9310 +       struct hif_client_tx_queue *queue = &client->tx_q[qno];
9311 +
9312 +       return queue->write_idx;
9313 +}
9314 +
9315 +static inline int hif_lib_tx_pending(struct hif_client_s *client, unsigned int
9316 +                                       qno)
9317 +{
9318 +       struct hif_client_tx_queue *queue = &client->tx_q[qno];
9319 +
9320 +       return queue->tx_pending;
9321 +}
9322 +
9323 +#define hif_lib_tx_credit_avail(pfe, id, qno) \
9324 +                               ((pfe)->tmu_credit.tx_credit[id][qno])
9325 +
9326 +#define hif_lib_tx_credit_max(pfe, id, qno) \
9327 +                               ((pfe)->tmu_credit.tx_credit_max[id][qno])
9328 +
9329 +/*
9330 + * Consume tx credit for the given client/queue: decrement the available
9331 + * credit and account the packets against the TMU transmit counter (only
9332 + * when tx_qos is enabled).
9333 + */
9332 +#define hif_lib_tx_credit_use(pfe, id, qno, credit)                    \
9333 +       ({ typeof(pfe) pfe_ = pfe;                                      \
9334 +               typeof(id) id_ = id;                                    \
9335 +               typeof(qno) qno_ = qno;                                 \
9336 +               typeof(credit) credit_ = credit;                        \
9337 +               do {                                                    \
9338 +                       if (tx_qos) {                                   \
9339 +                               (pfe_)->tmu_credit.tx_credit[id_][qno_]\
9340 +                                        -= credit_;                    \
9341 +                               (pfe_)->tmu_credit.tx_packets[id_][qno_]\
9342 +                                       += credit_;                     \
9343 +                       }                                               \
9344 +               } while (0);                                            \
9345 +       })
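+
+/*
+ * Illustrative use (hypothetical caller), with tx_qos enabled:
+ *
+ *     if (hif_lib_tx_credit_avail(pfe, id, qno) > 0)
+ *             hif_lib_tx_credit_use(pfe, id, qno, 1);
+ */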
9346 +
9347 +#endif /* _PFE_HIF_LIB_H_ */
9348 --- /dev/null
9349 +++ b/drivers/staging/fsl_ppfe/pfe_hw.c
9350 @@ -0,0 +1,164 @@
9351 +// SPDX-License-Identifier: GPL-2.0+
9352 +/*
9353 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9354 + * Copyright 2017 NXP
9355 + */
9356 +
9357 +#include "pfe_mod.h"
9358 +#include "pfe_hw.h"
9359 +
9360 +/* Functions to handle most of pfe hw register initialization */
9361 +int pfe_hw_init(struct pfe *pfe, int resume)
9362 +{
9363 +       struct class_cfg class_cfg = {
9364 +               .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
9365 +               .route_table_baseaddr = pfe->ddr_phys_baseaddr +
9366 +                                       ROUTE_TABLE_BASEADDR,
9367 +               .route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
9368 +       };
9369 +
9370 +       struct tmu_cfg tmu_cfg = {
9371 +               .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
9372 +               .llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR,
9373 +               .llm_queue_len = TMU_LLM_QUEUE_LEN,
9374 +       };
9375 +
9376 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9377 +       struct util_cfg util_cfg = {
9378 +               .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
9379 +       };
9380 +#endif
9381 +
9382 +       struct BMU_CFG bmu1_cfg = {
9383 +               .baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
9384 +                                               BMU1_LMEM_BASEADDR),
9385 +               .count = BMU1_BUF_COUNT,
9386 +               .size = BMU1_BUF_SIZE,
9387 +               .low_watermark = 10,
9388 +               .high_watermark = 15,
9389 +       };
9390 +
9391 +       struct BMU_CFG bmu2_cfg = {
9392 +               .baseaddr = DDR_PHYS_TO_PFE(pfe->ddr_phys_baseaddr +
9393 +                                               BMU2_DDR_BASEADDR),
9394 +               .count = BMU2_BUF_COUNT,
9395 +               .size = BMU2_BUF_SIZE,
9396 +               .low_watermark = 250,
9397 +               .high_watermark = 253,
9398 +       };
9399 +
9400 +       struct gpi_cfg egpi1_cfg = {
9401 +               .lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
9402 +               .tmlf_txthres = EGPI1_TMLF_TXTHRES,
9403 +               .aseq_len = EGPI1_ASEQ_LEN,
9404 +               .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC1_BASE_ADDR +
9405 +                                               EMAC_TCNTRL_REG),
9406 +       };
9407 +
9408 +       struct gpi_cfg egpi2_cfg = {
9409 +               .lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
9410 +               .tmlf_txthres = EGPI2_TMLF_TXTHRES,
9411 +               .aseq_len = EGPI2_ASEQ_LEN,
9412 +               .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC2_BASE_ADDR +
9413 +                                               EMAC_TCNTRL_REG),
9414 +       };
9415 +
9416 +       struct gpi_cfg hgpi_cfg = {
9417 +               .lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
9418 +               .tmlf_txthres = HGPI_TMLF_TXTHRES,
9419 +               .aseq_len = HGPI_ASEQ_LEN,
9420 +               .mtip_pause_reg = 0,
9421 +       };
9422 +
9423 +       pr_info("%s\n", __func__);
9424 +
9425 +#if !defined(LS1012A_PFE_RESET_WA)
9426 +       /* LS1012A needs this to make PE work correctly */
9427 +       writel(0x3,     CLASS_PE_SYS_CLK_RATIO);
9428 +       writel(0x3,     TMU_PE_SYS_CLK_RATIO);
9429 +       writel(0x3,     UTIL_PE_SYS_CLK_RATIO);
9430 +       usleep_range(10, 20);
9431 +#endif
9432 +
9433 +       pr_info("CLASS version: %x\n", readl(CLASS_VERSION));
9434 +       pr_info("TMU version: %x\n", readl(TMU_VERSION));
9435 +
9436 +       pr_info("BMU1 version: %x\n", readl(BMU1_BASE_ADDR +
9437 +               BMU_VERSION));
9438 +       pr_info("BMU2 version: %x\n", readl(BMU2_BASE_ADDR +
9439 +               BMU_VERSION));
9440 +
9441 +       pr_info("EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR +
9442 +               GPI_VERSION));
9443 +       pr_info("EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR +
9444 +               GPI_VERSION));
9445 +       pr_info("HGPI version: %x\n", readl(HGPI_BASE_ADDR +
9446 +               GPI_VERSION));
9447 +
9448 +       pr_info("HIF version: %x\n", readl(HIF_VERSION));
9449 +       pr_info("HIF NOPCY version: %x\n", readl(HIF_NOCPY_VERSION));
9450 +
9451 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9452 +       pr_info("UTIL version: %x\n", readl(UTIL_VERSION));
9453 +#endif
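+       /* Busy-wait until the TMU signals that its ECC memory init is done */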
9454 +       while (!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE))
9455 +               ;
9456 +
9457 +       hif_rx_disable();
9458 +       hif_tx_disable();
9459 +
9460 +       bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
9461 +
9462 +       pr_info("bmu_init(1) done\n");
9463 +
9464 +       bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
9465 +
9466 +       pr_info("bmu_init(2) done\n");
9467 +
9468 +       class_cfg.resume = resume ? 1 : 0;
9469 +
9470 +       class_init(&class_cfg);
9471 +
9472 +       pr_info("class_init() done\n");
9473 +
9474 +       tmu_init(&tmu_cfg);
9475 +
9476 +       pr_info("tmu_init() done\n");
9477 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9478 +       util_init(&util_cfg);
9479 +
9480 +       pr_info("util_init() done\n");
9481 +#endif
9482 +       gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
9483 +
9484 +       pr_info("gpi_init(1) done\n");
9485 +
9486 +       gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
9487 +
9488 +       pr_info("gpi_init(2) done\n");
9489 +
9490 +       gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
9491 +
9492 +       pr_info("gpi_init(hif) done\n");
9493 +
9494 +       bmu_enable(BMU1_BASE_ADDR);
9495 +
9496 +       pr_info("bmu_enable(1) done\n");
9497 +
9498 +       bmu_enable(BMU2_BASE_ADDR);
9499 +
9500 +       pr_info("bmu_enable(2) done\n");
9501 +
9502 +       return 0;
9503 +}
9504 +
9505 +void pfe_hw_exit(struct pfe *pfe)
9506 +{
9507 +       pr_info("%s\n", __func__);
9508 +
9509 +       bmu_disable(BMU1_BASE_ADDR);
9510 +       bmu_reset(BMU1_BASE_ADDR);
9511 +
9512 +       bmu_disable(BMU2_BASE_ADDR);
9513 +       bmu_reset(BMU2_BASE_ADDR);
9514 +}
9515 --- /dev/null
9516 +++ b/drivers/staging/fsl_ppfe/pfe_hw.h
9517 @@ -0,0 +1,15 @@
9518 +/* SPDX-License-Identifier: GPL-2.0+ */
9519 +/*
9520 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9521 + * Copyright 2017 NXP
9522 + */
9523 +
9524 +#ifndef _PFE_HW_H_
9525 +#define _PFE_HW_H_
9526 +
9527 +#define PE_SYS_CLK_RATIO       1       /* SYS/AXI = 250MHz, HFE = 500MHz */
9528 +
9529 +int pfe_hw_init(struct pfe *pfe, int resume);
9530 +void pfe_hw_exit(struct pfe *pfe);
9531 +
9532 +#endif /* _PFE_HW_H_ */
9533 --- /dev/null
9534 +++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
9535 @@ -0,0 +1,368 @@
9536 +// SPDX-License-Identifier: GPL-2.0+
9537 +/*
9538 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9539 + * Copyright 2017 NXP
9540 + */
9541 +
9542 +#include <linux/module.h>
9543 +#include <linux/device.h>
9544 +#include <linux/of.h>
9545 +#include <linux/of_net.h>
9546 +#include <linux/of_address.h>
9547 +#include <linux/of_mdio.h>
9548 +#include <linux/platform_device.h>
9549 +#include <linux/slab.h>
9550 +#include <linux/clk.h>
9551 +#include <linux/mfd/syscon.h>
9552 +#include <linux/regmap.h>
9553 +
9554 +#include "pfe_mod.h"
9555 +
9556 +extern bool pfe_use_old_dts_phy;
9557 +struct ls1012a_pfe_platform_data pfe_platform_data;
9558 +
9559 +static int pfe_get_gemac_if_properties(struct device_node *gem,
9560 +                                      int port,
9561 +                                      struct ls1012a_pfe_platform_data *pdata)
9562 +{
9563 +       struct device_node *phy_node = NULL;
9564 +       int size;
9565 +       int phy_id = 0;
9566 +       const u32 *addr;
9567 +       const void *mac_addr;
9568 +
9569 +       addr = of_get_property(gem, "reg", &size);
+       if (!addr) {
+               pr_err("%s: Invalid gemac reg property\n", __func__);
+               goto err;
+       }
9570 +       port = be32_to_cpup(addr);
9571 +
9572 +       pdata->ls1012a_eth_pdata[port].gem_id = port;
9573 +
9574 +       mac_addr = of_get_mac_address(gem);
9575 +       if (mac_addr) {
9576 +               memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
9577 +                      ETH_ALEN);
9578 +       }
9579 +
9580 +       phy_node = of_parse_phandle(gem, "phy-handle", 0);
9581 +       pdata->ls1012a_eth_pdata[port].phy_node = phy_node;
9582 +       if (phy_node) {
9583 +               pfe_use_old_dts_phy = false;
9584 +               goto process_phynode;
9585 +       } else if (of_phy_is_fixed_link(gem)) {
9586 +               pfe_use_old_dts_phy = false;
9587 +               if (of_phy_register_fixed_link(gem) < 0) {
9588 +                       pr_err("broken fixed-link specification\n");
9589 +                       goto err;
9590 +               }
9591 +               phy_node = of_node_get(gem);
9592 +               pdata->ls1012a_eth_pdata[port].phy_node = phy_node;
9593 +       } else if (of_get_property(gem, "fsl,pfe-phy-if-flags", &size)) {
9594 +               pfe_use_old_dts_phy = true;
9595 +               /* Use old dts properties for phy handling */
9596 +               addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
9597 +               pdata->ls1012a_eth_pdata[port].phy_flags = be32_to_cpup(addr);
9598 +
9599 +               addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
9600 +               if (!addr) {
9601 +                       pr_err("%s:%d Invalid gemac-phy-id....\n", __func__,
9602 +                              __LINE__);
9603 +               } else {
9604 +                       phy_id = be32_to_cpup(addr);
9605 +                       pdata->ls1012a_eth_pdata[port].phy_id = phy_id;
9606 +                       pdata->ls1012a_mdio_pdata[0].phy_mask &= ~(1 << phy_id);
9607 +               }
9608 +
9609 +               /* If a PHY is present, fall through and read mdio properties */
9610 +               if (pdata->ls1012a_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
9611 +                       goto done;
9612 +
9613 +       } else {
9614 +               pr_info("%s: No PHY or fixed-link\n", __func__);
9615 +               return 0;
9616 +       }
9617 +
9618 +process_phynode:
9619 +       pdata->ls1012a_eth_pdata[port].mii_config = of_get_phy_mode(gem);
9620 +       if ((pdata->ls1012a_eth_pdata[port].mii_config) < 0)
9621 +               pr_err("%s:%d Incorrect Phy mode....\n", __func__,
9622 +                      __LINE__);
9623 +
9624 +       addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
9625 +       if (!addr) {
9626 +               pr_err("%s: Invalid mdio-mux-val....\n", __func__);
9627 +       } else {
9628 +               phy_id = be32_to_cpup(addr);
9629 +               pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
9630 +       }
9631 +
9632 +       if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
9633 +               pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
9634 +                        pdata->ls1012a_eth_pdata[port].mdio_muxval;
9635 +
9637 +       pdata->ls1012a_mdio_pdata[port].irq[0] = PHY_POLL;
9638 +
9639 +done:
9640 +       return 0;
9641 +
9642 +err:
9643 +       return -1;
9644 +}
9645 +
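+/*
+ * Hypothetical device-tree fragment matching the properties parsed in this
+ * file (node names, addresses and values are illustrative only):
+ *
+ *     pfe@04000000 {
+ *             compatible = "fsl,pfe";
+ *             fsl,pfe-num-interfaces = <2>;
+ *             fsl,pfe-scfg = <&scfg 0>;
+ *             ethernet@0 {
+ *                     reg = <0>;
+ *                     phy-handle = <&sgmii_phy1>;
+ *                     fsl,mdio-mux-val = <0x0>;
+ *             };
+ *     };
+ */
+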
9646 +/*
9647 + * pfe_platform_probe - map PFE resources, parse the interface properties
9648 + * from the device tree and probe the PFE
9649 + */
9652 +static int pfe_platform_probe(struct platform_device *pdev)
9653 +{
9654 +       struct resource res;
9655 +       int ii, rc, interface_count = 0, size = 0;
9656 +       const u32 *prop;
9657 +       struct device_node *np, *gem = NULL;
9658 +       struct clk *pfe_clk;
9659 +
9660 +       np = pdev->dev.of_node;
9661 +
9662 +       if (!np) {
9663 +               pr_err("Invalid device node\n");
9664 +               return -EINVAL;
9665 +       }
9666 +
9667 +       pfe = kzalloc(sizeof(*pfe), GFP_KERNEL);
9668 +       if (!pfe) {
9669 +               rc = -ENOMEM;
9670 +               goto err_alloc;
9671 +       }
9672 +
9673 +       platform_set_drvdata(pdev, pfe);
9674 +
9675 +       dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9676 +
9677 +       if (of_address_to_resource(np, 1, &res)) {
9678 +               rc = -ENOMEM;
9679 +               pr_err("failed to get ddr resource\n");
9680 +               goto err_ddr;
9681 +       }
9682 +
9683 +       pfe->ddr_phys_baseaddr = res.start;
9684 +       pfe->ddr_size = resource_size(&res);
9685 +       pfe->ddr_baseaddr = phys_to_virt(res.start);
9686 +
9687 +       pfe->scfg =
9688 +               syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
9689 +                                               "fsl,pfe-scfg");
9690 +       if (IS_ERR(pfe->scfg)) {
9691 +               dev_err(&pdev->dev, "No syscfg phandle specified\n");
9692 +               rc = PTR_ERR(pfe->scfg);
+               goto err_ddr;
9693 +       }
9694 +
9695 +       pfe->cbus_baseaddr = of_iomap(np, 0);
9696 +       if (!pfe->cbus_baseaddr) {
9697 +               rc = -ENOMEM;
9698 +               pr_err("failed to get axi resource\n");
9699 +               goto err_axi;
9700 +       }
9701 +
9702 +       pfe->hif_irq = platform_get_irq(pdev, 0);
9703 +       if (pfe->hif_irq < 0) {
9704 +               pr_err("platform_get_irq for hif failed\n");
9705 +               rc = pfe->hif_irq;
9706 +               goto err_hif_irq;
9707 +       }
9708 +
9709 +       pfe->wol_irq = platform_get_irq(pdev, 2);
9710 +       if (pfe->wol_irq < 0) {
9711 +               pr_err("platform_get_irq for WoL failed\n");
9712 +               rc = pfe->wol_irq;
9713 +               goto err_hif_irq;
9714 +       }
9715 +
9716 +       /* Read interface count */
9717 +       prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
9718 +       if (!prop) {
9719 +               pr_err("Failed to read number of interfaces\n");
9720 +               rc = -ENXIO;
9721 +               goto err_prop;
9722 +       }
9723 +
9724 +       interface_count = be32_to_cpup(prop);
9725 +       if (interface_count <= 0) {
9726 +               pr_err("No ethernet interface count : %d\n",
9727 +                      interface_count);
9728 +               rc = -ENXIO;
9729 +               goto err_prop;
9730 +       }
9731 +
9732 +       pfe_platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
9733 +
9734 +       for (ii = 0; ii < interface_count; ii++) {
9735 +               gem = of_get_next_child(np, gem);
9736 +               if (gem)
9737 +                       pfe_get_gemac_if_properties(gem, ii,
9738 +                                                   &pfe_platform_data);
9739 +               else
9740 +                       pr_err("Unable to find interface %d\n", ii);
9741 +
9742 +       }
9743 +
9744 +       pfe->dev = &pdev->dev;
9745 +
9746 +       pfe->dev->platform_data = &pfe_platform_data;
9747 +
9748 +       /* declare WoL capabilities */
9749 +       device_init_wakeup(&pdev->dev, true);
9750 +
9751 +       /* find the clocks */
9752 +       pfe_clk = devm_clk_get(pfe->dev, "pfe");
9753 +       if (IS_ERR(pfe_clk)) {
+               rc = PTR_ERR(pfe_clk);
+               goto err_hif_irq;
+       }
9755 +
9756 +       /* PFE clock is (platform clock / 2) */
9757 +       /* save sys_clk value as KHz */
9758 +       pfe->ctrl.sys_clk = clk_get_rate(pfe_clk) / (2 * 1000);
9759 +
9760 +       rc = pfe_probe(pfe);
9761 +       if (rc < 0)
9762 +               goto err_probe;
9763 +
9764 +       return 0;
9765 +
9766 +err_probe:
9767 +err_prop:
9768 +err_hif_irq:
9769 +       iounmap(pfe->cbus_baseaddr);
9770 +
9771 +err_axi:
9772 +err_ddr:
9773 +       platform_set_drvdata(pdev, NULL);
9774 +
9775 +       kfree(pfe);
9776 +
9777 +err_alloc:
9778 +       return rc;
9779 +}
9780 +
9781 +/*
9782 + * pfe_platform_remove - tear down the PFE and release platform resources
9783 + */
9784 +static int pfe_platform_remove(struct platform_device *pdev)
9785 +{
9786 +       struct pfe *pfe = platform_get_drvdata(pdev);
9787 +       int rc;
9788 +
9789 +       pr_info("%s\n", __func__);
9790 +
9791 +       rc = pfe_remove(pfe);
9792 +
9793 +       iounmap(pfe->cbus_baseaddr);
9794 +
9795 +       platform_set_drvdata(pdev, NULL);
9796 +
9797 +       kfree(pfe);
9798 +
9799 +       return rc;
9800 +}
9801 +
9802 +#ifdef CONFIG_PM
9803 +#ifdef CONFIG_PM_SLEEP
9804 +int pfe_platform_suspend(struct device *dev)
9805 +{
9806 +       struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
9807 +       struct net_device *netdev;
9808 +       int i;
9809 +
9810 +       pfe->wake = 0;
9811 +
9812 +       for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
9813 +               netdev = pfe->eth.eth_priv[i]->ndev;
9814 +
9815 +               netif_device_detach(netdev);
9816 +
9817 +               if (netif_running(netdev) && pfe_eth_suspend(netdev))
9818 +                       pfe->wake = 1;
9820 +       }
9821 +
9822 +       /* Shutdown PFE only if we're not waking up the system */
9823 +       if (!pfe->wake) {
9824 +#if defined(LS1012A_PFE_RESET_WA)
9825 +               pfe_hif_rx_idle(&pfe->hif);
9826 +#endif
9827 +               pfe_ctrl_suspend(&pfe->ctrl);
9828 +               pfe_firmware_exit(pfe);
9829 +
9830 +               pfe_hif_exit(pfe);
9831 +               pfe_hif_lib_exit(pfe);
9832 +
9833 +               pfe_hw_exit(pfe);
9834 +       }
9835 +
9836 +       return 0;
9837 +}
9838 +
9839 +static int pfe_platform_resume(struct device *dev)
9840 +{
9841 +       struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
9842 +       struct net_device *netdev;
9843 +       int i;
9844 +
9845 +       if (!pfe->wake) {
9846 +               pfe_hw_init(pfe, 1);
9847 +               pfe_hif_lib_init(pfe);
9848 +               pfe_hif_init(pfe);
9849 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9850 +               util_enable();
9851 +#endif
9852 +               tmu_enable(0xf);
9853 +               class_enable();
9854 +               pfe_ctrl_resume(&pfe->ctrl);
9855 +       }
9856 +
9857 +       for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
9858 +               netdev = pfe->eth.eth_priv[i]->ndev;
9859 +
9860 +               if (pfe->mdio.mdio_priv[i]->mii_bus)
9861 +                       pfe_eth_mdio_reset(pfe->mdio.mdio_priv[i]->mii_bus);
9862 +
9863 +               if (netif_running(netdev))
9864 +                       pfe_eth_resume(netdev);
9865 +
9866 +               netif_device_attach(netdev);
9867 +       }
9868 +       return 0;
9869 +}
9870 +#else
9871 +#define pfe_platform_suspend NULL
9872 +#define pfe_platform_resume NULL
9873 +#endif
9874 +
9875 +static const struct dev_pm_ops pfe_platform_pm_ops = {
9876 +       SET_SYSTEM_SLEEP_PM_OPS(pfe_platform_suspend, pfe_platform_resume)
9877 +};
9878 +#endif
9879 +
9880 +static const struct of_device_id pfe_match[] = {
9881 +       {
9882 +               .compatible = "fsl,pfe",
9883 +       },
9884 +       {},
9885 +};
9886 +MODULE_DEVICE_TABLE(of, pfe_match);
9887 +
9888 +static struct platform_driver pfe_platform_driver = {
9889 +       .probe = pfe_platform_probe,
9890 +       .remove = pfe_platform_remove,
9891 +       .driver = {
9892 +               .name = "pfe",
9893 +               .of_match_table = pfe_match,
9894 +#ifdef CONFIG_PM
9895 +               .pm = &pfe_platform_pm_ops,
9896 +#endif
9897 +       },
9898 +};
9899 +
9900 +module_platform_driver(pfe_platform_driver);
9901 +MODULE_LICENSE("GPL");
9902 +MODULE_DESCRIPTION("PFE Ethernet driver");
9903 +MODULE_AUTHOR("NXP DNCPE");
9904 --- /dev/null
9905 +++ b/drivers/staging/fsl_ppfe/pfe_mod.c
9906 @@ -0,0 +1,158 @@
9907 +// SPDX-License-Identifier: GPL-2.0+
9908 +/*
9909 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9910 + * Copyright 2017 NXP
9911 + */
9912 +
9913 +#include <linux/dma-mapping.h>
9914 +#include "pfe_mod.h"
9915 +#include "pfe_cdev.h"
9916 +
9917 +unsigned int us;
9918 +module_param(us, uint, 0444);
9919 +MODULE_PARM_DESC(us, "0: module enabled for kernel networking (DEFAULT)\n"
9920 +                       "1: module enabled for userspace networking\n");
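+/*
+ * e.g. load with "insmod pfe.ko us=1" to reserve the data path for
+ * userspace networking (module file name assumed).
+ */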
9921 +struct pfe *pfe;
9922 +
9923 +/*
9924 + * pfe_probe - initialize PFE hardware and bring up all driver subsystems
9925 + */
9926 +int pfe_probe(struct pfe *pfe)
9927 +{
9928 +       int rc;
9929 +
9930 +       if (pfe->ddr_size < DDR_MAX_SIZE) {
9931 +               pr_err("%s: required DDR memory (%x) above platform ddr memory (%x)\n",
9932 +                      __func__, (unsigned int)DDR_MAX_SIZE, pfe->ddr_size);
9933 +               rc = -ENOMEM;
9934 +               goto err_hw;
9935 +       }
9936 +
9937 +       if (((int)(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) &
9938 +                       (8 * SZ_1M - 1)) != 0) {
9939 +               pr_err("%s: BMU2 base address (0x%x) must be aligned on 8MB boundary\n",
9940 +                      __func__, (int)pfe->ddr_phys_baseaddr +
9941 +                       BMU2_DDR_BASEADDR);
9942 +               rc = -ENOMEM;
9943 +               goto err_hw;
9944 +       }
9945 +
9946 +       pr_info("cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n",
9947 +               (unsigned long)pfe->cbus_baseaddr,
9948 +               (unsigned long)pfe->ddr_baseaddr,
9949 +               pfe->ddr_phys_baseaddr, pfe->ddr_size);
9950 +
9951 +       pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr,
9952 +                    pfe->ddr_phys_baseaddr, pfe->ddr_size);
9953 +
9954 +       rc = pfe_hw_init(pfe, 0);
9955 +       if (rc < 0)
9956 +               goto err_hw;
9957 +
9958 +       if (us)
9959 +               goto firmware_init;
9960 +
9961 +       rc = pfe_hif_lib_init(pfe);
9962 +       if (rc < 0)
9963 +               goto err_hif_lib;
9964 +
9965 +       rc = pfe_hif_init(pfe);
9966 +       if (rc < 0)
9967 +               goto err_hif;
9968 +
9969 +firmware_init:
9970 +       rc = pfe_firmware_init(pfe);
9971 +       if (rc < 0)
9972 +               goto err_firmware;
9973 +
9974 +       rc = pfe_ctrl_init(pfe);
9975 +       if (rc < 0)
9976 +               goto err_ctrl;
9977 +
9978 +       rc = pfe_eth_init(pfe);
9979 +       if (rc < 0)
9980 +               goto err_eth;
9981 +
9982 +       rc = pfe_sysfs_init(pfe);
9983 +       if (rc < 0)
9984 +               goto err_sysfs;
9985 +
9986 +       rc = pfe_debugfs_init(pfe);
9987 +       if (rc < 0)
9988 +               goto err_debugfs;
9989 +
9990 +       if (us) {
9991 +               /* Creating a character device */
9992 +               rc = pfe_cdev_init();
9993 +               if (rc < 0)
9994 +                       goto err_cdev;
9995 +       }
9996 +
9997 +       return 0;
9998 +
9999 +err_cdev:
10000 +       pfe_debugfs_exit(pfe);
10001 +
10002 +err_debugfs:
10003 +       pfe_sysfs_exit(pfe);
10004 +
10005 +err_sysfs:
10006 +       pfe_eth_exit(pfe);
10007 +
10008 +err_eth:
10009 +       pfe_ctrl_exit(pfe);
10010 +
10011 +err_ctrl:
10012 +       pfe_firmware_exit(pfe);
10013 +
10014 +err_firmware:
10015 +       if (us)
10016 +               goto err_hif_lib;
10017 +
10018 +       pfe_hif_exit(pfe);
10019 +
10020 +err_hif:
10021 +       pfe_hif_lib_exit(pfe);
10022 +
10023 +err_hif_lib:
10024 +       pfe_hw_exit(pfe);
10025 +
10026 +err_hw:
10027 +       return rc;
10028 +}
10029 +
10030 +/*
10031 + * pfe_remove - shut down all driver subsystems and the PFE hardware
10032 + */
10033 +int pfe_remove(struct pfe *pfe)
10034 +{
10035 +       pr_info("%s\n", __func__);
10036 +
10037 +       if (us)
10038 +               pfe_cdev_exit();
10039 +
10040 +       pfe_debugfs_exit(pfe);
10041 +
10042 +       pfe_sysfs_exit(pfe);
10043 +
10044 +       pfe_eth_exit(pfe);
10045 +
10046 +       pfe_ctrl_exit(pfe);
10047 +
10048 +#if defined(LS1012A_PFE_RESET_WA)
10049 +       pfe_hif_rx_idle(&pfe->hif);
10050 +#endif
10051 +       pfe_firmware_exit(pfe);
10052 +
10053 +       if (us)
10054 +               goto hw_exit;
10055 +
10056 +       pfe_hif_exit(pfe);
10057 +
10058 +       pfe_hif_lib_exit(pfe);
10059 +
10060 +hw_exit:
10061 +       pfe_hw_exit(pfe);
10062 +
10063 +       return 0;
10064 +}
10065 --- /dev/null
10066 +++ b/drivers/staging/fsl_ppfe/pfe_mod.h
10067 @@ -0,0 +1,103 @@
10068 +/* SPDX-License-Identifier: GPL-2.0+ */
10069 +/*
10070 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
10071 + * Copyright 2017 NXP
10072 + */
10073 +
10074 +#ifndef _PFE_MOD_H_
10075 +#define _PFE_MOD_H_
10076 +
10077 +#include <linux/device.h>
10078 +#include <linux/elf.h>
10079 +
10080 +extern unsigned int us;
10081 +
10082 +struct pfe;
10083 +
10084 +#include "pfe_hw.h"
10085 +#include "pfe_firmware.h"
10086 +#include "pfe_ctrl.h"
10087 +#include "pfe_hif.h"
10088 +#include "pfe_hif_lib.h"
10089 +#include "pfe_eth.h"
10090 +#include "pfe_sysfs.h"
10091 +#include "pfe_perfmon.h"
10092 +#include "pfe_debugfs.h"
10093 +
10094 +#define PHYID_MAX_VAL 32
10095 +
10096 +struct pfe_tmu_credit {
10097 +       /* Number of allowed TX packet in-flight, matches TMU queue size */
10098 +       unsigned int tx_credit[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
10099 +       unsigned int tx_credit_max[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
10100 +       unsigned int tx_packets[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
10101 +};
10102 +
10103 +struct pfe {
10104 +       struct regmap   *scfg;
10105 +       unsigned long ddr_phys_baseaddr;
10106 +       void *ddr_baseaddr;
10107 +       unsigned int ddr_size;
10108 +       void *cbus_baseaddr;
10109 +       void *apb_baseaddr;
10110 +       unsigned long iram_phys_baseaddr;
10111 +       void *iram_baseaddr;
10112 +       unsigned long ipsec_phys_baseaddr;
10113 +       void *ipsec_baseaddr;
10114 +       int hif_irq;
10115 +       int wol_irq;
10116 +       int hif_client_irq;
10117 +       struct device *dev;
10118 +       struct dentry *dentry;
10119 +       struct pfe_ctrl ctrl;
10120 +       struct pfe_hif hif;
10121 +       struct pfe_eth eth;
10122 +       struct pfe_mdio mdio;
10123 +       struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
10124 +#if defined(CFG_DIAGS)
10125 +       struct pfe_diags diags;
10126 +#endif
10127 +       struct pfe_tmu_credit tmu_credit;
10128 +       struct pfe_cpumon cpumon;
10129 +       struct pfe_memmon memmon;
10130 +       int wake;
10131 +       int mdio_muxval[PHYID_MAX_VAL];
10132 +       struct clk *hfe_clock;
10133 +};
10134 +
10135 +extern struct pfe *pfe;
10136 +
10137 +int pfe_probe(struct pfe *pfe);
10138 +int pfe_remove(struct pfe *pfe);
10139 +
10140 +/* DDR Mapping in reserved memory*/
10141 +#define ROUTE_TABLE_BASEADDR   0
10142 +#define ROUTE_TABLE_HASH_BITS  15      /* 32K entries */
10143 +#define ROUTE_TABLE_SIZE       ((1 << ROUTE_TABLE_HASH_BITS) \
10144 +                                 * CLASS_ROUTE_SIZE)
10145 +#define BMU2_DDR_BASEADDR      (ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE)
10146 +#define BMU2_BUF_COUNT         (4096 - 256)
10147 +/* This is to get a total DDR size of 12MiB */
10148 +#define BMU2_DDR_SIZE          (DDR_BUF_SIZE * BMU2_BUF_COUNT)
10149 +#define UTIL_CODE_BASEADDR     (BMU2_DDR_BASEADDR + BMU2_DDR_SIZE)
10150 +#define UTIL_CODE_SIZE         (128 * SZ_1K)
10151 +#define UTIL_DDR_DATA_BASEADDR (UTIL_CODE_BASEADDR + UTIL_CODE_SIZE)
10152 +#define UTIL_DDR_DATA_SIZE     (64 * SZ_1K)
10153 +#define CLASS_DDR_DATA_BASEADDR        (UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE)
10154 +#define CLASS_DDR_DATA_SIZE    (32 * SZ_1K)
10155 +#define TMU_DDR_DATA_BASEADDR  (CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE)
10156 +#define TMU_DDR_DATA_SIZE      (32 * SZ_1K)
10157 +#define TMU_LLM_BASEADDR       (TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE)
10158 +#define TMU_LLM_QUEUE_LEN      (8 * 512)
10159 +/* Must be power of two and at least 16 * 8 = 128 bytes */
10160 +#define TMU_LLM_SIZE           (4 * 16 * TMU_LLM_QUEUE_LEN)
10161 +/* (4 TMU's x 16 queues x queue_len) */
10162 +
10163 +#define DDR_MAX_SIZE           (TMU_LLM_BASEADDR + TMU_LLM_SIZE)
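+
+/*
+ * Resulting DDR layout (each region starts where the previous one ends):
+ * route table | BMU2 buffers | UTIL code | UTIL data | CLASS data |
+ * TMU data | TMU LLM
+ */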
10164 +
10165 +/* LMEM Mapping */
10166 +#define BMU1_LMEM_BASEADDR     0
10167 +#define BMU1_BUF_COUNT         256
10168 +#define BMU1_LMEM_SIZE         (LMEM_BUF_SIZE * BMU1_BUF_COUNT)
10169 +
10170 +#endif /* _PFE_MOD_H_ */
10171 --- /dev/null
10172 +++ b/drivers/staging/fsl_ppfe/pfe_perfmon.h
10173 @@ -0,0 +1,26 @@
10174 +/* SPDX-License-Identifier: GPL-2.0+ */
10175 +/*
10176 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
10177 + * Copyright 2017 NXP
10178 + */
10179 +
10180 +#ifndef _PFE_PERFMON_H_
10181 +#define _PFE_PERFMON_H_
10182 +
10183 +#include "pfe/pfe.h"
10184 +
10185 +#define        CT_CPUMON_INTERVAL      (1 * TIMER_TICKS_PER_SEC)
10186 +
10187 +struct pfe_cpumon {
10188 +       u32 cpu_usage_pct[MAX_PE];
10189 +       u32 class_usage_pct;
10190 +};
10191 +
10192 +struct pfe_memmon {
10193 +       u32 kernel_memory_allocated;
10194 +};
10195 +
10196 +int pfe_perfmon_init(struct pfe *pfe);
10197 +void pfe_perfmon_exit(struct pfe *pfe);
10198 +
10199 +#endif /* _PFE_PERFMON_H_ */
10200 --- /dev/null
10201 +++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c
10202 @@ -0,0 +1,806 @@
10203 +// SPDX-License-Identifier: GPL-2.0+
10204 +/*
10205 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
10206 + * Copyright 2017 NXP
10207 + */
10208 +
10209 +#include <linux/module.h>
10210 +#include <linux/platform_device.h>
10211 +
10212 +#include "pfe_mod.h"
10213 +
10214 +#define PE_EXCEPTION_DUMP_ADDRESS 0x1fa8
10215 +#define NUM_QUEUES             16
10216 +
10217 +static char register_name[20][5] = {
10218 +       "EPC", "ECAS", "EID", "ED",
10219 +       "r0", "r1", "r2", "r3",
10220 +       "r4", "r5", "r6", "r7",
10221 +       "r8", "r9", "r10", "r11",
10222 +       "r12", "r13", "r14", "r15",
10223 +};
10224 +
10225 +static char exception_name[14][20] = {
10226 +       "Reset",
10227 +       "HardwareFailure",
10228 +       "NMI",
10229 +       "InstBreakpoint",
10230 +       "DataBreakpoint",
10231 +       "Unsupported",
10232 +       "PrivilegeViolation",
10233 +       "InstBusError",
10234 +       "DataBusError",
10235 +       "AlignmentError",
10236 +       "ArithmeticError",
10237 +       "SystemCall",
10238 +       "MemoryManagement",
10239 +       "Interrupt",
10240 +};
10241 +
10242 +static unsigned long class_do_clear;
10243 +static unsigned long tmu_do_clear;
10244 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10245 +static unsigned long util_do_clear;
10246 +#endif
10247 +
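+/*
+ * Dump one PE's status from its DMEM: successive 32-bit words at dmem_addr
+ * hold the state string, a counter, rx count, tx count (queue status for
+ * TMUs) and drop count, optionally followed by a "DBUG" trace block; a
+ * "DEAD" state is followed by an exception register dump.
+ */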
10248 +static ssize_t display_pe_status(char *buf, int id, u32 dmem_addr, unsigned long
10249 +                                       do_clear)
10250 +{
10251 +       ssize_t len = 0;
10252 +       u32 val;
10253 +       char statebuf[5];
10254 +       struct pfe_cpumon *cpumon = &pfe->cpumon;
10255 +       u32 debug_indicator;
10256 +       u32 debug[20];
10257 +
10258 +       *(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4);
10259 +       dmem_addr += 4;
10260 +
10261 +       statebuf[4] = '\0';
10262 +       len += sprintf(buf + len, "state=%4s ", statebuf);
10263 +
10264 +       val = pe_dmem_read(id, dmem_addr, 4);
10265 +       dmem_addr += 4;
10266 +       len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val));
10267 +
10268 +       val = pe_dmem_read(id, dmem_addr, 4);
10269 +       if (do_clear && val)
10270 +               pe_dmem_write(id, 0, dmem_addr, 4);
10271 +       dmem_addr += 4;
10272 +       len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val));
10273 +
10274 +       val = pe_dmem_read(id, dmem_addr, 4);
10275 +       if (do_clear && val)
10276 +               pe_dmem_write(id, 0, dmem_addr, 4);
10277 +       dmem_addr += 4;
10278 +       if (id >= TMU0_ID && id <= TMU_MAX_ID)
10279 +               len += sprintf(buf + len, "qstatus=%x", cpu_to_be32(val));
10280 +       else
10281 +               len += sprintf(buf + len, "tx=%u", cpu_to_be32(val));
10282 +
10283 +       val = pe_dmem_read(id, dmem_addr, 4);
10284 +       if (do_clear && val)
10285 +               pe_dmem_write(id, 0, dmem_addr, 4);
10286 +       dmem_addr += 4;
10287 +       if (val)
10288 +               len += sprintf(buf + len, " drop=%u", cpu_to_be32(val));
10289 +
10290 +       len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]);
10291 +
10292 +       len += sprintf(buf + len, "\n");
10293 +
10294 +       debug_indicator = pe_dmem_read(id, dmem_addr, 4);
10295 +       dmem_addr += 4;
10296 +       if (!strncmp((char *)&debug_indicator, "DBUG", 4)) {
10297 +               int j, last = 0;
10298 +
10299 +               for (j = 0; j < 16; j++) {
10300 +                       debug[j] = pe_dmem_read(id, dmem_addr, 4);
10301 +                       if (debug[j]) {
10302 +                               if (do_clear)
10303 +                                       pe_dmem_write(id, 0, dmem_addr, 4);
10304 +                               last = j + 1;
10305 +                       }
10306 +                       dmem_addr += 4;
10307 +               }
10308 +               for (j = 0; j < last; j++) {
10309 +                       len += sprintf(buf + len, "%08x%s",
10310 +                       cpu_to_be32(debug[j]),
10311 +                       (j & 0x7) == 0x7 || j == last - 1 ? "\n" : " ");
10312 +               }
10313 +       }
10314 +
10315 +       if (!strncmp(statebuf, "DEAD", 4)) {
10316 +               u32 i, dump = PE_EXCEPTION_DUMP_ADDRESS;
10317 +
10318 +               len += sprintf(buf + len, "Exception details:\n");
10319 +               for (i = 0; i < 20; i++) {
10320 +                       debug[i] = pe_dmem_read(id, dump, 4);
10321 +                       dump += 4;
10322 +                       if (i == 2)
10323 +                               len += sprintf(buf + len, "%4s = %08x (=%s) ",
10324 +                               register_name[i], cpu_to_be32(debug[i]),
10325 +                               exception_name[min((u32)
10326 +                               cpu_to_be32(debug[i]), (u32)13)]);
10327 +                       else
10328 +                               len += sprintf(buf + len, "%4s = %08x%s",
10329 +                               register_name[i], cpu_to_be32(debug[i]),
10330 +                               (i & 0x3) == 0x3 || i == 19 ? "\n" : " ");
10331 +               }
10332 +       }
10333 +
10334 +       return len;
10335 +}
10336 +
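+/* class_phy_stats
+ * Per-PHY classifier counters: the general counters are laid out 0x28
+ * bytes apart per PHY and the protocol counters 0x10 bytes apart.
+ * PHY4's general counters are not contiguous with PHY1-PHY3, so off1
+ * is special-cased when phy == 3.
+ */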
10337 +static ssize_t class_phy_stats(char *buf, int phy)
10338 +{
10339 +       ssize_t len = 0;
10340 +       int off1 = phy * 0x28;
10341 +       int off2 = phy * 0x10;
10342 +
10343 +       if (phy == 3)
10344 +               off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS;
10345 +
10346 +       len += sprintf(buf + len, "phy: %d\n", phy);
10347 +       len += sprintf(buf + len,
10348 +                       "  rx:   %10u, tx:   %10u, intf:  %10u, ipv4:    %10u, ipv6: %10u\n",
10349 +                       readl(CLASS_PHY1_RX_PKTS + off1),
10350 +                       readl(CLASS_PHY1_TX_PKTS + off1),
10351 +                       readl(CLASS_PHY1_INTF_MATCH_PKTS + off1),
10352 +                       readl(CLASS_PHY1_V4_PKTS + off1),
10353 +                       readl(CLASS_PHY1_V6_PKTS + off1));
10354 +
10355 +       len += sprintf(buf + len,
10356 +                       "  icmp: %10u, igmp: %10u, tcp:   %10u, udp:     %10u\n",
10357 +                       readl(CLASS_PHY1_ICMP_PKTS + off2),
10358 +                       readl(CLASS_PHY1_IGMP_PKTS + off2),
10359 +                       readl(CLASS_PHY1_TCP_PKTS + off2),
10360 +                       readl(CLASS_PHY1_UDP_PKTS + off2));
10361 +
10362 +       len += sprintf(buf + len, "  err\n");
10363 +       len += sprintf(buf + len,
10364 +                       "  lp:   %10u, intf: %10u, l3:    %10u, chcksum: %10u, ttl:  %10u\n",
10365 +                       readl(CLASS_PHY1_LP_FAIL_PKTS + off1),
10366 +                       readl(CLASS_PHY1_INTF_FAIL_PKTS + off1),
10367 +                       readl(CLASS_PHY1_L3_FAIL_PKTS + off1),
10368 +                       readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1),
10369 +                       readl(CLASS_PHY1_TTL_ERR_PKTS + off1));
10370 +
10371 +       return len;
10372 +}
10373 +
10374 +/* qm_read_drop_stat
10375 + * This function is used to read the drop statistics from the TMU
10376 + * hw drop counter.  Since the hw counter is always cleared after
10377 + * reading, this function maintains the previous drop count, and
10378 + * adds the new value to it.  That value can be retrieved by
10379 + * passing a pointer to it with the total_drops arg.
10380 + *
10381 + * @param tmu          TMU number (0 - 3)
10382 + * @param queue                queue number (0 - 15)
10383 + * @param total_drops  pointer to location to store total drops (or NULL)
10384 + * @param do_reset     if TRUE, clear total drops after updating
10385 + */
10386 +u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset)
10387 +{
10388 +       static u32 qtotal[TMU_MAX_ID + 1][NUM_QUEUES];
10389 +       u32 val;
10390 +
10391 +       writel((tmu << 8) | queue, TMU_TEQ_CTRL);
10392 +       writel((tmu << 8) | queue, TMU_LLM_CTRL);
10393 +       val = readl(TMU_TEQ_DROP_STAT);
10394 +       qtotal[tmu][queue] += val;
10395 +       if (total_drops)
10396 +               *total_drops = qtotal[tmu][queue];
10397 +       if (do_reset)
10398 +               qtotal[tmu][queue] = 0;
10399 +       return val;
10400 +}
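+/*
+ * Usage sketch for qm_read_drop_stat() (illustrative only; this helper
+ * is not part of the driver): since the hw counter is clear-on-read,
+ * callers sample the function periodically and consume the accumulated
+ * total once, resetting it to start a fresh measurement window.
+ */
+static u32 qm_total_drops(u32 tmu, u32 queue)
+{
+       u32 total;
+
+       /* Fold the latest hw delta into the running total, fetch that
+        * total, and zero it for the next window.
+        */
+       qm_read_drop_stat(tmu, queue, &total, 1);
+       return total;
+}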
10401 +
10402 +static ssize_t tmu_queue_stats(char *buf, int tmu, int queue)
10403 +{
10404 +       ssize_t len = 0;
10405 +       u32 drops;
10406 +
10407 +       len += sprintf(buf + len, "%d-%02d, ", tmu, queue);
10408 +
10409 +       drops = qm_read_drop_stat(tmu, queue, NULL, 0);
10410 +
10411 +       /* Select queue */
10412 +       writel((tmu << 8) | queue, TMU_TEQ_CTRL);
10413 +       writel((tmu << 8) | queue, TMU_LLM_CTRL);
10414 +
10415 +       len += sprintf(buf + len,
10416 +                       "(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n",
10417 +               drops, readl(TMU_TEQ_TRANS_STAT),
10418 +               readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR),
10419 +               readl(TMU_LLM_QUE_DROPCNT));
10420 +
10421 +       return len;
10422 +}
10423 +
10424 +static ssize_t tmu_queues(char *buf, int tmu)
10425 +{
10426 +       ssize_t len = 0;
10427 +       int queue;
10428 +
10429 +       for (queue = 0; queue < 16; queue++)
10430 +               len += tmu_queue_stats(buf + len, tmu, queue);
10431 +
10432 +       return len;
10433 +}
10434 +
10435 +static ssize_t block_version(char *buf, void *addr)
10436 +{
10437 +       ssize_t len = 0;
10438 +       u32 val;
10439 +
10440 +       val = readl(addr);
10441 +       len += sprintf(buf + len, "revision: %x, version: %x, id: %x\n",
10442 +               (val >> 24) & 0xff, (val >> 16) & 0xff, val & 0xffff);
10443 +
10444 +       return len;
10445 +}
10446 +
10447 +static ssize_t bmu(char *buf, int id, void *base)
10448 +{
10449 +       ssize_t len = 0;
10450 +
10451 +       len += sprintf(buf + len, "%s: %d\n  ", __func__, id);
10452 +
10453 +       len += block_version(buf + len, base + BMU_VERSION);
10454 +
10455 +       len += sprintf(buf + len, "  buf size:  %x\n", (1 << readl(base +
10456 +                       BMU_BUF_SIZE)));
10457 +       len += sprintf(buf + len, "  buf count: %x\n", readl(base +
10458 +                       BMU_BUF_CNT));
10459 +       len += sprintf(buf + len, "  buf rem:   %x\n", readl(base +
10460 +                       BMU_REM_BUF_CNT));
10461 +       len += sprintf(buf + len, "  buf curr:  %x\n", readl(base +
10462 +                       BMU_CURR_BUF_CNT));
10463 +       len += sprintf(buf + len, "  free err:  %x\n", readl(base +
10464 +                       BMU_FREE_ERR_ADDR));
10465 +
10466 +       return len;
10467 +}
10468 +
10469 +static ssize_t gpi(char *buf, int id, void *base)
10470 +{
10471 +       ssize_t len = 0;
10472 +       u32 val;
10473 +
10474 +       len += sprintf(buf + len, "%s%d:\n  ", __func__, id);
10475 +       len += block_version(buf + len, base + GPI_VERSION);
10476 +
10477 +       len += sprintf(buf + len, "  tx under stick: %x\n", readl(base +
10478 +                       GPI_FIFO_STATUS));
10479 +       val = readl(base + GPI_FIFO_DEBUG);
10480 +       len += sprintf(buf + len, "  tx pkts:        %x\n", (val >> 23) &
10481 +                       0x3f);
10482 +       len += sprintf(buf + len, "  rx pkts:        %x\n", (val >> 18) &
10483 +                       0x3f);
10484 +       len += sprintf(buf + len, "  tx bytes:       %x\n", (val >> 9) &
10485 +                       0x1ff);
10486 +       len += sprintf(buf + len, "  rx bytes:       %x\n", (val >> 0) &
10487 +                       0x1ff);
10488 +       len += sprintf(buf + len, "  overrun:        %x\n", readl(base +
10489 +                       GPI_OVERRUN_DROPCNT));
10490 +
10491 +       return len;
10492 +}
10493 +
10494 +static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr,
10495 +                            const char *buf, size_t count)
10496 +{
10497 +       class_do_clear = simple_strtoul(buf, NULL, 0);
10498 +       return count;
10499 +}
10500 +
10501 +static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr,
10502 +                             char *buf)
10503 +{
10504 +       ssize_t len = 0;
10505 +       int id;
10506 +       u32 val;
10507 +       struct pfe_cpumon *cpumon = &pfe->cpumon;
10508 +
10509 +       len += block_version(buf + len, CLASS_VERSION);
10510 +
10511 +       for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
10512 +               len += sprintf(buf + len, "%d: ", id - CLASS0_ID);
10513 +
10514 +               val = readl(CLASS_PE0_DEBUG + id * 4);
10515 +               len += sprintf(buf + len, "pc=1%04x ", val & 0xffff);
10516 +
10517 +               len += display_pe_status(buf + len, id, CLASS_DM_PESTATUS,
10518 +                                               class_do_clear);
10519 +       }
10520 +       len += sprintf(buf + len, "aggregate load=%d%%\n\n",
10521 +                       cpumon->class_usage_pct);
10522 +
10523 +       len += sprintf(buf + len, "pe status:   0x%x\n",
10524 +                       readl(CLASS_PE_STATUS));
10525 +       len += sprintf(buf + len, "max buf cnt: 0x%x   afull thres: 0x%x\n",
10526 +                       readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES));
10527 +       len += sprintf(buf + len, "tsq max cnt: 0x%x   tsq fifo thres: 0x%x\n",
10528 +                       readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES));
10529 +       len += sprintf(buf + len, "state:       0x%x\n", readl(CLASS_STATE));
10530 +
10531 +       len += class_phy_stats(buf + len, 0);
10532 +       len += class_phy_stats(buf + len, 1);
10533 +       len += class_phy_stats(buf + len, 2);
10534 +       len += class_phy_stats(buf + len, 3);
10535 +
10536 +       return len;
10537 +}
10538 +
10539 +static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr,
10540 +                          const char *buf, size_t count)
10541 +{
10542 +       tmu_do_clear = simple_strtoul(buf, NULL, 0);
10543 +       return count;
10544 +}
10545 +
10546 +static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr,
10547 +                           char *buf)
10548 +{
10549 +       ssize_t len = 0;
10550 +       int id;
10551 +       u32 val;
10552 +
10553 +       len += block_version(buf + len, TMU_VERSION);
10554 +
10555 +       for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
10556 +               if (id == TMU2_ID)
10557 +                       continue;
10558 +               len += sprintf(buf + len, "%d: ", id - TMU0_ID);
10559 +
10560 +               len += display_pe_status(buf + len, id, TMU_DM_PESTATUS,
10561 +                                               tmu_do_clear);
10562 +       }
10563 +
10564 +       len += sprintf(buf + len, "pe status:    %x\n", readl(TMU_PE_STATUS));
10565 +       len += sprintf(buf + len, "inq fifo cnt: %x\n",
10566 +                       readl(TMU_PHY_INQ_FIFO_CNT));
10567 +       val = readl(TMU_INQ_STAT);
10568 +       len += sprintf(buf + len, "inq wr ptr:     %x\n", val & 0x3ff);
10569 +       len += sprintf(buf + len, "inq rd ptr:     %x\n", val >> 10);
10570 +
10571 +       return len;
10572 +}
10573 +
10574 +static unsigned long drops_do_clear;
10575 +static u32 class_drop_counter[CLASS_NUM_DROP_COUNTERS];
10576 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10577 +static u32 util_drop_counter[UTIL_NUM_DROP_COUNTERS];
10578 +#endif
10579 +
10580 +char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = {
10581 +       "ICC",
10582 +       "Host Pkt Error",
10583 +       "Rx Error",
10584 +       "IPsec Outbound",
10585 +       "IPsec Inbound",
10586 +       "EXPT IPsec Error",
10587 +       "Reassembly",
10588 +       "Fragmenter",
10589 +       "NAT-T",
10590 +       "Socket",
10591 +       "Multicast",
10592 +       "NAT-PT",
10593 +       "Tx Disabled",
10594 +};
10595 +
10596 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10597 +char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = {
10598 +       "IPsec Outbound",
10599 +       "IPsec Inbound",
10600 +       "IPsec Rate Limiter",
10601 +       "Fragmenter",
10602 +       "Socket",
10603 +       "Tx Disabled",
10604 +       "Rx Error",
10605 +};
10606 +#endif
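+/*
+ * Each description string above labels the drop counter at the same
+ * index in class_drop_counter[] (and util_drop_counter[] when the util
+ * PE is enabled); pfe_show_drops() below fills those arrays from PE DMEM.
+ */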
10607 +
10608 +static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr,
10609 +                            const char *buf, size_t count)
10610 +{
10611 +       drops_do_clear = simple_strtoul(buf, NULL, 0);
10612 +       return count;
10613 +}
10614 +
10615 +static u32 tmu_drops[4][16];
10616 +static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr,
10617 +                             char *buf)
10618 +{
10619 +       ssize_t len = 0;
10620 +       int id, dropnum;
10621 +       int tmu, queue;
10622 +       u32 val;
10623 +       u32 dmem_addr;
10624 +       int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0;
10625 +       struct pfe_ctrl *ctrl = &pfe->ctrl;
10626 +
10627 +       memset(class_drop_counter, 0, sizeof(class_drop_counter));
10628 +       for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
10629 +               if (drops_do_clear)
10630 +                       pe_sync_stop(ctrl, (1 << id));
10631 +               for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
10632 +                       dropnum++) {
10633 +                       dmem_addr = CLASS_DM_DROP_CNTR + (dropnum << 2);
10634 +                       val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
10635 +                       class_drop_counter[dropnum] += val;
10636 +                       num_class_drops += val;
10637 +                       if (drops_do_clear)
10638 +                               pe_dmem_write(id, 0, dmem_addr, 4);
10639 +               }
10640 +               if (drops_do_clear)
10641 +                       pe_start(ctrl, (1 << id));
10642 +       }
10643 +
10644 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10645 +       if (drops_do_clear)
10646 +               pe_sync_stop(ctrl, (1 << UTIL_ID));
10647 +       for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
10648 +               dmem_addr = UTIL_DM_DROP_CNTR + (dropnum << 2);
10649 +               val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
10650 +               util_drop_counter[dropnum] = val;
10651 +               num_util_drops += val;
10652 +               if (drops_do_clear)
10653 +                       pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
10654 +       }
10655 +       if (drops_do_clear)
10656 +               pe_start(ctrl, (1 << UTIL_ID));
10657 +#endif
10658 +       for (tmu = 0; tmu < 4; tmu++) {
10659 +               for (queue = 0; queue < 16; queue++) {
10660 +                       qm_read_drop_stat(tmu, queue, &tmu_drops[tmu][queue],
10661 +                                         drops_do_clear);
10662 +                       num_tmu_drops += tmu_drops[tmu][queue];
10663 +               }
10664 +       }
10665 +
10666 +       if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0)
10667 +               len += sprintf(buf + len, "No PE drops\n\n");
10668 +
10669 +       if (num_class_drops > 0) {
10670 +               len += sprintf(buf + len, "Class PE drops --\n");
10671 +               for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
10672 +                       dropnum++) {
10673 +                       if (class_drop_counter[dropnum] > 0)
10674 +                               len += sprintf(buf + len, "  %s: %d\n",
10675 +                                       class_drop_description[dropnum],
10676 +                                       class_drop_counter[dropnum]);
10677 +               }
10678 +               len += sprintf(buf + len, "\n");
10679 +       }
10680 +
10681 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10682 +       if (num_util_drops > 0) {
10683 +               len += sprintf(buf + len, "Util PE drops --\n");
10684 +               for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
10685 +                       if (util_drop_counter[dropnum] > 0)
10686 +                               len += sprintf(buf + len, "  %s: %d\n",
10687 +                                       util_drop_description[dropnum],
10688 +                                       util_drop_counter[dropnum]);
10689 +               }
10690 +               len += sprintf(buf + len, "\n");
10691 +       }
10692 +#endif
10693 +       if (num_tmu_drops > 0) {
10694 +               len += sprintf(buf + len, "TMU drops --\n");
10695 +               for (tmu = 0; tmu < 4; tmu++) {
10696 +                       for (queue = 0; queue < 16; queue++) {
10697 +                               if (tmu_drops[tmu][queue] > 0)
10698 +                                       len += sprintf(buf + len,
10699 +                                               "  TMU%d-Q%d: %d\n", tmu,
10700 +                                               queue, tmu_drops[tmu][queue]);
10701 +                       }
10702 +               }
10703 +               len += sprintf(buf + len, "\n");
10704 +       }
10705 +
10706 +       return len;
10707 +}
10708 +
10709 +static ssize_t pfe_show_tmu0_queues(struct device *dev,
10710 +                                    struct device_attribute *attr, char *buf)
10711 +{
10712 +       return tmu_queues(buf, 0);
10713 +}
10714 +
10715 +static ssize_t pfe_show_tmu1_queues(struct device *dev,
10716 +                                    struct device_attribute *attr, char *buf)
10717 +{
10718 +       return tmu_queues(buf, 1);
10719 +}
10720 +
10721 +static ssize_t pfe_show_tmu2_queues(struct device *dev,
10722 +                                    struct device_attribute *attr, char *buf)
10723 +{
10724 +       return tmu_queues(buf, 2);
10725 +}
10726 +
10727 +static ssize_t pfe_show_tmu3_queues(struct device *dev,
10728 +                                    struct device_attribute *attr, char *buf)
10729 +{
10730 +       return tmu_queues(buf, 3);
10731 +}
10732 +
10733 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10734 +static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr,
10735 +                           const char *buf, size_t count)
10736 +{
10737 +       util_do_clear = simple_strtoul(buf, NULL, 0);
10738 +       return count;
10739 +}
10740 +
10741 +static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr,
10742 +                            char *buf)
10743 +{
10744 +       ssize_t len = 0;
10745 +       struct pfe_ctrl *ctrl = &pfe->ctrl;
10746 +
10747 +       len += block_version(buf + len, UTIL_VERSION);
10748 +
10749 +       pe_sync_stop(ctrl, (1 << UTIL_ID));
10750 +       len += display_pe_status(buf + len, UTIL_ID, UTIL_DM_PESTATUS,
10751 +                                       util_do_clear);
10752 +       pe_start(ctrl, (1 << UTIL_ID));
10753 +
10754 +       len += sprintf(buf + len, "pe status:   %x\n", readl(UTIL_PE_STATUS));
10755 +       len += sprintf(buf + len, "max buf cnt: %x\n",
10756 +                       readl(UTIL_MAX_BUF_CNT));
10757 +       len += sprintf(buf + len, "tsq max cnt: %x\n",
10758 +                       readl(UTIL_TSQ_MAX_CNT));
10759 +
10760 +       return len;
10761 +}
10762 +#endif
10763 +
10764 +static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr,
10765 +                           char *buf)
10766 +{
10767 +       ssize_t len = 0;
10768 +
10769 +       len += bmu(buf + len, 1, BMU1_BASE_ADDR);
10770 +       len += bmu(buf + len, 2, BMU2_BASE_ADDR);
10771 +
10772 +       return len;
10773 +}
10774 +
10775 +static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr,
10776 +                           char *buf)
10777 +{
10778 +       ssize_t len = 0;
10779 +
10780 +       len += sprintf(buf + len, "hif:\n  ");
10781 +       len += block_version(buf + len, HIF_VERSION);
10782 +
10783 +       len += sprintf(buf + len, "  tx curr bd:    %x\n",
10784 +                       readl(HIF_TX_CURR_BD_ADDR));
10785 +       len += sprintf(buf + len, "  tx status:     %x\n",
10786 +                       readl(HIF_TX_STATUS));
10787 +       len += sprintf(buf + len, "  tx dma status: %x\n",
10788 +                       readl(HIF_TX_DMA_STATUS));
10789 +
10790 +       len += sprintf(buf + len, "  rx curr bd:    %x\n",
10791 +                       readl(HIF_RX_CURR_BD_ADDR));
10792 +       len += sprintf(buf + len, "  rx status:     %x\n",
10793 +                       readl(HIF_RX_STATUS));
10794 +       len += sprintf(buf + len, "  rx dma status: %x\n",
10795 +                       readl(HIF_RX_DMA_STATUS));
10796 +
10797 +       len += sprintf(buf + len, "hif nocopy:\n  ");
10798 +       len += block_version(buf + len, HIF_NOCPY_VERSION);
10799 +
10800 +       len += sprintf(buf + len, "  tx curr bd:    %x\n",
10801 +                       readl(HIF_NOCPY_TX_CURR_BD_ADDR));
10802 +       len += sprintf(buf + len, "  tx status:     %x\n",
10803 +                       readl(HIF_NOCPY_TX_STATUS));
10804 +       len += sprintf(buf + len, "  tx dma status: %x\n",
10805 +                       readl(HIF_NOCPY_TX_DMA_STATUS));
10806 +
10807 +       len += sprintf(buf + len, "  rx curr bd:    %x\n",
10808 +                       readl(HIF_NOCPY_RX_CURR_BD_ADDR));
10809 +       len += sprintf(buf + len, "  rx status:     %x\n",
10810 +                       readl(HIF_NOCPY_RX_STATUS));
10811 +       len += sprintf(buf + len, "  rx dma status: %x\n",
10812 +                       readl(HIF_NOCPY_RX_DMA_STATUS));
10813 +
10814 +       return len;
10815 +}
10816 +
10817 +static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr,
10818 +                           char *buf)
10819 +{
10820 +       ssize_t len = 0;
10821 +
10822 +       len += gpi(buf + len, 0, EGPI1_BASE_ADDR);
10823 +       len += gpi(buf + len, 1, EGPI2_BASE_ADDR);
10824 +       len += gpi(buf + len, 3, HGPI_BASE_ADDR);
10825 +
10826 +       return len;
10827 +}
10828 +
10829 +static ssize_t pfe_show_pfemem(struct device *dev,
10830 +                               struct device_attribute *attr, char *buf)
10831 +{
10832 +       ssize_t len = 0;
10833 +       struct pfe_memmon *memmon = &pfe->memmon;
10834 +
10835 +       len += sprintf(buf + len, "Kernel Memory: %d Bytes (%d KB)\n",
10836 +               memmon->kernel_memory_allocated,
10837 +               (memmon->kernel_memory_allocated + 1023) / 1024);
10838 +
10839 +       return len;
10840 +}
10841 +
10842 +#ifdef HIF_NAPI_STATS
10843 +static ssize_t pfe_show_hif_napi_stats(struct device *dev,
10844 +                                      struct device_attribute *attr,
10845 +                                      char *buf)
10846 +{
10847 +       struct platform_device *pdev = to_platform_device(dev);
10848 +       struct pfe *pfe = platform_get_drvdata(pdev);
10849 +       ssize_t len = 0;
10850 +
10851 +       len += sprintf(buf + len, "sched:  %u\n",
10852 +                       pfe->hif.napi_counters[NAPI_SCHED_COUNT]);
10853 +       len += sprintf(buf + len, "poll:   %u\n",
10854 +                       pfe->hif.napi_counters[NAPI_POLL_COUNT]);
10855 +       len += sprintf(buf + len, "packet: %u\n",
10856 +                       pfe->hif.napi_counters[NAPI_PACKET_COUNT]);
10857 +       len += sprintf(buf + len, "budget: %u\n",
10858 +                       pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]);
10859 +       len += sprintf(buf + len, "desc:   %u\n",
10860 +                       pfe->hif.napi_counters[NAPI_DESC_COUNT]);
10861 +       len += sprintf(buf + len, "full:   %u\n",
10862 +                       pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]);
10863 +
10864 +       return len;
10865 +}
10866 +
10867 +static ssize_t pfe_set_hif_napi_stats(struct device *dev,
10868 +                                     struct device_attribute *attr,
10869 +                                       const char *buf, size_t count)
10870 +{
10871 +       struct platform_device *pdev = to_platform_device(dev);
10872 +       struct pfe *pfe = platform_get_drvdata(pdev);
10873 +
10874 +       memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters));
10875 +
10876 +       return count;
10877 +}
10878 +
10879 +static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats,
10880 +                       pfe_set_hif_napi_stats);
10881 +#endif
10882 +
10883 +static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class);
10884 +static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu);
10885 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10886 +static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util);
10887 +#endif
10888 +static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL);
10889 +static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL);
10890 +static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL);
10891 +static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops);
10892 +static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL);
10893 +static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL);
10894 +static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL);
10895 +static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL);
10896 +static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL);
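+/*
+ * The attributes above appear as files in the PFE platform device's
+ * sysfs directory.  Reading a file dumps the corresponding block state,
+ * e.g. "cat tmu0_queues".  For the read-write files (class, tmu, util,
+ * drops), writing a non-zero value, e.g. "echo 1 > drops", makes
+ * subsequent reads clear the counters they report; the value is parsed
+ * as an unsigned long, so hex and octal prefixes are also accepted.
+ */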
10897 +
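+/* Create the sysfs files; on failure, unwind in reverse creation order. */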
10898 +int pfe_sysfs_init(struct pfe *pfe)
10899 +{
10900 +       if (device_create_file(pfe->dev, &dev_attr_class))
10901 +               goto err_class;
10902 +
10903 +       if (device_create_file(pfe->dev, &dev_attr_tmu))
10904 +               goto err_tmu;
10905 +
10906 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10907 +       if (device_create_file(pfe->dev, &dev_attr_util))
10908 +               goto err_util;
10909 +#endif
10910 +
10911 +       if (device_create_file(pfe->dev, &dev_attr_bmu))
10912 +               goto err_bmu;
10913 +
10914 +       if (device_create_file(pfe->dev, &dev_attr_hif))
10915 +               goto err_hif;
10916 +
10917 +       if (device_create_file(pfe->dev, &dev_attr_gpi))
10918 +               goto err_gpi;
10919 +
10920 +       if (device_create_file(pfe->dev, &dev_attr_drops))
10921 +               goto err_drops;
10922 +
10923 +       if (device_create_file(pfe->dev, &dev_attr_tmu0_queues))
10924 +               goto err_tmu0_queues;
10925 +
10926 +       if (device_create_file(pfe->dev, &dev_attr_tmu1_queues))
10927 +               goto err_tmu1_queues;
10928 +
10929 +       if (device_create_file(pfe->dev, &dev_attr_tmu2_queues))
10930 +               goto err_tmu2_queues;
10931 +
10932 +       if (device_create_file(pfe->dev, &dev_attr_tmu3_queues))
10933 +               goto err_tmu3_queues;
10934 +
10935 +       if (device_create_file(pfe->dev, &dev_attr_pfemem))
10936 +               goto err_pfemem;
10937 +
10938 +#ifdef HIF_NAPI_STATS
10939 +       if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats))
10940 +               goto err_hif_napi_stats;
10941 +#endif
10942 +
10943 +       return 0;
10944 +
10945 +#ifdef HIF_NAPI_STATS
10946 +err_hif_napi_stats:
10947 +       device_remove_file(pfe->dev, &dev_attr_pfemem);
10948 +#endif
10949 +
10950 +err_pfemem:
10951 +       device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
10952 +
10953 +err_tmu3_queues:
10954 +       device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
10955 +
10956 +err_tmu2_queues:
10957 +       device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
10958 +
10959 +err_tmu1_queues:
10960 +       device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
10961 +
10962 +err_tmu0_queues:
10963 +       device_remove_file(pfe->dev, &dev_attr_drops);
10964 +
10965 +err_drops:
10966 +       device_remove_file(pfe->dev, &dev_attr_gpi);
10967 +
10968 +err_gpi:
10969 +       device_remove_file(pfe->dev, &dev_attr_hif);
10970 +
10971 +err_hif:
10972 +       device_remove_file(pfe->dev, &dev_attr_bmu);
10973 +
10974 +err_bmu:
10975 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10976 +       device_remove_file(pfe->dev, &dev_attr_util);
10977 +
10978 +err_util:
10979 +#endif
10980 +       device_remove_file(pfe->dev, &dev_attr_tmu);
10981 +
10982 +err_tmu:
10983 +       device_remove_file(pfe->dev, &dev_attr_class);
10984 +
10985 +err_class:
10986 +       return -1;
10987 +}
10988 +
10989 +void pfe_sysfs_exit(struct pfe *pfe)
10990 +{
10991 +#ifdef HIF_NAPI_STATS
10992 +       device_remove_file(pfe->dev, &dev_attr_hif_napi_stats);
10993 +#endif
10994 +       device_remove_file(pfe->dev, &dev_attr_pfemem);
10995 +       device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
10996 +       device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
10997 +       device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
10998 +       device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
10999 +       device_remove_file(pfe->dev, &dev_attr_drops);
11000 +       device_remove_file(pfe->dev, &dev_attr_gpi);
11001 +       device_remove_file(pfe->dev, &dev_attr_hif);
11002 +       device_remove_file(pfe->dev, &dev_attr_bmu);
11003 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
11004 +       device_remove_file(pfe->dev, &dev_attr_util);
11005 +#endif
11006 +       device_remove_file(pfe->dev, &dev_attr_tmu);
11007 +       device_remove_file(pfe->dev, &dev_attr_class);
11008 +}
11009 --- /dev/null
11010 +++ b/drivers/staging/fsl_ppfe/pfe_sysfs.h
11011 @@ -0,0 +1,17 @@
11012 +/* SPDX-License-Identifier: GPL-2.0+ */
11013 +/*
11014 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
11015 + * Copyright 2017 NXP
11016 + */
11017 +
11018 +#ifndef _PFE_SYSFS_H_
11019 +#define _PFE_SYSFS_H_
11020 +
11021 +#include <linux/proc_fs.h>
11022 +
11023 +u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset);
11024 +
11025 +int pfe_sysfs_init(struct pfe *pfe);
11026 +void pfe_sysfs_exit(struct pfe *pfe);
11027 +
11028 +#endif /* _PFE_SYSFS_H_ */