target/linux/layerscape/patches-5.4/701-net-0286-staging-fsl_ppfe-eth-introduce-pfe-driver.patch
1 From fccb0e1e07fc0750fd081ab52ed94ee13f6b360f Mon Sep 17 00:00:00 2001
2 From: Calvin Johnson <calvin.johnson@nxp.com>
3 Date: Sat, 16 Sep 2017 14:22:17 +0530
4 Subject: [PATCH] staging: fsl_ppfe/eth: introduce pfe driver
5
6         This patch introduces Linux support for NXP's LS1012A Packet
7 Forwarding Engine (pfe_eth). LS1012A uses a hardware packet forwarding
8 engine to provide high-performance Ethernet interfaces. The device
9 includes two Ethernet ports.
10
11 Signed-off-by: Calvin Johnson <calvin.johnson@nxp.com>
12 Signed-off-by: Anjaneyulu Jagarlmudi <anji.jagarlmudi@nxp.com>
13 ---
14  drivers/staging/fsl_ppfe/Kconfig                |   20 +
15  drivers/staging/fsl_ppfe/Makefile               |   19 +
16  drivers/staging/fsl_ppfe/TODO                   |    2 +
17  drivers/staging/fsl_ppfe/pfe_ctrl.c             |  238 +++
18  drivers/staging/fsl_ppfe/pfe_debugfs.c          |  111 ++
19  drivers/staging/fsl_ppfe/pfe_eth.c              | 2434 +++++++++++++++++++++++
20  drivers/staging/fsl_ppfe/pfe_firmware.c         |  314 +++
21  drivers/staging/fsl_ppfe/pfe_hal.c              | 1516 ++++++++++++++
22  drivers/staging/fsl_ppfe/pfe_hif.c              | 1094 ++++++++++
23  drivers/staging/fsl_ppfe/pfe_hif_lib.c          |  638 ++++++
24  drivers/staging/fsl_ppfe/pfe_hw.c               |  176 ++
25  drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c |  394 ++++
26  drivers/staging/fsl_ppfe/pfe_mod.c              |  141 ++
27  drivers/staging/fsl_ppfe/pfe_sysfs.c            |  818 ++++++++
28  14 files changed, 7915 insertions(+)
29  create mode 100644 drivers/staging/fsl_ppfe/Kconfig
30  create mode 100644 drivers/staging/fsl_ppfe/Makefile
31  create mode 100644 drivers/staging/fsl_ppfe/TODO
32  create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.c
33  create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.c
34  create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.c
35  create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.c
36  create mode 100644 drivers/staging/fsl_ppfe/pfe_hal.c
37  create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.c
38  create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.c
39  create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.c
40  create mode 100644 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
41  create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.c
42  create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.c
43
44 --- /dev/null
45 +++ b/drivers/staging/fsl_ppfe/Kconfig
46 @@ -0,0 +1,20 @@
47 +#
48 +# Freescale Programmable Packet Forwarding Engine driver
49 +#
50 +config FSL_PPFE
51 +       bool "Freescale PPFE Driver"
52 +       default n
53 +       ---help---
54 +       Freescale LS1012A SoC has a Programmable Packet Forwarding Engine.
55 +       It provides two high-performance Ethernet interfaces.
56 +       This driver initializes, programs and controls the PPFE.
57 +       Use this driver to enable network connectivity on LS1012A platforms.
58 +
59 +if FSL_PPFE
60 +
61 +config FSL_PPFE_UTIL_DISABLED
62 +       bool "Disable PPFE UTIL Processor Engine"
63 +       ---help---
64 +       UTIL PE has to be enabled only if required.
65 +
66 +endif # FSL_PPFE
67 --- /dev/null
68 +++ b/drivers/staging/fsl_ppfe/Makefile
69 @@ -0,0 +1,19 @@
70 +#
71 +# Makefile for Freescale PPFE driver
72 +#
73 +
74 +ccflags-y +=  -I$(src)/include  -I$(src)
75 +
76 +obj-m += pfe.o
77 +
78 +pfe-y += pfe_mod.o \
79 +       pfe_hw.o \
80 +       pfe_firmware.o \
81 +       pfe_ctrl.o \
82 +       pfe_hif.o \
83 +       pfe_hif_lib.o\
84 +       pfe_eth.o \
85 +       pfe_sysfs.o \
86 +       pfe_debugfs.o \
87 +       pfe_ls1012a_platform.o \
88 +       pfe_hal.o
89 --- /dev/null
90 +++ b/drivers/staging/fsl_ppfe/TODO
91 @@ -0,0 +1,2 @@
92 +TODO:
93 +       - provide pfe pe monitoring support
94 --- /dev/null
95 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c
96 @@ -0,0 +1,238 @@
97 +/*
98 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
99 + * Copyright 2017 NXP
100 + *
101 + * This program is free software; you can redistribute it and/or modify
102 + * it under the terms of the GNU General Public License as published by
103 + * the Free Software Foundation; either version 2 of the License, or
104 + * (at your option) any later version.
105 + *
106 + * This program is distributed in the hope that it will be useful,
107 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
108 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
109 + * GNU General Public License for more details.
110 + *
111 + * You should have received a copy of the GNU General Public License
112 + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
113 + */
114 +
115 +#include <linux/kernel.h>
116 +#include <linux/sched.h>
117 +#include <linux/module.h>
118 +#include <linux/list.h>
119 +#include <linux/kthread.h>
120 +
121 +#include "pfe_mod.h"
122 +#include "pfe_ctrl.h"
123 +
124 +#define TIMEOUT_MS     1000
125 +
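+/*
+ * relax - polling helper used by the mailbox wait loops below.
+ *
+ * Returns 0 while the caller may keep polling: before @end it returns
+ * immediately, after @end it yields the CPU via schedule() when a
+ * reschedule is pending.  Once TIMEOUT_MS has elapsed past @end it
+ * returns -1 so the caller can give up.
+ */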
126 +int relax(unsigned long end)
127 +{
128 +       if (time_after(jiffies, end)) {
129 +               if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000))
130 +                       return -1;
131 +
132 +               if (need_resched())
133 +                       schedule();
134 +       }
135 +
136 +       return 0;
137 +}
138 +
139 +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl)
140 +{
141 +       int id;
142 +
143 +       mutex_lock(&ctrl->mutex);
144 +
145 +       for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
146 +               pe_dmem_write(id, cpu_to_be32(0x1), CLASS_DM_RESUME, 4);
147 +
148 +       for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
149 +               if (id == TMU2_ID)
150 +                       continue;
151 +               pe_dmem_write(id, cpu_to_be32(0x1), TMU_DM_RESUME, 4);
152 +       }
153 +
154 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
155 +       pe_dmem_write(UTIL_ID, cpu_to_be32(0x1), UTIL_DM_RESUME, 4);
156 +#endif
157 +       mutex_unlock(&ctrl->mutex);
158 +}
159 +
160 +void pfe_ctrl_resume(struct pfe_ctrl *ctrl)
161 +{
162 +       int pe_mask = CLASS_MASK | TMU_MASK;
163 +
164 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
165 +       pe_mask |= UTIL_MASK;
166 +#endif
167 +       mutex_lock(&ctrl->mutex);
168 +       pe_start(&pfe->ctrl, pe_mask);
169 +       mutex_unlock(&ctrl->mutex);
170 +}
171 +
172 +/* PE sync stop.
173 + * Stops packet processing for a list of PEs (specified using a bitmask).
174 + * The caller must hold ctrl->mutex.
175 + *
176 + * @param ctrl         Control context
177 + * @param pe_mask      Mask of PE IDs to stop
178 + *
179 + */
180 +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask)
181 +{
182 +       struct pe_sync_mailbox *mbox;
183 +       int pe_stopped = 0;
184 +       unsigned long end = jiffies + 2;
185 +       int i;
186 +
187 +       pe_mask &= 0x2FF;  /*Exclude Util + TMU2 */
188 +
189 +       for (i = 0; i < MAX_PE; i++)
190 +               if (pe_mask & (1 << i)) {
191 +                       mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
192 +
193 +                       pe_dmem_write(i, cpu_to_be32(0x1), (unsigned
194 +                                       long)&mbox->stop, 4);
195 +               }
196 +
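+       /*
+        * The PE firmware is expected to acknowledge the stop request by
+        * setting mbox->stopped; poll all requested PEs until each one has
+        * acknowledged or relax() reports a timeout.
+        */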
197 +       while (pe_stopped != pe_mask) {
198 +               for (i = 0; i < MAX_PE; i++)
199 +                       if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
200 +                               mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
201 +
202 +                               if (pe_dmem_read(i, (unsigned
203 +                                       long)&mbox->stopped, 4) &
204 +                                       cpu_to_be32(0x1))
205 +                                       pe_stopped |= (1 << i);
206 +                       }
207 +
208 +               if (relax(end) < 0)
209 +                       goto err;
210 +       }
211 +
212 +       return 0;
213 +
214 +err:
215 +       pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
216 +
217 +       for (i = 0; i < MAX_PE; i++)
218 +               if (pe_mask & (1 << i)) {
219 +                       mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
220 +
221 +                       pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
222 +                                       long)&mbox->stop, 4);
223 +       }
224 +
225 +       return -EIO;
226 +}
227 +
228 +/* PE start.
229 + * Starts packet processing for a list of PEs (specified using a bitmask).
230 + * The caller must hold ctrl->mutex.
231 + *
232 + * @param ctrl         Control context
233 + * @param pe_mask      Mask of PE IDs to start
234 + *
235 + */
236 +void pe_start(struct pfe_ctrl *ctrl, int pe_mask)
237 +{
238 +       struct pe_sync_mailbox *mbox;
239 +       int i;
240 +
241 +       for (i = 0; i < MAX_PE; i++)
242 +               if (pe_mask & (1 << i)) {
243 +                       mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
244 +
245 +                       pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
246 +                                       long)&mbox->stop, 4);
247 +               }
248 +}
249 +
250 +/* This function ensures all PEs are put into the idle state */
251 +int pe_reset_all(struct pfe_ctrl *ctrl)
252 +{
253 +       struct pe_sync_mailbox *mbox;
254 +       int pe_stopped = 0;
255 +       unsigned long end = jiffies + 2;
256 +       int i;
257 +       int pe_mask  = CLASS_MASK | TMU_MASK;
258 +
259 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
260 +       pe_mask |= UTIL_MASK;
261 +#endif
262 +
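+       /*
+        * Writing 0x2 to the stop mailbox appears to request a reset to
+        * idle (as opposed to the plain stop request 0x1 used by
+        * pe_sync_stop()); completion is still reported through
+        * mbox->stopped below.
+        */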
263 +       for (i = 0; i < MAX_PE; i++)
264 +               if (pe_mask & (1 << i)) {
265 +                       mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
266 +
267 +                       pe_dmem_write(i, cpu_to_be32(0x2), (unsigned
268 +                                       long)&mbox->stop, 4);
269 +               }
270 +
271 +       while (pe_stopped != pe_mask) {
272 +               for (i = 0; i < MAX_PE; i++)
273 +                       if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
274 +                               mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
275 +
276 +                               if (pe_dmem_read(i, (unsigned long)
277 +                                                       &mbox->stopped, 4) &
278 +                                               cpu_to_be32(0x1))
279 +                                       pe_stopped |= (1 << i);
280 +                       }
281 +
282 +               if (relax(end) < 0)
283 +                       goto err;
284 +       }
285 +
286 +       return 0;
287 +
288 +err:
289 +       pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
290 +       return -EIO;
291 +}
292 +
293 +int pfe_ctrl_init(struct pfe *pfe)
294 +{
295 +       struct pfe_ctrl *ctrl = &pfe->ctrl;
296 +       int id;
297 +
298 +       pr_info("%s\n", __func__);
299 +
300 +       mutex_init(&ctrl->mutex);
301 +       spin_lock_init(&ctrl->lock);
302 +
303 +       for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
304 +               ctrl->sync_mailbox_baseaddr[id] = CLASS_DM_SYNC_MBOX;
305 +               ctrl->msg_mailbox_baseaddr[id] = CLASS_DM_MSG_MBOX;
306 +       }
307 +
308 +       for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
309 +               if (id == TMU2_ID)
310 +                       continue;
311 +               ctrl->sync_mailbox_baseaddr[id] = TMU_DM_SYNC_MBOX;
312 +               ctrl->msg_mailbox_baseaddr[id] = TMU_DM_MSG_MBOX;
313 +       }
314 +
315 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
316 +       ctrl->sync_mailbox_baseaddr[UTIL_ID] = UTIL_DM_SYNC_MBOX;
317 +       ctrl->msg_mailbox_baseaddr[UTIL_ID] = UTIL_DM_MSG_MBOX;
318 +#endif
319 +
320 +       ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR;
321 +       ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr +
322 +                                               ROUTE_TABLE_BASEADDR;
323 +
324 +       ctrl->dev = pfe->dev;
325 +
326 +       pr_info("%s finished\n", __func__);
327 +
328 +       return 0;
329 +}
330 +
331 +void pfe_ctrl_exit(struct pfe *pfe)
332 +{
333 +       pr_info("%s\n", __func__);
334 +}
335 --- /dev/null
336 +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
337 @@ -0,0 +1,111 @@
338 +/*
339 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
340 + * Copyright 2017 NXP
341 + *
342 + * This program is free software; you can redistribute it and/or modify
343 + * it under the terms of the GNU General Public License as published by
344 + * the Free Software Foundation; either version 2 of the License, or
345 + * (at your option) any later version.
346 + *
347 + * This program is distributed in the hope that it will be useful,
348 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
349 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
350 + * GNU General Public License for more details.
351 + *
352 + * You should have received a copy of the GNU General Public License
353 + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
354 + */
355 +
356 +#include <linux/module.h>
357 +#include <linux/debugfs.h>
358 +#include <linux/platform_device.h>
359 +
360 +#include "pfe_mod.h"
361 +
362 +static int dmem_show(struct seq_file *s, void *unused)
363 +{
364 +       u32 dmem_addr, val;
365 +       int id = (long int)s->private;
366 +       int i;
367 +
368 +       for (dmem_addr = 0; dmem_addr < CLASS_DMEM_SIZE; dmem_addr += 8 * 4) {
369 +               seq_printf(s, "%04x:", dmem_addr);
370 +
371 +               for (i = 0; i < 8; i++) {
372 +                       val = pe_dmem_read(id, dmem_addr + i * 4, 4);
373 +                       seq_printf(s, " %02x %02x %02x %02x", val & 0xff,
374 +                                  (val >> 8) & 0xff, (val >> 16) & 0xff,
375 +                                  (val >> 24) & 0xff);
376 +               }
377 +
378 +               seq_puts(s, "\n");
379 +       }
380 +
381 +       return 0;
382 +}
383 +
384 +static int dmem_open(struct inode *inode, struct file *file)
385 +{
386 +       return single_open(file, dmem_show, inode->i_private);
387 +}
388 +
389 +static const struct file_operations dmem_fops = {
390 +       .open           = dmem_open,
391 +       .read           = seq_read,
392 +       .llseek         = seq_lseek,
393 +       .release        = single_release,
394 +};
395 +
396 +int pfe_debugfs_init(struct pfe *pfe)
397 +{
398 +       struct dentry *d;
399 +
400 +       pr_info("%s\n", __func__);
401 +
402 +       pfe->dentry = debugfs_create_dir("pfe", NULL);
403 +       if (IS_ERR_OR_NULL(pfe->dentry))
404 +               goto err_dir;
405 +
406 +       d = debugfs_create_file("pe0_dmem", 0444, pfe->dentry, (void *)0,
407 +                               &dmem_fops);
408 +       if (IS_ERR_OR_NULL(d))
409 +               goto err_pe;
410 +
411 +       d = debugfs_create_file("pe1_dmem", 0444, pfe->dentry, (void *)1,
412 +                               &dmem_fops);
413 +       if (IS_ERR_OR_NULL(d))
414 +               goto err_pe;
415 +
416 +       d = debugfs_create_file("pe2_dmem", 0444, pfe->dentry, (void *)2,
417 +                               &dmem_fops);
418 +       if (IS_ERR_OR_NULL(d))
419 +               goto err_pe;
420 +
421 +       d = debugfs_create_file("pe3_dmem", 0444, pfe->dentry, (void *)3,
422 +                               &dmem_fops);
423 +       if (IS_ERR_OR_NULL(d))
424 +               goto err_pe;
425 +
426 +       d = debugfs_create_file("pe4_dmem", 0444, pfe->dentry, (void *)4,
427 +                               &dmem_fops);
428 +       if (IS_ERR_OR_NULL(d))
429 +               goto err_pe;
430 +
431 +       d = debugfs_create_file("pe5_dmem", 0444, pfe->dentry, (void *)5,
432 +                               &dmem_fops);
433 +       if (IS_ERR_OR_NULL(d))
434 +               goto err_pe;
435 +
436 +       return 0;
437 +
438 +err_pe:
439 +       debugfs_remove_recursive(pfe->dentry);
440 +
441 +err_dir:
442 +       return -1;
443 +}
444 +
445 +void pfe_debugfs_exit(struct pfe *pfe)
446 +{
447 +       debugfs_remove_recursive(pfe->dentry);
448 +}
449 --- /dev/null
450 +++ b/drivers/staging/fsl_ppfe/pfe_eth.c
451 @@ -0,0 +1,2434 @@
452 +/*
453 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
454 + * Copyright 2017 NXP
455 + *
456 + * This program is free software; you can redistribute it and/or modify
457 + * it under the terms of the GNU General Public License as published by
458 + * the Free Software Foundation; either version 2 of the License, or
459 + * (at your option) any later version.
460 + *
461 + * This program is distributed in the hope that it will be useful,
462 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
463 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
464 + * GNU General Public License for more details.
465 + *
466 + * You should have received a copy of the GNU General Public License
467 + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
468 + */
469 +
470 +/* @pfe_eth.c.
471 + *  Ethernet driver to handle the exception path for PFE.
472 + *  - uses HIF functions to send/receive packets.
473 + *  - uses ctrl function to start/stop interfaces.
474 + *  - uses direct register accesses to control phy operation.
475 + */
476 +#include <linux/version.h>
477 +#include <linux/kernel.h>
478 +#include <linux/interrupt.h>
479 +#include <linux/dma-mapping.h>
480 +#include <linux/dmapool.h>
481 +#include <linux/netdevice.h>
482 +#include <linux/etherdevice.h>
483 +#include <linux/ethtool.h>
484 +#include <linux/mii.h>
485 +#include <linux/phy.h>
486 +#include <linux/timer.h>
487 +#include <linux/hrtimer.h>
488 +#include <linux/platform_device.h>
489 +
490 +#include <net/ip.h>
491 +#include <net/sock.h>
492 +
493 +#include <linux/io.h>
494 +#include <asm/irq.h>
495 +#include <linux/delay.h>
496 +#include <linux/regmap.h>
497 +#include <linux/i2c.h>
498 +
499 +#if defined(CONFIG_NF_CONNTRACK_MARK)
500 +#include <net/netfilter/nf_conntrack.h>
501 +#endif
502 +
503 +#include "pfe_mod.h"
504 +#include "pfe_eth.h"
505 +
506 +static void *cbus_emac_base[3];
507 +static void *cbus_gpi_base[3];
508 +
509 +/* Forward Declaration */
510 +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
511 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv);
512 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
513 +                               from_tx, int n_desc);
514 +
515 +unsigned int gemac_regs[] = {
516 +       0x0004, /* Interrupt event */
517 +       0x0008, /* Interrupt mask */
518 +       0x0024, /* Ethernet control */
519 +       0x0064, /* MIB Control/Status */
520 +       0x0084, /* Receive control/status */
521 +       0x00C4, /* Transmit control */
522 +       0x00E4, /* Physical address low */
523 +       0x00E8, /* Physical address high */
524 +       0x0144, /* Transmit FIFO Watermark and Store and Forward Control*/
525 +       0x0190, /* Receive FIFO Section Full Threshold */
526 +       0x01A0, /* Transmit FIFO Section Empty Threshold */
527 +       0x01B0, /* Frame Truncation Length */
528 +};
529 +
530 +/********************************************************************/
531 +/*                   SYSFS INTERFACE                               */
532 +/********************************************************************/
533 +
534 +#ifdef PFE_ETH_NAPI_STATS
535 +/*
536 + * pfe_eth_show_napi_stats
537 + */
538 +static ssize_t pfe_eth_show_napi_stats(struct device *dev,
539 +                                      struct device_attribute *attr,
540 +                                      char *buf)
541 +{
542 +       struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
543 +       ssize_t len = 0;
544 +
545 +       len += sprintf(buf + len, "sched:  %u\n",
546 +                       priv->napi_counters[NAPI_SCHED_COUNT]);
547 +       len += sprintf(buf + len, "poll:   %u\n",
548 +                       priv->napi_counters[NAPI_POLL_COUNT]);
549 +       len += sprintf(buf + len, "packet: %u\n",
550 +                       priv->napi_counters[NAPI_PACKET_COUNT]);
551 +       len += sprintf(buf + len, "budget: %u\n",
552 +                       priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
553 +       len += sprintf(buf + len, "desc:   %u\n",
554 +                       priv->napi_counters[NAPI_DESC_COUNT]);
555 +
556 +       return len;
557 +}
558 +
559 +/*
560 + * pfe_eth_set_napi_stats
561 + */
562 +static ssize_t pfe_eth_set_napi_stats(struct device *dev,
563 +                                     struct device_attribute *attr,
564 +                                     const char *buf, size_t count)
565 +{
566 +       struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
567 +
568 +       memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
569 +
570 +       return count;
571 +}
572 +#endif
573 +#ifdef PFE_ETH_TX_STATS
574 +/* pfe_eth_show_tx_stats
575 + *
576 + */
577 +static ssize_t pfe_eth_show_tx_stats(struct device *dev,
578 +                                    struct device_attribute *attr,
579 +                                    char *buf)
580 +{
581 +       struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
582 +       ssize_t len = 0;
583 +       int i;
584 +
585 +       len += sprintf(buf + len, "TX queues stats:\n");
586 +
587 +       for (i = 0; i < emac_txq_cnt; i++) {
588 +               struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
589 +                                                                       i);
590 +
591 +               len += sprintf(buf + len, "\n");
592 +               __netif_tx_lock_bh(tx_queue);
593 +
594 +               hif_tx_lock(&pfe->hif);
595 +               len += sprintf(buf + len,
596 +                               "Queue %2d :  credits               = %10d\n"
597 +                               , i, hif_lib_tx_credit_avail(pfe, priv->id, i));
598 +               len += sprintf(buf + len,
599 +                                "            tx packets            = %10d\n"
600 +                               ,  pfe->tmu_credit.tx_packets[priv->id][i]);
601 +               hif_tx_unlock(&pfe->hif);
602 +
603 +               /* Don't output additional stats if the queue was never used */
604 +               if (!pfe->tmu_credit.tx_packets[priv->id][i])
605 +                       goto skip;
606 +
607 +               len += sprintf(buf + len,
608 +                                "            clean_fail            = %10d\n"
609 +                               , priv->clean_fail[i]);
610 +               len += sprintf(buf + len,
611 +                                "            stop_queue            = %10d\n"
612 +                               , priv->stop_queue_total[i]);
613 +               len += sprintf(buf + len,
614 +                                "            stop_queue_hif        = %10d\n"
615 +                               , priv->stop_queue_hif[i]);
616 +               len += sprintf(buf + len,
617 +                               "            stop_queue_hif_client = %10d\n"
618 +                               , priv->stop_queue_hif_client[i]);
619 +               len += sprintf(buf + len,
620 +                                "            stop_queue_credit     = %10d\n"
621 +                               , priv->stop_queue_credit[i]);
622 +skip:
623 +               __netif_tx_unlock_bh(tx_queue);
624 +       }
625 +       return len;
626 +}
627 +
628 +/* pfe_eth_set_tx_stats
629 + *
630 + */
631 +static ssize_t pfe_eth_set_tx_stats(struct device *dev,
632 +                                   struct device_attribute *attr,
633 +                                   const char *buf, size_t count)
634 +{
635 +       struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
636 +       int i;
637 +
638 +       for (i = 0; i < emac_txq_cnt; i++) {
639 +               struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
640 +                                                                       i);
641 +
642 +               __netif_tx_lock_bh(tx_queue);
643 +               priv->clean_fail[i] = 0;
644 +               priv->stop_queue_total[i] = 0;
645 +               priv->stop_queue_hif[i] = 0;
646 +               priv->stop_queue_hif_client[i] = 0;
647 +               priv->stop_queue_credit[i] = 0;
648 +               __netif_tx_unlock_bh(tx_queue);
649 +       }
650 +
651 +       return count;
652 +}
653 +#endif
654 +/* pfe_eth_show_txavail
655 + *
656 + */
657 +static ssize_t pfe_eth_show_txavail(struct device *dev,
658 +                                   struct device_attribute *attr,
659 +                                   char *buf)
660 +{
661 +       struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
662 +       ssize_t len = 0;
663 +       int i;
664 +
665 +       for (i = 0; i < emac_txq_cnt; i++) {
666 +               struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
667 +                                                                       i);
668 +
669 +               __netif_tx_lock_bh(tx_queue);
670 +
671 +               len += sprintf(buf + len, "%d",
672 +                               hif_lib_tx_avail(&priv->client, i));
673 +
674 +               __netif_tx_unlock_bh(tx_queue);
675 +
676 +               if (i == (emac_txq_cnt - 1))
677 +                       len += sprintf(buf + len, "\n");
678 +               else
679 +                       len += sprintf(buf + len, " ");
680 +       }
681 +
682 +       return len;
683 +}
684 +
685 +/* pfe_eth_show_default_priority
686 + *
687 + */
688 +static ssize_t pfe_eth_show_default_priority(struct device *dev,
689 +                                            struct device_attribute *attr,
690 +                                               char *buf)
691 +{
692 +       struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
693 +       unsigned long flags;
694 +       int rc;
695 +
696 +       spin_lock_irqsave(&priv->lock, flags);
697 +       rc = sprintf(buf, "%d\n", priv->default_priority);
698 +       spin_unlock_irqrestore(&priv->lock, flags);
699 +
700 +       return rc;
701 +}
702 +
703 +/* pfe_eth_set_default_priority
704 + *
705 + */
706 +
707 +static ssize_t pfe_eth_set_default_priority(struct device *dev,
708 +                                           struct device_attribute *attr,
709 +                                           const char *buf, size_t count)
710 +{
711 +       struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
712 +       unsigned long flags;
713 +
714 +       spin_lock_irqsave(&priv->lock, flags);
715 +       priv->default_priority = simple_strtoul(buf, NULL, 0);
716 +       spin_unlock_irqrestore(&priv->lock, flags);
717 +
718 +       return count;
719 +}
720 +
721 +static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
722 +static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority,
723 +                       pfe_eth_set_default_priority);
724 +
725 +#ifdef PFE_ETH_NAPI_STATS
726 +static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats,
727 +                       pfe_eth_set_napi_stats);
728 +#endif
729 +
730 +#ifdef PFE_ETH_TX_STATS
731 +static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats,
732 +                       pfe_eth_set_tx_stats);
733 +#endif
734 +
735 +/*
736 + * pfe_eth_sysfs_init
737 + *
738 + */
739 +static int pfe_eth_sysfs_init(struct net_device *ndev)
740 +{
741 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
742 +       int err;
743 +
744 +       /* Initialize the default values */
745 +
746 +       /*
747 +        * By default, packets without conntrack will use this default high
748 +        * priority queue
749 +        */
750 +       priv->default_priority = 15;
751 +
752 +       /* Create our sysfs files */
753 +       err = device_create_file(&ndev->dev, &dev_attr_default_priority);
754 +       if (err) {
755 +               netdev_err(ndev,
756 +                          "failed to create default_priority sysfs files\n");
757 +               goto err_priority;
758 +       }
759 +
760 +       err = device_create_file(&ndev->dev, &dev_attr_txavail);
761 +       if (err) {
762 +               netdev_err(ndev,
763 +                          "failed to create txavail sysfs file\n");
764 +               goto err_txavail;
765 +       }
766 +
767 +#ifdef PFE_ETH_NAPI_STATS
768 +       err = device_create_file(&ndev->dev, &dev_attr_napi_stats);
769 +       if (err) {
770 +               netdev_err(ndev, "failed to create napi stats sysfs files\n");
771 +               goto err_napi;
772 +       }
773 +#endif
774 +
775 +#ifdef PFE_ETH_TX_STATS
776 +       err = device_create_file(&ndev->dev, &dev_attr_tx_stats);
777 +       if (err) {
778 +               netdev_err(ndev, "failed to create tx stats sysfs files\n");
779 +               goto err_tx;
780 +       }
781 +#endif
782 +
783 +       return 0;
784 +
785 +#ifdef PFE_ETH_TX_STATS
786 +err_tx:
787 +#endif
788 +#ifdef PFE_ETH_NAPI_STATS
789 +       device_remove_file(&ndev->dev, &dev_attr_napi_stats);
790 +
791 +err_napi:
792 +#endif
793 +       device_remove_file(&ndev->dev, &dev_attr_txavail);
794 +
795 +err_txavail:
796 +       device_remove_file(&ndev->dev, &dev_attr_default_priority);
797 +
798 +err_priority:
799 +       return -1;
800 +}
801 +
802 +/* pfe_eth_sysfs_exit
803 + *
804 + */
805 +void pfe_eth_sysfs_exit(struct net_device *ndev)
806 +{
807 +#ifdef PFE_ETH_TX_STATS
808 +       device_remove_file(&ndev->dev, &dev_attr_tx_stats);
809 +#endif
810 +
811 +#ifdef PFE_ETH_NAPI_STATS
812 +       device_remove_file(&ndev->dev, &dev_attr_napi_stats);
813 +#endif
814 +       device_remove_file(&ndev->dev, &dev_attr_txavail);
815 +       device_remove_file(&ndev->dev, &dev_attr_default_priority);
816 +}
817 +
818 +/*************************************************************************/
819 +/*             ETHTOOL INTERFACE                                        */
820 +/*************************************************************************/
821 +
822 +/*MTIP GEMAC */
823 +static const struct fec_stat {
824 +       char name[ETH_GSTRING_LEN];
825 +       u16 offset;
826 +} fec_stats[] = {
827 +       /* RMON TX */
828 +       { "tx_dropped", RMON_T_DROP },
829 +       { "tx_packets", RMON_T_PACKETS },
830 +       { "tx_broadcast", RMON_T_BC_PKT },
831 +       { "tx_multicast", RMON_T_MC_PKT },
832 +       { "tx_crc_errors", RMON_T_CRC_ALIGN },
833 +       { "tx_undersize", RMON_T_UNDERSIZE },
834 +       { "tx_oversize", RMON_T_OVERSIZE },
835 +       { "tx_fragment", RMON_T_FRAG },
836 +       { "tx_jabber", RMON_T_JAB },
837 +       { "tx_collision", RMON_T_COL },
838 +       { "tx_64byte", RMON_T_P64 },
839 +       { "tx_65to127byte", RMON_T_P65TO127 },
840 +       { "tx_128to255byte", RMON_T_P128TO255 },
841 +       { "tx_256to511byte", RMON_T_P256TO511 },
842 +       { "tx_512to1023byte", RMON_T_P512TO1023 },
843 +       { "tx_1024to2047byte", RMON_T_P1024TO2047 },
844 +       { "tx_GTE2048byte", RMON_T_P_GTE2048 },
845 +       { "tx_octets", RMON_T_OCTETS },
846 +
847 +       /* IEEE TX */
848 +       { "IEEE_tx_drop", IEEE_T_DROP },
849 +       { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
850 +       { "IEEE_tx_1col", IEEE_T_1COL },
851 +       { "IEEE_tx_mcol", IEEE_T_MCOL },
852 +       { "IEEE_tx_def", IEEE_T_DEF },
853 +       { "IEEE_tx_lcol", IEEE_T_LCOL },
854 +       { "IEEE_tx_excol", IEEE_T_EXCOL },
855 +       { "IEEE_tx_macerr", IEEE_T_MACERR },
856 +       { "IEEE_tx_cserr", IEEE_T_CSERR },
857 +       { "IEEE_tx_sqe", IEEE_T_SQE },
858 +       { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
859 +       { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
860 +
861 +       /* RMON RX */
862 +       { "rx_packets", RMON_R_PACKETS },
863 +       { "rx_broadcast", RMON_R_BC_PKT },
864 +       { "rx_multicast", RMON_R_MC_PKT },
865 +       { "rx_crc_errors", RMON_R_CRC_ALIGN },
866 +       { "rx_undersize", RMON_R_UNDERSIZE },
867 +       { "rx_oversize", RMON_R_OVERSIZE },
868 +       { "rx_fragment", RMON_R_FRAG },
869 +       { "rx_jabber", RMON_R_JAB },
870 +       { "rx_64byte", RMON_R_P64 },
871 +       { "rx_65to127byte", RMON_R_P65TO127 },
872 +       { "rx_128to255byte", RMON_R_P128TO255 },
873 +       { "rx_256to511byte", RMON_R_P256TO511 },
874 +       { "rx_512to1023byte", RMON_R_P512TO1023 },
875 +       { "rx_1024to2047byte", RMON_R_P1024TO2047 },
876 +       { "rx_GTE2048byte", RMON_R_P_GTE2048 },
877 +       { "rx_octets", RMON_R_OCTETS },
878 +
879 +       /* IEEE RX */
880 +       { "IEEE_rx_drop", IEEE_R_DROP },
881 +       { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
882 +       { "IEEE_rx_crc", IEEE_R_CRC },
883 +       { "IEEE_rx_align", IEEE_R_ALIGN },
884 +       { "IEEE_rx_macerr", IEEE_R_MACERR },
885 +       { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
886 +       { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
887 +};
888 +
889 +static void pfe_eth_fill_stats(struct net_device *ndev, struct ethtool_stats
890 +                               *stats, u64 *data)
891 +{
892 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
893 +       int i;
894 +
895 +       for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
896 +               data[i] = readl(priv->EMAC_baseaddr + fec_stats[i].offset);
897 +}
898 +
899 +static void pfe_eth_gstrings(struct net_device *netdev,
900 +                            u32 stringset, u8 *data)
901 +{
902 +       int i;
903 +
904 +       switch (stringset) {
905 +       case ETH_SS_STATS:
906 +               for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
907 +                       memcpy(data + i * ETH_GSTRING_LEN,
908 +                              fec_stats[i].name, ETH_GSTRING_LEN);
909 +               break;
910 +       }
911 +}
912 +
913 +static int pfe_eth_stats_count(struct net_device *ndev, int sset)
914 +{
915 +       switch (sset) {
916 +       case ETH_SS_STATS:
917 +               return ARRAY_SIZE(fec_stats);
918 +       default:
919 +               return -EOPNOTSUPP;
920 +       }
921 +}
922 +
923 +/*
924 + * pfe_eth_gemac_reglen - Return the length of the register structure.
925 + *
926 + */
927 +static int pfe_eth_gemac_reglen(struct net_device *ndev)
928 +{
929 +       pr_info("%s()\n", __func__);
930 +       return sizeof(gemac_regs);
931 +}
932 +
933 +/*
934 + * pfe_eth_gemac_get_regs - Return the gemac register structure.
935 + *
936 + */
937 +static void  pfe_eth_gemac_get_regs(struct net_device *ndev, struct ethtool_regs
938 +                                       *regs, void *regbuf)
939 +{
940 +       int i;
941 +
942 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
943 +       u32 *buf = (u32 *)regbuf;
944 +
945 +       pr_info("%s()\n", __func__);
946 +       for (i = 0; i < sizeof(gemac_regs) / sizeof(u32); i++)
947 +               buf[i] = readl(priv->EMAC_baseaddr + gemac_regs[i]);
948 +}
949 +
950 +/*
951 + * pfe_eth_set_wol - Set the magic packet option in the WoL register.
952 + *
953 + */
954 +static int pfe_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
955 +{
956 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
957 +
958 +       if (wol->wolopts & ~WAKE_MAGIC)
959 +               return -EOPNOTSUPP;
960 +
961 +       /* for MTIP we store wol->wolopts */
962 +       priv->wol = wol->wolopts;
963 +
964 +       device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
965 +
966 +       return 0;
967 +}
968 +
969 +/*
970 + *
971 + * pfe_eth_get_wol - Get the WoL options.
972 + *
973 + */
974 +static void pfe_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo
975 +                               *wol)
976 +{
977 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
978 +
979 +       wol->supported = WAKE_MAGIC;
980 +       wol->wolopts = 0;
981 +
982 +       if (priv->wol & WAKE_MAGIC)
983 +               wol->wolopts = WAKE_MAGIC;
984 +
985 +       memset(&wol->sopass, 0, sizeof(wol->sopass));
986 +}
987 +
988 +/*
989 + * pfe_eth_get_drvinfo -  Fills in the drvinfo structure with some basic info
990 + *
991 + */
992 +static void pfe_eth_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo
993 +                               *drvinfo)
994 +{
995 +       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
996 +       strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
997 +       strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
998 +       strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
999 +}
1000 +
1001 +/*
1002 + * pfe_eth_set_settings - Used to send commands to PHY.
1003 + *
1004 + */
1005 +static int pfe_eth_set_settings(struct net_device *ndev,
1006 +                               const struct ethtool_link_ksettings *cmd)
1007 +{
1008 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
1009 +       struct phy_device *phydev = priv->phydev;
1010 +
1011 +       if (!phydev)
1012 +               return -ENODEV;
1013 +
1014 +       return phy_ethtool_ksettings_set(phydev, cmd);
1015 +}
1016 +
1017 +/*
1018 + * pfe_eth_get_settings - Return the current settings in the ethtool_link_ksettings
1019 + * structure.
1020 + *
1021 + */
1022 +static int pfe_eth_get_settings(struct net_device *ndev,
1023 +                               struct ethtool_link_ksettings *cmd)
1024 +{
1025 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
1026 +       struct phy_device *phydev = priv->phydev;
1027 +
1028 +       if (!phydev)
1029 +               return -ENODEV;
1030 +
1031 +       return phy_ethtool_ksettings_get(phydev, cmd);
1032 +}
1033 +
1034 +/*
1035 + * pfe_eth_get_msglevel - Gets the debug message mask.
1036 + *
1037 + */
1038 +static uint32_t pfe_eth_get_msglevel(struct net_device *ndev)
1039 +{
1040 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
1041 +
1042 +       return priv->msg_enable;
1043 +}
1044 +
1045 +/*
1046 + * pfe_eth_set_msglevel - Sets the debug message mask.
1047 + *
1048 + */
1049 +static void pfe_eth_set_msglevel(struct net_device *ndev, uint32_t data)
1050 +{
1051 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
1052 +
1053 +       priv->msg_enable = data;
1054 +}
1055 +
1056 +#define HIF_RX_COAL_MAX_CLKS           (~(1 << 31))
1057 +#define HIF_RX_COAL_CLKS_PER_USEC      (pfe->ctrl.sys_clk / 1000)
1058 +#define HIF_RX_COAL_MAX_USECS          (HIF_RX_COAL_MAX_CLKS   / \
1059 +                                               HIF_RX_COAL_CLKS_PER_USEC)
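+/*
+ * The HIF coalescing timer is programmed in HIF clock cycles, so the
+ * user-supplied microseconds are converted with HIF_RX_COAL_CLKS_PER_USEC.
+ * sys_clk is assumed to be in kHz here, making sys_clk / 1000 the number
+ * of clocks per microsecond; e.g. with a hypothetical 250000 kHz sys_clk,
+ * rx_coalesce_usecs = 100 would program 100 * 250 = 25000 cycles.
+ */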
1060 +
1061 +/*
1062 + * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer.
1063 + *
1064 + */
1065 +static int pfe_eth_set_coalesce(struct net_device *ndev,
1066 +                               struct ethtool_coalesce *ec)
1067 +{
1068 +       if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS)
1069 +               return -EINVAL;
1070 +
1071 +       if (!ec->rx_coalesce_usecs) {
1072 +               writel(0, HIF_INT_COAL);
1073 +               return 0;
1074 +       }
1075 +
1076 +       writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) |
1077 +                       HIF_INT_COAL_ENABLE, HIF_INT_COAL);
1078 +
1079 +       return 0;
1080 +}
1081 +
1082 +/*
1083 + * pfe_eth_get_coalesce - Gets rx interrupt coalescing timer value.
1084 + *
1085 + */
1086 +static int pfe_eth_get_coalesce(struct net_device *ndev,
1087 +                               struct ethtool_coalesce *ec)
1088 +{
1089 +       int reg_val = readl(HIF_INT_COAL);
1090 +
1091 +       if (reg_val & HIF_INT_COAL_ENABLE)
1092 +               ec->rx_coalesce_usecs = (reg_val & HIF_RX_COAL_MAX_CLKS) /
1093 +                                               HIF_RX_COAL_CLKS_PER_USEC;
1094 +       else
1095 +               ec->rx_coalesce_usecs = 0;
1096 +
1097 +       return 0;
1098 +}
1099 +
1100 +/*
1101 + * pfe_eth_set_pauseparam - Sets pause parameters
1102 + *
1103 + */
1104 +static int pfe_eth_set_pauseparam(struct net_device *ndev,
1105 +                                 struct ethtool_pauseparam *epause)
1106 +{
1107 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
1108 +
1109 +       if (epause->tx_pause != epause->rx_pause) {
1110 +               netdev_info(ndev,
1111 +                           "hardware only supports enabling/disabling both tx and rx\n");
1112 +               return -EINVAL;
1113 +       }
1114 +
1115 +       priv->pause_flag = 0;
1116 +       priv->pause_flag |= epause->rx_pause ? PFE_PAUSE_FLAG_ENABLE : 0;
1117 +       priv->pause_flag |= epause->autoneg ? PFE_PAUSE_FLAG_AUTONEG : 0;
1118 +
1119 +       if (epause->rx_pause || epause->autoneg) {
1120 +               gemac_enable_pause_rx(priv->EMAC_baseaddr);
1121 +               writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) |
1122 +                                       EGPI_PAUSE_ENABLE),
1123 +                               priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
1124 +               if (priv->phydev) {
1125 +                       priv->phydev->supported |= ADVERTISED_Pause |
1126 +                                                       ADVERTISED_Asym_Pause;
1127 +                       priv->phydev->advertising |= ADVERTISED_Pause |
1128 +                                                       ADVERTISED_Asym_Pause;
1129 +               }
1130 +       } else {
1131 +               gemac_disable_pause_rx(priv->EMAC_baseaddr);
1132 +               writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) &
1133 +                                       ~EGPI_PAUSE_ENABLE),
1134 +                               priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
1135 +               if (priv->phydev) {
1136 +                       priv->phydev->supported &= ~(ADVERTISED_Pause |
1137 +                                                       ADVERTISED_Asym_Pause);
1138 +                       priv->phydev->advertising &= ~(ADVERTISED_Pause |
1139 +                                                       ADVERTISED_Asym_Pause);
1140 +               }
1141 +       }
1142 +
1143 +       return 0;
1144 +}
1145 +
1146 +/*
1147 + * pfe_eth_get_pauseparam - Gets pause parameters
1148 + *
1149 + */
1150 +static void pfe_eth_get_pauseparam(struct net_device *ndev,
1151 +                                  struct ethtool_pauseparam *epause)
1152 +{
1153 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
1154 +
1155 +       epause->autoneg = (priv->pause_flag & PFE_PAUSE_FLAG_AUTONEG) != 0;
1156 +       epause->tx_pause = (priv->pause_flag & PFE_PAUSE_FLAG_ENABLE) != 0;
1157 +       epause->rx_pause = epause->tx_pause;
1158 +}
1159 +
1160 +/*
1161 + * pfe_eth_get_hash
1162 + */
1163 +#define PFE_HASH_BITS  6               /* #bits in hash */
1164 +#define CRC32_POLY     0xEDB88320
1165 +
1166 +static int pfe_eth_get_hash(u8 *addr)
1167 +{
1168 +       unsigned int i, bit, data, crc, hash;
1169 +
1170 +       /* calculate crc32 value of mac address */
1171 +       crc = 0xffffffff;
1172 +
1173 +       for (i = 0; i < 6; i++) {
1174 +               data = addr[i];
1175 +               for (bit = 0; bit < 8; bit++, data >>= 1) {
1176 +                       crc = (crc >> 1) ^
1177 +                               (((crc ^ data) & 1) ? CRC32_POLY : 0);
1178 +               }
1179 +       }
1180 +
1181 +       /*
1182 +        * only the upper 6 bits (PFE_HASH_BITS) are used;
1183 +        * they select a specific bit in the hash registers
1184 +        */
1185 +       hash = (crc >> (32 - PFE_HASH_BITS)) & 0x3f;
1186 +
1187 +       return hash;
1188 +}
1189 +
1190 +const struct ethtool_ops pfe_ethtool_ops = {
1191 +       .get_drvinfo = pfe_eth_get_drvinfo,
1192 +       .get_regs_len = pfe_eth_gemac_reglen,
1193 +       .get_regs = pfe_eth_gemac_get_regs,
1194 +       .get_link = ethtool_op_get_link,
1195 +       .get_wol  = pfe_eth_get_wol,
1196 +       .set_wol  = pfe_eth_set_wol,
1197 +       .set_pauseparam = pfe_eth_set_pauseparam,
1198 +       .get_pauseparam = pfe_eth_get_pauseparam,
1199 +       .get_strings = pfe_eth_gstrings,
1200 +       .get_sset_count = pfe_eth_stats_count,
1201 +       .get_ethtool_stats = pfe_eth_fill_stats,
1202 +       .get_msglevel = pfe_eth_get_msglevel,
1203 +       .set_msglevel = pfe_eth_set_msglevel,
1204 +       .set_coalesce = pfe_eth_set_coalesce,
1205 +       .get_coalesce = pfe_eth_get_coalesce,
1206 +       .get_link_ksettings = pfe_eth_get_settings,
1207 +       .set_link_ksettings = pfe_eth_set_settings,
1208 +};
1209 +
1210 +/* pfe_eth_mdio_reset
1211 + */
1212 +int pfe_eth_mdio_reset(struct mii_bus *bus)
1213 +{
1214 +       struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
1215 +       u32 phy_speed;
1216 +
1217 +       netif_info(priv, hw, priv->ndev, "%s\n", __func__);
1218 +
1219 +       mutex_lock(&bus->mdio_lock);
1220 +
1221 +       /*
1222 +        * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
1223 +        *
1224 +        * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
1225 +        * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.
1226 +        */
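+       /*
+        * Worked example (assuming a hypothetical sys_clk of 250000 kHz):
+        * DIV_ROUND_UP(250000 * 1000, 4000000) = 63, giving an ENET-MAC
+        * MDC of 250 MHz / ((63 + 1) * 2) ~= 1.95 MHz, i.e. just under
+        * the 2.5 MHz target.
+        */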
1227 +       phy_speed = (DIV_ROUND_UP((pfe->ctrl.sys_clk * 1000), 4000000)
1228 +                    << EMAC_MII_SPEED_SHIFT);
1229 +       phy_speed |= EMAC_HOLDTIME(0x5);
1230 +       __raw_writel(phy_speed, priv->PHY_baseaddr + EMAC_MII_CTRL_REG);
1231 +
1232 +       mutex_unlock(&bus->mdio_lock);
1233 +
1234 +       return 0;
1235 +}
1236 +
1237 +/* pfe_eth_gemac_phy_timeout
1238 + *
1239 + */
1240 +static int pfe_eth_gemac_phy_timeout(struct pfe_eth_priv_s *priv, int timeout)
1241 +{
1242 +       while (!(__raw_readl(priv->PHY_baseaddr + EMAC_IEVENT_REG) &
1243 +                       EMAC_IEVENT_MII)) {
1244 +               if (timeout-- <= 0)
1245 +                       return -1;
1246 +               usleep_range(10, 20);
1247 +       }
1248 +       __raw_writel(EMAC_IEVENT_MII, priv->PHY_baseaddr + EMAC_IEVENT_REG);
1249 +       return 0;
1250 +}
1251 +
1252 +static int pfe_eth_mdio_mux(u8 muxval)
1253 +{
1254 +       struct i2c_adapter *a;
1255 +       struct i2c_msg msg;
1256 +       unsigned char buf[2];
1257 +       int ret;
1258 +
1259 +       a = i2c_get_adapter(0);
1260 +       if (!a)
1261 +               return -ENODEV;
1262 +
1263 +       /* write the mux selection to register 0x54 of the device at I2C address 0x66 */
1264 +       buf[0] = 0x54; /* reg number */
1265 +       buf[1] = (muxval << 6) | 0x3; /* data */
1266 +       msg.addr = 0x66;
1267 +       msg.buf = buf;
1268 +       msg.len = 2;
1269 +       msg.flags = 0;
1270 +       ret = i2c_transfer(a, &msg, 1);
1271 +       i2c_put_adapter(a);
1272 +       if (ret != 1)
1273 +               return -ENODEV;
1274 +       return 0;
1275 +}
1276 +
1277 +static int pfe_eth_mdio_write_addr(struct mii_bus *bus, int mii_id,
1278 +                                  int dev_addr, int regnum)
1279 +{
1280 +       struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
1281 +
1282 +       __raw_writel(EMAC_MII_DATA_PA(mii_id) |
1283 +                    EMAC_MII_DATA_RA(dev_addr) |
1284 +                    EMAC_MII_DATA_TA | EMAC_MII_DATA(regnum),
1285 +                    priv->PHY_baseaddr + EMAC_MII_DATA_REG);
1286 +
1287 +       if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
1288 +               netdev_err(priv->ndev, "%s: phy MDIO address write timeout\n",
1289 +                          __func__);
1290 +               return -1;
1291 +       }
1292 +
1293 +       return 0;
1294 +}
1295 +
1296 +static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1297 +                             u16 value)
1298 +{
1299 +       struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
1300 +
1301 +       /* To access external PHYs on the QDS board, the mux needs to be configured */
1302 +       if ((mii_id) && (pfe->mdio_muxval[mii_id]))
1303 +               pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
1304 +
1305 +       if (regnum & MII_ADDR_C45) {
1306 +               pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
1307 +                                       regnum & 0xffff);
1308 +               __raw_writel(EMAC_MII_DATA_OP_CL45_WR |
1309 +                            EMAC_MII_DATA_PA(mii_id) |
1310 +                            EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
1311 +                            EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
1312 +                            priv->PHY_baseaddr + EMAC_MII_DATA_REG);
1313 +       } else {
1314 +               /* start a write op */
1315 +               __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR |
1316 +                            EMAC_MII_DATA_PA(mii_id) |
1317 +                            EMAC_MII_DATA_RA(regnum) |
1318 +                            EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
1319 +                            priv->PHY_baseaddr + EMAC_MII_DATA_REG);
1320 +       }
1321 +
1322 +       if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
1323 +               netdev_err(priv->ndev, "%s: phy MDIO write timeout\n",
1324 +                          __func__);
1325 +               return -1;
1326 +       }
1327 +       netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
1328 +                  mii_id, regnum, value);
1329 +
1330 +       return 0;
1331 +}
1332 +
1333 +static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1334 +{
1335 +       struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
1336 +       u16 value = 0;
1337 +
1338 +       /*To access external PHYs on QDS board mux needs to be configured*/
1339 +       if ((mii_id) && (pfe->mdio_muxval[mii_id]))
1340 +               pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
1341 +
1342 +       if (regnum & MII_ADDR_C45) {
1343 +               pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
1344 +                                       regnum & 0xffff);
1345 +               __raw_writel(EMAC_MII_DATA_OP_CL45_RD |
1346 +                            EMAC_MII_DATA_PA(mii_id) |
1347 +                            EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
1348 +                            EMAC_MII_DATA_TA,
1349 +                            priv->PHY_baseaddr + EMAC_MII_DATA_REG);
1350 +       } else {
1351 +               /* start a read op */
1352 +               __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
1353 +                            EMAC_MII_DATA_PA(mii_id) |
1354 +                            EMAC_MII_DATA_RA(regnum) |
1355 +                            EMAC_MII_DATA_TA, priv->PHY_baseaddr +
1356 +                            EMAC_MII_DATA_REG);
1357 +       }
1358 +
1359 +       if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
1360 +               netdev_err(priv->ndev, "%s: phy MDIO read timeout\n", __func__);
1361 +               return -1;
1362 +       }
1363 +
1364 +       value = EMAC_MII_DATA(__raw_readl(priv->PHY_baseaddr +
1365 +                                               EMAC_MII_DATA_REG));
1366 +       netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
1367 +                  mii_id, regnum, value);
1368 +       return value;
1369 +}
1370 +
1371 +static int pfe_eth_mdio_init(struct pfe_eth_priv_s *priv,
1372 +                            struct ls1012a_mdio_platform_data *minfo)
1373 +{
1374 +       struct mii_bus *bus;
1375 +       int rc;
1376 +
1377 +       netif_info(priv, drv, priv->ndev, "%s\n", __func__);
1378 +       pr_info("%s\n", __func__);
1379 +
1380 +       bus = mdiobus_alloc();
1381 +       if (!bus) {
1382 +               netdev_err(priv->ndev, "mdiobus_alloc() failed\n");
1383 +               rc = -ENOMEM;
1384 +               goto err0;
1385 +       }
1386 +
1387 +       bus->name = "ls1012a MDIO Bus";
1388 +       bus->read = &pfe_eth_mdio_read;
1389 +       bus->write = &pfe_eth_mdio_write;
1390 +       bus->reset = &pfe_eth_mdio_reset;
1391 +       snprintf(bus->id, MII_BUS_ID_SIZE, "ls1012a-%x", priv->id);
1392 +       bus->priv = priv;
1393 +
1394 +       bus->phy_mask = minfo->phy_mask;
1395 +       priv->mdc_div = minfo->mdc_div;
1396 +
1397 +       if (!priv->mdc_div)
1398 +               priv->mdc_div = 64;
1399 +
1400 +       bus->irq[0] = minfo->irq[0];
1401 +
1402 +       bus->parent = priv->pfe->dev;
1403 +
1404 +       netif_info(priv, drv, priv->ndev, "%s: mdc_div: %d, phy_mask: %x\n",
1405 +                  __func__, priv->mdc_div, bus->phy_mask);
1406 +       rc = mdiobus_register(bus);
1407 +       if (rc) {
1408 +               netdev_err(priv->ndev, "mdiobus_register(%s) failed\n",
1409 +                          bus->name);
1410 +               goto err1;
1411 +       }
1412 +
1413 +       priv->mii_bus = bus;
1414 +       pfe_eth_mdio_reset(bus);
1415 +
1416 +       return 0;
1417 +
1418 +err1:
1419 +       mdiobus_free(bus);
1420 +err0:
1421 +       return rc;
1422 +}
1423 +
1424 +/* pfe_eth_mdio_exit
1425 + */
1426 +static void pfe_eth_mdio_exit(struct mii_bus *bus)
1427 +{
1428 +       if (!bus)
1429 +               return;
1430 +
1431 +       netif_info((struct pfe_eth_priv_s *)bus->priv, drv, ((struct
1432 +                       pfe_eth_priv_s *)(bus->priv))->ndev, "%s\n", __func__);
1433 +
1434 +       mdiobus_unregister(bus);
1435 +       mdiobus_free(bus);
1436 +}
1437 +
1438 +/* pfe_get_phydev_speed
1439 + */
1440 +static int pfe_get_phydev_speed(struct phy_device *phydev)
1441 +{
1442 +       switch (phydev->speed) {
1443 +       case 10:
1444 +                       return SPEED_10M;
1445 +       case 100:
1446 +                       return SPEED_100M;
1447 +       case 1000:
1448 +       default:
1449 +                       return SPEED_1000M;
1450 +       }
1451 +}
1452 +
1453 +/* pfe_set_rgmii_speed
1454 + */
1455 +#define RGMIIPCR       0x434
1456 +/* RGMIIPCR bit definitions*/
1457 +#define SCFG_RGMIIPCR_EN_AUTO           (0x00000008)
1458 +#define SCFG_RGMIIPCR_SETSP_1000M       (0x00000004)
1459 +#define SCFG_RGMIIPCR_SETSP_100M        (0x00000000)
1460 +#define SCFG_RGMIIPCR_SETSP_10M         (0x00000002)
1461 +#define SCFG_RGMIIPCR_SETFD             (0x00000001)
1462 +
1463 +static void pfe_set_rgmii_speed(struct phy_device *phydev)
1464 +{
1465 +       u32 rgmii_pcr;
1466 +
1467 +       regmap_read(pfe->scfg, RGMIIPCR, &rgmii_pcr);
1468 +       rgmii_pcr  &= ~(SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETSP_10M);
1469 +
1470 +       switch (phydev->speed) {
1471 +       case 10:
1472 +                       rgmii_pcr |= SCFG_RGMIIPCR_SETSP_10M;
1473 +                       break;
1474 +       case 1000:
1475 +                       rgmii_pcr |= SCFG_RGMIIPCR_SETSP_1000M;
1476 +                       break;
1477 +       case 100:
1478 +       default:
1479 +                       /* Default is 100M */
1480 +                       break;
1481 +       }
1482 +       regmap_write(pfe->scfg, RGMIIPCR, rgmii_pcr);
1483 +}
1484 +
1485 +/* pfe_get_phydev_duplex
1486 + */
1487 +static int pfe_get_phydev_duplex(struct phy_device *phydev)
1488 +{
1489 +       /*return (phydev->duplex == DUPLEX_HALF) ? DUP_HALF:DUP_FULL ; */
1490 +       return DUPLEX_FULL;
1491 +}
1492 +
1493 +/* pfe_eth_adjust_link
1494 + */
1495 +static void pfe_eth_adjust_link(struct net_device *ndev)
1496 +{
1497 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
1498 +       unsigned long flags;
1499 +       struct phy_device *phydev = priv->phydev;
1500 +       int new_state = 0;
1501 +
1502 +       netif_info(priv, drv, ndev, "%s\n", __func__);
1503 +
1504 +       spin_lock_irqsave(&priv->lock, flags);
1505 +
1506 +       if (phydev->link) {
1507 +               /*
1508 +                * Now we make sure that we can be in full duplex mode.
1509 +                * If not, we operate in half-duplex mode.
1510 +                */
1511 +               if (phydev->duplex != priv->oldduplex) {
1512 +                       new_state = 1;
1513 +                       gemac_set_duplex(priv->EMAC_baseaddr,
1514 +                                        pfe_get_phydev_duplex(phydev));
1515 +                       priv->oldduplex = phydev->duplex;
1516 +               }
1517 +
1518 +               if (phydev->speed != priv->oldspeed) {
1519 +                       new_state = 1;
1520 +                       gemac_set_speed(priv->EMAC_baseaddr,
1521 +                                       pfe_get_phydev_speed(phydev));
1522 +                       if (priv->einfo->mii_config == PHY_INTERFACE_MODE_RGMII)
1523 +                               pfe_set_rgmii_speed(phydev);
1524 +                       priv->oldspeed = phydev->speed;
1525 +               }
1526 +
1527 +               if (!priv->oldlink) {
1528 +                       new_state = 1;
1529 +                       priv->oldlink = 1;
1530 +               }
1531 +
1532 +       } else if (priv->oldlink) {
1533 +               new_state = 1;
1534 +               priv->oldlink = 0;
1535 +               priv->oldspeed = 0;
1536 +               priv->oldduplex = -1;
1537 +       }
1538 +
1539 +       if (new_state && netif_msg_link(priv))
1540 +               phy_print_status(phydev);
1541 +
1542 +       spin_unlock_irqrestore(&priv->lock, flags);
1543 +}
1544 +
1545 +/* pfe_phy_exit
1546 + */
1547 +static void pfe_phy_exit(struct net_device *ndev)
1548 +{
1549 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
1550 +
1551 +       netif_info(priv, drv, ndev, "%s\n", __func__);
1552 +
1553 +       phy_disconnect(priv->phydev);
1554 +       priv->phydev = NULL;
1555 +}
1556 +
1557 +/* pfe_eth_stop
1558 + */
1559 +static void pfe_eth_stop(struct net_device *ndev, int wake)
1560 +{
1561 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
1562 +
1563 +       netif_info(priv, drv, ndev, "%s\n", __func__);
1564 +
1565 +       if (wake) {
1566 +               gemac_tx_disable(priv->EMAC_baseaddr);
1567 +       } else {
1568 +               gemac_disable(priv->EMAC_baseaddr);
1569 +               gpi_disable(priv->GPI_baseaddr);
1570 +
1571 +               if (priv->phydev)
1572 +                       phy_stop(priv->phydev);
1573 +       }
1574 +}
1575 +
1576 +/* pfe_eth_start
1577 + */
1578 +static int pfe_eth_start(struct pfe_eth_priv_s *priv)
1579 +{
1580 +       netif_info(priv, drv, priv->ndev, "%s\n", __func__);
1581 +
1582 +       if (priv->phydev)
1583 +               phy_start(priv->phydev);
1584 +
1585 +       gpi_enable(priv->GPI_baseaddr);
1586 +       gemac_enable(priv->EMAC_baseaddr);
1587 +
1588 +       return 0;
1589 +}
1590 +
1591 +/*
1592 + * Configure on chip serdes through mdio
1593 + */
1594 +static void ls1012a_configure_serdes(struct net_device *ndev)
1595 +{
1596 +       struct pfe_eth_priv_s *priv = pfe->eth.eth_priv[0];
1597 +       int sgmii_2500 = 0;
1598 +       struct mii_bus *bus = priv->mii_bus;
1599 +
1600 +       if (priv->einfo->mii_config == PHY_INTERFACE_MODE_SGMII_2500)
1601 +               sgmii_2500 = 1;
1602 +
1603 +       netif_info(priv, drv, ndev, "%s\n", __func__);
1604 +       /* PCS configuration done with corresponding GEMAC */
1605 +
1606 +       pfe_eth_mdio_read(bus, 0, 0);
1607 +       pfe_eth_mdio_read(bus, 0, 1);
1608 +
1609 +       /* These settings were provided by the validation team */
1610 +       pfe_eth_mdio_write(bus, 0, 0x0, 0x8000);
1611 +       if (sgmii_2500) {
1612 +               pfe_eth_mdio_write(bus, 0, 0x14, 0x9);
1613 +               pfe_eth_mdio_write(bus, 0, 0x4, 0x4001);
1614 +               pfe_eth_mdio_write(bus, 0, 0x12, 0xa120);
1615 +               pfe_eth_mdio_write(bus, 0, 0x13, 0x7);
1616 +       } else {
1617 +               pfe_eth_mdio_write(bus, 0, 0x14, 0xb);
1618 +               pfe_eth_mdio_write(bus, 0, 0x4, 0x1a1);
1619 +               pfe_eth_mdio_write(bus, 0, 0x12, 0x400);
1620 +               pfe_eth_mdio_write(bus, 0, 0x13, 0x0);
1621 +       }
1622 +
1623 +       pfe_eth_mdio_write(bus, 0, 0x0, 0x1140);
1624 +}
1625 +
1626 +/*
1627 + * pfe_phy_init
1628 + *
1629 + */
1630 +static int pfe_phy_init(struct net_device *ndev)
1631 +{
1632 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
1633 +       struct phy_device *phydev;
1634 +       char phy_id[MII_BUS_ID_SIZE + 3];
1635 +       char bus_id[MII_BUS_ID_SIZE];
1636 +       phy_interface_t interface;
1637 +
1638 +       priv->oldlink = 0;
1639 +       priv->oldspeed = 0;
1640 +       priv->oldduplex = -1;
1641 +
1642 +       snprintf(bus_id, MII_BUS_ID_SIZE, "ls1012a-%d", 0);
1643 +       snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
1644 +                priv->einfo->phy_id);
1645 +
1646 +       netif_info(priv, drv, ndev, "%s: %s\n", __func__, phy_id);
1647 +       interface = priv->einfo->mii_config;
1648 +       if ((interface == PHY_INTERFACE_MODE_SGMII) ||
1649 +           (interface == PHY_INTERFACE_MODE_SGMII_2500)) {
1650 +               /*Configure SGMII PCS */
1651 +               if (pfe->scfg) {
1652 +                       /*Config MDIO from serdes */
1653 +                       regmap_write(pfe->scfg, 0x484, 0x00000000);
1654 +               }
1655 +               ls1012a_configure_serdes(ndev);
1656 +       }
1657 +
1658 +       if (pfe->scfg) {
1659 +               /*Config MDIO from PAD */
1660 +               regmap_write(pfe->scfg, 0x484, 0x80000000);
1661 +       }
1662 +
1663 +       priv->oldlink = 0;
1664 +       priv->oldspeed = 0;
1665 +       priv->oldduplex = -1;
1666 +       pr_info("%s interface %x\n", __func__, interface);
1667 +       phydev = phy_connect(ndev, phy_id, &pfe_eth_adjust_link, interface);
1668 +
1669 +       if (IS_ERR(phydev)) {
1670 +               netdev_err(ndev, "phy_connect() failed\n");
1671 +               return PTR_ERR(phydev);
1672 +       }
1673 +
1674 +       priv->phydev = phydev;
1675 +       phydev->irq = PHY_POLL;
1676 +
1677 +       return 0;
1678 +}
1679 +
1680 +/* pfe_gemac_init
1681 + */
1682 +static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
1683 +{
1684 +       struct gemac_cfg cfg;
1685 +
1686 +       netif_info(priv, ifup, priv->ndev, "%s\n", __func__);
1687 +
1688 +       cfg.speed = SPEED_1000M;
1689 +       cfg.duplex = DUPLEX_FULL;
1690 +
1691 +       gemac_set_config(priv->EMAC_baseaddr, &cfg);
1692 +       gemac_allow_broadcast(priv->EMAC_baseaddr);
1693 +       gemac_enable_1536_rx(priv->EMAC_baseaddr);
1694 +       gemac_enable_rx_jmb(priv->EMAC_baseaddr);
1695 +       gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
1696 +       gemac_enable_pause_rx(priv->EMAC_baseaddr);
1697 +       gemac_set_bus_width(priv->EMAC_baseaddr, 64);
1698 +
1699 +       /*GEM will perform checksum verifications*/
1700 +       if (priv->ndev->features & NETIF_F_RXCSUM)
1701 +               gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
1702 +       else
1703 +               gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
1704 +
1705 +       return 0;
1706 +}
1707 +
1708 +/* pfe_eth_event_handler
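 *
 * HIF RX events on queues 0, 1 and 2 are dispatched to the high, low and
 * LRO NAPI contexts respectively; TX-done events flush the TX queues.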
1709 + */
1710 +static int pfe_eth_event_handler(void *data, int event, int qno)
1711 +{
1712 +       struct pfe_eth_priv_s *priv = data;
1713 +
1714 +       switch (event) {
1715 +       case EVENT_RX_PKT_IND:
1716 +
1717 +               if (qno == 0) {
1718 +                       if (napi_schedule_prep(&priv->high_napi)) {
1719 +                               netif_info(priv, intr, priv->ndev,
1720 +                                          "%s: schedule high prio poll\n"
1721 +                                          , __func__);
1722 +
1723 +#ifdef PFE_ETH_NAPI_STATS
1724 +                               priv->napi_counters[NAPI_SCHED_COUNT]++;
1725 +#endif
1726 +
1727 +                               __napi_schedule(&priv->high_napi);
1728 +                       }
1729 +               } else if (qno == 1) {
1730 +                       if (napi_schedule_prep(&priv->low_napi)) {
1731 +                               netif_info(priv, intr, priv->ndev,
1732 +                                          "%s: schedule low prio poll\n"
1733 +                                          , __func__);
1734 +
1735 +#ifdef PFE_ETH_NAPI_STATS
1736 +                               priv->napi_counters[NAPI_SCHED_COUNT]++;
1737 +#endif
1738 +                               __napi_schedule(&priv->low_napi);
1739 +                       }
1740 +               } else if (qno == 2) {
1741 +                       if (napi_schedule_prep(&priv->lro_napi)) {
1742 +                               netif_info(priv, intr, priv->ndev,
1743 +                                          "%s: schedule lro prio poll\n"
1744 +                                          , __func__);
1745 +
1746 +#ifdef PFE_ETH_NAPI_STATS
1747 +                               priv->napi_counters[NAPI_SCHED_COUNT]++;
1748 +#endif
1749 +                               __napi_schedule(&priv->lro_napi);
1750 +                       }
1751 +               }
1752 +
1753 +               break;
1754 +
1755 +       case EVENT_TXDONE_IND:
1756 +               pfe_eth_flush_tx(priv);
1757 +               hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
1758 +               break;
1759 +       case EVENT_HIGH_RX_WM:
1760 +       default:
1761 +               break;
1762 +       }
1763 +
1764 +       return 0;
1765 +}
1766 +
1767 +/* pfe_eth_open
1768 + */
1769 +static int pfe_eth_open(struct net_device *ndev)
1770 +{
1771 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
1772 +       struct hif_client_s *client;
1773 +       int rc;
1774 +
1775 +       netif_info(priv, ifup, ndev, "%s\n", __func__);
1776 +
1777 +       /* Register client driver with HIF */
1778 +       client = &priv->client;
1779 +       memset(client, 0, sizeof(*client));
1780 +       client->id = PFE_CL_GEM0 + priv->id;
1781 +       client->tx_qn = emac_txq_cnt;
1782 +       client->rx_qn = EMAC_RXQ_CNT;
1783 +       client->priv = priv;
1784 +       client->pfe = priv->pfe;
1785 +       client->event_handler = pfe_eth_event_handler;
1786 +
1787 +       client->tx_qsize = EMAC_TXQ_DEPTH;
1788 +       client->rx_qsize = EMAC_RXQ_DEPTH;
1789 +
1790 +       rc = hif_lib_client_register(client);
1791 +       if (rc) {
1792 +               netdev_err(ndev, "%s: hif_lib_client_register(%d) failed\n",
1793 +                          __func__, client->id);
1794 +               goto err0;
1795 +       }
1796 +
1797 +       netif_info(priv, drv, ndev, "%s: registered client: %p\n", __func__,
1798 +                  client);
1799 +
1800 +       pfe_gemac_init(priv);
1801 +
1802 +       if (!is_valid_ether_addr(ndev->dev_addr)) {
1803 +               netdev_err(ndev, "%s: invalid MAC address\n", __func__);
1804 +               rc = -EADDRNOTAVAIL;
1805 +               goto err1;
1806 +       }
1807 +
1808 +       gemac_set_laddrN(priv->EMAC_baseaddr,
1809 +                        (struct pfe_mac_addr *)ndev->dev_addr, 1);
1810 +
1811 +       napi_enable(&priv->high_napi);
1812 +       napi_enable(&priv->low_napi);
1813 +       napi_enable(&priv->lro_napi);
1814 +
1815 +       rc = pfe_eth_start(priv);
1816 +
1817 +       netif_tx_wake_all_queues(ndev);
1818 +
1819 +       return rc;
1820 +
1821 +err1:
1822 +       hif_lib_client_unregister(&priv->client);
1823 +
1824 +err0:
1825 +       return rc;
1826 +}
1827 +
1828 +/*
1829 + *  pfe_eth_shutdown
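 *
 * Drains the pending client TX queues, waits for the TMU to finish
 * transmitting, then stops the MAC/GPI and unregisters the HIF client.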
1830 + */
1831 +int pfe_eth_shutdown(struct net_device *ndev, int wake)
1832 +{
1833 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
1834 +       int i, qstatus;
1835 +       unsigned long next_poll = jiffies + 1, end = jiffies +
1836 +                               (TX_POLL_TIMEOUT_MS * HZ) / 1000;
1837 +       int tx_pkts, prv_tx_pkts;
1838 +
1839 +       netif_info(priv, ifdown, ndev, "%s\n", __func__);
1840 +
1841 +       for (i = 0; i < emac_txq_cnt; i++)
1842 +               hrtimer_cancel(&priv->fast_tx_timeout[i].timer);
1843 +
1844 +       netif_tx_stop_all_queues(ndev);
1845 +
1846 +       do {
1847 +               tx_pkts = 0;
1848 +               pfe_eth_flush_tx(priv);
1849 +
1850 +               for (i = 0; i < emac_txq_cnt; i++)
1851 +                       tx_pkts += hif_lib_tx_pending(&priv->client, i);
1852 +
1853 +               if (tx_pkts) {
1854 +                       /*Don't wait forever, break if we cross max timeout */
1855 +                       if (time_after(jiffies, end)) {
1856 +                               pr_err(
1857 +                                       "(%s)Tx is not complete after %dmsec\n",
1858 +                                       ndev->name, TX_POLL_TIMEOUT_MS);
1859 +                               break;
1860 +                       }
1861 +
1862 +                       pr_info("%s : (%s) Waiting for tx packets to free. Pending tx pkts = %d.\n"
1863 +                               , __func__, ndev->name, tx_pkts);
1864 +                       if (need_resched())
1865 +                               schedule();
1866 +               }
1867 +
1868 +       } while (tx_pkts);
1869 +
1870 +       end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
1871 +
1872 +       prv_tx_pkts = tmu_pkts_processed(priv->id);
1873 +       /*
1874 +        * Wait until the TMU has transmitted all pending packets:
1875 +        * poll tmu_qstatus and the packets processed by the TMU every 10ms.
1876 +        * Consider the TMU busy if its queue is still pending or it has
1877 +        * processed any packets since the last poll.
1878 +        */
1879 +       while (1) {
1880 +               if (time_after(jiffies, next_poll)) {
1881 +                       tx_pkts = tmu_pkts_processed(priv->id);
1882 +                       qstatus = tmu_qstatus(priv->id) & 0x7ffff;
1883 +
1884 +                       if (!qstatus && (tx_pkts == prv_tx_pkts))
1885 +                               break;
1886 +                       /* Don't wait forever, break if we cross max
1887 +                        * timeout(TX_POLL_TIMEOUT_MS)
1888 +                        */
1889 +                       if (time_after(jiffies, end)) {
1890 +                               pr_err("TMU%d is busy after %dmsec\n",
1891 +                                      priv->id, TX_POLL_TIMEOUT_MS);
1892 +                               break;
1893 +                       }
1894 +                       prv_tx_pkts = tx_pkts;
1895 +                       next_poll++;
1896 +               }
1897 +               if (need_resched())
1898 +                       schedule();
1899 +       }
1900 +       /* Wait for some more time to complete transmitting packet if any */
1901 +       next_poll = jiffies + 1;
1902 +       while (1) {
1903 +               if (time_after(jiffies, next_poll))
1904 +                       break;
1905 +               if (need_resched())
1906 +                       schedule();
1907 +       }
1908 +
1909 +       pfe_eth_stop(ndev, wake);
1910 +
1911 +       napi_disable(&priv->lro_napi);
1912 +       napi_disable(&priv->low_napi);
1913 +       napi_disable(&priv->high_napi);
1914 +
1915 +       hif_lib_client_unregister(&priv->client);
1916 +
1917 +       return 0;
1918 +}
1919 +
1920 +/* pfe_eth_close
1921 + *
1922 + */
1923 +static int pfe_eth_close(struct net_device *ndev)
1924 +{
1925 +       pfe_eth_shutdown(ndev, 0);
1926 +
1927 +       return 0;
1928 +}
1929 +
1930 +/* pfe_eth_suspend
1931 + *
1932 + * return value : 1 if netdevice is configured to wakeup system
1933 + *                0 otherwise
1934 + */
1935 +int pfe_eth_suspend(struct net_device *ndev)
1936 +{
1937 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
1938 +       int retval = 0;
1939 +
1940 +       if (priv->wol) {
1941 +               gemac_set_wol(priv->EMAC_baseaddr, priv->wol);
1942 +               retval = 1;
1943 +       }
1944 +       pfe_eth_shutdown(ndev, priv->wol);
1945 +
1946 +       return retval;
1947 +}
1948 +
1949 +/* pfe_eth_resume
1950 + *
1951 + */
1952 +int pfe_eth_resume(struct net_device *ndev)
1953 +{
1954 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
1955 +
1956 +       if (priv->wol)
1957 +               gemac_set_wol(priv->EMAC_baseaddr, 0);
1958 +       gemac_tx_enable(priv->EMAC_baseaddr);
1959 +
1960 +       return pfe_eth_open(ndev);
1961 +}
1962 +
1963 +/* pfe_eth_get_queuenum
1964 + */
1965 +static int pfe_eth_get_queuenum(struct pfe_eth_priv_s *priv, struct sk_buff
1966 +                                       *skb)
1967 +{
1968 +       int queuenum = 0;
1969 +       unsigned long flags;
1970 +
1971 +       /* Get the Fast Path queue number */
1972 +       /*
1973 +        * Use conntrack mark (if conntrack exists), then packet mark (if any),
1974 +        * then fallback to default
1975 +        */
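+       /*
+        * Note: when bit 31 of the conntrack mark is set and this is not
+        * GEMAC0, the queue number is taken from the upper 16 bits of the
+        * mark (see the shift below).
+        */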
1976 +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
1977 +       if (skb->_nfct) {
1978 +               enum ip_conntrack_info cinfo;
1979 +               struct nf_conn *ct;
1980 +
1981 +               ct = nf_ct_get(skb, &cinfo);
1982 +
1983 +               if (ct) {
1984 +                       u32 connmark;
1985 +
1986 +                       connmark = ct->mark;
1987 +
1988 +                       if ((connmark & 0x80000000) && priv->id != 0)
1989 +                               connmark >>= 16;
1990 +
1991 +                       queuenum = connmark & EMAC_QUEUENUM_MASK;
1992 +               }
1993 +       } else  {/* continued after #endif ... */
1994 +#endif
1995 +               if (skb->mark) {
1996 +                       queuenum = skb->mark & EMAC_QUEUENUM_MASK;
1997 +               } else {
1998 +                       spin_lock_irqsave(&priv->lock, flags);
1999 +                       queuenum = priv->default_priority & EMAC_QUEUENUM_MASK;
2000 +                       spin_unlock_irqrestore(&priv->lock, flags);
2001 +               }
2002 +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
2003 +       }
2004 +#endif
2005 +       return queuenum;
2006 +}
2007 +
2008 +/* pfe_eth_might_stop_tx
2009 + *
2010 + */
2011 +static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum,
2012 +                                struct netdev_queue *tx_queue,
2013 +                                unsigned int n_desc,
2014 +                                unsigned int n_segs)
2015 +{
2016 +       ktime_t kt;
2017 +
2018 +       if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc) ||
2019 +                    (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) ||
2020 +       (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) {
2021 +#ifdef PFE_ETH_TX_STATS
2022 +               if (__hif_tx_avail(&pfe->hif) < n_desc) {
2023 +                       priv->stop_queue_hif[queuenum]++;
2024 +               } else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) {
2025 +                       priv->stop_queue_hif_client[queuenum]++;
2026 +               } else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) <
2027 +                       n_segs) {
2028 +                       priv->stop_queue_credit[queuenum]++;
2029 +               }
2030 +               priv->stop_queue_total[queuenum]++;
2031 +#endif
2032 +               netif_tx_stop_queue(tx_queue);
2033 +
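+               /*
+                * Arm the per-queue fast-recovery timer; its handler
+                * (pfe_eth_fast_tx_timeout) wakes the queue again when it
+                * expires, bounding the stall to
+                * LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS.
+                */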
2034 +               kt = ktime_set(0, LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS *
2035 +                               NSEC_PER_MSEC);
2036 +               hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt,
2037 +                             HRTIMER_MODE_REL);
2038 +               return -1;
2039 +       } else {
2040 +               return 0;
2041 +       }
2042 +}
2043 +
2044 +#define SA_MAX_OP 2
2045 +/* pfe_hif_send_packet
2046 + *
2047 + * At this level if TX fails we drop the packet
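 *
 * Linear skbs are queued as a single HIF buffer; fragmented skbs are queued
 * one HIF buffer per fragment, with the FIRST/LAST flags marking packet
 * boundaries.  GSO skbs are not supported and are dropped.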
2048 + */
2049 +static void pfe_hif_send_packet(struct sk_buff *skb, struct  pfe_eth_priv_s
2050 +                                       *priv, int queuenum)
2051 +{
2052 +       struct skb_shared_info *sh = skb_shinfo(skb);
2053 +       unsigned int nr_frags;
2054 +       u32 ctrl = 0;
2055 +
2056 +       netif_info(priv, tx_queued, priv->ndev, "%s\n", __func__);
2057 +
2058 +       if (skb_is_gso(skb)) {
2059 +               priv->stats.tx_dropped++;
2060 +               return;
2061 +       }
2062 +
2063 +       if (skb->ip_summed == CHECKSUM_PARTIAL)
2064 +               ctrl = HIF_CTRL_TX_CHECKSUM;
2065 +
2066 +       nr_frags = sh->nr_frags;
2067 +
2068 +       if (nr_frags) {
2069 +               skb_frag_t *f;
2070 +               int i;
2071 +
2072 +               __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
2073 +                                  skb_headlen(skb), ctrl, HIF_FIRST_BUFFER,
2074 +                                  skb);
2075 +
2076 +               for (i = 0; i < nr_frags - 1; i++) {
2077 +                       f = &sh->frags[i];
2078 +                       __hif_lib_xmit_pkt(&priv->client, queuenum,
2079 +                                          skb_frag_address(f),
2080 +                                          skb_frag_size(f),
2081 +                                          0x0, 0x0, skb);
2082 +               }
2083 +
2084 +               f = &sh->frags[i];
2085 +
2086 +               __hif_lib_xmit_pkt(&priv->client, queuenum,
2087 +                                  skb_frag_address(f), skb_frag_size(f),
2088 +                                  0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
2089 +                                  skb);
2090 +
2091 +               netif_info(priv, tx_queued, priv->ndev,
2092 +                          "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n",
2093 +                          __func__, skb, nr_frags, skb->len);
2094 +       } else {
2095 +               __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
2096 +                                  skb->len, ctrl, HIF_FIRST_BUFFER |
2097 +                                  HIF_LAST_BUFFER | HIF_DATA_VALID,
2098 +                                  skb);
2099 +               netif_info(priv, tx_queued, priv->ndev,
2100 +                          "%s: pkt sent successfully skb:%p len:%d\n",
2101 +                          __func__, skb, skb->len);
2102 +       }
2103 +       hif_tx_dma_start();
2104 +       priv->stats.tx_packets++;
2105 +       priv->stats.tx_bytes += skb->len;
2106 +       hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1);
2107 +}
2108 +
2109 +/* pfe_eth_flush_txQ
2110 + */
2111 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
2112 +                               from_tx, int n_desc)
2113 +{
2114 +       struct sk_buff *skb;
2115 +       struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2116 +                                                               tx_q_num);
2117 +       unsigned int flags;
2118 +
2119 +       netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
2120 +
2121 +       if (!from_tx)
2122 +               __netif_tx_lock_bh(tx_queue);
2123 +
2124 +       /* Clean HIF and client queue */
2125 +       while ((skb = hif_lib_tx_get_next_complete(&priv->client,
2126 +                                                  tx_q_num, &flags,
2127 +                                                  HIF_TX_DESC_NT))) {
2128 +               if (flags & HIF_DATA_VALID)
2129 +                       dev_kfree_skb_any(skb);
2130 +       }
2131 +       if (!from_tx)
2132 +               __netif_tx_unlock_bh(tx_queue);
2133 +}
2134 +
2135 +/* pfe_eth_flush_tx
2136 + */
2137 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
2138 +{
2139 +       int ii;
2140 +
2141 +       netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
2142 +
2143 +       for (ii = 0; ii < emac_txq_cnt; ii++)
2144 +               pfe_eth_flush_txQ(priv, ii, 0, 0);
2145 +}
2146 +
2147 +void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int
2148 +                               *n_segs)
2149 +{
2150 +       struct skb_shared_info *sh = skb_shinfo(skb);
2151 +
2152 +       /* Scattered data */
2153 +       if (sh->nr_frags) {
2154 +               *n_desc = sh->nr_frags + 1;
2155 +               *n_segs = 1;
2156 +       /* Regular case */
2157 +       } else {
2158 +               *n_desc = 1;
2159 +               *n_segs = 1;
2160 +       }
2161 +}
2162 +
2163 +/* pfe_eth_send_packet
2164 + */
2165 +static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *ndev)
2166 +{
2167 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2168 +       int tx_q_num = skb_get_queue_mapping(skb);
2169 +       int n_desc, n_segs;
2170 +       struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2171 +                                                               tx_q_num);
2172 +
2173 +       netif_info(priv, tx_queued, ndev, "%s\n", __func__);
2174 +
2175 +       if ((!skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ +
2176 +                       sizeof(unsigned long)))) {
2177 +               netif_warn(priv, tx_err, priv->ndev, "%s: copying skb\n",
2178 +                          __func__);
2179 +
2180 +               if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned
2181 +                                       long)), 0, GFP_ATOMIC)) {
2182 +                       /* No need to re-transmit, no way to recover*/
2183 +                       kfree_skb(skb);
2184 +                       priv->stats.tx_dropped++;
2185 +                       return NETDEV_TX_OK;
2186 +               }
2187 +       }
2188 +
2189 +       pfe_tx_get_req_desc(skb, &n_desc, &n_segs);
2190 +
2191 +       hif_tx_lock(&pfe->hif);
2192 +       if (unlikely(pfe_eth_might_stop_tx(priv, tx_q_num, tx_queue, n_desc,
2193 +                                          n_segs))) {
2194 +#ifdef PFE_ETH_TX_STATS
2195 +               if (priv->was_stopped[tx_q_num]) {
2196 +                       priv->clean_fail[tx_q_num]++;
2197 +                       priv->was_stopped[tx_q_num] = 0;
2198 +               }
2199 +#endif
2200 +               hif_tx_unlock(&pfe->hif);
2201 +               return NETDEV_TX_BUSY;
2202 +       }
2203 +
2204 +       pfe_hif_send_packet(skb, priv, tx_q_num);
2205 +
2206 +       hif_tx_unlock(&pfe->hif);
2207 +
2208 +       tx_queue->trans_start = jiffies;
2209 +
2210 +#ifdef PFE_ETH_TX_STATS
2211 +       priv->was_stopped[tx_q_num] = 0;
2212 +#endif
2213 +
2214 +       return NETDEV_TX_OK;
2215 +}
2216 +
2217 +/* pfe_eth_select_queue
2218 + *
2219 + */
2220 +static u16 pfe_eth_select_queue(struct net_device *ndev, struct sk_buff *skb,
2221 +                               struct net_device *sb_dev,
2222 +                               select_queue_fallback_t fallback)
2223 +{
2224 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2225 +
2226 +       return pfe_eth_get_queuenum(priv, skb);
2227 +}
2228 +
2229 +/* pfe_eth_get_stats
2230 + */
2231 +static struct net_device_stats *pfe_eth_get_stats(struct net_device *ndev)
2232 +{
2233 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2234 +
2235 +       netif_info(priv, drv, ndev, "%s\n", __func__);
2236 +
2237 +       return &priv->stats;
2238 +}
2239 +
2240 +/* pfe_eth_set_mac_address
2241 + */
2242 +static int pfe_eth_set_mac_address(struct net_device *ndev, void *addr)
2243 +{
2244 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2245 +       struct sockaddr *sa = addr;
2246 +
2247 +       netif_info(priv, drv, ndev, "%s\n", __func__);
2248 +
2249 +       if (!is_valid_ether_addr(sa->sa_data))
2250 +               return -EADDRNOTAVAIL;
2251 +
2252 +       memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
2253 +
2254 +       gemac_set_laddrN(priv->EMAC_baseaddr,
2255 +                        (struct pfe_mac_addr *)ndev->dev_addr, 1);
2256 +
2257 +       return 0;
2258 +}
2259 +
2260 +/* pfe_eth_enet_addr_byte_mac
2261 + */
2262 +int pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
2263 +                              struct pfe_mac_addr *enet_addr)
2264 +{
2265 +       if (!enet_byte_addr || !enet_addr) {
2266 +               return -1;
2267 +
2268 +       } else {
2269 +               enet_addr->bottom = enet_byte_addr[0] |
2270 +                       (enet_byte_addr[1] << 8) |
2271 +                       (enet_byte_addr[2] << 16) |
2272 +                       (enet_byte_addr[3] << 24);
2273 +               enet_addr->top = enet_byte_addr[4] |
2274 +                       (enet_byte_addr[5] << 8);
2275 +               return 0;
2276 +       }
2277 +}
2278 +
2279 +/* pfe_eth_set_multi
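 *
 * Programs the GEMAC RX filters: promiscuous/broadcast modes, up to
 * MAX_UC_SPEC_ADDR_REG exact-match unicast entries and the 64-bit hash
 * filter used for multicast (and overflow unicast) addresses.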
2280 + */
2281 +static void pfe_eth_set_multi(struct net_device *ndev)
2282 +{
2283 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2284 +       struct pfe_mac_addr    hash_addr; /* hash register structure */
2285 +       /* specific mac address register structure */
2286 +       struct pfe_mac_addr    spec_addr;
2287 +       int             result; /* index into hash register to set.. */
2288 +       int             uc_count = 0;
2289 +       struct netdev_hw_addr *ha;
2290 +
2291 +       if (ndev->flags & IFF_PROMISC) {
2292 +               netif_info(priv, drv, ndev, "entering promiscuous mode\n");
2293 +
2294 +               priv->promisc = 1;
2295 +               gemac_enable_copy_all(priv->EMAC_baseaddr);
2296 +       } else {
2297 +               priv->promisc = 0;
2298 +               gemac_disable_copy_all(priv->EMAC_baseaddr);
2299 +       }
2300 +
2301 +       /* Enable broadcast frame reception if required. */
2302 +       if (ndev->flags & IFF_BROADCAST) {
2303 +               gemac_allow_broadcast(priv->EMAC_baseaddr);
2304 +       } else {
2305 +               netif_info(priv, drv, ndev,
2306 +                          "disabling broadcast frame reception\n");
2307 +
2308 +               gemac_no_broadcast(priv->EMAC_baseaddr);
2309 +       }
2310 +
2311 +       if (ndev->flags & IFF_ALLMULTI) {
2312 +               /* Set the hash to rx all multicast frames */
2313 +               hash_addr.bottom = 0xFFFFFFFF;
2314 +               hash_addr.top = 0xFFFFFFFF;
2315 +               gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
2316 +               netdev_for_each_uc_addr(ha, ndev) {
2317 +                       if (uc_count >= MAX_UC_SPEC_ADDR_REG)
2318 +                               break;
2319 +                       pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr);
2320 +                       gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr,
2321 +                                        uc_count + 2);
2322 +                       uc_count++;
2323 +               }
2324 +       } else if ((netdev_mc_count(ndev) > 0)  || (netdev_uc_count(ndev))) {
2325 +               u8 *addr;
2326 +
2327 +               hash_addr.bottom = 0;
2328 +               hash_addr.top = 0;
2329 +
2330 +               netdev_for_each_mc_addr(ha, ndev) {
2331 +                       addr = ha->addr;
2332 +
2333 +                       netif_info(priv, drv, ndev,
2334 +                                  "adding multicast address %X:%X:%X:%X:%X:%X to gem filter\n",
2335 +                               addr[0], addr[1], addr[2],
2336 +                               addr[3], addr[4], addr[5]);
2337 +
2338 +                       result = pfe_eth_get_hash(addr);
2339 +
2340 +                       if (result < EMAC_HASH_REG_BITS) {
2341 +                               if (result < 32)
2342 +                                       hash_addr.bottom |= (1 << result);
2343 +                               else
2344 +                                       hash_addr.top |= (1 << (result - 32));
2345 +                       } else {
2346 +                               break;
2347 +                       }
2348 +               }
2349 +
2350 +               uc_count = -1;
2351 +               netdev_for_each_uc_addr(ha, ndev) {
2352 +                       addr = ha->addr;
2353 +
2354 +                       if (++uc_count < MAX_UC_SPEC_ADDR_REG)   {
2355 +                               netdev_info(ndev,
2356 +                                           "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
2357 +                                           addr[0], addr[1], addr[2],
2358 +                                           addr[3], addr[4], addr[5]);
2359 +                               pfe_eth_enet_addr_byte_mac(addr, &spec_addr);
2360 +                               gemac_set_laddrN(priv->EMAC_baseaddr,
2361 +                                                &spec_addr, uc_count + 2);
2362 +                       } else {
2363 +                               netif_info(priv, drv, ndev,
2364 +                                          "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n",
2365 +                                          addr[0], addr[1], addr[2],
2366 +                                          addr[3], addr[4], addr[5]);
2367 +
2368 +                               result = pfe_eth_get_hash(addr);
2369 +                               if (result >= EMAC_HASH_REG_BITS) {
2370 +                                       break;
2371 +
2372 +                               } else {
2373 +                                       if (result < 32)
2374 +                                               hash_addr.bottom |= (1 <<
2375 +                                                               result);
2376 +                                       else
2377 +                                               hash_addr.top |= (1 <<
2378 +                                                               (result - 32));
2379 +                               }
2380 +                       }
2381 +               }
2382 +
2383 +               gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
2384 +       }
2385 +
2386 +       if (!(netdev_uc_count(ndev) >= MAX_UC_SPEC_ADDR_REG)) {
2387 +               /*
2388 +                *  Check if there are any specific address HW registers that
2389 +                * need to be flushed
2390 +                */
2391 +               for (uc_count = netdev_uc_count(ndev); uc_count <
2392 +                       MAX_UC_SPEC_ADDR_REG; uc_count++)
2393 +                       gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2);
2394 +       }
2395 +
2396 +       if (ndev->flags & IFF_LOOPBACK)
2397 +               gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL);
2398 +}
2399 +
2400 +/* pfe_eth_set_features
2401 + */
2402 +static int pfe_eth_set_features(struct net_device *ndev, netdev_features_t
2403 +                                       features)
2404 +{
2405 +       struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2406 +       int rc = 0;
2407 +
2408 +       if (features & NETIF_F_RXCSUM)
2409 +               gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
2410 +       else
2411 +               gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
2412 +       return rc;
2413 +}
2414 +
2415 +/* pfe_eth_fast_tx_timeout
2416 + */
2417 +static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer)
2418 +{
2419 +       struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer, struct
2420 +                                                       pfe_eth_fast_timer,
2421 +                                                       timer);
2422 +       struct pfe_eth_priv_s *priv =  container_of(fast_tx_timeout->base,
2423 +                                                       struct pfe_eth_priv_s,
2424 +                                                       fast_tx_timeout);
2425 +       struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2426 +                                               fast_tx_timeout->queuenum);
2427 +
2428 +       if (netif_tx_queue_stopped(tx_queue)) {
2429 +#ifdef PFE_ETH_TX_STATS
2430 +               priv->was_stopped[fast_tx_timeout->queuenum] = 1;
2431 +#endif
2432 +               netif_tx_wake_queue(tx_queue);
2433 +       }
2434 +
2435 +       return HRTIMER_NORESTART;
2436 +}
2437 +
2438 +/* pfe_eth_fast_tx_timeout_init
2439 + */
2440 +static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv)
2441 +{
2442 +       int i;
2443 +
2444 +       for (i = 0; i < emac_txq_cnt; i++) {
2445 +               priv->fast_tx_timeout[i].queuenum = i;
2446 +               hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC,
2447 +                            HRTIMER_MODE_REL);
2448 +               priv->fast_tx_timeout[i].timer.function =
2449 +                               pfe_eth_fast_tx_timeout;
2450 +               priv->fast_tx_timeout[i].base = priv->fast_tx_timeout;
2451 +       }
2452 +}
2453 +
2454 +static struct sk_buff *pfe_eth_rx_skb(struct net_device *ndev,
2455 +                                     struct    pfe_eth_priv_s *priv,
2456 +                                     unsigned int qno)
2457 +{
2458 +       void *buf_addr;
2459 +       unsigned int rx_ctrl;
2460 +       unsigned int desc_ctrl = 0;
2461 +       struct hif_ipsec_hdr *ipsec_hdr = NULL;
2462 +       struct sk_buff *skb;
2463 +       struct sk_buff *skb_frag, *skb_frag_last = NULL;
2464 +       int length = 0, offset;
2465 +
2466 +       skb = priv->skb_inflight[qno];
2467 +
2468 +       if (skb) {
2469 +               skb_frag_last = skb_shinfo(skb)->frag_list;
2470 +               if (skb_frag_last) {
2471 +                       while (skb_frag_last->next)
2472 +                               skb_frag_last = skb_frag_last->next;
2473 +               }
2474 +       }
2475 +
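+       /*
+        * Pull HIF buffers for this queue until a descriptor with the LAST
+        * flag is seen.  The first buffer becomes the head skb; further
+        * buffers are chained on its frag_list so a packet split across HIF
+        * buffers is delivered as a single skb.
+        */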
2476 +       while (!(desc_ctrl & CL_DESC_LAST)) {
2477 +               buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length,
2478 +                                              &offset, &rx_ctrl, &desc_ctrl,
2479 +                                              (void **)&ipsec_hdr);
2480 +               if (!buf_addr)
2481 +                       goto incomplete;
2482 +
2483 +#ifdef PFE_ETH_NAPI_STATS
2484 +               priv->napi_counters[NAPI_DESC_COUNT]++;
2485 +#endif
2486 +
2487 +               /* First frag */
2488 +               if (desc_ctrl & CL_DESC_FIRST) {
2489 +                       skb = build_skb(buf_addr, 0);
2490 +                       if (unlikely(!skb))
2491 +                               goto pkt_drop;
2492 +
2493 +                       skb_reserve(skb, offset);
2494 +                       skb_put(skb, length);
2495 +                       skb->dev = ndev;
2496 +
2497 +                       if ((ndev->features & NETIF_F_RXCSUM) && (rx_ctrl &
2498 +                                       HIF_CTRL_RX_CHECKSUMMED))
2499 +                               skb->ip_summed = CHECKSUM_UNNECESSARY;
2500 +                       else
2501 +                               skb_checksum_none_assert(skb);
2502 +
2503 +               } else {
2504 +                       /* Next frags */
2505 +                       if (unlikely(!skb)) {
2506 +                               pr_err("%s: NULL skb_inflight\n",
2507 +                                      __func__);
2508 +                               goto pkt_drop;
2509 +                       }
2510 +
2511 +                       skb_frag = build_skb(buf_addr, 0);
2512 +
2513 +                       if (unlikely(!skb_frag)) {
2514 +                               kfree(buf_addr);
2515 +                               goto pkt_drop;
2516 +                       }
2517 +
2518 +                       skb_reserve(skb_frag, offset);
2519 +                       skb_put(skb_frag, length);
2520 +
2521 +                       skb_frag->dev = ndev;
2522 +
2523 +                       if (skb_shinfo(skb)->frag_list)
2524 +                               skb_frag_last->next = skb_frag;
2525 +                       else
2526 +                               skb_shinfo(skb)->frag_list = skb_frag;
2527 +
2528 +                       skb->truesize += skb_frag->truesize;
2529 +                       skb->data_len += length;
2530 +                       skb->len += length;
2531 +                       skb_frag_last = skb_frag;
2532 +               }
2533 +       }
2534 +
2535 +       priv->skb_inflight[qno] = NULL;
2536 +       return skb;
2537 +
2538 +incomplete:
2539 +       priv->skb_inflight[qno] = skb;
2540 +       return NULL;
2541 +
2542 +pkt_drop:
2543 +       priv->skb_inflight[qno] = NULL;
2544 +
2545 +       if (skb)
2546 +               kfree_skb(skb);
2547 +       else
2548 +               kfree(buf_addr);
2549 +
2550 +       priv->stats.rx_errors++;
2551 +
2552 +       return NULL;
2553 +}
2554 +
2555 +/* pfe_eth_poll
2556 + */
2557 +static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi,
2558 +                       unsigned int qno, int budget)
2559 +{
2560 +       struct net_device *ndev = priv->ndev;
2561 +       struct sk_buff *skb;
2562 +       int work_done = 0;
2563 +       unsigned int len;
2564 +
2565 +       netif_info(priv, intr, priv->ndev, "%s\n", __func__);
2566 +
2567 +#ifdef PFE_ETH_NAPI_STATS
2568 +       priv->napi_counters[NAPI_POLL_COUNT]++;
2569 +#endif
2570 +
2571 +       do {
2572 +               skb = pfe_eth_rx_skb(ndev, priv, qno);
2573 +
2574 +               if (!skb)
2575 +                       break;
2576 +
2577 +               len = skb->len;
2578 +
2579 +               /* Packet will be processed */
2580 +               skb->protocol = eth_type_trans(skb, ndev);
2581 +
2582 +               netif_receive_skb(skb);
2583 +
2584 +               priv->stats.rx_packets++;
2585 +               priv->stats.rx_bytes += len;
2586 +
2587 +               work_done++;
2588 +
2589 +#ifdef PFE_ETH_NAPI_STATS
2590 +               priv->napi_counters[NAPI_PACKET_COUNT]++;
2591 +#endif
2592 +
2593 +       } while (work_done < budget);
2594 +
2595 +       /*
2596 +        * If no Rx receive nor cleanup work was done, exit polling mode.
2597 +        * No additional netif_running(dev) check is required here, as this is
2598 +        * checked in net/core/dev.c (2.6.33.5 kernel specific).
2599 +        */
2600 +       if (work_done < budget) {
2601 +               napi_complete(napi);
2602 +
2603 +               hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND,
2604 +                                           qno);
2605 +       }
2606 +#ifdef PFE_ETH_NAPI_STATS
2607 +       else
2608 +               priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
2609 +#endif
2610 +
2611 +       return work_done;
2612 +}
2613 +
2614 +/*
2615 + * pfe_eth_lro_poll
2616 + */
2617 +static int pfe_eth_lro_poll(struct napi_struct *napi, int budget)
2618 +{
2619 +       struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
2620 +                                                       lro_napi);
2621 +
2622 +       netif_info(priv, intr, priv->ndev, "%s\n", __func__);
2623 +
2624 +       return pfe_eth_poll(priv, napi, 2, budget);
2625 +}
2626 +
2627 +/* pfe_eth_low_poll
2628 + */
2629 +static int pfe_eth_low_poll(struct napi_struct *napi, int budget)
2630 +{
2631 +       struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
2632 +                                                       low_napi);
2633 +
2634 +       netif_info(priv, intr, priv->ndev, "%s\n", __func__);
2635 +
2636 +       return pfe_eth_poll(priv, napi, 1, budget);
2637 +}
2638 +
2639 +/* pfe_eth_high_poll
2640 + */
2641 +static int pfe_eth_high_poll(struct napi_struct *napi, int budget)
2642 +{
2643 +       struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
2644 +                                                       high_napi);
2645 +
2646 +       netif_info(priv, intr, priv->ndev, "%s\n", __func__);
2647 +
2648 +       return pfe_eth_poll(priv, napi, 0, budget);
2649 +}
2650 +
2651 +static const struct net_device_ops pfe_netdev_ops = {
2652 +       .ndo_open = pfe_eth_open,
2653 +       .ndo_stop = pfe_eth_close,
2654 +       .ndo_start_xmit = pfe_eth_send_packet,
2655 +       .ndo_select_queue = pfe_eth_select_queue,
2656 +       .ndo_get_stats = pfe_eth_get_stats,
2657 +       .ndo_set_mac_address = pfe_eth_set_mac_address,
2658 +       .ndo_set_rx_mode = pfe_eth_set_multi,
2659 +       .ndo_set_features = pfe_eth_set_features,
2660 +       .ndo_validate_addr = eth_validate_addr,
2661 +};
2662 +
2663 +/* pfe_eth_init_one
2664 + */
2665 +static int pfe_eth_init_one(struct pfe *pfe, int id)
2666 +{
2667 +       struct net_device *ndev = NULL;
2668 +       struct pfe_eth_priv_s *priv = NULL;
2669 +       struct ls1012a_eth_platform_data *einfo;
2670 +       struct ls1012a_mdio_platform_data *minfo;
2671 +       struct ls1012a_pfe_platform_data *pfe_info;
2672 +       int err;
2673 +
2674 +       /* Extract platform data */
2675 +       pfe_info = (struct ls1012a_pfe_platform_data *)
2676 +                                       pfe->dev->platform_data;
2677 +       if (!pfe_info) {
2678 +               pr_err(
2679 +                       "%s: pfe missing additional platform data\n"
2680 +                       , __func__);
2681 +               err = -ENODEV;
2682 +               goto err0;
2683 +       }
2684 +
2685 +       einfo = (struct ls1012a_eth_platform_data *)
2686 +                               pfe_info->ls1012a_eth_pdata;
2687 +
2688 +       /* einfo should never be NULL, but there is no harm in checking */
2689 +       if (!einfo) {
2690 +               pr_err(
2691 +                       "%s: pfe missing additional gemacs platform data\n"
2692 +                       , __func__);
2693 +               err = -ENODEV;
2694 +               goto err0;
2695 +       }
2696 +
2697 +       minfo = (struct ls1012a_mdio_platform_data *)
2698 +                               pfe_info->ls1012a_mdio_pdata;
2699 +
2700 +       /* minfo should never be NULL, but there is no harm in checking */
2701 +       if (!minfo) {
2702 +               pr_err(
2703 +                       "%s: pfe missing additional mdios platform data\n",
2704 +                        __func__);
2705 +               err = -ENODEV;
2706 +               goto err0;
2707 +       }
2708 +
2709 +       /* Create an ethernet device instance */
2710 +       ndev = alloc_etherdev_mq(sizeof(*priv), emac_txq_cnt);
2711 +
2712 +       if (!ndev) {
2713 +               pr_err("%s: gemac %d device allocation failed\n",
2714 +                      __func__, einfo[id].gem_id);
2715 +               err = -ENOMEM;
2716 +               goto err0;
2717 +       }
2718 +
2719 +       priv = netdev_priv(ndev);
2720 +       priv->ndev = ndev;
2721 +       priv->id = einfo[id].gem_id;
2722 +       priv->pfe = pfe;
2723 +
2724 +       SET_NETDEV_DEV(priv->ndev, priv->pfe->dev);
2725 +
2726 +       pfe->eth.eth_priv[id] = priv;
2727 +
2728 +       /* Set the info in the priv to the current info */
2729 +       priv->einfo = &einfo[id];
2730 +       priv->EMAC_baseaddr = cbus_emac_base[id];
2731 +       priv->PHY_baseaddr = cbus_emac_base[0];
2732 +       priv->GPI_baseaddr = cbus_gpi_base[id];
2733 +
2734 +#define HIF_GEMAC_TMUQ_BASE    6
2735 +       priv->low_tmu_q =  HIF_GEMAC_TMUQ_BASE + (id * 2);
2736 +       priv->high_tmu_q        =  priv->low_tmu_q + 1;
2737 +
2738 +       spin_lock_init(&priv->lock);
2739 +
2740 +       pfe_eth_fast_tx_timeout_init(priv);
2741 +
2742 +       /* Copy the station address into the dev structure */
2743 +       memcpy(ndev->dev_addr, einfo[id].mac_addr, ETH_ALEN);
2744 +
2745 +       /* Initialize mdio */
2746 +       if (minfo[id].enabled) {
2747 +               err = pfe_eth_mdio_init(priv, &minfo[id]);
2748 +               if (err) {
2749 +                       netdev_err(ndev, "%s: pfe_eth_mdio_init() failed\n",
2750 +                                  __func__);
2751 +                       goto err2;
2752 +               }
2753 +       }
2754 +
2755 +       ndev->mtu = 1500;
2756 +
2757 +       /* Set MTU limits */
2758 +       ndev->min_mtu = ETH_MIN_MTU;
2759 +       ndev->max_mtu = JUMBO_FRAME_SIZE;
2760 +
2761 +       /* supported features */
2762 +       ndev->hw_features = NETIF_F_SG;
2763 +
2764 +       /*Enable after checksum offload is validated */
2765 +       ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
2766 +               NETIF_F_IPV6_CSUM | NETIF_F_SG;
2767 +
2768 +       /* enabled by default */
2769 +       ndev->features = ndev->hw_features;
2770 +
2771 +       priv->usr_features = ndev->features;
2772 +
2773 +       ndev->netdev_ops = &pfe_netdev_ops;
2774 +
2775 +       ndev->ethtool_ops = &pfe_ethtool_ops;
2776 +
2777 +       /* Enable basic messages by default */
2778 +       priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK |
2779 +                               NETIF_MSG_PROBE;
2780 +
2781 +       netif_napi_add(ndev, &priv->low_napi, pfe_eth_low_poll,
2782 +                      HIF_RX_POLL_WEIGHT - 16);
2783 +       netif_napi_add(ndev, &priv->high_napi, pfe_eth_high_poll,
2784 +                      HIF_RX_POLL_WEIGHT - 16);
2785 +       netif_napi_add(ndev, &priv->lro_napi, pfe_eth_lro_poll,
2786 +                      HIF_RX_POLL_WEIGHT - 16);
2787 +
2788 +       err = register_netdev(ndev);
2789 +
2790 +       if (err) {
2791 +               netdev_err(ndev, "register_netdev() failed\n");
2792 +               goto err3;
2793 +       }
2794 +       device_init_wakeup(&ndev->dev, WAKE_MAGIC);
2795 +
2796 +       if (!(priv->einfo->phy_flags & GEMAC_NO_PHY)) {
2797 +               err = pfe_phy_init(ndev);
2798 +               if (err) {
2799 +                       netdev_err(ndev, "%s: pfe_phy_init() failed\n",
2800 +                                  __func__);
2801 +                       goto err4;
2802 +               }
2803 +       }
2804 +
2805 +       netif_carrier_on(ndev);
2806 +
2807 +       /* Create all the sysfs files */
2808 +       if (pfe_eth_sysfs_init(ndev))
2809 +               goto err4;
2810 +
2811 +       netif_info(priv, probe, ndev, "%s: created interface, baseaddr: %p\n",
2812 +                  __func__, priv->EMAC_baseaddr);
2813 +
2814 +       return 0;
2815 +err4:
2816 +       unregister_netdev(ndev);
2817 +err3:
2818 +       pfe_eth_mdio_exit(priv->mii_bus);
2819 +err2:
2820 +       free_netdev(priv->ndev);
2821 +err0:
2822 +       return err;
2823 +}
2824 +
2825 +/* pfe_eth_init
2826 + */
2827 +int pfe_eth_init(struct pfe *pfe)
2828 +{
2829 +       int ii = 0;
2830 +       int err;
2831 +
2832 +       pr_info("%s\n", __func__);
2833 +
2834 +       cbus_emac_base[0] = EMAC1_BASE_ADDR;
2835 +       cbus_emac_base[1] = EMAC2_BASE_ADDR;
2836 +
2837 +       cbus_gpi_base[0] = EGPI1_BASE_ADDR;
2838 +       cbus_gpi_base[1] = EGPI2_BASE_ADDR;
2839 +
2840 +       for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
2841 +               err = pfe_eth_init_one(pfe, ii);
2842 +               if (err)
2843 +                       goto err0;
2844 +       }
2845 +
2846 +       return 0;
2847 +
2848 +err0:
2849 +       while (ii--)
2850 +               pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
2851 +
2852 +       /* Propagate the error after unwinding the interfaces registered so far */
2853 +       return err;
2854 +}
2855 +
2856 +/* pfe_eth_exit_one
2857 + */
2858 +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv)
2859 +{
2860 +       netif_info(priv, probe, priv->ndev, "%s\n", __func__);
2861 +
2862 +       pfe_eth_sysfs_exit(priv->ndev);
2863 +
2864 +       unregister_netdev(priv->ndev);
2865 +
2866 +       if (!(priv->einfo->phy_flags & GEMAC_NO_PHY))
2867 +               pfe_phy_exit(priv->ndev);
2868 +
2869 +       if (priv->mii_bus)
2870 +               pfe_eth_mdio_exit(priv->mii_bus);
2871 +
2872 +       free_netdev(priv->ndev);
2873 +}
2874 +
2875 +/* pfe_eth_exit
2876 + */
2877 +void pfe_eth_exit(struct pfe *pfe)
2878 +{
2879 +       int ii;
2880 +
2881 +       pr_info("%s\n", __func__);
2882 +
2883 +       for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
2884 +               pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
2885 +}
2886 --- /dev/null
2887 +++ b/drivers/staging/fsl_ppfe/pfe_firmware.c
2888 @@ -0,0 +1,314 @@
2889 +/*
2890 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2891 + * Copyright 2017 NXP
2892 + *
2893 + * This program is free software; you can redistribute it and/or modify
2894 + * it under the terms of the GNU General Public License as published by
2895 + * the Free Software Foundation; either version 2 of the License, or
2896 + * (at your option) any later version.
2897 + *
2898 + * This program is distributed in the hope that it will be useful,
2899 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2900 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
2901 + * GNU General Public License for more details.
2902 + *
2903 + * You should have received a copy of the GNU General Public License
2904 + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
2905 + */
2906 +
2907 +/*
2908 + * @file
2909 + * Contains all the functions to handle parsing and loading of PE firmware
2910 + * files.
2911 + */
2912 +#include <linux/firmware.h>
2913 +
2914 +#include "pfe_mod.h"
2915 +#include "pfe_firmware.h"
2916 +#include "pfe/pfe.h"
2917 +
2918 +static struct elf32_shdr *get_elf_section_header(const struct firmware *fw,
2919 +                                                const char *section)
2920 +{
2921 +       struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
2922 +       struct elf32_shdr *shdr;
2923 +       struct elf32_shdr *shdr_shstr;
2924 +       Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff);
2925 +       Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize);
2926 +       Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum);
2927 +       Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx);
2928 +       Elf32_Off shstr_offset;
2929 +       Elf32_Word sh_name;
2930 +       const char *name;
2931 +       int i;
2932 +
2933 +       /* Section header strings */
2934 +       shdr_shstr = (struct elf32_shdr *)(fw->data + e_shoff + e_shstrndx *
2935 +                                       e_shentsize);
2936 +       shstr_offset = be32_to_cpu(shdr_shstr->sh_offset);
2937 +
2938 +       for (i = 0; i < e_shnum; i++) {
2939 +               shdr = (struct elf32_shdr *)(fw->data + e_shoff
2940 +                                            + i * e_shentsize);
2941 +
2942 +               sh_name = be32_to_cpu(shdr->sh_name);
2943 +
2944 +               name = (const char *)(fw->data + shstr_offset + sh_name);
2945 +
2946 +               if (!strcmp(name, section))
2947 +                       return shdr;
2948 +       }
2949 +
2950 +       pr_err("%s: didn't find section %s\n", __func__, section);
2951 +
2952 +       return NULL;
2953 +}
2954 +
2955 +#if defined(CFG_DIAGS)
2956 +static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info
2957 +                               *diags_info)
2958 +{
2959 +       struct elf32_shdr *shdr;
2960 +       unsigned long offset, size;
2961 +
2962 +       shdr = get_elf_section_header(fw, ".pfe_diags_str");
2963 +       if (shdr) {
2964 +               offset = be32_to_cpu(shdr->sh_offset);
2965 +               size = be32_to_cpu(shdr->sh_size);
2966 +               diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr);
2967 +               diags_info->diags_str_size = size;
2968 +               diags_info->diags_str_array = kmalloc(size, GFP_KERNEL);
2969 +               memcpy(diags_info->diags_str_array, fw->data + offset, size);
2970 +
2971 +               return 0;
2972 +       } else {
2973 +               return -1;
2974 +       }
2975 +}
2976 +#endif
2977 +
2978 +static void pfe_check_version_info(const struct firmware *fw)
2979 +{
2980 +       /*static char *version = NULL;*/
2981 +       static char *version;
2982 +
2983 +       struct elf32_shdr *shdr = get_elf_section_header(fw, ".version");
2984 +
2985 +       if (shdr) {
2986 +               if (!version) {
2987 +                       /*
2988 +                        * this is the first fw we load, use its version
2989 +                        * string as reference (whatever it is)
2990 +                        */
2991 +                       version = (char *)(fw->data +
2992 +                                       be32_to_cpu(shdr->sh_offset));
2993 +
2994 +                       pr_info("PFE binary version: %s\n", version);
2995 +               } else {
2996 +                       /*
2997 +                        * already have loaded at least one firmware, check
2998 +                        * sequence can start now
2999 +                        */
3000 +                       if (strcmp(version, (char *)(fw->data +
3001 +                               be32_to_cpu(shdr->sh_offset)))) {
3002 +                               pr_info(
3003 +                               "WARNING: PFE firmware binaries are from incompatible versions\n");
3004 +                       }
3005 +               }
3006 +       } else {
3007 +               /*
3008 +                * version cannot be verified, a potential issue that should
3009 +                * be reported
3010 +                */
3011 +               pr_info(
3012 +                        "WARNING: PFE firmware version could not be verified\n");
3013 +       }
3014 +}
3015 +
3016 +/* PFE elf firmware loader.
3017 + * Loads an elf firmware image into a list of PE's (specified using a bitmask)
3018 + *
3019 + * @param pe_mask      Mask of PE id's to load firmware to
3020 + * @param fw           Pointer to the firmware image
3021 + *
3022 + * @return             0 on success, a negative value on error
3023 + *
3024 + */
3025 +int pfe_load_elf(int pe_mask, const struct firmware *fw, struct pfe *pfe)
3026 +{
3027 +       struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
3028 +       Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum);
3029 +       struct elf32_shdr *shdr = (struct elf32_shdr *)(fw->data +
3030 +                                       be32_to_cpu(elf_hdr->e_shoff));
3031 +       int id, section;
3032 +       int rc;
3033 +
3034 +       pr_info("%s\n", __func__);
3035 +
3036 +       /* Some sanity checks */
3037 +       if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG)) {
3038 +               pr_err("%s: incorrect elf magic number\n", __func__);
3039 +               return -EINVAL;
3040 +       }
3041 +
3042 +       if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
3043 +               pr_err("%s: incorrect elf class(%x)\n", __func__,
3044 +                      elf_hdr->e_ident[EI_CLASS]);
3045 +               return -EINVAL;
3046 +       }
3047 +
3048 +       if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB) {
3049 +               pr_err("%s: incorrect elf data(%x)\n", __func__,
3050 +                      elf_hdr->e_ident[EI_DATA]);
3051 +               return -EINVAL;
3052 +       }
3053 +
3054 +       if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC) {
3055 +               pr_err("%s: incorrect elf file type(%x)\n", __func__,
3056 +                      be16_to_cpu(elf_hdr->e_type));
3057 +               return -EINVAL;
3058 +       }
3059 +
3060 +       for (section = 0; section < sections; section++, shdr++) {
3061 +               if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC |
3062 +                       SHF_EXECINSTR)))
3063 +                       continue;
3064 +
3065 +               for (id = 0; id < MAX_PE; id++)
3066 +                       if (pe_mask & (1 << id)) {
3067 +                               rc = pe_load_elf_section(id, fw->data, shdr,
3068 +                                                        pfe->dev);
3069 +                               if (rc < 0)
3070 +                                       goto err;
3071 +                       }
3072 +       }
3073 +
3074 +       pfe_check_version_info(fw);
3075 +
3076 +       return 0;
3077 +
3078 +err:
3079 +       return rc;
3080 +}
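As a usage sketch (editorial, not part of the patch): because pe_mask is a plain bitmask of PE ids, a caller holding a struct pfe can load the same image into just a subset of the class PEs. CLASS_FIRMWARE_FILENAME and the PE ids below are the ones already used by this driver.

	const struct firmware *fw;
	int mask = (1 << CLASS0_ID) | (1 << CLASS1_ID);	/* first two class PEs only */

	if (!request_firmware(&fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
		if (pfe_load_elf(mask, fw, pfe) < 0)
			pr_err("partial class firmware load failed\n");
		release_firmware(fw);
	}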
3081 +
3082 +/* PFE firmware initialization.
3083 + * Loads different firmware files from filesystem.
3084 + * Initializes PE IMEM/DMEM and UTIL-PE DDR
3085 + * Initializes control path symbol addresses (by looking them up in the elf
3086 + * firmware files)
3087 + * Takes PE's out of reset
3088 + *
3089 + * @return     0 on success, a negative value on error
3090 + *
3091 + */
3092 +int pfe_firmware_init(struct pfe *pfe)
3093 +{
3094 +       const struct firmware *class_fw, *tmu_fw;
3095 +       int rc = 0;
3096 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
3097 +       const char *util_fw_name;
3098 +       const struct firmware *util_fw;
3099 +#endif
3100 +
3101 +       pr_info("%s\n", __func__);
3102 +
3103 +       if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
3104 +               pr_err("%s: request firmware %s failed\n", __func__,
3105 +                      CLASS_FIRMWARE_FILENAME);
3106 +               rc = -ETIMEDOUT;
3107 +               goto err0;
3108 +       }
3109 +
3110 +       if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) {
3111 +               pr_err("%s: request firmware %s failed\n", __func__,
3112 +                      TMU_FIRMWARE_FILENAME);
3113 +               rc = -ETIMEDOUT;
3114 +               goto err1;
3115 +       }
3116 +
3117 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
3118 +       util_fw_name = UTIL_FIRMWARE_FILENAME;
3119 +
3120 +       if (request_firmware(&util_fw, util_fw_name, pfe->dev)) {
3121 +               pr_err("%s: request firmware %s failed\n", __func__,
3122 +                      util_fw_name);
3123 +               rc = -ETIMEDOUT;
3124 +               goto err2;
3125 +       }
3126 +#endif
3127 +       rc = pfe_load_elf(CLASS_MASK, class_fw, pfe);
3128 +       if (rc < 0) {
3129 +               pr_err("%s: class firmware load failed\n", __func__);
3130 +               goto err3;
3131 +       }
3132 +
3133 +#if defined(CFG_DIAGS)
3134 +       rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info);
3135 +       if (rc < 0) {
3136 +               pr_warn(
3137 +                       "PFE diags won't be available for class PEs\n");
3138 +               rc = 0;
3139 +       }
3140 +#endif
3141 +
3142 +       rc = pfe_load_elf(TMU_MASK, tmu_fw, pfe);
3143 +       if (rc < 0) {
3144 +               pr_err("%s: tmu firmware load failed\n", __func__);
3145 +               goto err3;
3146 +       }
3147 +
3148 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
3149 +       rc = pfe_load_elf(UTIL_MASK, util_fw, pfe);
3150 +       if (rc < 0) {
3151 +               pr_err("%s: util firmware load failed\n", __func__);
3152 +               goto err3;
3153 +       }
3154 +
3155 +#if defined(CFG_DIAGS)
3156 +       rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info);
3157 +       if (rc < 0) {
3158 +               pr_warn(
3159 +                       "PFE diags won't be available for util PE\n");
3160 +               rc = 0;
3161 +       }
3162 +#endif
3163 +
3164 +       util_enable();
3165 +#endif
3166 +
3167 +       tmu_enable(0xf);
3168 +       class_enable();
3169 +
3170 +err3:
3171 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
3172 +       release_firmware(util_fw);
3173 +
3174 +err2:
3175 +#endif
3176 +       release_firmware(tmu_fw);
3177 +
3178 +err1:
3179 +       release_firmware(class_fw);
3180 +
3181 +err0:
3182 +       return rc;
3183 +}
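Editorial note: on success the function above intentionally falls through the err3/err2/err1 labels, so release_firmware() runs on both the success and the error paths; once the images have been copied into the PEs the blobs are no longer needed, and rc (still 0) is returned from err0.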
3184 +
3185 +/* PFE firmware cleanup
3186 + * Puts PE's in reset
3187 + *
3188 + *
3189 + */
3190 +void pfe_firmware_exit(struct pfe *pfe)
3191 +{
3192 +       pr_info("%s\n", __func__);
3193 +
3194 +       if (pe_reset_all(&pfe->ctrl) != 0)
3195 +               pr_err("Error: Failed to stop PEs, PFE reload may not work correctly\n");
3196 +
3197 +       class_disable();
3198 +       tmu_disable(0xf);
3199 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
3200 +       util_disable();
3201 +#endif
3202 +}
3203 --- /dev/null
3204 +++ b/drivers/staging/fsl_ppfe/pfe_hal.c
3205 @@ -0,0 +1,1516 @@
3206 +/*
3207 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
3208 + * Copyright 2017 NXP
3209 + *
3210 + * This program is free software; you can redistribute it and/or modify
3211 + * it under the terms of the GNU General Public License as published by
3212 + * the Free Software Foundation; either version 2 of the License, or
3213 + * (at your option) any later version.
3214 + *
3215 + * This program is distributed in the hope that it will be useful,
3216 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3217 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
3218 + * GNU General Public License for more details.
3219 + *
3220 + * You should have received a copy of the GNU General Public License
3221 + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
3222 + */
3223 +
3224 +#include "pfe_mod.h"
3225 +#include "pfe/pfe.h"
3226 +
3227 +void *cbus_base_addr;
3228 +void *ddr_base_addr;
3229 +unsigned long ddr_phys_base_addr;
3230 +unsigned int ddr_size;
3231 +
3232 +static struct pe_info pe[MAX_PE];
3233 +
3234 +/* Initializes the PFE library.
3235 + * Must be called before using any of the library functions.
3236 + *
3237 + * @param[in] cbus_base                CBUS virtual base address (as mapped in
3238 + * the host CPU address space)
3239 + * @param[in] ddr_base         PFE DDR range virtual base address (as
3240 + * mapped in the host CPU address space)
3241 + * @param[in] ddr_phys_base    PFE DDR range physical base address (as
3242 + * mapped in platform)
3243 + * @param[in] size             PFE DDR range size (as defined by the host
3244 + * software)
3245 + */
3246 +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
3247 +                 unsigned int size)
3248 +{
3249 +       cbus_base_addr = cbus_base;
3250 +       ddr_base_addr = ddr_base;
3251 +       ddr_phys_base_addr = ddr_phys_base;
3252 +       ddr_size = size;
3253 +
3254 +       pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
3255 +       pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
3256 +       pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
3257 +       pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
3258 +       pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
3259 +       pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
3260 +
3261 +       pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
3262 +       pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
3263 +       pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
3264 +       pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
3265 +       pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
3266 +       pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
3267 +
3268 +       pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
3269 +       pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
3270 +       pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
3271 +       pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
3272 +       pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
3273 +       pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
3274 +
3275 +       pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
3276 +       pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
3277 +       pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
3278 +       pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
3279 +       pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
3280 +       pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
3281 +
3282 +       pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
3283 +       pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
3284 +       pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
3285 +       pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
3286 +       pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
3287 +       pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
3288 +
3289 +       pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
3290 +       pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
3291 +       pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
3292 +       pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
3293 +       pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
3294 +       pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
3295 +
3296 +       pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
3297 +       pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
3298 +       pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
3299 +       pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
3300 +       pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
3301 +       pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
3302 +
3303 +       pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
3304 +       pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
3305 +       pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
3306 +       pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
3307 +       pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
3308 +       pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
3309 +
3310 +       pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
3311 +       pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
3312 +       pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
3313 +       pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
3314 +       pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
3315 +       pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
3316 +
3317 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
3318 +       pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
3319 +       pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
3320 +       pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
3321 +       pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
3322 +#endif
3323 +}
3324 +
3325 +/* Writes a buffer to PE internal memory from the host
3326 + * through indirect access registers.
3327 + *
3328 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
3329 + * ..., UTIL_ID)
3330 + * @param[in] src              Buffer source address
3331 + * @param[in] mem_access_addr  DMEM destination address (must be 32bit
3332 + * aligned)
3333 + * @param[in] len              Number of bytes to copy
3334 + */
3335 +void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src, unsigned
3336 +int len)
3337 +{
3338 +       u32 offset = 0, val, addr;
3339 +       unsigned int len32 = len >> 2;
3340 +       int i;
3341 +
3342 +       addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
3343 +               PE_MEM_ACCESS_BYTE_ENABLE(0, 4);
3344 +
3345 +       for (i = 0; i < len32; i++, offset += 4, src += 4) {
3346 +               val = *(u32 *)src;
3347 +               writel(cpu_to_be32(val), pe[id].mem_access_wdata);
3348 +               writel(addr + offset, pe[id].mem_access_addr);
3349 +       }
3350 +
3351 +       len = (len & 0x3);
3352 +       if (len) {
3353 +               val = 0;
3354 +
3355 +               addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
3356 +                       PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;
3357 +
3358 +               for (i = 0; i < len; i++, src++)
3359 +                       val |= (*(u8 *)src) << (8 * i);
3360 +
3361 +               writel(cpu_to_be32(val), pe[id].mem_access_wdata);
3362 +               writel(addr, pe[id].mem_access_addr);
3363 +       }
3364 +}
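Editorial note on the tail handling above: any remaining 1-3 bytes are packed into the low bits of a single scratch word and the byte-enable field is narrowed to exactly len bytes, so the final indirect write never touches PE memory beyond the requested length.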
3365 +
3366 +/* Writes a buffer to PE internal data memory (DMEM) from the host
3367 + * through indirect access registers.
3368 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
3369 + * ..., UTIL_ID)
3370 + * @param[in] src              Buffer source address
3371 + * @param[in] dst              DMEM destination address (must be 32bit
3372 + * aligned)
3373 + * @param[in] len              Number of bytes to copy
3374 + */
3375 +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
3376 +{
3377 +       pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst |
3378 +                               PE_MEM_ACCESS_DMEM, src, len);
3379 +}
3380 +
3381 +/* Writes a buffer to PE internal program memory (PMEM) from the host
3382 + * through indirect access registers.
3383 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
3384 + * ..., TMU3_ID)
3385 + * @param[in] src              Buffer source address
3386 + * @param[in] dst              PMEM destination address (must be 32bit
3387 + * aligned)
3388 + * @param[in] len              Number of bytes to copy
3389 + */
3390 +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
3391 +{
3392 +       pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
3393 +                               - 1)) | PE_MEM_ACCESS_IMEM, src, len);
3394 +}
3395 +
3396 +/* Reads PE internal program memory (IMEM) from the host
3397 + * through indirect access registers.
3398 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
3399 + * ..., TMU3_ID)
3400 + * @param[in] addr             PMEM read address (must be aligned on size)
3401 + * @param[in] size             Number of bytes to read (maximum 4, must not
3402 + * cross 32bit boundaries)
3403 + * @return                     the data read (in PE endianness, i.e. BE).
3404 + */
3405 +u32 pe_pmem_read(int id, u32 addr, u8 size)
3406 +{
3407 +       u32 offset = addr & 0x3;
3408 +       u32 mask = 0xffffffff >> ((4 - size) << 3);
3409 +       u32 val;
3410 +
3411 +       addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
3412 +               | PE_MEM_ACCESS_IMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
3413 +
3414 +       writel(addr, pe[id].mem_access_addr);
3415 +       val = be32_to_cpu(readl(pe[id].mem_access_rdata));
3416 +
3417 +       return (val >> (offset << 3)) & mask;
3418 +}
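Worked example for the masking above (editorial, values chosen for illustration): for size = 2 and an address whose low two bits are 2, mask = 0xffffffff >> ((4 - 2) << 3) = 0x0000ffff and the function returns (val >> 16) & 0xffff, i.e. the upper half-word of the 32-bit IMEM word, already converted from PE (big endian) order by be32_to_cpu().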
3419 +
3420 +/* Writes PE internal data memory (DMEM) from the host
3421 + * through indirect access registers.
3422 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
3423 + * ..., UTIL_ID)
3424 + * @param[in] addr             DMEM write address (must be aligned on size)
3425 + * @param[in] val              Value to write (in PE endianness, i.e. BE)
3426 + * @param[in] size             Number of bytes to write (maximum 4, must not
3427 + * cross 32bit boundaries)
3428 + */
3429 +void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
3430 +{
3431 +       u32 offset = addr & 0x3;
3432 +
3433 +       addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
3434 +               PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
3435 +
3436 +       /* Indirect access interface is byte swapping data being written */
3437 +       writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
3438 +       writel(addr, pe[id].mem_access_addr);
3439 +}
3440 +
3441 +/* Reads PE internal data memory (DMEM) from the host
3442 + * through indirect access registers.
3443 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
3444 + * ..., UTIL_ID)
3445 + * @param[in] addr             DMEM read address (must be aligned on size)
3446 + * @param[in] size             Number of bytes to read (maximum 4, must not
3447 + * cross 32bit boundaries)
3448 + * @return                     the data read (in PE endianness, i.e BE).
3449 + */
3450 +u32 pe_dmem_read(int id, u32 addr, u8 size)
3451 +{
3452 +       u32 offset = addr & 0x3;
3453 +       u32 mask = 0xffffffff >> ((4 - size) << 3);
3454 +       u32 val;
3455 +
3456 +       addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_DMEM |
3457 +                       PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
3458 +
3459 +       writel(addr, pe[id].mem_access_addr);
3460 +
3461 +       /* Indirect access interface is byte swapping data being read */
3462 +       val = be32_to_cpu(readl(pe[id].mem_access_rdata));
3463 +
3464 +       return (val >> (offset << 3)) & mask;
3465 +}
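A minimal usage sketch for the DMEM accessor pair above (editorial; FLAG_ADDR is a placeholder offset, not a symbol from this patch). Values cross the interface in PE byte order, so a host-side read-modify-write converts explicitly:

	u32 flags = pe_dmem_read(CLASS0_ID, FLAG_ADDR, 4);	/* BE value */

	flags = cpu_to_be32(be32_to_cpu(flags) | 0x1);		/* set bit 0 host-side */
	pe_dmem_write(CLASS0_ID, flags, FLAG_ADDR, 4);		/* write back in BE */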
3466 +
3467 +/* This function is used to write to CLASS internal bus peripherals (ccu,
3468 + * pe-lem) from the host
3469 + * through indirect access registers.
3470 + * @param[in]  val     value to write
3471 + * @param[in]  addr    Address to write to (must be aligned on size)
3472 + * @param[in]  size    Number of bytes to write (1, 2 or 4)
3473 + *
3474 + */
3475 +void class_bus_write(u32 val, u32 addr, u8 size)
3476 +{
3477 +       u32 offset = addr & 0x3;
3478 +
3479 +       writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
3480 +
3481 +       addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
3482 +                       (size << 24);
3483 +
3484 +       writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
3485 +       writel(addr, CLASS_BUS_ACCESS_ADDR);
3486 +}
3487 +
3488 +/* Reads from CLASS internal bus peripherals (ccu, pe-lem) from the host
3489 + * through indirect access registers.
3490 + * @param[in] addr     Address to read from (must be aligned on size)
3491 + * @param[in] size     Number of bytes to read (1, 2 or 4)
3492 + * @return             the read data
3493 + *
3494 + */
3495 +u32 class_bus_read(u32 addr, u8 size)
3496 +{
3497 +       u32 offset = addr & 0x3;
3498 +       u32 mask = 0xffffffff >> ((4 - size) << 3);
3499 +       u32 val;
3500 +
3501 +       writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
3502 +
3503 +       addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);
3504 +
3505 +       writel(addr, CLASS_BUS_ACCESS_ADDR);
3506 +       val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));
3507 +
3508 +       return (val >> (offset << 3)) & mask;
3509 +}
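Editorial note: both class bus accessors use the same two-step window. The high bits of the target address (addr & CLASS_BUS_ACCESS_BASE_MASK) are latched into CLASS_BUS_ACCESS_BASE first, then the remaining offset together with the size field (and, for writes, the write command) is issued through CLASS_BUS_ACCESS_ADDR, which triggers the actual bus cycle.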
3510 +
3511 +/* Writes data to the cluster memory (PE_LMEM)
3512 + * @param[in] dst      PE LMEM destination address (must be 32bit aligned)
3513 + * @param[in] src      Buffer source address
3514 + * @param[in] len      Number of bytes to copy
3515 + */
3516 +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len)
3517 +{
3518 +       u32 len32 = len >> 2;
3519 +       int i;
3520 +
3521 +       for (i = 0; i < len32; i++, src += 4, dst += 4)
3522 +               class_bus_write(*(u32 *)src, dst, 4);
3523 +
3524 +       if (len & 0x2) {
3525 +               class_bus_write(*(u16 *)src, dst, 2);
3526 +               src += 2;
3527 +               dst += 2;
3528 +       }
3529 +
3530 +       if (len & 0x1) {
3531 +               class_bus_write(*(u8 *)src, dst, 1);
3532 +               src++;
3533 +               dst++;
3534 +       }
3535 +}
3536 +
3537 +/* Writes value to the cluster memory (PE_LMEM)
3538 + * @param[in] dst      PE LMEM destination address (must be 32bit aligned)
3539 + * @param[in] val      Value to write
3540 + * @param[in] len      Number of bytes to write
3541 + */
3542 +void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
3543 +{
3544 +       u32 len32 = len >> 2;
3545 +       int i;
3546 +
3547 +       val = val | (val << 8) | (val << 16) | (val << 24);
3548 +
3549 +       for (i = 0; i < len32; i++, dst += 4)
3550 +               class_bus_write(val, dst, 4);
3551 +
3552 +       if (len & 0x2) {
3553 +               class_bus_write(val, dst, 2);
3554 +               dst += 2;
3555 +       }
3556 +
3557 +       if (len & 0x1) {
3558 +               class_bus_write(val, dst, 1);
3559 +               dst++;
3560 +       }
3561 +}
3562 +
3563 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
3564 +
3565 +/* Writes UTIL program memory (DDR) from the host.
3566 + *
3567 + * @param[in] addr     Address to write (virtual, must be aligned on size)
3568 + * @param[in] val              Value to write (in PE endianness, i.e. BE)
3569 + * @param[in] size             Number of bytes to write (2 or 4)
3570 + */
3571 +static void util_pmem_write(u32 val, void *addr, u8 size)
3572 +{
3573 +       void *addr64 = (void *)((unsigned long)addr & ~0x7);
3574 +       unsigned long off = 8 - ((unsigned long)addr & 0x7) - size;
3575 +
3576 +       /*
3577 +        * IMEM should be loaded as a 64bit swapped value in a 64bit aligned
3578 +        * location
3579 +        */
3580 +       if (size == 4)
3581 +               writel(be32_to_cpu(val), addr64 + off);
3582 +       else
3583 +               writew(be16_to_cpu((u16)val), addr64 + off);
3584 +}
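Worked placement example for the swapping rule above (editorial): a 4-byte write to an address whose low three bits are 4 computes off = 8 - 4 - 4 = 0, while one whose low three bits are 0 lands at off = 4, so the two 32-bit halves of every 64-bit IMEM location end up swapped (and the value itself is byte-reversed by the be32_to_cpu()/writel() pair). This is what the "64bit swapped value" comment refers to.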
3585 +
3586 +/* Writes a buffer to UTIL program memory (DDR) from the host.
3587 + *
3588 + * @param[in] dst      Address to write (virtual, must be at least 16bit
3589 + * aligned)
3590 + * @param[in] src      Buffer to write (in PE endianness, i.e. BE, must have
3591 + * same alignment as dst)
3592 + * @param[in] len      Number of bytes to write (must be at least 16bit
3593 + * aligned)
3594 + */
3595 +static void util_pmem_memcpy(void *dst, const void *src, unsigned int len)
3596 +{
3597 +       unsigned int len32;
3598 +       int i;
3599 +
3600 +       if ((unsigned long)src & 0x2) {
3601 +               util_pmem_write(*(u16 *)src, dst, 2);
3602 +               src += 2;
3603 +               dst += 2;
3604 +               len -= 2;
3605 +       }
3606 +
3607 +       len32 = len >> 2;
3608 +
3609 +       for (i = 0; i < len32; i++, dst += 4, src += 4)
3610 +               util_pmem_write(*(u32 *)src, dst, 4);
3611 +
3612 +       if (len & 0x2)
3613 +               util_pmem_write(*(u16 *)src, dst, len & 0x2);
3614 +}
3615 +#endif
3616 +
3617 +/* Loads an elf section into pmem
3618 + * Code needs to be at least 16bit aligned and only PROGBITS sections are
3619 + * supported
3620 + *
3621 + * @param[in] id       PE identification (CLASS0_ID, ..., TMU0_ID, ...,
3622 + * TMU3_ID)
3623 + * @param[in] data     pointer to the elf firmware
3624 + * @param[in] shdr     pointer to the elf section header
3625 + *
3626 + */
3627 +static int pe_load_pmem_section(int id, const void *data,
3628 +                               struct elf32_shdr *shdr)
3629 +{
3630 +       u32 offset = be32_to_cpu(shdr->sh_offset);
3631 +       u32 addr = be32_to_cpu(shdr->sh_addr);
3632 +       u32 size = be32_to_cpu(shdr->sh_size);
3633 +       u32 type = be32_to_cpu(shdr->sh_type);
3634 +
3635 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
3636 +       if (id == UTIL_ID) {
3637 +               pr_err("%s: unsupported pmem section for UTIL\n",
3638 +                      __func__);
3639 +               return -EINVAL;
3640 +       }
3641 +#endif
3642 +
3643 +       if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
3644 +               pr_err(
3645 +                       "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
3646 +                       , __func__, addr, (unsigned long)data + offset);
3647 +
3648 +               return -EINVAL;
3649 +       }
3650 +
3651 +       if (addr & 0x1) {
3652 +               pr_err("%s: load address(%x) is not 16bit aligned\n",
3653 +                      __func__, addr);
3654 +               return -EINVAL;
3655 +       }
3656 +
3657 +       if (size & 0x1) {
3658 +               pr_err("%s: load size(%x) is not 16bit aligned\n",
3659 +                      __func__, size);
3660 +               return -EINVAL;
3661 +       }
3662 +
3663 +       switch (type) {
3664 +       case SHT_PROGBITS:
3665 +               pe_pmem_memcpy_to32(id, addr, data + offset, size);
3666 +
3667 +               break;
3668 +
3669 +       default:
3670 +               pr_err("%s: unsupported section type(%x)\n", __func__,
3671 +                      type);
3672 +               return -EINVAL;
3673 +       }
3674 +
3675 +       return 0;
3676 +}
3677 +
3678 +/* Loads an elf section into dmem
3679 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
3680 + * initialized to 0
3681 + *
3682 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
3683 + * ..., UTIL_ID)
3684 + * @param[in] data             pointer to the elf firmware
3685 + * @param[in] shdr             pointer to the elf section header
3686 + *
3687 + */
3688 +static int pe_load_dmem_section(int id, const void *data,
3689 +                               struct elf32_shdr *shdr)
3690 +{
3691 +       u32 offset = be32_to_cpu(shdr->sh_offset);
3692 +       u32 addr = be32_to_cpu(shdr->sh_addr);
3693 +       u32 size = be32_to_cpu(shdr->sh_size);
3694 +       u32 type = be32_to_cpu(shdr->sh_type);
3695 +       u32 size32 = size >> 2;
3696 +       int i;
3697 +
3698 +       if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
3699 +               pr_err(
3700 +                       "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
3701 +                       __func__, addr, (unsigned long)data + offset);
3702 +
3703 +               return -EINVAL;
3704 +       }
3705 +
3706 +       if (addr & 0x3) {
3707 +               pr_err("%s: load address(%x) is not 32bit aligned\n",
3708 +                      __func__, addr);
3709 +               return -EINVAL;
3710 +       }
3711 +
3712 +       switch (type) {
3713 +       case SHT_PROGBITS:
3714 +               pe_dmem_memcpy_to32(id, addr, data + offset, size);
3715 +               break;
3716 +
3717 +       case SHT_NOBITS:
3718 +               for (i = 0; i < size32; i++, addr += 4)
3719 +                       pe_dmem_write(id, 0, addr, 4);
3720 +
3721 +               if (size & 0x3)
3722 +                       pe_dmem_write(id, 0, addr, size & 0x3);
3723 +
3724 +               break;
3725 +
3726 +       default:
3727 +               pr_err("%s: unsupported section type(%x)\n", __func__,
3728 +                      type);
3729 +               return -EINVAL;
3730 +       }
3731 +
3732 +       return 0;
3733 +}
3734 +
3735 +/* Loads an elf section into DDR
3736 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
3737 + * initialized to 0
3738 + *
3739 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
3740 + * ..., UTIL_ID)
3741 + * @param[in] data             pointer to the elf firmware
3742 + * @param[in] shdr             pointer to the elf section header
3743 + *
3744 + */
3745 +static int pe_load_ddr_section(int id, const void *data,
3746 +                              struct elf32_shdr *shdr,
3747 +                              struct device *dev) {
3748 +       u32 offset = be32_to_cpu(shdr->sh_offset);
3749 +       u32 addr = be32_to_cpu(shdr->sh_addr);
3750 +       u32 size = be32_to_cpu(shdr->sh_size);
3751 +       u32 type = be32_to_cpu(shdr->sh_type);
3752 +       u32 flags = be32_to_cpu(shdr->sh_flags);
3753 +
3754 +       switch (type) {
3755 +       case SHT_PROGBITS:
3756 +               if (flags & SHF_EXECINSTR) {
3757 +                       if (id <= CLASS_MAX_ID) {
3758 +                               /* DO the loading only once in DDR */
3759 +                               if (id == CLASS0_ID) {
3760 +                                       pr_err(
3761 +                                               "%s: load address(%x) and elf file address(%lx) rcvd\n",
3762 +                                               __func__, addr,
3763 +                                               (unsigned long)data + offset);
3764 +                                       if (((unsigned long)(data + offset)
3765 +                                               & 0x3) != (addr & 0x3)) {
3766 +                                               pr_err(
3767 +                                                       "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
3768 +                                                       , __func__, addr,
3769 +                                               (unsigned long)data + offset);
3770 +
3771 +                                               return -EINVAL;
3772 +                                       }
3773 +
3774 +                                       if (addr & 0x1) {
3775 +                                               pr_err(
3776 +                                                       "%s: load address(%x) is not 16bit aligned\n"
3777 +                                                       , __func__, addr);
3778 +                                               return -EINVAL;
3779 +                                       }
3780 +
3781 +                                       if (size & 0x1) {
3782 +                                               pr_err(
3783 +                                                       "%s: load length(%x) is not 16bit aligned\n"
3784 +                                                       , __func__, size);
3785 +                                               return -EINVAL;
3786 +                                       }
3787 +                                       memcpy(DDR_PHYS_TO_VIRT(
3788 +                                               DDR_PFE_TO_PHYS(addr)),
3789 +                                               data + offset, size);
3790 +                               }
3791 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
3792 +                       } else if (id == UTIL_ID) {
3793 +                               if (((unsigned long)(data + offset) & 0x3)
3794 +                                       != (addr & 0x3)) {
3795 +                                       pr_err(
3796 +                                               "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
3797 +                                               , __func__, addr,
3798 +                                               (unsigned long)data + offset);
3799 +
3800 +                                       return -EINVAL;
3801 +                               }
3802 +
3803 +                               if (addr & 0x1) {
3804 +                                       pr_err(
3805 +                                               "%s: load address(%x) is not 16bit aligned\n"
3806 +                                               , __func__, addr);
3807 +                                       return -EINVAL;
3808 +                               }
3809 +
3810 +                               if (size & 0x1) {
3811 +                                       pr_err(
3812 +                                               "%s: load length(%x) is not 16bit aligned\n"
3813 +                                               , __func__, size);
3814 +                                       return -EINVAL;
3815 +                               }
3816 +
3817 +                               util_pmem_memcpy(DDR_PHYS_TO_VIRT(
3818 +                                                       DDR_PFE_TO_PHYS(addr)),
3819 +                                                       data + offset, size);
3820 +                       }
3821 +#endif
3822 +                       } else {
3823 +                               pr_err(
3824 +                                       "%s: unsupported ddr section type(%x) for PE(%d)\n"
3825 +                                               , __func__, type, id);
3826 +                               return -EINVAL;
3827 +                       }
3828 +
3829 +               } else {
3830 +                       memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data
3831 +                               + offset, size);
3832 +               }
3833 +
3834 +               break;
3835 +
3836 +       case SHT_NOBITS:
3837 +               memset(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), 0, size);
3838 +
3839 +               break;
3840 +
3841 +       default:
3842 +               pr_err("%s: unsupported section type(%x)\n", __func__,
3843 +                      type);
3844 +               return -EINVAL;
3845 +       }
3846 +
3847 +       return 0;
3848 +}
3849 +
3850 +/* Loads an elf section into pe lmem
3851 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
3852 + * initialized to 0
3853 + *
3854 + * @param[in] id               PE identification (CLASS0_ID,..., CLASS5_ID)
3855 + * @param[in] data             pointer to the elf firmware
3856 + * @param[in] shdr             pointer to the elf section header
3857 + *
3858 + */
3859 +static int pe_load_pe_lmem_section(int id, const void *data,
3860 +                                  struct elf32_shdr *shdr)
3861 +{
3862 +       u32 offset = be32_to_cpu(shdr->sh_offset);
3863 +       u32 addr = be32_to_cpu(shdr->sh_addr);
3864 +       u32 size = be32_to_cpu(shdr->sh_size);
3865 +       u32 type = be32_to_cpu(shdr->sh_type);
3866 +
3867 +       if (id > CLASS_MAX_ID) {
3868 +               pr_err(
3869 +                       "%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
3870 +                        __func__, type, id);
3871 +               return -EINVAL;
3872 +       }
3873 +
3874 +       if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
3875 +               pr_err(
3876 +                       "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
3877 +                       __func__, addr, (unsigned long)data + offset);
3878 +
3879 +               return -EINVAL;
3880 +       }
3881 +
3882 +       if (addr & 0x3) {
3883 +               pr_err("%s: load address(%x) is not 32bit aligned\n",
3884 +                      __func__, addr);
3885 +               return -EINVAL;
3886 +       }
3887 +
3888 +       switch (type) {
3889 +       case SHT_PROGBITS:
3890 +               class_pe_lmem_memcpy_to32(addr, data + offset, size);
3891 +               break;
3892 +
3893 +       case SHT_NOBITS:
3894 +               class_pe_lmem_memset(addr, 0, size);
3895 +               break;
3896 +
3897 +       default:
3898 +               pr_err("%s: unsupported section type(%x)\n", __func__,
3899 +                      type);
3900 +               return -EINVAL;
3901 +       }
3902 +
3903 +       return 0;
3904 +}
3905 +
3906 +/* Loads an elf section into a PE
3907 + * For now only supports loading a section to dmem (all PE's), pmem (class and
3908 + * tmu PE's),
3909 + * DDR (util PE code)
3910 + *
3911 + * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
3912 + * ..., UTIL_ID)
3913 + * @param[in] data             pointer to the elf firmware
3914 + * @param[in] shdr             pointer to the elf section header
3915 + *
3916 + */
3917 +int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
3918 +                       struct device *dev) {
3919 +       u32 addr = be32_to_cpu(shdr->sh_addr);
3920 +       u32 size = be32_to_cpu(shdr->sh_size);
3921 +
3922 +       if (IS_DMEM(addr, size))
3923 +               return pe_load_dmem_section(id, data, shdr);
3924 +       else if (IS_PMEM(addr, size))
3925 +               return pe_load_pmem_section(id, data, shdr);
3926 +       else if (IS_PFE_LMEM(addr, size))
3927 +               return 0;
3928 +       else if (IS_PHYS_DDR(addr, size))
3929 +               return pe_load_ddr_section(id, data, shdr, dev);
3930 +       else if (IS_PE_LMEM(addr, size))
3931 +               return pe_load_pe_lmem_section(id, data, shdr);
3932 +
3933 +       pr_err("%s: unsupported memory range(%x)\n", __func__,
3934 +              addr);
3935 +       return 0;
3936 +}
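For reference, the address-range dispatch above resolves as follows (editorial summary):

	IS_DMEM()     -> pe_load_dmem_section()
	IS_PMEM()     -> pe_load_pmem_section()
	IS_PFE_LMEM() -> skipped, returns 0
	IS_PHYS_DDR() -> pe_load_ddr_section()
	IS_PE_LMEM()  -> pe_load_pe_lmem_section()
	otherwise     -> an error is printed but 0 is still returned,
	                 so an unrecognized section does not abort pfe_load_elf()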
3937 +
3938 +/**************************** BMU ***************************/
3939 +
3940 +/* Initializes a BMU block.
3941 + * @param[in] base     BMU block base address
3942 + * @param[in] cfg      BMU configuration
3943 + */
3944 +void bmu_init(void *base, struct BMU_CFG *cfg)
3945 +{
3946 +       bmu_disable(base);
3947 +
3948 +       bmu_set_config(base, cfg);
3949 +
3950 +       bmu_reset(base);
3951 +}
3952 +
3953 +/* Resets a BMU block.
3954 + * @param[in] base     BMU block base address
3955 + */
3956 +void bmu_reset(void *base)
3957 +{
3958 +       writel(CORE_SW_RESET, base + BMU_CTRL);
3959 +
3960 +       /* Wait for self clear */
3961 +       while (readl(base + BMU_CTRL) & CORE_SW_RESET)
3962 +               ;
3963 +}
3964 +
3965 +/* Enables a BMU block.
3966 + * @param[in] base     BMU block base address
3967 + */
3968 +void bmu_enable(void *base)
3969 +{
3970 +       writel(CORE_ENABLE, base + BMU_CTRL);
3971 +}
3972 +
3973 +/* Disables a BMU block.
3974 + * @param[in] base     BMU block base address
3975 + */
3976 +void bmu_disable(void *base)
3977 +{
3978 +       writel(CORE_DISABLE, base + BMU_CTRL);
3979 +}
3980 +
3981 +/* Sets the configuration of a BMU block.
3982 + * @param[in] base     BMU block base address
3983 + * @param[in] cfg      BMU configuration
3984 + */
3985 +void bmu_set_config(void *base, struct BMU_CFG *cfg)
3986 +{
3987 +       writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
3988 +       writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
3989 +       writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);
3990 +
3991 +       /* Interrupts are never used */
3992 +       writel(cfg->low_watermark, base + BMU_LOW_WATERMARK);
3993 +       writel(cfg->high_watermark, base + BMU_HIGH_WATERMARK);
3994 +       writel(0x0, base + BMU_INT_ENABLE);
3995 +}
3996 +
3997 +/**************************** MTIP GEMAC ***************************/
3998 +
3999 +/* Enable Rx Checksum Engine. With this enabled, Frame with bad IP,
4000 + *   TCP or UDP checksums are discarded
4001 + *
4002 + * @param[in] base     GEMAC base address.
4003 + */
4004 +void gemac_enable_rx_checksum_offload(void *base)
4005 +{
4006 +       /* No configuration found to do this */
4007 +}
4008 +
4009 +/* Disable Rx Checksum Engine.
4010 + *
4011 + * @param[in] base     GEMAC base address.
4012 + */
4013 +void gemac_disable_rx_checksum_offload(void *base)
4014 +{
4015 +       /* No configuration found to do this */
4016 +}
4017 +
4018 +/* GEMAC set speed.
4019 + * @param[in] base     GEMAC base address
4020 + * @param[in] gem_speed        GEMAC speed (10, 100 or 1000 Mbps)
4021 + */
4022 +void gemac_set_speed(void *base, enum mac_speed gem_speed)
4023 +{
4024 +       u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
4025 +       u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;
4026 +
4027 +       switch (gem_speed) {
4028 +       case SPEED_10M:
4029 +                       rcr |= EMAC_RCNTRL_RMII_10T;
4030 +                       break;
4031 +
4032 +       case SPEED_1000M:
4033 +                       ecr |= EMAC_ECNTRL_SPEED;
4034 +                       break;
4035 +
4036 +       case SPEED_100M:
4037 +       default:
4038 +                       /*It is in 100M mode */
4039 +                       break;
4040 +       }
4041 +       writel(ecr, (base + EMAC_ECNTRL_REG));
4042 +       writel(rcr, (base + EMAC_RCNTRL_REG));
4043 +}
4044 +
4045 +/* GEMAC set duplex.
4046 + * @param[in] base     GEMAC base address
4047 + * @param[in] duplex   GEMAC duplex mode (Full, Half)
4048 + */
4049 +void gemac_set_duplex(void *base, int duplex)
4050 +{
4051 +       if (duplex == DUPLEX_HALF) {
4052 +               writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base
4053 +                       + EMAC_TCNTRL_REG);
4054 +               writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base
4055 +                       + EMAC_RCNTRL_REG));
4056 +       } else {
4057 +               writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base
4058 +                       + EMAC_TCNTRL_REG);
4059 +               writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base
4060 +                       + EMAC_RCNTRL_REG));
4061 +       }
4062 +}
4063 +
4064 +/* GEMAC set mode.
4065 + * @param[in] base     GEMAC base address
4066 + * @param[in] mode     GEMAC operation mode (MII, RMII, RGMII, SGMII)
4067 + */
4068 +void gemac_set_mode(void *base, int mode)
4069 +{
4070 +       u32 val = readl(base + EMAC_RCNTRL_REG);
4071 +
4072 +       /*Remove loopback*/
4073 +       val &= ~EMAC_RCNTRL_LOOP;
4074 +
4075 +       /*Enable flow control and MII mode*/
4076 +       val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE);
4077 +
4078 +       writel(val, base + EMAC_RCNTRL_REG);
4079 +}
4080 +
4081 +/* GEMAC enable function.
4082 + * @param[in] base     GEMAC base address
4083 + */
4084 +void gemac_enable(void *base)
4085 +{
4086 +       writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base +
4087 +               EMAC_ECNTRL_REG);
4088 +}
4089 +
4090 +/* GEMAC disable function.
4091 + * @param[in] base     GEMAC base address
4092 + */
4093 +void gemac_disable(void *base)
4094 +{
4095 +       writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base +
4096 +               EMAC_ECNTRL_REG);
4097 +}
4098 +
4099 +/* GEMAC TX disable function.
4100 + * @param[in] base     GEMAC base address
4101 + */
4102 +void gemac_tx_disable(void *base)
4103 +{
4104 +       writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base +
4105 +               EMAC_TCNTRL_REG);
4106 +}
4107 +
4108 +void gemac_tx_enable(void *base)
4109 +{
4110 +       writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base +
4111 +                       EMAC_TCNTRL_REG);
4112 +}
4113 +
4114 +/* Sets the hash register of the MAC.
4115 + * This register is used for matching unicast and multicast frames.
4116 + *
4117 + * @param[in] base     GEMAC base address.
4118 + * @param[in] hash     64-bit hash to be configured.
4119 + */
4120 +void gemac_set_hash(void *base, struct pfe_mac_addr *hash)
4121 +{
4122 +       writel(hash->bottom,  base + EMAC_GALR);
4123 +       writel(hash->top, base + EMAC_GAUR);
4124 +}
4125 +
4126 +void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
4127 +                     unsigned int entry_index)
4128 +{
4129 +       if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
4130 +               return;
4131 +
4132 +       entry_index = entry_index - 1;
4133 +       if (entry_index < 1) {
4134 +               writel(htonl(address->bottom),  base + EMAC_PHY_ADDR_LOW);
4135 +               writel((htonl(address->top) | 0x8808), base +
4136 +                       EMAC_PHY_ADDR_HIGH);
4137 +       } else {
4138 +               writel(htonl(address->bottom),  base + ((entry_index - 1) * 8)
4139 +                       + EMAC_SMAC_0_0);
4140 +               writel((htonl(address->top) | 0x8808), base + ((entry_index -
4141 +                       1) * 8) + EMAC_SMAC_0_1);
4142 +       }
4143 +}
4144 +
4145 +void gemac_clear_laddrN(void *base, unsigned int entry_index)
4146 +{
4147 +       if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
4148 +               return;
4149 +
4150 +       entry_index = entry_index - 1;
4151 +       if (entry_index < 1) {
4152 +               writel(0, base + EMAC_PHY_ADDR_LOW);
4153 +               writel(0, base + EMAC_PHY_ADDR_HIGH);
4154 +       } else {
4155 +               writel(0,  base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
4156 +               writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
4157 +       }
4158 +}
4159 +
4160 +/* Set the loopback mode of the MAC.  This can be either no loopback for
4161 + * normal operation, local loopback through MAC internal loopback module or PHY
4162 + *   loopback for external loopback through a PHY.  This asserts the external
4163 + * loop pin.
4164 + *
4165 + * @param[in] base     GEMAC base address.
4166 + * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC
4167 + * Loopback,
4168 + *                     LB_EXT - PHY Loopback.
4169 + */
4170 +void gemac_set_loop(void *base, enum mac_loop gem_loop)
4171 +{
4172 +       pr_info("%s()\n", __func__);
4173 +       writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base +
4174 +               EMAC_RCNTRL_REG));
4175 +}
4176 +
4177 +/* GEMAC allow frames
4178 + * @param[in] base     GEMAC base address
4179 + */
4180 +void gemac_enable_copy_all(void *base)
4181 +{
4182 +       writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base +
4183 +               EMAC_RCNTRL_REG));
4184 +}
4185 +
4186 +/* GEMAC do not allow frames
4187 + * @param[in] base     GEMAC base address
4188 + */
4189 +void gemac_disable_copy_all(void *base)
4190 +{
4191 +       writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base +
4192 +               EMAC_RCNTRL_REG));
4193 +}
4194 +
4195 +/* GEMAC allow broadcast function.
4196 + * @param[in] base     GEMAC base address
4197 + */
4198 +void gemac_allow_broadcast(void *base)
4199 +{
4200 +       writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base +
4201 +               EMAC_RCNTRL_REG);
4202 +}
4203 +
4204 +/* GEMAC no broadcast function.
4205 + * @param[in] base     GEMAC base address
4206 + */
4207 +void gemac_no_broadcast(void *base)
4208 +{
4209 +       writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base +
4210 +               EMAC_RCNTRL_REG);
4211 +}
4212 +
4213 +/* GEMAC enable 1536 rx function.
4214 + * @param[in]  base    GEMAC base address
4215 + */
4216 +void gemac_enable_1536_rx(void *base)
4217 +{
4218 +       /* Set 1536 as Maximum frame length */
4219 +       writel(readl(base + EMAC_RCNTRL_REG) | (1536 << 16), base +
4220 +               EMAC_RCNTRL_REG);
4221 +}
4222 +
4223 +/* GEMAC enable jumbo function.
4224 + * @param[in]  base    GEMAC base address
4225 + */
4226 +void gemac_enable_rx_jmb(void *base)
4227 +{
4228 +       writel(readl(base + EMAC_RCNTRL_REG) | (JUMBO_FRAME_SIZE << 16), base
4229 +               + EMAC_RCNTRL_REG);
4230 +}
4231 +
4232 +/* GEMAC enable stacked vlan function.
4233 + * @param[in]  base    GEMAC base address
4234 + */
4235 +void gemac_enable_stacked_vlan(void *base)
4236 +{
4237 +       /* MTIP doesn't support stacked vlan */
4238 +}
4239 +
4240 +/* GEMAC enable pause rx function.
4241 + * @param[in] base     GEMAC base address
4242 + */
4243 +void gemac_enable_pause_rx(void *base)
4244 +{
4245 +       writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE,
4246 +              base + EMAC_RCNTRL_REG);
4247 +}
4248 +
4249 +/* GEMAC disable pause rx function.
4250 + * @param[in] base     GEMAC base address
4251 + */
4252 +void gemac_disable_pause_rx(void *base)
4253 +{
4254 +       writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE,
4255 +              base + EMAC_RCNTRL_REG);
4256 +}
4257 +
4258 +/* GEMAC enable pause tx function.
4259 + * @param[in] base GEMAC base address
4260 + */
4261 +void gemac_enable_pause_tx(void *base)
4262 +{
4263 +       writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY);
4264 +}
4265 +
4266 +/* GEMAC disable pause tx function.
4267 + * @param[in] base GEMAC base address
4268 + */
4269 +void gemac_disable_pause_tx(void *base)
4270 +{
4271 +       writel(0x0, base + EMAC_RX_SECTION_EMPTY);
4272 +}
4273 +
4274 +/* GEMAC wol configuration
4275 + * @param[in] base     GEMAC base address
4276 + * @param[in] wol_conf WoL register configuration
4277 + */
4278 +void gemac_set_wol(void *base, u32 wol_conf)
4279 +{
4280 +       u32  val = readl(base + EMAC_ECNTRL_REG);
4281 +
4282 +       if (wol_conf)
4283 +               val |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
4284 +       else
4285 +               val &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
4286 +       writel(val, base + EMAC_ECNTRL_REG);
4287 +}
4288 +
4289 +/* Sets Gemac bus width to 64bit
4290 + * @param[in] base       GEMAC base address
4291 + * @param[in] width     gemac bus width to be set; possible values are 32/64/128
4292 + */
4293 +void gemac_set_bus_width(void *base, int width)
4294 +{
4295 +}
4296 +
4297 +/* Sets Gemac configuration.
4298 + * @param[in] base     GEMAC base address
4299 + * @param[in] cfg      GEMAC configuration
4300 + */
4301 +void gemac_set_config(void *base, struct gemac_cfg *cfg)
4302 +{
4303 +       /*GEMAC config taken from VLSI */
4304 +       writel(0x00000004, base + EMAC_TFWR_STR_FWD);
4305 +       writel(0x00000005, base + EMAC_RX_SECTION_FULL);
4306 +       writel(0x00003fff, base + EMAC_TRUNC_FL);
4307 +       writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
4308 +       writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);
4309 +
4310 +       gemac_set_mode(base, cfg->mode);
4311 +
4312 +       gemac_set_speed(base, cfg->speed);
4313 +
4314 +       gemac_set_duplex(base, cfg->duplex);
4315 +}
4316 +
4317 +/**************************** GPI ***************************/
4318 +
4319 +/* Initializes a GPI block.
4320 + * @param[in] base     GPI base address
4321 + * @param[in] cfg      GPI configuration
4322 + */
4323 +void gpi_init(void *base, struct gpi_cfg *cfg)
4324 +{
4325 +       gpi_reset(base);
4326 +
4327 +       gpi_disable(base);
4328 +
4329 +       gpi_set_config(base, cfg);
4330 +}
4331 +
4332 +/* Resets a GPI block.
4333 + * @param[in] base     GPI base address
4334 + */
4335 +void gpi_reset(void *base)
4336 +{
4337 +       writel(CORE_SW_RESET, base + GPI_CTRL);
4338 +}
4339 +
4340 +/* Enables a GPI block.
4341 + * @param[in] base     GPI base address
4342 + */
4343 +void gpi_enable(void *base)
4344 +{
4345 +       writel(CORE_ENABLE, base + GPI_CTRL);
4346 +}
4347 +
4348 +/* Disables a GPI block.
4349 + * @param[in] base     GPI base address
4350 + */
4351 +void gpi_disable(void *base)
4352 +{
4353 +       writel(CORE_DISABLE, base + GPI_CTRL);
4354 +}
4355 +
4356 +/* Sets the configuration of a GPI block.
4357 + * @param[in] base     GPI base address
4358 + * @param[in] cfg      GPI configuration
4359 + */
4360 +void gpi_set_config(void *base, struct gpi_cfg *cfg)
4361 +{
4362 +       writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL),       base
4363 +               + GPI_LMEM_ALLOC_ADDR);
4364 +       writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL),        base
4365 +               + GPI_LMEM_FREE_ADDR);
4366 +       writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL),       base
4367 +               + GPI_DDR_ALLOC_ADDR);
4368 +       writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),        base
4369 +               + GPI_DDR_FREE_ADDR);
4370 +       writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
4371 +       writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
4372 +       writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
4373 +       writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
4374 +       writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
4375 +       writel((DDR_HDR_SIZE << 16) |   LMEM_HDR_SIZE,  base + GPI_HDR_SIZE);
4376 +       writel((DDR_BUF_SIZE << 16) |   LMEM_BUF_SIZE,  base + GPI_BUF_SIZE);
4377 +
4378 +       writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
4379 +               GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
4380 +       writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
4381 +       writel(cfg->aseq_len,   base + GPI_DTX_ASEQ);
4382 +       writel(1, base + GPI_TOE_CHKSUM_EN);
4383 +
4384 +       if (cfg->mtip_pause_reg) {
4385 +               writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG);
4386 +               writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME);
4387 +       }
4388 +}
4389 +
4390 +/**************************** CLASSIFIER ***************************/
4391 +
4392 +/* Initializes CLASSIFIER block.
4393 + * @param[in] cfg      CLASSIFIER configuration
4394 + */
4395 +void class_init(struct class_cfg *cfg)
4396 +{
4397 +       class_reset();
4398 +
4399 +       class_disable();
4400 +
4401 +       class_set_config(cfg);
4402 +}
4403 +
4404 +/* Resets CLASSIFIER block.
4405 + *
4406 + */
4407 +void class_reset(void)
4408 +{
4409 +       writel(CORE_SW_RESET, CLASS_TX_CTRL);
4410 +}
4411 +
4412 +/* Enables all CLASS-PE's cores.
4413 + *
4414 + */
4415 +void class_enable(void)
4416 +{
4417 +       writel(CORE_ENABLE, CLASS_TX_CTRL);
4418 +}
4419 +
4420 +/* Disables all CLASS-PE's cores.
4421 + *
4422 + */
4423 +void class_disable(void)
4424 +{
4425 +       writel(CORE_DISABLE, CLASS_TX_CTRL);
4426 +}
4427 +
4428 +/*
4429 + * Sets the configuration of the CLASSIFIER block.
4430 + * @param[in] cfg      CLASSIFIER configuration
4431 + */
4432 +void class_set_config(struct class_cfg *cfg)
4433 +{
4434 +       u32 val;
4435 +
4436 +       /* Initialize route table */
4437 +       if (!cfg->resume)
4438 +               memset(DDR_PHYS_TO_VIRT(cfg->route_table_baseaddr), 0, (1 <<
4439 +               cfg->route_table_hash_bits) * CLASS_ROUTE_SIZE);
4440 +
4441 +#if !defined(LS1012A_PFE_RESET_WA)
4442 +       writel(cfg->pe_sys_clk_ratio,   CLASS_PE_SYS_CLK_RATIO);
4443 +#endif
4444 +
4445 +       writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE,    CLASS_HDR_SIZE);
4446 +       writel(LMEM_BUF_SIZE,                           CLASS_LMEM_BUF_SIZE);
4447 +       writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
4448 +               CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
4449 +               CLASS_ROUTE_HASH_ENTRY_SIZE);
4450 +       writel(HIF_PKT_CLASS_EN | HIF_PKT_OFFSET(sizeof(struct hif_hdr)),
4451 +              CLASS_HIF_PARSE);
4452 +
4453 +       val = HASH_CRC_PORT_IP | QB2BUS_LE;
4454 +
4455 +#if defined(CONFIG_IP_ALIGNED)
4456 +       val |= IP_ALIGNED;
4457 +#endif
4458 +
4459 +       /*
4460 +        *  Class PE packet steering will only work if TOE mode, bridge fetch or
4461 +        * route fetch are enabled (see class/qb_fet.v). Route fetch would
4462 +        * trigger additional memory copies (likely from DDR because of hash
4463 +        * table size, which cannot be reduced because PE software still
4464 +        * relies on hash value computed in HW), so when not in TOE mode we
4465 +        * simply enable HW bridge fetch even though we don't use it.
4466 +        */
4467 +       if (cfg->toe_mode)
4468 +               val |= CLASS_TOE;
4469 +       else
4470 +               val |= HW_BRIDGE_FETCH;
4471 +
4472 +       writel(val, CLASS_ROUTE_MULTI);
4473 +
4474 +       writel(DDR_PHYS_TO_PFE(cfg->route_table_baseaddr),
4475 +              CLASS_ROUTE_TABLE_BASE);
4476 +       writel(CLASS_PE0_RO_DM_ADDR0_VAL,               CLASS_PE0_RO_DM_ADDR0);
4477 +       writel(CLASS_PE0_RO_DM_ADDR1_VAL,               CLASS_PE0_RO_DM_ADDR1);
4478 +       writel(CLASS_PE0_QB_DM_ADDR0_VAL,               CLASS_PE0_QB_DM_ADDR0);
4479 +       writel(CLASS_PE0_QB_DM_ADDR1_VAL,               CLASS_PE0_QB_DM_ADDR1);
4480 +       writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR),    CLASS_TM_INQ_ADDR);
4481 +
4482 +       writel(23, CLASS_AFULL_THRES);
4483 +       writel(23, CLASS_TSQ_FIFO_THRES);
4484 +
4485 +       writel(24, CLASS_MAX_BUF_CNT);
4486 +       writel(24, CLASS_TSQ_MAX_CNT);
4487 +}
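Illustrative sizing for the route table cleared above (the entry size is an assumption for the example, not taken from this patch): with route_table_hash_bits = 15 and a CLASS_ROUTE_SIZE of 128 bytes, the memset covers (1 << 15) * 128 = 4 MiB of DDR.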
4488 +
4489 +/**************************** TMU ***************************/
4490 +
4491 +void tmu_reset(void)
4492 +{
4493 +       writel(SW_RESET, TMU_CTRL);
4494 +}
4495 +
4496 +/* Initializes TMU block.
4497 + * @param[in] cfg      TMU configuration
4498 + */
4499 +void tmu_init(struct tmu_cfg *cfg)
4500 +{
4501 +       int q, phyno;
4502 +
4503 +       tmu_disable(0xF);
4504 +       mdelay(10);
4505 +
4506 +#if !defined(LS1012A_PFE_RESET_WA)
4507 +       /* keep in soft reset */
4508 +       writel(SW_RESET, TMU_CTRL);
4509 +#endif
4510 +       writel(0x3, TMU_SYS_GENERIC_CONTROL);
4511 +       writel(750, TMU_INQ_WATERMARK);
4512 +       writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR +
4513 +               GPI_INQ_PKTPTR),        TMU_PHY0_INQ_ADDR);
4514 +       writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR +
4515 +               GPI_INQ_PKTPTR),        TMU_PHY1_INQ_ADDR);
4516 +       writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR +
4517 +               GPI_INQ_PKTPTR),        TMU_PHY3_INQ_ADDR);
4518 +       writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
4519 +       writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
4520 +       writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
4521 +              TMU_BMU_INQ_ADDR);
4522 +
4523 +       writel(0x3FF,   TMU_TDQ0_SCH_CTRL);     /*
4524 +                                                * enabling all 10
4525 +                                                * schedulers [9:0] of each TDQ
4526 +                                                */
4527 +       writel(0x3FF,   TMU_TDQ1_SCH_CTRL);
4528 +       writel(0x3FF,   TMU_TDQ3_SCH_CTRL);
4529 +
4530 +#if !defined(LS1012A_PFE_RESET_WA)
4531 +       writel(cfg->pe_sys_clk_ratio,   TMU_PE_SYS_CLK_RATIO);
4532 +#endif
4533 +
4534 +#if !defined(LS1012A_PFE_RESET_WA)
4535 +       writel(DDR_PHYS_TO_PFE(cfg->llm_base_addr),     TMU_LLM_BASE_ADDR);
4536 +       /* Extra packet pointers will be stored from this address onwards */
4537 +
4538 +       writel(cfg->llm_queue_len,      TMU_LLM_QUE_LEN);
4539 +       writel(5,                       TMU_TDQ_IIFG_CFG);
4540 +       writel(DDR_BUF_SIZE,            TMU_BMU_BUF_SIZE);
4541 +
4542 +       writel(0x0,                     TMU_CTRL);
4543 +
4544 +       /* MEM init */
4545 +       pr_info("%s: mem init\n", __func__);
4546 +       writel(MEM_INIT,        TMU_CTRL);
4547 +
4548 +       while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
4549 +               ;
4550 +
4551 +       /* LLM init */
4552 +       pr_info("%s: lmem init\n", __func__);
4553 +       writel(LLM_INIT,        TMU_CTRL);
4554 +
4555 +       while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
4556 +               ;
4557 +#endif
4558 +       /* set up each queue for tail drop */
4559 +       for (phyno = 0; phyno < 4; phyno++) {
4560 +               if (phyno == 2)
4561 +                       continue;
4562 +               for (q = 0; q < 16; q++) {
4563 +                       u32 qdepth;
4564 +
4565 +                       writel((phyno << 8) | q, TMU_TEQ_CTRL);
4566 +                       writel(1 << 22, TMU_TEQ_QCFG); /*Enable tail drop */
4567 +
4568 +                       if (phyno == 3)
4569 +                               qdepth = DEFAULT_TMU3_QDEPTH;
4570 +                       else
4571 +                               qdepth = (q == 0) ? DEFAULT_Q0_QDEPTH :
4572 +                                               DEFAULT_MAX_QDEPTH;
4573 +
4574 +                       /* LOG: 68855 */
4575 +                       /*
4576 +                        * The following is a workaround for the reordered
4577 +                        * packet and BMU2 buffer leakage issue.
4578 +                        */
4579 +                       if (CHIP_REVISION() == 0)
4580 +                               qdepth = 31;
4581 +
4582 +                       writel(qdepth << 18, TMU_TEQ_HW_PROB_CFG2);
4583 +                       writel(qdepth >> 14, TMU_TEQ_HW_PROB_CFG3);
4584 +               }
4585 +       }
4586 +
4587 +#ifdef CFG_LRO
4588 +       /* Set TMU-3 queue 5 (LRO) in no-drop mode */
4589 +       writel((3 << 8) | TMU_QUEUE_LRO, TMU_TEQ_CTRL);
4590 +       writel(0, TMU_TEQ_QCFG);
4591 +#endif
4592 +
4593 +       writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
4594 +
4595 +       writel(0x0, TMU_CTRL);
4596 +}
4597 +
4598 +/* Enables TMU-PE cores.
4599 + * @param[in] pe_mask  TMU PE mask
4600 + */
4601 +void tmu_enable(u32 pe_mask)
4602 +{
4603 +       writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
4604 +}
4605 +
4606 +/* Disables TMU cores.
4607 + * @param[in] pe_mask  TMU PE mask
4608 + */
4609 +void tmu_disable(u32 pe_mask)
4610 +{
4611 +       writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
4612 +}
4613 +
4614 +/* Returns the TMU queue status.
4615 + * @param[in] if_id    gem interface id or TMU index
4616 + * @return             returns the bit mask of busy queues, zero means all
4617 + * queues are empty
4618 + */
4619 +u32 tmu_qstatus(u32 if_id)
4620 +{
4621 +       return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
4622 +               offsetof(struct pe_status, tmu_qstatus), 4));
4623 +}
4624 +
4625 +u32 tmu_pkts_processed(u32 if_id)
4626 +{
4627 +       return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
4628 +               offsetof(struct pe_status, rx), 4));
4629 +}
4630 +
4631 +/**************************** UTIL ***************************/
4632 +
4633 +/* Resets UTIL block.
4634 + */
4635 +void util_reset(void)
4636 +{
4637 +       writel(CORE_SW_RESET, UTIL_TX_CTRL);
4638 +}
4639 +
4640 +/* Initializes UTIL block.
4641 + * @param[in] cfg      UTIL configuration
4642 + */
4643 +void util_init(struct util_cfg *cfg)
4644 +{
4645 +       writel(cfg->pe_sys_clk_ratio,   UTIL_PE_SYS_CLK_RATIO);
4646 +}
4647 +
4648 +/* Enables UTIL-PE core.
4649 + *
4650 + */
4651 +void util_enable(void)
4652 +{
4653 +       writel(CORE_ENABLE, UTIL_TX_CTRL);
4654 +}
4655 +
4656 +/* Disables UTIL-PE core.
4657 + *
4658 + */
4659 +void util_disable(void)
4660 +{
4661 +       writel(CORE_DISABLE, UTIL_TX_CTRL);
4662 +}
4663 +
4664 +/**************************** HIF ***************************/
4665 +/* Initializes HIF copy block.
4666 + *
4667 + */
4668 +void hif_init(void)
4669 +{
4670 +       /*Initialize HIF registers*/
4671 +       writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE,
4672 +              HIF_POLL_CTRL);
4673 +}
4674 +
4675 +/* Enable hif tx DMA and interrupt
4676 + *
4677 + */
4678 +void hif_tx_enable(void)
4679 +{
4680 +       writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
4681 +       writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN),
4682 +              HIF_INT_ENABLE);
4683 +}
4684 +
4685 +/* Disable hif tx DMA and interrupt
4686 + *
4687 + */
4688 +void hif_tx_disable(void)
4689 +{
4690 +       u32     hif_int;
4691 +
4692 +       writel(0, HIF_TX_CTRL);
4693 +
4694 +       hif_int = readl(HIF_INT_ENABLE);
4695 +       hif_int &= HIF_TXPKT_INT_EN;
4696 +       writel(hif_int, HIF_INT_ENABLE);
4697 +}
4698 +
4699 +/* Enable hif rx DMA and interrupt
4700 + *
4701 + */
4702 +void hif_rx_enable(void)
4703 +{
4704 +       hif_rx_dma_start();
4705 +       writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN),
4706 +              HIF_INT_ENABLE);
4707 +}
4708 +
4709 +/* Disable hif rx DMA and interrupt
4710 + *
4711 + */
4712 +void hif_rx_disable(void)
4713 +{
4714 +       u32     hif_int;
4715 +
4716 +       writel(0, HIF_RX_CTRL);
4717 +
4718 +       hif_int = readl(HIF_INT_ENABLE);
4719 +       hif_int &= HIF_RXPKT_INT_EN;
4720 +       writel(hif_int, HIF_INT_ENABLE);
4721 +}
4722 --- /dev/null
4723 +++ b/drivers/staging/fsl_ppfe/pfe_hif.c
4724 @@ -0,0 +1,1094 @@
4725 +/*
4726 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
4727 + * Copyright 2017 NXP
4728 + *
4729 + * This program is free software; you can redistribute it and/or modify
4730 + * it under the terms of the GNU General Public License as published by
4731 + * the Free Software Foundation; either version 2 of the License, or
4732 + * (at your option) any later version.
4733 + *
4734 + * This program is distributed in the hope that it will be useful,
4735 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4736 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
4737 + * GNU General Public License for more details.
4738 + *
4739 + * You should have received a copy of the GNU General Public License
4740 + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
4741 + */
4742 +
4743 +#include <linux/kernel.h>
4744 +#include <linux/interrupt.h>
4745 +#include <linux/dma-mapping.h>
4746 +#include <linux/dmapool.h>
4747 +#include <linux/sched.h>
4748 +#include <linux/module.h>
4749 +#include <linux/list.h>
4750 +#include <linux/kthread.h>
4751 +#include <linux/slab.h>
4752 +
4753 +#include <linux/io.h>
4754 +#include <asm/irq.h>
4755 +
4756 +#include "pfe_mod.h"
4757 +
4758 +#define HIF_INT_MASK   (HIF_INT | HIF_RXPKT_INT | HIF_TXPKT_INT)
4759 +
4760 +unsigned char napi_first_batch;
4761 +
4762 +static void pfe_tx_do_cleanup(unsigned long data);
4763 +
4764 +static int pfe_hif_alloc_descr(struct pfe_hif *hif)
4765 +{
4766 +       void *addr;
4767 +       dma_addr_t dma_addr;
4768 +       int err = 0;
4769 +
4770 +       pr_info("%s\n", __func__);
4771 +       addr = dma_alloc_coherent(pfe->dev,
4772 +                                 HIF_RX_DESC_NT * sizeof(struct hif_desc) +
4773 +                                 HIF_TX_DESC_NT * sizeof(struct hif_desc),
4774 +                                 &dma_addr, GFP_KERNEL);
4775 +
4776 +       if (!addr) {
4777 +               pr_err("%s: Could not allocate buffer descriptors!\n"
4778 +                       , __func__);
4779 +               err = -ENOMEM;
4780 +               goto err0;
4781 +       }
4782 +
4783 +       hif->descr_baseaddr_p = dma_addr;
4784 +       hif->descr_baseaddr_v = addr;
4785 +       hif->rx_ring_size = HIF_RX_DESC_NT;
4786 +       hif->tx_ring_size = HIF_TX_DESC_NT;
4787 +
4788 +       return 0;
4789 +
4790 +err0:
4791 +       return err;
4792 +}
4793 +
4794 +#if defined(LS1012A_PFE_RESET_WA)
4795 +static void pfe_hif_disable_rx_desc(struct pfe_hif *hif)
4796 +{
4797 +       int ii;
4798 +       struct hif_desc *desc = hif->rx_base;
4799 +
4800 +       /*Mark all descriptors as LAST_BD */
4801 +       for (ii = 0; ii < hif->rx_ring_size; ii++) {
4802 +               desc->ctrl |= BD_CTRL_LAST_BD;
4803 +               desc++;
4804 +       }
4805 +}
4806 +
4807 +struct class_rx_hdr_t {
4808 +       u32     next_ptr;       /* ptr to the start of the first DDR buffer */
4809 +       u16     length;         /* total packet length */
4810 +       u16     phyno;          /* input physical port number */
4811 +       u32     status;         /* gemac status bits */
4812 +       u32     status2;            /* reserved for software usage */
4813 +};
4814 +
4815 +/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
4816 + * except overflow
4817 + */
4818 +#define STATUS_BAD_FRAME_ERR            BIT(16)
4819 +#define STATUS_LENGTH_ERR               BIT(17)
4820 +#define STATUS_CRC_ERR                  BIT(18)
4821 +#define STATUS_TOO_SHORT_ERR            BIT(19)
4822 +#define STATUS_TOO_LONG_ERR             BIT(20)
4823 +#define STATUS_CODE_ERR                 BIT(21)
4824 +#define STATUS_MC_HASH_MATCH            BIT(22)
4825 +#define STATUS_CUMULATIVE_ARC_HIT       BIT(23)
4826 +#define STATUS_UNICAST_HASH_MATCH       BIT(24)
4827 +#define STATUS_IP_CHECKSUM_CORRECT      BIT(25)
4828 +#define STATUS_TCP_CHECKSUM_CORRECT     BIT(26)
4829 +#define STATUS_UDP_CHECKSUM_CORRECT     BIT(27)
4830 +#define STATUS_OVERFLOW_ERR             BIT(28) /* GPI error */
4831 +#define MIN_PKT_SIZE                   64
4832 +
4833 +static inline void copy_to_lmem(u32 *dst, u32 *src, int len)
4834 +{
4835 +       int i;
4836 +
4837 +       for (i = 0; i < len; i += sizeof(u32))  {
4838 +               *dst = htonl(*src);
4839 +               dst++; src++;
4840 +       }
4841 +}
4842 +
4843 +static void send_dummy_pkt_to_hif(void)
4844 +{
4845 +       void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
4846 +       u32 physaddr;
4847 +       struct class_rx_hdr_t local_hdr;
4848 +       static u32 dummy_pkt[] =  {
4849 +               0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
4850 +               0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
4851 +               0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
4852 +               0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
4853 +
4854 +       ddr_ptr = (void *)((u64)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL));
4855 +       if (!ddr_ptr)
4856 +               return;
4857 +
4858 +       lmem_ptr = (void *)((u64)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL));
4859 +       if (!lmem_ptr)
4860 +               return;
4861 +
4862 +       pr_info("Sending a dummy pkt to HIF %p %p\n", ddr_ptr, lmem_ptr);
4863 +       physaddr = (u32)DDR_VIRT_TO_PFE(ddr_ptr);
4864 +
4865 +       lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long int)lmem_ptr);
4866 +
4867 +       local_hdr.phyno = htons(0); /* RX_PHY_0 */
4868 +       local_hdr.length = htons(MIN_PKT_SIZE);
4869 +
4870 +       local_hdr.next_ptr = htonl((u32)physaddr);
4871 +       /*Mark checksum as correct */
4872 +       local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
4873 +                               STATUS_UDP_CHECKSUM_CORRECT |
4874 +                               STATUS_TCP_CHECKSUM_CORRECT |
4875 +                               STATUS_UNICAST_HASH_MATCH |
4876 +                               STATUS_CUMULATIVE_ARC_HIT));
4877 +       copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
4878 +                    sizeof(local_hdr));
4879 +
4880 +       copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
4881 +                    0x40);
4882 +
4883 +       writel((unsigned long int)lmem_ptr, CLASS_INQ_PKTPTR);
4884 +}
4885 +
4886 +void pfe_hif_rx_idle(struct pfe_hif *hif)
4887 +{
4888 +       int hif_stop_loop = 10;
4889 +       u32 rx_status;
4890 +
4891 +       pfe_hif_disable_rx_desc(hif);
4892 +       pr_info("Bringing hif to idle state...");
4893 +       writel(0, HIF_INT_ENABLE);
4894 +       /*If HIF Rx BDP is busy send a dummy packet */
4895 +       do {
4896 +               rx_status = readl(HIF_RX_STATUS);
4897 +               if (rx_status & BDP_CSR_RX_DMA_ACTV)
4898 +                       send_dummy_pkt_to_hif();
4899 +
4900 +               usleep_range(100, 150);
4901 +       } while (--hif_stop_loop);
4902 +
4903 +       if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
4904 +               pr_info("Failed\n");
4905 +       else
4906 +               pr_info("Done\n");
4907 +}
4908 +#endif
4909 +
4910 +static void pfe_hif_free_descr(struct pfe_hif *hif)
4911 +{
4912 +       pr_info("%s\n", __func__);
4913 +
4914 +       dma_free_coherent(pfe->dev,
4915 +                         hif->rx_ring_size * sizeof(struct hif_desc) +
4916 +                         hif->tx_ring_size * sizeof(struct hif_desc),
4917 +                         hif->descr_baseaddr_v, hif->descr_baseaddr_p);
4918 +}
4919 +
4920 +void pfe_hif_desc_dump(struct pfe_hif *hif)
4921 +{
4922 +       struct hif_desc *desc;
4923 +       unsigned long desc_p;
4924 +       int ii = 0;
4925 +
4926 +       pr_info("%s\n", __func__);
4927 +
4928 +       desc = hif->rx_base;
4929 +       desc_p = (u32)((u64)desc - (u64)hif->descr_baseaddr_v +
4930 +                       hif->descr_baseaddr_p);
4931 +
4932 +       pr_info("HIF Rx desc base %p physical %x\n", desc, (u32)desc_p);
4933 +       for (ii = 0; ii < hif->rx_ring_size; ii++) {
4934 +               pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
4935 +                       readl(&desc->status), readl(&desc->ctrl),
4936 +                       readl(&desc->data), readl(&desc->next));
4937 +               desc++;
4938 +       }
4939 +
4940 +       desc = hif->tx_base;
4941 +       desc_p = ((u64)desc - (u64)hif->descr_baseaddr_v +
4942 +                       hif->descr_baseaddr_p);
4943 +
4944 +       pr_info("HIF Tx desc base %p physical %x\n", desc, (u32)desc_p);
4945 +       for (ii = 0; ii < hif->tx_ring_size; ii++) {
4946 +               pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
4947 +                       readl(&desc->status), readl(&desc->ctrl),
4948 +                       readl(&desc->data), readl(&desc->next));
4949 +               desc++;
4950 +       }
4951 +}
4952 +
4953 +/* pfe_hif_release_buffers */
4954 +static void pfe_hif_release_buffers(struct pfe_hif *hif)
4955 +{
4956 +       struct hif_desc *desc;
4957 +       int i = 0;
4958 +
4959 +       hif->rx_base = hif->descr_baseaddr_v;
4960 +
4961 +       pr_info("%s\n", __func__);
4962 +
4963 +       /*Free Rx buffers */
4964 +       desc = hif->rx_base;
4965 +       for (i = 0; i < hif->rx_ring_size; i++) {
4966 +               if (readl(&desc->data)) {
4967 +                       if ((i < hif->shm->rx_buf_pool_cnt) &&
4968 +                           (!hif->shm->rx_buf_pool[i])) {
4969 +                               /*
4970 +                                * dma_unmap_single(hif->dev, desc->data,
4971 +                                * hif->rx_buf_len[i], DMA_FROM_DEVICE);
4972 +                                */
4973 +                               dma_unmap_single(hif->dev,
4974 +                                                DDR_PFE_TO_PHYS(
4975 +                                                readl(&desc->data)),
4976 +                                                hif->rx_buf_len[i],
4977 +                                                DMA_FROM_DEVICE);
4978 +                               hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i];
4979 +                       } else {
4980 +                               pr_err("%s: buffer pool already full\n"
4981 +                                       , __func__);
4982 +                       }
4983 +               }
4984 +
4985 +               writel(0, &desc->data);
4986 +               writel(0, &desc->status);
4987 +               writel(0, &desc->ctrl);
4988 +               desc++;
4989 +       }
4990 +}
4991 +
4992 +/*
4993 + * pfe_hif_init_buffers
4994 + * This function initializes the HIF Rx/Tx ring descriptors and
4995 + * initializes the Rx queue with buffers.
4996 + */
4997 +static int pfe_hif_init_buffers(struct pfe_hif *hif)
4998 +{
4999 +       struct hif_desc *desc, *first_desc_p;
5000 +       u32 data;
5001 +       int i = 0;
5002 +
5003 +       pr_info("%s\n", __func__);
5004 +
5005 +       /* Check enough Rx buffers available in the shared memory */
5006 +       if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
5007 +               return -ENOMEM;
5008 +
5009 +       hif->rx_base = hif->descr_baseaddr_v;
5010 +       memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
5011 +
5012 +       /*Initialize Rx descriptors */
5013 +       desc = hif->rx_base;
5014 +       first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
5015 +
5016 +       for (i = 0; i < hif->rx_ring_size; i++) {
5017 +               /* Initialize Rx buffers from the shared memory */
5018 +
5019 +               data = (u32)dma_map_single(hif->dev, hif->shm->rx_buf_pool[i],
5020 +                               pfe_pkt_size, DMA_FROM_DEVICE);
5021 +               hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i];
5022 +               hif->rx_buf_len[i] = pfe_pkt_size;
5023 +               hif->shm->rx_buf_pool[i] = NULL;
5024 +
5025 +               if (likely(dma_mapping_error(hif->dev, data) == 0)) {
5026 +                       writel(DDR_PHYS_TO_PFE(data), &desc->data);
5027 +               } else {
5028 +                       pr_err("%s : low on mem\n",  __func__);
5029 +
5030 +                       goto err;
5031 +               }
5032 +
5033 +               writel(0, &desc->status);
5034 +
5035 +               /*
5036 +                * Ensure everything else is written to DDR before
5037 +                * writing bd->ctrl
5038 +                */
5039 +               wmb();
5040 +
5041 +               writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
5042 +                       | BD_CTRL_DIR | BD_CTRL_DESC_EN
5043 +                       | BD_BUF_LEN(pfe_pkt_size)), &desc->ctrl);
5044 +
5045 +               /* Chain descriptors */
5046 +               writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
5047 +               desc++;
5048 +       }
5049 +
5050 +       /* Overwrite last descriptor to chain it to first one*/
5051 +       desc--;
5052 +       writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
5053 +
5054 +       hif->rxtoclean_index = 0;
5055 +
5056 +       /*Initialize Rx buffer descriptor ring base address */
5057 +       writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
5058 +
5059 +       hif->tx_base = hif->rx_base + hif->rx_ring_size;
5060 +       first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
5061 +                               hif->rx_ring_size;
5062 +       memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
5063 +
5064 +       /*Initialize tx descriptors */
5065 +       desc = hif->tx_base;
5066 +
5067 +       for (i = 0; i < hif->tx_ring_size; i++) {
5068 +               /* Chain descriptors */
5069 +               writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
5070 +               writel(0, &desc->ctrl);
5071 +               desc++;
5072 +       }
5073 +
5074 +       /* Overwrite last descriptor to chain it to first one */
5075 +       desc--;
5076 +       writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
5077 +       hif->txavail = hif->tx_ring_size;
5078 +       hif->txtosend = 0;
5079 +       hif->txtoclean = 0;
5080 +       hif->txtoflush = 0;
5081 +
5082 +       /*Initialize Tx buffer descriptor ring base address */
5083 +       writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
5084 +
5085 +       return 0;
5086 +
5087 +err:
5088 +       pfe_hif_release_buffers(hif);
5089 +       return -ENOMEM;
5090 +}
5091 +
5092 +/*
5093 + * pfe_hif_client_register
5094 + *
5095 + * This function is used to register a client driver with the HIF driver.
5096 + *
5097 + * Return value:
5098 + * 0 - on successful registration
5099 + */
5100 +static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
5101 +                                  struct hif_client_shm *client_shm)
5102 +{
5103 +       struct hif_client *client = &hif->client[client_id];
5104 +       u32 i, cnt;
5105 +       struct rx_queue_desc *rx_qbase;
5106 +       struct tx_queue_desc *tx_qbase;
5107 +       struct hif_rx_queue *rx_queue;
5108 +       struct hif_tx_queue *tx_queue;
5109 +       int err = 0;
5110 +
5111 +       pr_info("%s\n", __func__);
5112 +
5113 +       spin_lock_bh(&hif->tx_lock);
5114 +
5115 +       if (test_bit(client_id, &hif->shm->g_client_status[0])) {
5116 +               pr_err("%s: client %d already registered\n",
5117 +                      __func__, client_id);
5118 +               err = -1;
5119 +               goto unlock;
5120 +       }
5121 +
5122 +       memset(client, 0, sizeof(struct hif_client));
5123 +
5124 +       /* Initialize client Rx queues baseaddr, size */
5125 +
5126 +       cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
5127 +       /* Check if client is requesting more queues than supported */
5128 +       if (cnt > HIF_CLIENT_QUEUES_MAX)
5129 +               cnt = HIF_CLIENT_QUEUES_MAX;
5130 +
5131 +       client->rx_qn = cnt;
5132 +       rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
5133 +       for (i = 0; i < cnt; i++) {
5134 +               rx_queue = &client->rx_q[i];
5135 +               rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
5136 +               rx_queue->size = client_shm->rx_qsize;
5137 +               rx_queue->write_idx = 0;
5138 +       }
5139 +
5140 +       /* Initialize client Tx queues baseaddr, size */
5141 +       cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
5142 +
5143 +       /* Check if client is requesting more queues than supported */
5144 +       if (cnt > HIF_CLIENT_QUEUES_MAX)
5145 +               cnt = HIF_CLIENT_QUEUES_MAX;
5146 +
5147 +       client->tx_qn = cnt;
5148 +       tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
5149 +       for (i = 0; i < cnt; i++) {
5150 +               tx_queue = &client->tx_q[i];
5151 +               tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
5152 +               tx_queue->size = client_shm->tx_qsize;
5153 +               tx_queue->ack_idx = 0;
5154 +       }
5155 +
5156 +       set_bit(client_id, &hif->shm->g_client_status[0]);
5157 +
5158 +unlock:
5159 +       spin_unlock_bh(&hif->tx_lock);
5160 +
5161 +       return err;
5162 +}
5163 +
5164 +/*
5165 + * pfe_hif_client_unregister
5166 + *
5167 + * This function is used to unregister a client from the HIF driver.
5168 + *
5169 + */
5170 +static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
5171 +{
5172 +       pr_info("%s\n", __func__);
5173 +
5174 +       /*
5175 +        * Mark client as no longer available (which prevents further packet
5176 +        * receive for this client)
5177 +        */
5178 +       spin_lock_bh(&hif->tx_lock);
5179 +
5180 +       if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
5181 +               pr_err("%s: client %d not registered\n", __func__,
5182 +                      client_id);
5183 +
5184 +               spin_unlock_bh(&hif->tx_lock);
5185 +               return;
5186 +       }
5187 +
5188 +       clear_bit(client_id, &hif->shm->g_client_status[0]);
5189 +
5190 +       spin_unlock_bh(&hif->tx_lock);
5191 +}
5192 +
5193 +/*
5194 + * client_put_rxpacket-
5195 + * This function puts the Rx pkt in the given client Rx queue.
5196 + * It actually swaps the Rx pkt in the client Rx descriptor buffer
5197 + * and returns the free buffer from it.
5198 + *
5199 + * If the function returns NULL, the client Rx queue is full and the
5200 + * packet could not be sent to the client queue.
5201 + */
5202 +static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len,
5203 +                                u32 flags, u32 client_ctrl, u32 *rem_len)
5204 +{
5205 +       void *free_pkt = NULL;
5206 +       struct rx_queue_desc *desc = queue->base + queue->write_idx;
5207 +
5208 +       if (readl(&desc->ctrl) & CL_DESC_OWN) {
5209 +               if (page_mode) {
5210 +                       int rem_page_size = PAGE_SIZE -
5211 +                                       PRESENT_OFST_IN_PAGE(pkt);
5212 +                       int cur_pkt_size = ROUND_MIN_RX_SIZE(len +
5213 +                                       pfe_pkt_headroom);
5214 +                       *rem_len = (rem_page_size - cur_pkt_size);
5215 +                       if (*rem_len) {
5216 +                               free_pkt = pkt + cur_pkt_size;
5217 +                               get_page(virt_to_page(free_pkt));
5218 +                       } else {
5219 +                               free_pkt = (void
5220 +                               *)__get_free_page(GFP_ATOMIC | GFP_DMA_PFE);
5221 +                               *rem_len = pfe_pkt_size;
5222 +                       }
5223 +               } else {
5224 +                       free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC |
5225 +                                       GFP_DMA_PFE);
5226 +                       *rem_len = PFE_BUF_SIZE - pfe_pkt_headroom;
5227 +               }
5228 +
5229 +               if (free_pkt) {
5230 +                       desc->data = pkt;
5231 +                       desc->client_ctrl = client_ctrl;
5232 +                       /*
5233 +                        * Ensure everything else is written to DDR before
5234 +                        * writing bd->ctrl
5235 +                        */
5236 +                       smp_wmb();
5237 +                       writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
5238 +                       /* queue->write_idx = (queue->write_idx + 1)
5239 +                        *                     & (queue->size - 1);
5240 +                        */
5241 +                       free_pkt += pfe_pkt_headroom;
5242 +               }
5243 +       }
5244 +
5245 +       return free_pkt;
5246 +}
5247 +
5248 +/*
5249 + * pfe_hif_rx_process-
5250 + * This function processes the PFE HIF Rx queue.
5251 + * It dequeues each packet and sends it to the corresponding client queue
5252 + */
5253 +static int pfe_hif_rx_process(struct pfe_hif *hif, int budget)
5254 +{
5255 +       struct hif_desc *desc;
5256 +       struct hif_hdr *pkt_hdr;
5257 +       struct __hif_hdr hif_hdr;
5258 +       void *free_buf;
5259 +       int rtc, len, rx_processed = 0;
5260 +       struct __hif_desc local_desc;
5261 +       int flags;
5262 +       unsigned int desc_p;
5263 +       unsigned int buf_size = 0;
5264 +
5265 +       spin_lock_bh(&hif->lock);
5266 +
5267 +       rtc = hif->rxtoclean_index;
5268 +
5269 +       while (rx_processed < budget) {
5270 +               desc = hif->rx_base + rtc;
5271 +
5272 +               __memcpy12(&local_desc, desc);
5273 +
5274 +               /* ACK pending Rx interrupt */
5275 +               if (local_desc.ctrl & BD_CTRL_DESC_EN) {
5276 +                       writel(HIF_INT | HIF_RXPKT_INT, HIF_INT_SRC);
5277 +
5278 +                       if (rx_processed == 0) {
5279 +                               if (napi_first_batch == 1) {
5280 +                                       desc_p = hif->descr_baseaddr_p +
5281 +                                       ((unsigned long int)(desc) -
5282 +                                       (unsigned long
5283 +                                       int)hif->descr_baseaddr_v);
5284 +                                       napi_first_batch = 0;
5285 +                               }
5286 +                       }
5287 +
5288 +                       __memcpy12(&local_desc, desc);
5289 +
5290 +                       if (local_desc.ctrl & BD_CTRL_DESC_EN)
5291 +                               break;
5292 +               }
5293 +
5294 +               napi_first_batch = 0;
5295 +
5296 +#ifdef HIF_NAPI_STATS
5297 +               hif->napi_counters[NAPI_DESC_COUNT]++;
5298 +#endif
5299 +               len = BD_BUF_LEN(local_desc.ctrl);
5300 +               /*
5301 +                * dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
5302 +                * hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
5303 +                */
5304 +               dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
5305 +                                hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
5306 +
5307 +               pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc];
5308 +
5309 +               /* Track last HIF header received */
5310 +               if (!hif->started) {
5311 +                       hif->started = 1;
5312 +
5313 +                       __memcpy8(&hif_hdr, pkt_hdr);
5314 +
5315 +                       hif->qno = hif_hdr.hdr.q_num;
5316 +                       hif->client_id = hif_hdr.hdr.client_id;
5317 +                       hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
5318 +                                               hif_hdr.hdr.client_ctrl;
5319 +                       flags = CL_DESC_FIRST;
5320 +
5321 +               } else {
5322 +                       flags = 0;
5323 +               }
5324 +
5325 +               if (local_desc.ctrl & BD_CTRL_LIFM)
5326 +                       flags |= CL_DESC_LAST;
5327 +
5328 +               /* Check that client id is valid and still registered */
5329 +               if ((hif->client_id >= HIF_CLIENTS_MAX) ||
5330 +                   !(test_bit(hif->client_id,
5331 +                       &hif->shm->g_client_status[0]))) {
5332 +                       printk_ratelimited("%s: packet with invalid client id %d q_num %d\n",
5333 +                                          __func__,
5334 +                                          hif->client_id,
5335 +                                          hif->qno);
5336 +
5337 +                       free_buf = pkt_hdr;
5338 +
5339 +                       goto pkt_drop;
5340 +               }
5341 +
5342 +               /* Check for valid queue number */
5343 +               if (hif->client[hif->client_id].rx_qn <= hif->qno) {
5344 +                       pr_info("%s: packet with invalid queue: %d\n"
5345 +                               , __func__, hif->qno);
5346 +                       hif->qno = 0;
5347 +               }
5348 +
5349 +               free_buf =
5350 +               client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
5351 +                                   (void *)pkt_hdr, len, flags,
5352 +                       hif->client_ctrl, &buf_size);
5353 +
5354 +               hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND,
5355 +                                       hif->qno);
5356 +
5357 +               if (unlikely(!free_buf)) {
5358 +#ifdef HIF_NAPI_STATS
5359 +                       hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++;
5360 +#endif
5361 +                       /*
5362 +                        * If we want to keep in polling mode to retry later,
5363 +                        * we need to tell napi that we consumed
5364 +                        * the full budget or we will hit a livelock scenario.
5365 +                        * The core code keeps this napi instance
5366 +                        * at the head of the list and none of the other
5367 +                        * instances get to run
5368 +                        */
5369 +                       rx_processed = budget;
5370 +
5371 +                       if (flags & CL_DESC_FIRST)
5372 +                               hif->started = 0;
5373 +
5374 +                       break;
5375 +               }
5376 +
5377 +pkt_drop:
5378 +               /*Fill free buffer in the descriptor */
5379 +               hif->rx_buf_addr[rtc] = free_buf;
5380 +               hif->rx_buf_len[rtc] = min(pfe_pkt_size, buf_size);
5381 +               writel((DDR_PHYS_TO_PFE
5382 +                       ((u32)dma_map_single(hif->dev,
5383 +                       free_buf, hif->rx_buf_len[rtc], DMA_FROM_DEVICE))),
5384 +                       &desc->data);
5385 +               /*
5386 +                * Ensure everything else is written to DDR before
5387 +                * writing bd->ctrl
5388 +                */
5389 +               wmb();
5390 +               writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
5391 +                       BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
5392 +                       &desc->ctrl);
5393 +
5394 +               rtc = (rtc + 1) & (hif->rx_ring_size - 1);
5395 +
5396 +               if (local_desc.ctrl & BD_CTRL_LIFM) {
5397 +                       if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) {
5398 +                               rx_processed++;
5399 +
5400 +#ifdef HIF_NAPI_STATS
5401 +                               hif->napi_counters[NAPI_PACKET_COUNT]++;
5402 +#endif
5403 +                       }
5404 +                       hif->started = 0;
5405 +               }
5406 +       }
5407 +
5408 +       hif->rxtoclean_index = rtc;
5409 +       spin_unlock_bh(&hif->lock);
5410 +
5411 +       /* we made some progress, re-start rx dma in case it stopped */
5412 +       hif_rx_dma_start();
5413 +
5414 +       return rx_processed;
5415 +}
5416 +
5417 +/*
5418 + * client_ack_txpacket-
5419 + * This function acks the Tx packet in the given client Tx queue by resetting
5420 + * the ownership bit in the descriptor.
5421 + */
5422 +static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
5423 +                              unsigned int q_no)
5424 +{
5425 +       struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
5426 +       struct tx_queue_desc *desc = queue->base + queue->ack_idx;
5427 +
5428 +       if (readl(&desc->ctrl) & CL_DESC_OWN) {
5429 +               writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
5430 +               /* queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1); */
5431 +
5432 +               return 0;
5433 +
5434 +       } else {
5435 +               /*This should not happen */
5436 +               pr_err("%s: %d %d %d %d %d %p %d\n", __func__,
5437 +                      hif->txtosend, hif->txtoclean, hif->txavail,
5438 +                       client_id, q_no, queue, queue->ack_idx);
5439 +               WARN(1, "%s: doesn't own this descriptor", __func__);
5440 +               return 1;
5441 +       }
5442 +}
5443 +
5444 +void __hif_tx_done_process(struct pfe_hif *hif, int count)
5445 +{
5446 +       struct hif_desc *desc;
5447 +       struct hif_desc_sw *desc_sw;
5448 +       int ttc, tx_avl;
5449 +       int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
5450 +
5451 +       ttc = hif->txtoclean;
5452 +       tx_avl = hif->txavail;
5453 +
5454 +       while ((tx_avl < hif->tx_ring_size) && count--) {
5455 +               desc = hif->tx_base + ttc;
5456 +
5457 +               if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
5458 +                       break;
5459 +
5460 +               desc_sw = &hif->tx_sw_queue[ttc];
5461 +
5462 +               if (desc_sw->data) {
5463 +                       /*
5464 +                        * dmap_unmap_single(hif->dev, desc_sw->data,
5465 +                        * desc_sw->len, DMA_TO_DEVICE);
5466 +                        */
5467 +                       dma_unmap_single(hif->dev, desc_sw->data,
5468 +                                        desc_sw->len, DMA_TO_DEVICE);
5469 +               }
5470 +
5471 +               if (desc_sw->client_id > HIF_CLIENTS_MAX)
5472 +                       pr_err("Invalid cl id %d\n", desc_sw->client_id);
5473 +
5474 +               pkts_done[desc_sw->client_id]++;
5475 +
5476 +               client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
5477 +
5478 +               ttc = (ttc + 1) & (hif->tx_ring_size - 1);
5479 +               tx_avl++;
5480 +       }
5481 +
5482 +       if (pkts_done[0])
5483 +               hif_lib_indicate_client(0, EVENT_TXDONE_IND, 0);
5484 +       if (pkts_done[1])
5485 +               hif_lib_indicate_client(1, EVENT_TXDONE_IND, 0);
5486 +
5487 +       hif->txtoclean = ttc;
5488 +       hif->txavail = tx_avl;
5489 +
5490 +       if (!count) {
5491 +               tasklet_schedule(&hif->tx_cleanup_tasklet);
5492 +       } else {
5493 +               /*Enable Tx done interrupt */
5494 +               writel(readl_relaxed(HIF_INT_ENABLE) | HIF_TXPKT_INT,
5495 +                      HIF_INT_ENABLE);
5496 +       }
5497 +}
5498 +
5499 +static void pfe_tx_do_cleanup(unsigned long data)
5500 +{
5501 +       struct pfe_hif *hif = (struct pfe_hif *)data;
5502 +
5503 +       writel(HIF_INT | HIF_TXPKT_INT, HIF_INT_SRC);
5504 +
5505 +       hif_tx_done_process(hif, 64);
5506 +}
5507 +
5508 +/*
5509 + * __hif_xmit_pkt -
5510 + * This function puts one packet in the HIF Tx queue
5511 + */
5512 +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
5513 +                       q_no, void *data, u32 len, unsigned int flags)
5514 +{
5515 +       struct hif_desc *desc;
5516 +       struct hif_desc_sw *desc_sw;
5517 +
5518 +       desc = hif->tx_base + hif->txtosend;
5519 +       desc_sw = &hif->tx_sw_queue[hif->txtosend];
5520 +
5521 +       desc_sw->len = len;
5522 +       desc_sw->client_id = client_id;
5523 +       desc_sw->q_no = q_no;
5524 +       desc_sw->flags = flags;
5525 +
5526 +       if (flags & HIF_DONT_DMA_MAP) {
5527 +               desc_sw->data = 0;
5528 +               writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
5529 +       } else {
5530 +               desc_sw->data = dma_map_single(hif->dev, data, len,
5531 +                                               DMA_TO_DEVICE);
5532 +               writel((u32)DDR_PHYS_TO_PFE(desc_sw->data), &desc->data);
5533 +       }
5534 +
5535 +       hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
5536 +       hif->txavail--;
5537 +
5538 +       if ((!((flags & HIF_DATA_VALID) && (flags &
5539 +                               HIF_LAST_BUFFER))))
5540 +               goto skip_tx;
5541 +
5542 +       /*
5543 +        * Ensure everything else is written to DDR before
5544 +        * writing bd->ctrl
5545 +        */
5546 +       wmb();
5547 +
5548 +       do {
5549 +               desc_sw = &hif->tx_sw_queue[hif->txtoflush];
5550 +               desc = hif->tx_base + hif->txtoflush;
5551 +
5552 +               if (desc_sw->flags & HIF_LAST_BUFFER) {
5553 +                       writel((BD_CTRL_LIFM |
5554 +                              BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
5555 +                              | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
5556 +                               BD_CTRL_PKT_INT_EN | BD_BUF_LEN(desc_sw->len)),
5557 +                               &desc->ctrl);
5558 +               } else {
5559 +                       writel((BD_CTRL_DESC_EN |
5560 +                               BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
5561 +               }
5562 +               hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
5563 +       }
5564 +       while (hif->txtoflush != hif->txtosend)
5565 +               ;
5566 +
5567 +skip_tx:
5568 +       return;
5569 +}
5570 +
5571 +int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no,
5572 +                void *data, unsigned int len)
5573 +{
5574 +       int rc = 0;
5575 +
5576 +       spin_lock_bh(&hif->tx_lock);
5577 +
5578 +       if (!hif->txavail) {
5579 +               rc = 1;
5580 +       } else {
5581 +               __hif_xmit_pkt(hif, client_id, q_no, data, len,
5582 +                              HIF_FIRST_BUFFER | HIF_LAST_BUFFER);
5583 +               hif_tx_dma_start();
5584 +       }
5585 +
5586 +       if (hif->txavail < (hif->tx_ring_size >> 1))
5587 +               __hif_tx_done_process(hif, TX_FREE_MAX_COUNT);
5588 +
5589 +       spin_unlock_bh(&hif->tx_lock);
5590 +
5591 +       return rc;
5592 +}
5593 +
5594 +static irqreturn_t wol_isr(int irq, void *dev_id)
5595 +{
5596 +       pr_info("WoL\n");
5597 +       gemac_set_wol(EMAC1_BASE_ADDR, 0);
5598 +       gemac_set_wol(EMAC2_BASE_ADDR, 0);
5599 +       return IRQ_HANDLED;
5600 +}
5601 +
5602 +/*
5603 + * hif_isr-
5604 + * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
5605 + */
5606 +static irqreturn_t hif_isr(int irq, void *dev_id)
5607 +{
5608 +       struct pfe_hif *hif = (struct pfe_hif *)dev_id;
5609 +       int int_status;
5610 +       int int_enable_mask;
5611 +
5612 +       /*Read hif interrupt source register */
5613 +       int_status = readl_relaxed(HIF_INT_SRC);
5614 +       int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
5615 +
5616 +       if ((int_status & HIF_INT) == 0)
5617 +               return IRQ_NONE;
5618 +
5619 +       int_status &= ~(HIF_INT);
5620 +
5621 +       if (int_status & HIF_RXPKT_INT) {
5622 +               int_status &= ~(HIF_RXPKT_INT);
5623 +               int_enable_mask &= ~(HIF_RXPKT_INT);
5624 +
5625 +               napi_first_batch = 1;
5626 +
5627 +               if (napi_schedule_prep(&hif->napi)) {
5628 +#ifdef HIF_NAPI_STATS
5629 +                       hif->napi_counters[NAPI_SCHED_COUNT]++;
5630 +#endif
5631 +                       __napi_schedule(&hif->napi);
5632 +               }
5633 +       }
5634 +       if (int_status & HIF_TXPKT_INT) {
5635 +               int_status &= ~(HIF_TXPKT_INT);
5636 +               int_enable_mask &= ~(HIF_TXPKT_INT);
5637 +               /*Schedule tx cleanup tasklet */
5638 +               tasklet_schedule(&hif->tx_cleanup_tasklet);
5639 +       }
5640 +
5641 +       /*Disable interrupts, they will be enabled after they are serviced */
5642 +       writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
5643 +
5644 +       if (int_status) {
5645 +               pr_info("%s : Invalid interrupt : %d\n", __func__,
5646 +                       int_status);
5647 +               writel(int_status, HIF_INT_SRC);
5648 +       }
5649 +
5650 +       return IRQ_HANDLED;
5651 +}
5652 +
5653 +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2)
5654 +{
5655 +       unsigned int client_id = data1;
5656 +
5657 +       if (client_id >= HIF_CLIENTS_MAX) {
5658 +               pr_err("%s: client id %d out of bounds\n", __func__,
5659 +                      client_id);
5660 +               return;
5661 +       }
5662 +
5663 +       switch (req) {
5664 +       case REQUEST_CL_REGISTER:
5665 +                       /* Request for register a client */
5666 +                       pr_info("%s: register client_id %d\n",
5667 +                               __func__, client_id);
5668 +                       pfe_hif_client_register(hif, client_id, (struct
5669 +                               hif_client_shm *)&hif->shm->client[client_id]);
5670 +                       break;
5671 +
5672 +       case REQUEST_CL_UNREGISTER:
5673 +                       pr_info("%s: unregister client_id %d\n",
5674 +                               __func__, client_id);
5675 +
5676 +                       /* Request for unregister a client */
5677 +                       pfe_hif_client_unregister(hif, client_id);
5678 +
5679 +                       break;
5680 +
5681 +       default:
5682 +                       pr_err("%s: unsupported request %d\n",
5683 +                              __func__, req);
5684 +                       break;
5685 +       }
5686 +
5687 +       /*
5688 +        * Process client Tx queues
5689 +        * Currently we don't check for pending tx
5690 +        */
5691 +}
5692 +
5693 +/*
5694 + * pfe_hif_rx_poll
5695 + *  This is the NAPI poll function that processes the HIF Rx queue.
5696 + */
5697 +static int pfe_hif_rx_poll(struct napi_struct *napi, int budget)
5698 +{
5699 +       struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi);
5700 +       int work_done;
5701 +
5702 +#ifdef HIF_NAPI_STATS
5703 +       hif->napi_counters[NAPI_POLL_COUNT]++;
5704 +#endif
5705 +
5706 +       work_done = pfe_hif_rx_process(hif, budget);
5707 +
5708 +       if (work_done < budget) {
5709 +               napi_complete(napi);
5710 +               writel(readl_relaxed(HIF_INT_ENABLE) | HIF_RXPKT_INT,
5711 +                      HIF_INT_ENABLE);
5712 +       }
5713 +#ifdef HIF_NAPI_STATS
5714 +       else
5715 +               hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
5716 +#endif
5717 +
5718 +       return work_done;
5719 +}
5720 +
5721 +/*
5722 + * pfe_hif_init
5723 + * This function initializes the base addresses, IRQs, etc.
5724 + */
5725 +int pfe_hif_init(struct pfe *pfe)
5726 +{
5727 +       struct pfe_hif *hif = &pfe->hif;
5728 +       int err;
5729 +
5730 +       pr_info("%s\n", __func__);
5731 +
5732 +       hif->dev = pfe->dev;
5733 +       hif->irq = pfe->hif_irq;
5734 +
5735 +       err = pfe_hif_alloc_descr(hif);
5736 +       if (err)
5737 +               goto err0;
5738 +
5739 +       if (pfe_hif_init_buffers(hif)) {
5740 +               pr_err("%s: Could not initialize buffer descriptors\n"
5741 +                       , __func__);
5742 +               err = -ENOMEM;
5743 +               goto err1;
5744 +       }
5745 +
5746 +       /* Initialize NAPI for Rx processing */
5747 +       init_dummy_netdev(&hif->dummy_dev);
5748 +       netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll,
5749 +                      HIF_RX_POLL_WEIGHT);
5750 +       napi_enable(&hif->napi);
5751 +
5752 +       spin_lock_init(&hif->tx_lock);
5753 +       spin_lock_init(&hif->lock);
5754 +
5755 +       hif_init();
5756 +       hif_rx_enable();
5757 +       hif_tx_enable();
5758 +
5759 +       /* Disable tx done interrupt */
5760 +       writel(HIF_INT_MASK, HIF_INT_ENABLE);
5761 +
5762 +       gpi_enable(HGPI_BASE_ADDR);
5763 +
5764 +       err = request_irq(hif->irq, hif_isr, 0, "pfe_hif", hif);
5765 +       if (err) {
5766 +               pr_err("%s: failed to get the hif IRQ = %d\n",
5767 +                      __func__, hif->irq);
5768 +               goto err1;
5769 +       }
5770 +
5771 +       err = request_irq(pfe->wol_irq, wol_isr, 0, "pfe_wol", pfe);
5772 +       if (err) {
5773 +               pr_err("%s: failed to get the wol IRQ = %d\n",
5774 +                      __func__, pfe->wol_irq);
5775 +               goto err1;
5776 +       }
5777 +
5778 +       tasklet_init(&hif->tx_cleanup_tasklet,
5779 +                    (void(*)(unsigned long))pfe_tx_do_cleanup,
5780 +                    (unsigned long)hif);
5781 +
5782 +       return 0;
5783 +err1:
5784 +       pfe_hif_free_descr(hif);
5785 +err0:
5786 +       return err;
5787 +}
5788 +
5789 +/* pfe_hif_exit- */
5790 +void pfe_hif_exit(struct pfe *pfe)
5791 +{
5792 +       struct pfe_hif *hif = &pfe->hif;
5793 +
5794 +       pr_info("%s\n", __func__);
5795 +
5796 +       tasklet_kill(&hif->tx_cleanup_tasklet);
5797 +
5798 +       spin_lock_bh(&hif->lock);
5799 +       hif->shm->g_client_status[0] = 0;
5800 +       /* Make sure all clients are disabled*/
5801 +       hif->shm->g_client_status[1] = 0;
5802 +
5803 +       spin_unlock_bh(&hif->lock);
5804 +
5805 +       /*Disable Rx/Tx */
5806 +       gpi_disable(HGPI_BASE_ADDR);
5807 +       hif_rx_disable();
5808 +       hif_tx_disable();
5809 +
5810 +       napi_disable(&hif->napi);
5811 +       netif_napi_del(&hif->napi);
5812 +
5813 +       free_irq(pfe->wol_irq, pfe);
5814 +       free_irq(hif->irq, hif);
5815 +
5816 +       pfe_hif_release_buffers(hif);
5817 +       pfe_hif_free_descr(hif);
5818 +}
5819 --- /dev/null
5820 +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
5821 @@ -0,0 +1,638 @@
5822 +/*
5823 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5824 + * Copyright 2017 NXP
5825 + *
5826 + * This program is free software; you can redistribute it and/or modify
5827 + * it under the terms of the GNU General Public License as published by
5828 + * the Free Software Foundation; either version 2 of the License, or
5829 + * (at your option) any later version.
5830 + *
5831 + * This program is distributed in the hope that it will be useful,
5832 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
5833 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
5834 + * GNU General Public License for more details.
5835 + *
5836 + * You should have received a copy of the GNU General Public License
5837 + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
5838 + */
5839 +
5840 +#include <linux/version.h>
5841 +#include <linux/kernel.h>
5842 +#include <linux/slab.h>
5843 +#include <linux/interrupt.h>
5844 +#include <linux/workqueue.h>
5845 +#include <linux/dma-mapping.h>
5846 +#include <linux/dmapool.h>
5847 +#include <linux/sched.h>
5848 +#include <linux/skbuff.h>
5849 +#include <linux/moduleparam.h>
5850 +#include <linux/cpu.h>
5851 +
5852 +#include "pfe_mod.h"
5853 +#include "pfe_hif.h"
5854 +#include "pfe_hif_lib.h"
5855 +
5856 +unsigned int lro_mode;
5857 +unsigned int page_mode;
5858 +unsigned int tx_qos;
5859 +unsigned int pfe_pkt_size;
5860 +unsigned int pfe_pkt_headroom;
5861 +unsigned int emac_txq_cnt;
5862 +
5863 +/*
5864 + * @pfe_hif_lib.c.
5865 + * Common functions used by HIF client drivers
5866 + */
5867 +
5868 +/*HIF shared memory Global variable */
5869 +struct hif_shm ghif_shm;
5870 +
5871 +/* Cleanup the HIF shared memory, release HIF rx_buffer_pool.
5872 + * This function should be called after pfe_hif_exit
5873 + *
5874 + * @param[in] hif_shm          Shared memory address location in DDR
5875 + */
5876 +static void pfe_hif_shm_clean(struct hif_shm *hif_shm)
5877 +{
5878 +       int i;
5879 +       void *pkt;
5880 +
5881 +       for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
5882 +               pkt = hif_shm->rx_buf_pool[i];
5883 +               if (pkt) {
5884 +                       hif_shm->rx_buf_pool[i] = NULL;
5885 +                       pkt -= pfe_pkt_headroom;
5886 +
5887 +                       if (page_mode)
5888 +                               put_page(virt_to_page(pkt));
5889 +                       else
5890 +                               kfree(pkt);
5891 +               }
5892 +       }
5893 +}
5894 +
5895 +/* Initialize shared memory used between HIF driver and clients,
5896 + * allocate rx_buffer_pool required for HIF Rx descriptors.
5897 + * This function should be called before initializing HIF driver.
5898 + *
5899 + * @param[in] hif_shm          Shared memory address location in DDR
5900 + * @return                     0 - on success, <0 on failure to initialize
5901 + */
5902 +static int pfe_hif_shm_init(struct hif_shm *hif_shm)
5903 +{
5904 +       int i;
5905 +       void *pkt;
5906 +
5907 +       memset(hif_shm, 0, sizeof(struct hif_shm));
5908 +       hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
5909 +
5910 +       for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
5911 +               if (page_mode) {
5912 +                       pkt = (void *)__get_free_page(GFP_KERNEL |
5913 +                               GFP_DMA_PFE);
5914 +               } else {
5915 +                       pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);
5916 +               }
5917 +
5918 +               if (pkt)
5919 +                       hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom;
5920 +               else
5921 +                       goto err0;
5922 +       }
5923 +
5924 +       return 0;
5925 +
5926 +err0:
5927 +       pr_err("%s Low memory\n", __func__);
5928 +       pfe_hif_shm_clean(hif_shm);
5929 +       return -ENOMEM;
5930 +}
5931 +
5932 +/*This function sends indication to HIF driver
5933 + *
5934 + * @param[in] hif      hif context
5935 + */
5936 +static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
5937 +                                       data2)
5938 +{
5939 +       hif_process_client_req(hif, req, data1, data2);
5940 +}
5941 +
5942 +void hif_lib_indicate_client(int client_id, int event_type, int qno)
5943 +{
5944 +       struct hif_client_s *client = pfe->hif_client[client_id];
5945 +
5946 +       if (!client || (event_type >= HIF_EVENT_MAX) || (qno >=
5947 +               HIF_CLIENT_QUEUES_MAX))
5948 +               return;
5949 +
5950 +       if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
5951 +               client->event_handler(client->priv, event_type, qno);
5952 +}
5953 +
5954 +/*This function releases the Rx queue descriptor memory and pre-filled buffers
5955 + *
5956 + * @param[in] client   hif_client context
5957 + */
5958 +static void hif_lib_client_release_rx_buffers(struct hif_client_s *client)
5959 +{
5960 +       struct rx_queue_desc *desc;
5961 +       int qno, ii;
5962 +       void *buf;
5963 +
5964 +       for (qno = 0; qno < client->rx_qn; qno++) {
5965 +               desc = client->rx_q[qno].base;
5966 +
5967 +               for (ii = 0; ii < client->rx_q[qno].size; ii++) {
5968 +                       buf = (void *)desc->data;
5969 +                       if (buf) {
5970 +                               buf -= pfe_pkt_headroom;
5971 +
5972 +                               if (page_mode)
5973 +                                       free_page((unsigned long)buf);
5974 +                               else
5975 +                                       kfree(buf);
5976 +
5977 +                               desc->ctrl = 0;
5978 +                       }
5979 +
5980 +                       desc++;
5981 +               }
5982 +       }
5983 +
5984 +       kfree(client->rx_qbase);
5985 +}
5986 +
5987 +/*This function allocates memory for the rxq descriptors and pre-fills rx queues
5988 + * with buffers.
5989 + * @param[in] client   client context
5990 + * @param[in] q_size   size of the rxQ, all queues are of same size
5991 + */
5992 +static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int
5993 +                                               q_size)
5994 +{
5995 +       struct rx_queue_desc *desc;
5996 +       struct hif_client_rx_queue *queue;
5997 +       int ii, qno;
5998 +
5999 +       /*Allocate memory for the client queues */
6000 +       client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct
6001 +                               rx_queue_desc), GFP_KERNEL);
6002 +       if (!client->rx_qbase)
6003 +               goto err;
6004 +
6005 +       for (qno = 0; qno < client->rx_qn; qno++) {
6006 +               queue = &client->rx_q[qno];
6007 +
6008 +               queue->base = client->rx_qbase + qno * q_size * sizeof(struct
6009 +                               rx_queue_desc);
6010 +               queue->size = q_size;
6011 +               queue->read_idx = 0;
6012 +               queue->write_idx = 0;
6013 +
6014 +               pr_debug("rx queue: %d, base: %p, size: %d\n", qno,
6015 +                        queue->base, queue->size);
6016 +       }
6017 +
6018 +       for (qno = 0; qno < client->rx_qn; qno++) {
6019 +               queue = &client->rx_q[qno];
6020 +               desc = queue->base;
6021 +
6022 +               for (ii = 0; ii < queue->size; ii++) {
6023 +                       desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) |
6024 +                                       CL_DESC_OWN;
6025 +                       desc++;
6026 +               }
6027 +       }
6028 +
6029 +       return 0;
6030 +
6031 +err:
6032 +       return 1;
6033 +}
6034 +
6035 +#define inc_cl_idx(idxname)                                    \
6036 +       ({ typeof(idxname) idxname_ = (idxname);                \
6037 +       ((idxname_) = (idxname_ + 1) & (queue->size - 1)); })
6038 +
6039 +static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
6040 +{
6041 +       pr_debug("%s\n", __func__);
6042 +
6043 +       /*
6044 +        * Check if there are any pending packets. Client must flush the tx
6045 +        * queues before unregistering, by calling
6046 +        * hif_lib_tx_get_next_complete()
6047 +        *
6048 +        * HIF no longer calls us since we are no longer registered
6049 +        */
6050 +       if (queue->tx_pending)
6051 +               pr_err("%s: pending transmit packets\n", __func__);
6052 +}
6053 +
6054 +static void hif_lib_client_release_tx_buffers(struct hif_client_s *client)
6055 +{
6056 +       int qno;
6057 +
6058 +       pr_debug("%s\n", __func__);
6059 +
6060 +       for (qno = 0; qno < client->tx_qn; qno++)
6061 +               hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
6062 +
6063 +       kfree(client->tx_qbase);
6064 +}
6065 +
6066 +static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
6067 +                                               q_size)
6068 +{
6069 +       struct hif_client_tx_queue *queue;
6070 +       int qno;
6071 +
6072 +       client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct
6073 +                                       tx_queue_desc), GFP_KERNEL);
6074 +       if (!client->tx_qbase)
6075 +               return 1;
6076 +
6077 +       for (qno = 0; qno < client->tx_qn; qno++) {
6078 +               queue = &client->tx_q[qno];
6079 +
6080 +               queue->base = client->tx_qbase + qno * q_size * sizeof(struct
6081 +                               tx_queue_desc);
6082 +               queue->size = q_size;
6083 +               queue->read_idx = 0;
6084 +               queue->write_idx = 0;
6085 +               queue->tx_pending = 0;
6086 +               queue->nocpy_flag = 0;
6087 +               queue->prev_tmu_tx_pkts = 0;
6088 +               queue->done_tmu_tx_pkts = 0;
6089 +
6090 +               pr_debug("tx queue: %d, base: %p, size: %d\n", qno,
6091 +                        queue->base, queue->size);
6092 +       }
6093 +
6094 +       return 0;
6095 +}
6096 +
6097 +static int hif_lib_event_dummy(void *priv, int event_type, int qno)
6098 +{
6099 +       return 0;
6100 +}
6101 +
6102 +int hif_lib_client_register(struct hif_client_s *client)
6103 +{
6104 +       struct hif_shm *hif_shm;
6105 +       struct hif_client_shm *client_shm;
6106 +       int err, i;
6107 +       /* int loop_cnt = 0; */
6108 +
6109 +       pr_debug("%s\n", __func__);
6110 +
6111 +       /*Allocate memory before spin_lock*/
6112 +       if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
6113 +               err = -ENOMEM;
6114 +               goto err_rx;
6115 +       }
6116 +
6117 +       if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
6118 +               err = -ENOMEM;
6119 +               goto err_tx;
6120 +       }
6121 +
6122 +       spin_lock_bh(&pfe->hif.lock);
6123 +       if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) ||
6124 +           (pfe->hif_client[client->id])) {
6125 +               err = -EINVAL;
6126 +               goto err;
6127 +       }
6128 +
6129 +       hif_shm = client->pfe->hif.shm;
6130 +
6131 +       if (!client->event_handler)
6132 +               client->event_handler = hif_lib_event_dummy;
6133 +
6134 +       /*Initialize client specific shared memory */
6135 +       client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
6136 +       client_shm->rx_qbase = (unsigned long int)client->rx_qbase;
6137 +       client_shm->rx_qsize = client->rx_qsize;
6138 +       client_shm->tx_qbase = (unsigned long int)client->tx_qbase;
6139 +       client_shm->tx_qsize = client->tx_qsize;
6140 +       client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
6141 +                               (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
6142 +       /* spin_lock_init(&client->rx_lock); */
6143 +
6144 +       for (i = 0; i < HIF_EVENT_MAX; i++) {
6145 +               client->queue_mask[i] = 0;  /*
6146 +                                            * By default all events are
6147 +                                            * unmasked
6148 +                                            */
6149 +       }
6150 +
6151 +       /*Indicate to HIF driver*/
6152 +       hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0);
6153 +
6154 +       pr_debug("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n",
6155 +                __func__, client, client->id, client->tx_qsize,
6156 +                client->rx_qsize);
6157 +
6158 +       client->cpu_id = -1;
6159 +
6160 +       pfe->hif_client[client->id] = client;
6161 +       spin_unlock_bh(&pfe->hif.lock);
6162 +
6163 +       return 0;
6164 +
6165 +err:
6166 +       spin_unlock_bh(&pfe->hif.lock);
6167 +       hif_lib_client_release_tx_buffers(client);
6168 +
6169 +err_tx:
6170 +       hif_lib_client_release_rx_buffers(client);
6171 +
6172 +err_rx:
6173 +       return err;
6174 +}
6175 +
6176 +int hif_lib_client_unregister(struct hif_client_s *client)
6177 +{
6178 +       struct pfe *pfe = client->pfe;
6179 +       u32 client_id = client->id;
6180 +
6181 +       pr_info(
6182 +               "%s : client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n"
6183 +               , __func__, client, client->id, client->tx_qsize,
6184 +               client->rx_qsize);
6185 +
6186 +       spin_lock_bh(&pfe->hif.lock);
6187 +       hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
6188 +
6189 +       hif_lib_client_release_tx_buffers(client);
6190 +       hif_lib_client_release_rx_buffers(client);
6191 +       pfe->hif_client[client_id] = NULL;
6192 +       spin_unlock_bh(&pfe->hif.lock);
6193 +
6194 +       return 0;
6195 +}
6196 +
6197 +int hif_lib_event_handler_start(struct hif_client_s *client, int event,
6198 +                               int qno)
6199 +{
6200 +       struct hif_client_rx_queue *queue = &client->rx_q[qno];
6201 +       struct rx_queue_desc *desc = queue->base + queue->read_idx;
6202 +
6203 +       if ((event >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX)) {
6204 +               pr_debug("%s: Unsupported event : %d  queue number : %d\n",
6205 +                        __func__, event, qno);
6206 +               return -1;
6207 +       }
6208 +
6209 +       test_and_clear_bit(qno, &client->queue_mask[event]);
6210 +
6211 +       switch (event) {
6212 +       case EVENT_RX_PKT_IND:
6213 +               if (!(desc->ctrl & CL_DESC_OWN))
6214 +                       hif_lib_indicate_client(client->id,
6215 +                                               EVENT_RX_PKT_IND, qno);
6216 +               break;
6217 +
6218 +       case EVENT_HIGH_RX_WM:
6219 +       case EVENT_TXDONE_IND:
6220 +       default:
6221 +               break;
6222 +       }
6223 +
6224 +       return 0;
6225 +}
6226 +
6227 +/*
6228 + * This function gets one packet from the specified client queue
6229 + * It also refills the rx buffer
6230 + */
6231 +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
6232 +                               *ofst, unsigned int *rx_ctrl,
6233 +                               unsigned int *desc_ctrl, void **priv_data)
6234 +{
6235 +       struct hif_client_rx_queue *queue = &client->rx_q[qno];
6236 +       struct rx_queue_desc *desc;
6237 +       void *pkt = NULL;
6238 +
6239 +       /*
6240 +        * The following lock protects rx queue access from
6241 +        * hif_lib_event_handler_start.
6242 +        * In general this lock is not required, because hif_lib_xmit_pkt and
6243 +        * hif_lib_event_handler_start are called from NAPI poll, which is
6244 +        * not re-entrant. But if a client uses them differently, this lock
6245 +        * is required.
6246 +        */
6247 +       /*spin_lock_irqsave(&client->rx_lock, flags); */
6248 +       desc = queue->base + queue->read_idx;
6249 +       if (!(desc->ctrl & CL_DESC_OWN)) {
6250 +               pkt = desc->data - pfe_pkt_headroom;
6251 +
6252 +               *rx_ctrl = desc->client_ctrl;
6253 +               *desc_ctrl = desc->ctrl;
6254 +
6255 +               if (desc->ctrl & CL_DESC_FIRST) {
6256 +                       u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
6257 +
6258 +                       if (size) {
6259 +                               *len = CL_DESC_BUF_LEN(desc->ctrl) -
6260 +                                               PFE_PKT_HEADER_SZ - size;
6261 +                               *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ
6262 +                                                               + size;
6263 +                               *priv_data = desc->data + PFE_PKT_HEADER_SZ;
6264 +                       } else {
6265 +                               *len = CL_DESC_BUF_LEN(desc->ctrl) -
6266 +                                               PFE_PKT_HEADER_SZ;
6267 +                               *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ;
6268 +                               *priv_data = NULL;
6269 +                       }
6270 +
6271 +               } else {
6272 +                       *len = CL_DESC_BUF_LEN(desc->ctrl);
6273 +                       *ofst = pfe_pkt_headroom;
6274 +               }
6275 +
6276 +               /*
6277 +                * Needed so we don't free a buffer/page
6278 +                * twice on module_exit
6279 +                */
6280 +               desc->data = NULL;
6281 +
6282 +               /*
6283 +                * Ensure everything else is written to DDR before
6284 +                * writing bd->ctrl
6285 +                */
6286 +               smp_wmb();
6287 +
6288 +               desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
6289 +               inc_cl_idx(queue->read_idx);
6290 +       }
6291 +
6292 +       /*spin_unlock_irqrestore(&client->rx_lock, flags); */
6293 +       return pkt;
6294 +}
6295 +
6296 +static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
6297 +                                       client_id, unsigned int qno,
6298 +                                       u32 client_ctrl)
6299 +{
6300 +       /* Optimize the write since the destination may be non-cacheable */
6301 +       if (!((unsigned long)pkt_hdr & 0x3)) {
6302 +               ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
6303 +                                       client_id;
6304 +       } else {
6305 +               ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
6306 +               ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
6307 +       }
6308 +}
6309 +
6310 +/*This function puts the given packet in the specific client queue */
6311 +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
6312 +                               *data, unsigned int len, u32 client_ctrl,
6313 +                               unsigned int flags, void *client_data)
6314 +{
6315 +       struct hif_client_tx_queue *queue = &client->tx_q[qno];
6316 +       struct tx_queue_desc *desc = queue->base + queue->write_idx;
6317 +
6318 +       /* First buffer */
6319 +       if (flags & HIF_FIRST_BUFFER) {
6320 +               data -= sizeof(struct hif_hdr);
6321 +               len += sizeof(struct hif_hdr);
6322 +
6323 +               hif_hdr_write(data, client->id, qno, client_ctrl);
6324 +       }
6325 +
6326 +       desc->data = client_data;
6327 +       desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
6328 +
6329 +       __hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags);
6330 +
6331 +       inc_cl_idx(queue->write_idx);
6332 +       queue->tx_pending++;
6333 +       queue->jiffies_last_packet = jiffies;
6334 +}
6335 +
6336 +/*This function puts the given packet in the specific client queue */
6337 +int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data,
6338 +                    unsigned int len, u32 client_ctrl, void *client_data)
6339 +{
6340 +       struct hif_client_tx_queue *queue = &client->tx_q[qno];
6341 +       struct tx_queue_desc *desc = queue->base + queue->write_idx;
6342 +
6343 +       if (queue->tx_pending < queue->size) {
6344 +               /*Construct pkt header */
6345 +
6346 +               data -= sizeof(struct hif_hdr);
6347 +               len += sizeof(struct hif_hdr);
6348 +
6349 +               hif_hdr_write(data, client->id, qno, client_ctrl);
6350 +
6351 +               desc->data = client_data;
6352 +               desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(HIF_FIRST_BUFFER |
6353 +                               HIF_LAST_BUFFER | HIF_DATA_VALID);
6354 +
6355 +               if (hif_xmit_pkt(&pfe->hif, client->id, qno, data, len))
6356 +                       return 1;
6357 +
6358 +               inc_cl_idx(queue->write_idx);
6359 +               queue->tx_pending++;
6360 +               queue->jiffies_last_packet = jiffies;
6361 +
6362 +               return 0;
6363 +       }
6364 +
6365 +       pr_debug("%s Tx client %d qno %d is full\n", __func__, client->id,
6366 +                qno);
6367 +       return 1;
6368 +}
6369 +
6370 +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
6371 +                                  unsigned int *flags, int count)
6372 +{
6373 +       struct hif_client_tx_queue *queue = &client->tx_q[qno];
6374 +       struct tx_queue_desc *desc = queue->base + queue->read_idx;
6375 +
6376 +       pr_debug("%s: qno : %d rd_indx: %d pending:%d\n", __func__, qno,
6377 +                queue->read_idx, queue->tx_pending);
6378 +
6379 +       if (!queue->tx_pending)
6380 +               return NULL;
6381 +
6382 +       if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
6383 +               u32 tmu_tx_pkts = be32_to_cpu(pe_dmem_read(TMU0_ID +
6384 +                       client->id, TMU_DM_TX_TRANS, 4));
6385 +
6386 +               if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
6387 +                       queue->done_tmu_tx_pkts = UINT_MAX -
6388 +                               queue->prev_tmu_tx_pkts + tmu_tx_pkts;
6389 +               else
6390 +                       queue->done_tmu_tx_pkts = tmu_tx_pkts -
6391 +                                               queue->prev_tmu_tx_pkts;
6392 +
6393 +               queue->prev_tmu_tx_pkts  = tmu_tx_pkts;
6394 +
6395 +               if (!queue->done_tmu_tx_pkts)
6396 +                       return NULL;
6397 +       }
6398 +
6399 +       if (desc->ctrl & CL_DESC_OWN)
6400 +               return NULL;
6401 +
6402 +       inc_cl_idx(queue->read_idx);
6403 +       queue->tx_pending--;
6404 +
6405 +       *flags = CL_DESC_GET_FLAGS(desc->ctrl);
6406 +
6407 +       if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
6408 +               queue->done_tmu_tx_pkts--;
6409 +
6410 +       return desc->data;
6411 +}
6412 +
6413 +static void hif_lib_tmu_credit_init(struct pfe *pfe)
6414 +{
6415 +       int i, q;
6416 +
6417 +       for (i = 0; i < NUM_GEMAC_SUPPORT; i++)
6418 +               for (q = 0; q < emac_txq_cnt; q++) {
6419 +                       pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ?
6420 +                                       DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
6421 +                       pfe->tmu_credit.tx_credit[i][q] =
6422 +                                       pfe->tmu_credit.tx_credit_max[i][q];
6423 +               }
6424 +}
6425 +
6426 +int pfe_hif_lib_init(struct pfe *pfe)
6427 +{
6428 +       int rc;
6429 +
6430 +       pr_info("%s\n", __func__);
6431 +
6432 +       if (lro_mode) {
6433 +               page_mode = 1;
6434 +               pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE);
6435 +               pfe_pkt_headroom = 0;
6436 +       } else {
6437 +               page_mode = 0;
6438 +               pfe_pkt_size = PFE_PKT_SIZE;
6439 +               pfe_pkt_headroom = PFE_PKT_HEADROOM;
6440 +       }
6441 +
6442 +       if (tx_qos)
6443 +               emac_txq_cnt = EMAC_TXQ_CNT / 2;
6444 +       else
6445 +               emac_txq_cnt = EMAC_TXQ_CNT;
6446 +
6447 +       hif_lib_tmu_credit_init(pfe);
6448 +       pfe->hif.shm = &ghif_shm;
6449 +       rc = pfe_hif_shm_init(pfe->hif.shm);
6450 +
6451 +       return rc;
6452 +}
6453 +
6454 +void pfe_hif_lib_exit(struct pfe *pfe)
6455 +{
6456 +       pr_info("%s\n", __func__);
6457 +
6458 +       pfe_hif_shm_clean(pfe->hif.shm);
6459 +}
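
For reference, the inc_cl_idx() macro used throughout the queue code above wraps the read/write index with a bitwise AND, which only works because the client rx/tx queue sizes are powers of two. A minimal standalone sketch of the same arithmetic (hypothetical names, not part of this patch):

#include <assert.h>

#define RING_SIZE 256u			/* must be a power of two */

/* Same update as inc_cl_idx(): advance the index and wrap without a modulo. */
static unsigned int ring_next(unsigned int idx)
{
	return (idx + 1) & (RING_SIZE - 1);
}

int main(void)
{
	assert(ring_next(0) == 1);
	assert(ring_next(RING_SIZE - 1) == 0);	/* wraps back to the start */
	return 0;
}
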
6460 --- /dev/null
6461 +++ b/drivers/staging/fsl_ppfe/pfe_hw.c
6462 @@ -0,0 +1,176 @@
6463 +/*
6464 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
6465 + * Copyright 2017 NXP
6466 + *
6467 + * This program is free software; you can redistribute it and/or modify
6468 + * it under the terms of the GNU General Public License as published by
6469 + * the Free Software Foundation; either version 2 of the License, or
6470 + * (at your option) any later version.
6471 + *
6472 + * This program is distributed in the hope that it will be useful,
6473 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
6474 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
6475 + * GNU General Public License for more details.
6476 + *
6477 + * You should have received a copy of the GNU General Public License
6478 + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
6479 + */
6480 +
6481 +#include "pfe_mod.h"
6482 +#include "pfe_hw.h"
6483 +
6484 +/* Functions to handle most of pfe hw register initialization */
6485 +int pfe_hw_init(struct pfe *pfe, int resume)
6486 +{
6487 +       struct class_cfg class_cfg = {
6488 +               .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
6489 +               .route_table_baseaddr = pfe->ddr_phys_baseaddr +
6490 +                                       ROUTE_TABLE_BASEADDR,
6491 +               .route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
6492 +       };
6493 +
6494 +       struct tmu_cfg tmu_cfg = {
6495 +               .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
6496 +               .llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR,
6497 +               .llm_queue_len = TMU_LLM_QUEUE_LEN,
6498 +       };
6499 +
6500 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
6501 +       struct util_cfg util_cfg = {
6502 +               .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
6503 +       };
6504 +#endif
6505 +
6506 +       struct BMU_CFG bmu1_cfg = {
6507 +               .baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
6508 +                                               BMU1_LMEM_BASEADDR),
6509 +               .count = BMU1_BUF_COUNT,
6510 +               .size = BMU1_BUF_SIZE,
6511 +               .low_watermark = 10,
6512 +               .high_watermark = 15,
6513 +       };
6514 +
6515 +       struct BMU_CFG bmu2_cfg = {
6516 +               .baseaddr = DDR_PHYS_TO_PFE(pfe->ddr_phys_baseaddr +
6517 +                                               BMU2_DDR_BASEADDR),
6518 +               .count = BMU2_BUF_COUNT,
6519 +               .size = BMU2_BUF_SIZE,
6520 +               .low_watermark = 250,
6521 +               .high_watermark = 253,
6522 +       };
6523 +
6524 +       struct gpi_cfg egpi1_cfg = {
6525 +               .lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
6526 +               .tmlf_txthres = EGPI1_TMLF_TXTHRES,
6527 +               .aseq_len = EGPI1_ASEQ_LEN,
6528 +               .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC1_BASE_ADDR +
6529 +                                               EMAC_TCNTRL_REG),
6530 +       };
6531 +
6532 +       struct gpi_cfg egpi2_cfg = {
6533 +               .lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
6534 +               .tmlf_txthres = EGPI2_TMLF_TXTHRES,
6535 +               .aseq_len = EGPI2_ASEQ_LEN,
6536 +               .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC2_BASE_ADDR +
6537 +                                               EMAC_TCNTRL_REG),
6538 +       };
6539 +
6540 +       struct gpi_cfg hgpi_cfg = {
6541 +               .lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
6542 +               .tmlf_txthres = HGPI_TMLF_TXTHRES,
6543 +               .aseq_len = HGPI_ASEQ_LEN,
6544 +               .mtip_pause_reg = 0,
6545 +       };
6546 +
6547 +       pr_info("%s\n", __func__);
6548 +
6549 +#if !defined(LS1012A_PFE_RESET_WA)
6550 +       /* LS1012A needs this to make PE work correctly */
6551 +       writel(0x3,     CLASS_PE_SYS_CLK_RATIO);
6552 +       writel(0x3,     TMU_PE_SYS_CLK_RATIO);
6553 +       writel(0x3,     UTIL_PE_SYS_CLK_RATIO);
6554 +       usleep_range(10, 20);
6555 +#endif
6556 +
6557 +       pr_info("CLASS version: %x\n", readl(CLASS_VERSION));
6558 +       pr_info("TMU version: %x\n", readl(TMU_VERSION));
6559 +
6560 +       pr_info("BMU1 version: %x\n", readl(BMU1_BASE_ADDR +
6561 +               BMU_VERSION));
6562 +       pr_info("BMU2 version: %x\n", readl(BMU2_BASE_ADDR +
6563 +               BMU_VERSION));
6564 +
6565 +       pr_info("EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR +
6566 +               GPI_VERSION));
6567 +       pr_info("EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR +
6568 +               GPI_VERSION));
6569 +       pr_info("HGPI version: %x\n", readl(HGPI_BASE_ADDR +
6570 +               GPI_VERSION));
6571 +
6572 +       pr_info("HIF version: %x\n", readl(HIF_VERSION));
6573 +       pr_info("HIF NOCPY version: %x\n", readl(HIF_NOCPY_VERSION));
6574 +
6575 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
6576 +       pr_info("UTIL version: %x\n", readl(UTIL_VERSION));
6577 +#endif
6578 +       while (!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE))
6579 +               ;
6580 +
6581 +       hif_rx_disable();
6582 +       hif_tx_disable();
6583 +
6584 +       bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
6585 +
6586 +       pr_info("bmu_init(1) done\n");
6587 +
6588 +       bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
6589 +
6590 +       pr_info("bmu_init(2) done\n");
6591 +
6592 +       class_cfg.resume = resume ? 1 : 0;
6593 +
6594 +       class_init(&class_cfg);
6595 +
6596 +       pr_info("class_init() done\n");
6597 +
6598 +       tmu_init(&tmu_cfg);
6599 +
6600 +       pr_info("tmu_init() done\n");
6601 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
6602 +       util_init(&util_cfg);
6603 +
6604 +       pr_info("util_init() done\n");
6605 +#endif
6606 +       gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
6607 +
6608 +       pr_info("gpi_init(1) done\n");
6609 +
6610 +       gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
6611 +
6612 +       pr_info("gpi_init(2) done\n");
6613 +
6614 +       gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
6615 +
6616 +       pr_info("gpi_init(hif) done\n");
6617 +
6618 +       bmu_enable(BMU1_BASE_ADDR);
6619 +
6620 +       pr_info("bmu_enable(1) done\n");
6621 +
6622 +       bmu_enable(BMU2_BASE_ADDR);
6623 +
6624 +       pr_info("bmu_enable(2) done\n");
6625 +
6626 +       return 0;
6627 +}
6628 +
6629 +void pfe_hw_exit(struct pfe *pfe)
6630 +{
6631 +       pr_info("%s\n", __func__);
6632 +
6633 +       bmu_disable(BMU1_BASE_ADDR);
6634 +       bmu_reset(BMU1_BASE_ADDR);
6635 +
6636 +       bmu_disable(BMU2_BASE_ADDR);
6637 +       bmu_reset(BMU2_BASE_ADDR);
6638 +}
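
pfe_hw_init() above spins on TMU_CTRL until ECC_MEM_INIT_DONE is set, with no upper bound on the wait. A small standalone sketch of the bounded-poll pattern such a wait could follow, with a simulated status read (hypothetical names, not part of this patch):

#include <stdbool.h>
#include <stdio.h>

#define ECC_MEM_INIT_DONE	0x1u
#define MAX_POLLS		1000

/* Stand-in for readl(TMU_CTRL); pretend the bit sets after a few reads. */
static unsigned int read_tmu_ctrl(void)
{
	static int calls;

	return ++calls > 3 ? ECC_MEM_INIT_DONE : 0;
}

static bool wait_ecc_init_done(void)
{
	int i;

	for (i = 0; i < MAX_POLLS; i++) {
		if (read_tmu_ctrl() & ECC_MEM_INIT_DONE)
			return true;	/* hardware finished ECC init */
	}
	return false;			/* timed out, caller can bail out */
}

int main(void)
{
	printf("ECC init done: %s\n", wait_ecc_init_done() ? "yes" : "no");
	return 0;
}
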
6639 --- /dev/null
6640 +++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
6641 @@ -0,0 +1,394 @@
6642 +/*
6643 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
6644 + * Copyright 2017 NXP
6645 + *
6646 + * This program is free software; you can redistribute it and/or modify
6647 + * it under the terms of the GNU General Public License as published by
6648 + * the Free Software Foundation; either version 2 of the License, or
6649 + * (at your option) any later version.
6650 + *
6651 + * This program is distributed in the hope that it will be useful,
6652 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
6653 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
6654 + * GNU General Public License for more details.
6655 + *
6656 + * You should have received a copy of the GNU General Public License
6657 + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
6658 + */
6659 +
6660 +#include <linux/module.h>
6661 +#include <linux/device.h>
6662 +#include <linux/of_net.h>
6663 +#include <linux/of_address.h>
6664 +#include <linux/platform_device.h>
6665 +#include <linux/slab.h>
6666 +#include <linux/clk.h>
6667 +#include <linux/mfd/syscon.h>
6668 +#include <linux/regmap.h>
6669 +
6670 +#include "pfe_mod.h"
6671 +
6672 +struct ls1012a_pfe_platform_data pfe_platform_data;
6673 +
6674 +static int pfe_get_gemac_if_proprties(struct device_node *parent, int port, int
6675 +                                       if_cnt,
6676 +                                       struct ls1012a_pfe_platform_data
6677 +                                       *pdata)
6678 +{
6679 +       struct device_node *gem = NULL, *phy = NULL;
6680 +       int size;
6681 +       int ii = 0, phy_id = 0;
6682 +       const u32 *addr;
6683 +       const void *mac_addr;
6684 +
6685 +       for (ii = 0; ii < if_cnt; ii++) {
6686 +               gem = of_get_next_child(parent, gem);
6687 +               if (!gem)
6688 +                       goto err;
6689 +               addr = of_get_property(gem, "reg", &size);
6690 +               if (addr && (be32_to_cpup(addr) == port))
6691 +                       break;
6692 +       }
6693 +
6694 +       if (ii >= if_cnt) {
6695 +               pr_err("%s:%d Failed to find interface = %d\n",
6696 +                      __func__, __LINE__, if_cnt);
6697 +               goto err;
6698 +       }
6699 +
6700 +       pdata->ls1012a_eth_pdata[port].gem_id = port;
6701 +
6702 +       mac_addr = of_get_mac_address(gem);
6703 +
6704 +       if (mac_addr) {
6705 +               memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
6706 +                      ETH_ALEN);
6707 +       }
6708 +
6709 +       pdata->ls1012a_eth_pdata[port].mii_config = of_get_phy_mode(gem);
6710 +
6711 +       if ((pdata->ls1012a_eth_pdata[port].mii_config) < 0)
6712 +               pr_err("%s:%d Incorrect Phy mode....\n", __func__,
6713 +                      __LINE__);
6714 +
6715 +       addr = of_get_property(gem, "fsl,gemac-bus-id", &size);
6716 +       if (!addr)
6717 +               pr_err("%s:%d Invalid gemac-bus-id....\n", __func__,
6718 +                      __LINE__);
6719 +       else
6720 +               pdata->ls1012a_eth_pdata[port].bus_id = be32_to_cpup(addr);
6721 +
6722 +       addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
6723 +       if (!addr) {
6724 +               pr_err("%s:%d Invalid gemac-phy-id....\n", __func__,
6725 +                      __LINE__);
6726 +       } else {
6727 +               phy_id = be32_to_cpup(addr);
6728 +               pdata->ls1012a_eth_pdata[port].phy_id = phy_id;
6729 +               pdata->ls1012a_mdio_pdata[0].phy_mask &= ~(1 << phy_id);
6730 +       }
6731 +
6732 +       addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
6733 +       if (!addr)
6734 +               pr_err("%s: Invalid mdio-mux-val....\n", __func__);
6735 +       else
6736 +               phy_id = be32_to_cpup(addr);
6737 +       pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
6738 +
6739 +       if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
6740 +               pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
6741 +                        pdata->ls1012a_eth_pdata[port].mdio_muxval;
6742 +
6743 +       addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
6744 +       if (!addr)
6745 +               pr_err("%s:%d Invalid pfe-phy-if-flags....\n",
6746 +                      __func__, __LINE__);
6747 +       else
6748 +               pdata->ls1012a_eth_pdata[port].phy_flags = be32_to_cpup(addr);
6749 +
6750 +       /* If PHY is enabled, read mdio properties */
6751 +       if (pdata->ls1012a_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
6752 +               goto done;
6753 +
6754 +       phy = of_get_next_child(gem, NULL);
6755 +
6756 +       addr = of_get_property(phy, "reg", &size);
6757 +
6758 +       if (!addr)
6759 +               pr_err("%s:%d Invalid phy enable flag....\n",
6760 +                      __func__, __LINE__);
6761 +       else
6762 +               pdata->ls1012a_mdio_pdata[port].enabled = be32_to_cpup(addr);
6763 +
6764 +       pdata->ls1012a_mdio_pdata[port].irq[0] = PHY_POLL;
6765 +
6766 +done:
6767 +
6768 +       return 0;
6769 +
6770 +err:
6771 +       return -1;
6772 +}
6773 +
6774 +/*
6775 + *
6776 + * pfe_platform_probe -
6777 + *
6778 + *
6779 + */
6780 +static int pfe_platform_probe(struct platform_device *pdev)
6781 +{
6782 +       struct resource res;
6783 +       int ii, rc, interface_count = 0, size = 0;
6784 +       const u32 *prop;
6785 +       struct device_node  *np;
6786 +       struct clk *pfe_clk;
6787 +
6788 +       np = pdev->dev.of_node;
6789 +
6790 +       if (!np) {
6791 +               pr_err("Invalid device node\n");
6792 +               return -EINVAL;
6793 +       }
6794 +
6795 +       pfe = kzalloc(sizeof(*pfe), GFP_KERNEL);
6796 +       if (!pfe) {
6797 +               rc = -ENOMEM;
6798 +               goto err_alloc;
6799 +       }
6800 +
6801 +       platform_set_drvdata(pdev, pfe);
6802 +
6803 +       dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6804 +
6805 +       if (of_address_to_resource(np, 1, &res)) {
6806 +               rc = -ENOMEM;
6807 +               pr_err("failed to get ddr resource\n");
6808 +               goto err_ddr;
6809 +       }
6810 +
6811 +       pfe->ddr_phys_baseaddr = res.start;
6812 +       pfe->ddr_size = resource_size(&res);
6813 +
6814 +       pfe->ddr_baseaddr = phys_to_virt(res.start);
6815 +       if (!pfe->ddr_baseaddr) {
6816 +               pr_err("ioremap() ddr failed\n");
6817 +               rc = -ENOMEM;
6818 +               goto err_ddr;
6819 +       }
6820 +
6821 +       pfe->scfg =
6822 +               syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
6823 +                                               "fsl,pfe-scfg");
6824 +       if (IS_ERR(pfe->scfg)) {
6825 +               dev_err(&pdev->dev, "No syscfg phandle specified\n");
6826 +               return PTR_ERR(pfe->scfg);
6827 +       }
6828 +
6829 +       pfe->cbus_baseaddr = of_iomap(np, 0);
6830 +       if (!pfe->cbus_baseaddr) {
6831 +               rc = -ENOMEM;
6832 +               pr_err("failed to get axi resource\n");
6833 +               goto err_axi;
6834 +       }
6835 +
6836 +       pfe->hif_irq = platform_get_irq(pdev, 0);
6837 +       if (pfe->hif_irq < 0) {
6838 +               pr_err("platform_get_irq for hif failed\n");
6839 +               rc = pfe->hif_irq;
6840 +               goto err_hif_irq;
6841 +       }
6842 +
6843 +       pfe->wol_irq = platform_get_irq(pdev, 2);
6844 +       if (pfe->wol_irq < 0) {
6845 +               pr_err("platform_get_irq for WoL failed\n");
6846 +               rc = pfe->wol_irq;
6847 +               goto err_hif_irq;
6848 +       }
6849 +
6850 +       /* Read interface count */
6851 +       prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
6852 +       if (!prop) {
6853 +               pr_err("Failed to read number of interfaces\n");
6854 +               rc = -ENXIO;
6855 +               goto err_prop;
6856 +       }
6857 +
6858 +       interface_count = be32_to_cpup(prop);
6859 +       if (interface_count <= 0) {
6860 +               pr_err("Invalid ethernet interface count: %d\n",
6861 +                      interface_count);
6862 +               rc = -ENXIO;
6863 +               goto err_prop;
6864 +       }
6865 +
6866 +       pfe_platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
6867 +
6868 +       for (ii = 0; ii < interface_count; ii++) {
6869 +               pfe_get_gemac_if_proprties(np, ii, interface_count,
6870 +                                          &pfe_platform_data);
6871 +       }
6872 +
6873 +       pfe->dev = &pdev->dev;
6874 +
6875 +       pfe->dev->platform_data = &pfe_platform_data;
6876 +
6877 +       /* declare WoL capabilities */
6878 +       device_init_wakeup(&pdev->dev, true);
6879 +
6880 +       /* find the clocks */
6881 +       pfe_clk = devm_clk_get(pfe->dev, "pfe");
6882 +       if (IS_ERR(pfe_clk))
6883 +               return PTR_ERR(pfe_clk);
6884 +
6885 +       /* PFE clock is (platform clock / 2) */
6886 +       /* save sys_clk value as KHz */
6887 +       pfe->ctrl.sys_clk = clk_get_rate(pfe_clk) / (2 * 1000);
6888 +
6889 +       rc = pfe_probe(pfe);
6890 +       if (rc < 0)
6891 +               goto err_probe;
6892 +
6893 +       return 0;
6894 +
6895 +err_probe:
6896 +err_prop:
6897 +err_hif_irq:
6898 +       iounmap(pfe->cbus_baseaddr);
6899 +
6900 +err_axi:
6901 +       iounmap(pfe->ddr_baseaddr);
6902 +
6903 +err_ddr:
6904 +       platform_set_drvdata(pdev, NULL);
6905 +
6906 +       kfree(pfe);
6907 +
6908 +err_alloc:
6909 +       return rc;
6910 +}
6911 +
6912 +/*
6913 + * pfe_platform_remove -
6914 + */
6915 +static int pfe_platform_remove(struct platform_device *pdev)
6916 +{
6917 +       struct pfe *pfe = platform_get_drvdata(pdev);
6918 +       int rc;
6919 +
6920 +       pr_info("%s\n", __func__);
6921 +
6922 +       rc = pfe_remove(pfe);
6923 +
6924 +       iounmap(pfe->cbus_baseaddr);
6925 +       iounmap(pfe->ddr_baseaddr);
6926 +
6927 +       platform_set_drvdata(pdev, NULL);
6928 +
6929 +       kfree(pfe);
6930 +
6931 +       return rc;
6932 +}
6933 +
6934 +#ifdef CONFIG_PM
6935 +#ifdef CONFIG_PM_SLEEP
6936 +int pfe_platform_suspend(struct device *dev)
6937 +{
6938 +       struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
6939 +       struct net_device *netdev;
6940 +       int i;
6941 +
6942 +       pfe->wake = 0;
6943 +
6944 +       for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
6945 +               netdev = pfe->eth.eth_priv[i]->ndev;
6946 +
6947 +               netif_device_detach(netdev);
6948 +
6949 +               if (netif_running(netdev))
6950 +                       if (pfe_eth_suspend(netdev))
6951 +                               pfe->wake = 1;
6952 +       }
6953 +
6954 +       /* Shutdown PFE only if we're not waking up the system */
6955 +       if (!pfe->wake) {
6956 +#if defined(LS1012A_PFE_RESET_WA)
6957 +               pfe_hif_rx_idle(&pfe->hif);
6958 +#endif
6959 +               pfe_ctrl_suspend(&pfe->ctrl);
6960 +               pfe_firmware_exit(pfe);
6961 +
6962 +               pfe_hif_exit(pfe);
6963 +               pfe_hif_lib_exit(pfe);
6964 +
6965 +               pfe_hw_exit(pfe);
6966 +       }
6967 +
6968 +       return 0;
6969 +}
6970 +
6971 +static int pfe_platform_resume(struct device *dev)
6972 +{
6973 +       struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
6974 +       struct net_device *netdev;
6975 +       int i;
6976 +
6977 +       if (!pfe->wake) {
6978 +               pfe_hw_init(pfe, 1);
6979 +               pfe_hif_lib_init(pfe);
6980 +               pfe_hif_init(pfe);
6981 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
6982 +               util_enable();
6983 +#endif
6984 +               tmu_enable(0xf);
6985 +               class_enable();
6986 +               pfe_ctrl_resume(&pfe->ctrl);
6987 +       }
6988 +
6989 +       for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
6990 +               netdev = pfe->eth.eth_priv[i]->ndev;
6991 +
6992 +               if (pfe->eth.eth_priv[i]->mii_bus)
6993 +                       pfe_eth_mdio_reset(pfe->eth.eth_priv[i]->mii_bus);
6994 +
6995 +               if (netif_running(netdev))
6996 +                       pfe_eth_resume(netdev);
6997 +
6998 +               netif_device_attach(netdev);
6999 +       }
7000 +       return 0;
7001 +}
7002 +#else
7003 +#define pfe_platform_suspend NULL
7004 +#define pfe_platform_resume NULL
7005 +#endif
7006 +
7007 +static const struct dev_pm_ops pfe_platform_pm_ops = {
7008 +       SET_SYSTEM_SLEEP_PM_OPS(pfe_platform_suspend, pfe_platform_resume)
7009 +};
7010 +#endif
7011 +
7012 +static const struct of_device_id pfe_match[] = {
7013 +       {
7014 +               .compatible = "fsl,pfe",
7015 +       },
7016 +       {},
7017 +};
7018 +MODULE_DEVICE_TABLE(of, pfe_match);
7019 +
7020 +static struct platform_driver pfe_platform_driver = {
7021 +       .probe = pfe_platform_probe,
7022 +       .remove = pfe_platform_remove,
7023 +       .driver = {
7024 +               .name = "pfe",
7025 +               .of_match_table = pfe_match,
7026 +#ifdef CONFIG_PM
7027 +               .pm = &pfe_platform_pm_ops,
7028 +#endif
7029 +       },
7030 +};
7031 +
7032 +module_platform_driver(pfe_platform_driver);
7033 +MODULE_LICENSE("GPL");
7034 +MODULE_DESCRIPTION("PFE Ethernet driver");
7035 +MODULE_AUTHOR("NXP DNCPE");
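
pfe_get_gemac_if_proprties() above reads each device-tree cell with of_get_property() and converts it from big-endian with be32_to_cpup(). A standalone sketch of that conversion on a raw 4-byte cell (hypothetical names, not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* Device-tree cells are stored big-endian, which is what be32_to_cpup() undoes. */
static uint32_t dt_cell_to_cpu(const uint8_t cell[4])
{
	return ((uint32_t)cell[0] << 24) | ((uint32_t)cell[1] << 16) |
	       ((uint32_t)cell[2] << 8)  |  (uint32_t)cell[3];
}

int main(void)
{
	/* e.g. a "fsl,gemac-phy-id" cell holding the value 2 */
	const uint8_t cell[4] = { 0x00, 0x00, 0x00, 0x02 };

	printf("phy id = %u\n", dt_cell_to_cpu(cell));
	return 0;
}
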
7036 --- /dev/null
7037 +++ b/drivers/staging/fsl_ppfe/pfe_mod.c
7038 @@ -0,0 +1,141 @@
7039 +/*
7040 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
7041 + * Copyright 2017 NXP
7042 + *
7043 + * This program is free software; you can redistribute it and/or modify
7044 + * it under the terms of the GNU General Public License as published by
7045 + * the Free Software Foundation; either version 2 of the License, or
7046 + * (at your option) any later version.
7047 + *
7048 + * This program is distributed in the hope that it will be useful,
7049 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
7050 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
7051 + * GNU General Public License for more details.
7052 + *
7053 + * You should have received a copy of the GNU General Public License
7054 + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
7055 + */
7056 +
7057 +#include <linux/dma-mapping.h>
7058 +#include "pfe_mod.h"
7059 +
7060 +struct pfe *pfe;
7061 +
7062 +/*
7063 + * pfe_probe -
7064 + */
7065 +int pfe_probe(struct pfe *pfe)
7066 +{
7067 +       int rc;
7068 +
7069 +       if (pfe->ddr_size < DDR_MAX_SIZE) {
7070 +               pr_err("%s: required DDR memory (%x) above platform ddr memory (%x)\n",
7071 +                      __func__, (unsigned int)DDR_MAX_SIZE, pfe->ddr_size);
7072 +               rc = -ENOMEM;
7073 +               goto err_hw;
7074 +       }
7075 +
7076 +       if (((int)(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) &
7077 +                       (8 * SZ_1M - 1)) != 0) {
7078 +               pr_err("%s: BMU2 base address (0x%x) must be aligned on 8MB boundary\n",
7079 +                      __func__, (int)pfe->ddr_phys_baseaddr +
7080 +                       BMU2_DDR_BASEADDR);
7081 +               rc = -ENOMEM;
7082 +               goto err_hw;
7083 +       }
7084 +
7085 +       pr_info("cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n",
7086 +               (unsigned long)pfe->cbus_baseaddr,
7087 +               (unsigned long)pfe->ddr_baseaddr,
7088 +               pfe->ddr_phys_baseaddr, pfe->ddr_size);
7089 +
7090 +       pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr,
7091 +                    pfe->ddr_phys_baseaddr, pfe->ddr_size);
7092 +
7093 +       rc = pfe_hw_init(pfe, 0);
7094 +       if (rc < 0)
7095 +               goto err_hw;
7096 +
7097 +       rc = pfe_hif_lib_init(pfe);
7098 +       if (rc < 0)
7099 +               goto err_hif_lib;
7100 +
7101 +       rc = pfe_hif_init(pfe);
7102 +       if (rc < 0)
7103 +               goto err_hif;
7104 +
7105 +       rc = pfe_firmware_init(pfe);
7106 +       if (rc < 0)
7107 +               goto err_firmware;
7108 +
7109 +       rc = pfe_ctrl_init(pfe);
7110 +       if (rc < 0)
7111 +               goto err_ctrl;
7112 +
7113 +       rc = pfe_eth_init(pfe);
7114 +       if (rc < 0)
7115 +               goto err_eth;
7116 +
7117 +       rc = pfe_sysfs_init(pfe);
7118 +       if (rc < 0)
7119 +               goto err_sysfs;
7120 +
7121 +       rc = pfe_debugfs_init(pfe);
7122 +       if (rc < 0)
7123 +               goto err_debugfs;
7124 +
7125 +       return 0;
7126 +
7127 +err_debugfs:
7128 +       pfe_sysfs_exit(pfe);
7129 +
7130 +err_sysfs:
7131 +       pfe_eth_exit(pfe);
7132 +
7133 +err_eth:
7134 +       pfe_ctrl_exit(pfe);
7135 +
7136 +err_ctrl:
7137 +       pfe_firmware_exit(pfe);
7138 +
7139 +err_firmware:
7140 +       pfe_hif_exit(pfe);
7141 +
7142 +err_hif:
7143 +       pfe_hif_lib_exit(pfe);
7144 +
7145 +err_hif_lib:
7146 +       pfe_hw_exit(pfe);
7147 +
7148 +err_hw:
7149 +       return rc;
7150 +}
7151 +
7152 +/*
7153 + * pfe_remove -
7154 + */
7155 +int pfe_remove(struct pfe *pfe)
7156 +{
7157 +       pr_info("%s\n", __func__);
7158 +
7159 +       pfe_debugfs_exit(pfe);
7160 +
7161 +       pfe_sysfs_exit(pfe);
7162 +
7163 +       pfe_eth_exit(pfe);
7164 +
7165 +       pfe_ctrl_exit(pfe);
7166 +
7167 +#if defined(LS1012A_PFE_RESET_WA)
7168 +       pfe_hif_rx_idle(&pfe->hif);
7169 +#endif
7170 +       pfe_firmware_exit(pfe);
7171 +
7172 +       pfe_hif_exit(pfe);
7173 +
7174 +       pfe_hif_lib_exit(pfe);
7175 +
7176 +       pfe_hw_exit(pfe);
7177 +
7178 +       return 0;
7179 +}
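
pfe_probe() above initializes its sub-blocks in order and unwinds them in reverse through a ladder of error labels. A minimal standalone sketch of that init/teardown pattern (hypothetical step names, not part of this patch):

#include <stdio.h>

static int step_a_init(void) { return 0; }
static void step_a_exit(void) { puts("undo a"); }
static int step_b_init(void) { return 0; }
static void step_b_exit(void) { puts("undo b"); }
static int step_c_init(void) { return -1; }	/* pretend this step fails */

static int probe(void)
{
	int rc;

	rc = step_a_init();
	if (rc < 0)
		goto err_a;

	rc = step_b_init();
	if (rc < 0)
		goto err_b;

	rc = step_c_init();
	if (rc < 0)
		goto err_c;

	return 0;

err_c:		/* each label undoes everything set up before the failure */
	step_b_exit();
err_b:
	step_a_exit();
err_a:
	return rc;
}

int main(void)
{
	printf("probe rc = %d\n", probe());
	return 0;
}
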
7180 --- /dev/null
7181 +++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c
7182 @@ -0,0 +1,818 @@
7183 +/*
7184 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
7185 + * Copyright 2017 NXP
7186 + *
7187 + * This program is free software; you can redistribute it and/or modify
7188 + * it under the terms of the GNU General Public License as published by
7189 + * the Free Software Foundation; either version 2 of the License, or
7190 + * (at your option) any later version.
7191 + *
7192 + * This program is distributed in the hope that it will be useful,
7193 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
7194 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
7195 + * GNU General Public License for more details.
7196 + *
7197 + * You should have received a copy of the GNU General Public License
7198 + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
7199 + */
7200 +
7201 +#include <linux/module.h>
7202 +#include <linux/platform_device.h>
7203 +
7204 +#include "pfe_mod.h"
7205 +
7206 +#define PE_EXCEPTION_DUMP_ADDRESS 0x1fa8
7207 +#define NUM_QUEUES             16
7208 +
7209 +static char register_name[20][5] = {
7210 +       "EPC", "ECAS", "EID", "ED",
7211 +       "r0", "r1", "r2", "r3",
7212 +       "r4", "r5", "r6", "r7",
7213 +       "r8", "r9", "r10", "r11",
7214 +       "r12", "r13", "r14", "r15",
7215 +};
7216 +
7217 +static char exception_name[14][20] = {
7218 +       "Reset",
7219 +       "HardwareFailure",
7220 +       "NMI",
7221 +       "InstBreakpoint",
7222 +       "DataBreakpoint",
7223 +       "Unsupported",
7224 +       "PrivilegeViolation",
7225 +       "InstBusError",
7226 +       "DataBusError",
7227 +       "AlignmentError",
7228 +       "ArithmeticError",
7229 +       "SystemCall",
7230 +       "MemoryManagement",
7231 +       "Interrupt",
7232 +};
7233 +
7234 +static unsigned long class_do_clear;
7235 +static unsigned long tmu_do_clear;
7236 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
7237 +static unsigned long util_do_clear;
7238 +#endif
7239 +
7240 +static ssize_t display_pe_status(char *buf, int id, u32 dmem_addr, unsigned long
7241 +                                       do_clear)
7242 +{
7243 +       ssize_t len = 0;
7244 +       u32 val;
7245 +       char statebuf[5];
7246 +       struct pfe_cpumon *cpumon = &pfe->cpumon;
7247 +       u32 debug_indicator;
7248 +       u32 debug[20];
7249 +
7250 +       *(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4);
7251 +       dmem_addr += 4;
7252 +
7253 +       statebuf[4] = '\0';
7254 +       len += sprintf(buf + len, "state=%4s ", statebuf);
7255 +
7256 +       val = pe_dmem_read(id, dmem_addr, 4);
7257 +       dmem_addr += 4;
7258 +       len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val));
7259 +
7260 +       val = pe_dmem_read(id, dmem_addr, 4);
7261 +       if (do_clear && val)
7262 +               pe_dmem_write(id, 0, dmem_addr, 4);
7263 +       dmem_addr += 4;
7264 +       len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val));
7265 +
7266 +       val = pe_dmem_read(id, dmem_addr, 4);
7267 +       if (do_clear && val)
7268 +               pe_dmem_write(id, 0, dmem_addr, 4);
7269 +       dmem_addr += 4;
7270 +       if (id >= TMU0_ID && id <= TMU_MAX_ID)
7271 +               len += sprintf(buf + len, "qstatus=%x", cpu_to_be32(val));
7272 +       else
7273 +               len += sprintf(buf + len, "tx=%u", cpu_to_be32(val));
7274 +
7275 +       val = pe_dmem_read(id, dmem_addr, 4);
7276 +       if (do_clear && val)
7277 +               pe_dmem_write(id, 0, dmem_addr, 4);
7278 +       dmem_addr += 4;
7279 +       if (val)
7280 +               len += sprintf(buf + len, " drop=%u", cpu_to_be32(val));
7281 +
7282 +       len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]);
7283 +
7284 +       len += sprintf(buf + len, "\n");
7285 +
7286 +       debug_indicator = pe_dmem_read(id, dmem_addr, 4);
7287 +       dmem_addr += 4;
7288 +       if (!strncmp((char *)&debug_indicator, "DBUG", 4)) {
7289 +               int j, last = 0;
7290 +
7291 +               for (j = 0; j < 16; j++) {
7292 +                       debug[j] = pe_dmem_read(id, dmem_addr, 4);
7293 +                       if (debug[j]) {
7294 +                               if (do_clear)
7295 +                                       pe_dmem_write(id, 0, dmem_addr, 4);
7296 +                               last = j + 1;
7297 +                       }
7298 +                       dmem_addr += 4;
7299 +               }
7300 +               for (j = 0; j < last; j++) {
7301 +                       len += sprintf(buf + len, "%08x%s",
7302 +                       cpu_to_be32(debug[j]),
7303 +                       (j & 0x7) == 0x7 || j == last - 1 ? "\n" : " ");
7304 +               }
7305 +       }
7306 +
7307 +       if (!strncmp(statebuf, "DEAD", 4)) {
7308 +               u32 i, dump = PE_EXCEPTION_DUMP_ADDRESS;
7309 +
7310 +               len += sprintf(buf + len, "Exception details:\n");
7311 +               for (i = 0; i < 20; i++) {
7312 +                       debug[i] = pe_dmem_read(id, dump, 4);
7313 +                       dump += 4;
7314 +                       if (i == 2)
7315 +                               len += sprintf(buf + len, "%4s = %08x (=%s) ",
7316 +                               register_name[i], cpu_to_be32(debug[i]),
7317 +                               exception_name[min((u32)
7318 +                               cpu_to_be32(debug[i]), (u32)13)]);
7319 +                       else
7320 +                               len += sprintf(buf + len, "%4s = %08x%s",
7321 +                               register_name[i], cpu_to_be32(debug[i]),
7322 +                               (i & 0x3) == 0x3 || i == 19 ? "\n" : " ");
7323 +               }
7324 +       }
7325 +
7326 +       return len;
7327 +}
7328 +
7329 +static ssize_t class_phy_stats(char *buf, int phy)
7330 +{
7331 +       ssize_t len = 0;
7332 +       int off1 = phy * 0x28;
7333 +       int off2 = phy * 0x10;
7334 +
7335 +       if (phy == 3)
7336 +               off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS;
7337 +
7338 +       len += sprintf(buf + len, "phy: %d\n", phy);
7339 +       len += sprintf(buf + len,
7340 +                       "  rx:   %10u, tx:   %10u, intf:  %10u, ipv4:    %10u, ipv6: %10u\n",
7341 +                       readl(CLASS_PHY1_RX_PKTS + off1),
7342 +                       readl(CLASS_PHY1_TX_PKTS + off1),
7343 +                       readl(CLASS_PHY1_INTF_MATCH_PKTS + off1),
7344 +                       readl(CLASS_PHY1_V4_PKTS + off1),
7345 +                       readl(CLASS_PHY1_V6_PKTS + off1));
7346 +
7347 +       len += sprintf(buf + len,
7348 +                       "  icmp: %10u, igmp: %10u, tcp:   %10u, udp:     %10u\n",
7349 +                       readl(CLASS_PHY1_ICMP_PKTS + off2),
7350 +                       readl(CLASS_PHY1_IGMP_PKTS + off2),
7351 +                       readl(CLASS_PHY1_TCP_PKTS + off2),
7352 +                       readl(CLASS_PHY1_UDP_PKTS + off2));
7353 +
7354 +       len += sprintf(buf + len, "  err\n");
7355 +       len += sprintf(buf + len,
7356 +                       "  lp:   %10u, intf: %10u, l3:    %10u, chcksum: %10u, ttl:  %10u\n",
7357 +                       readl(CLASS_PHY1_LP_FAIL_PKTS + off1),
7358 +                       readl(CLASS_PHY1_INTF_FAIL_PKTS + off1),
7359 +                       readl(CLASS_PHY1_L3_FAIL_PKTS + off1),
7360 +                       readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1),
7361 +                       readl(CLASS_PHY1_TTL_ERR_PKTS + off1));
7362 +
7363 +       return len;
7364 +}
7365 +
7366 +/* qm_read_drop_stat
7367 + * This function is used to read the drop statistics from the TMU
7368 + * hw drop counter.  Since the hw counter is always cleared after
7369 + * reading, this function maintains the previous drop count, and
7370 + * adds the new value to it.  That value can be retrieved by
7371 + * passing a pointer to it with the total_drops arg.
7372 + *
7373 + * @param tmu          TMU number (0 - 3)
7374 + * @param queue                queue number (0 - 15)
7375 + * @param total_drops  pointer to location to store total drops (or NULL)
7376 + * @param do_reset     if TRUE, clear total drops after updating
7377 + */
7378 +u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset)
7379 +{
7380 +       static u32 qtotal[TMU_MAX_ID + 1][NUM_QUEUES];
7381 +       u32 val;
7382 +
7383 +       writel((tmu << 8) | queue, TMU_TEQ_CTRL);
7384 +       writel((tmu << 8) | queue, TMU_LLM_CTRL);
7385 +       val = readl(TMU_TEQ_DROP_STAT);
7386 +       qtotal[tmu][queue] += val;
7387 +       if (total_drops)
7388 +               *total_drops = qtotal[tmu][queue];
7389 +       if (do_reset)
7390 +               qtotal[tmu][queue] = 0;
7391 +       return val;
7392 +}
7393 +
7394 +static ssize_t tmu_queue_stats(char *buf, int tmu, int queue)
7395 +{
7396 +       ssize_t len = 0;
7397 +       u32 drops;
7398 +
7399 +       len += sprintf(buf + len, "%d-%02d, ", tmu, queue);
7400 +
7401 +       drops = qm_read_drop_stat(tmu, queue, NULL, 0);
7402 +
7403 +       /* Select queue */
7404 +       writel((tmu << 8) | queue, TMU_TEQ_CTRL);
7405 +       writel((tmu << 8) | queue, TMU_LLM_CTRL);
7406 +
7407 +       len += sprintf(buf + len,
7408 +                       "(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n",
7409 +               drops, readl(TMU_TEQ_TRANS_STAT),
7410 +               readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR),
7411 +               readl(TMU_LLM_QUE_DROPCNT));
7412 +
7413 +       return len;
7414 +}
7415 +
7416 +static ssize_t tmu_queues(char *buf, int tmu)
7417 +{
7418 +       ssize_t len = 0;
7419 +       int queue;
7420 +
7421 +       for (queue = 0; queue < 16; queue++)
7422 +               len += tmu_queue_stats(buf + len, tmu, queue);
7423 +
7424 +       return len;
7425 +}
7426 +
7427 +static ssize_t block_version(char *buf, void *addr)
7428 +{
7429 +       ssize_t len = 0;
7430 +       u32 val;
7431 +
7432 +       val = readl(addr);
7433 +       len += sprintf(buf + len, "revision: %x, version: %x, id: %x\n",
7434 +               (val >> 24) & 0xff, (val >> 16) & 0xff, val & 0xffff);
7435 +
7436 +       return len;
7437 +}
7438 +
7439 +static ssize_t bmu(char *buf, int id, void *base)
7440 +{
7441 +       ssize_t len = 0;
7442 +
7443 +       len += sprintf(buf + len, "%s: %d\n  ", __func__, id);
7444 +
7445 +       len += block_version(buf + len, base + BMU_VERSION);
7446 +
7447 +       len += sprintf(buf + len, "  buf size:  %x\n", (1 << readl(base +
7448 +                       BMU_BUF_SIZE)));
7449 +       len += sprintf(buf + len, "  buf count: %x\n", readl(base +
7450 +                       BMU_BUF_CNT));
7451 +       len += sprintf(buf + len, "  buf rem:   %x\n", readl(base +
7452 +                       BMU_REM_BUF_CNT));
7453 +       len += sprintf(buf + len, "  buf curr:  %x\n", readl(base +
7454 +                       BMU_CURR_BUF_CNT));
7455 +       len += sprintf(buf + len, "  free err:  %x\n", readl(base +
7456 +                       BMU_FREE_ERR_ADDR));
7457 +
7458 +       return len;
7459 +}
7460 +
7461 +static ssize_t gpi(char *buf, int id, void *base)
7462 +{
7463 +       ssize_t len = 0;
7464 +       u32 val;
7465 +
7466 +       len += sprintf(buf + len, "%s%d:\n  ", __func__, id);
7467 +       len += block_version(buf + len, base + GPI_VERSION);
7468 +
7469 +       len += sprintf(buf + len, "  tx under stick: %x\n", readl(base +
7470 +                       GPI_FIFO_STATUS));
7471 +       val = readl(base + GPI_FIFO_DEBUG);
7472 +       len += sprintf(buf + len, "  tx pkts:        %x\n", (val >> 23) &
7473 +                       0x3f);
7474 +       len += sprintf(buf + len, "  rx pkts:        %x\n", (val >> 18) &
7475 +                       0x3f);
7476 +       len += sprintf(buf + len, "  tx bytes:       %x\n", (val >> 9) &
7477 +                       0x1ff);
7478 +       len += sprintf(buf + len, "  rx bytes:       %x\n", (val >> 0) &
7479 +                       0x1ff);
7480 +       len += sprintf(buf + len, "  overrun:        %x\n", readl(base +
7481 +                       GPI_OVERRUN_DROPCNT));
7482 +
7483 +       return len;
7484 +}
7485 +
7486 +static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr,
7487 +                            const char *buf, size_t count)
7488 +{
7489 +       class_do_clear = simple_strtoul(buf, NULL, 0);
7490 +       return count;
7491 +}
7492 +
7493 +static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr,
7494 +                             char *buf)
7495 +{
7496 +       ssize_t len = 0;
7497 +       int id;
7498 +       u32 val;
7499 +       struct pfe_cpumon *cpumon = &pfe->cpumon;
7500 +
7501 +       len += block_version(buf + len, CLASS_VERSION);
7502 +
7503 +       for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
7504 +               len += sprintf(buf + len, "%d: ", id - CLASS0_ID);
7505 +
7506 +               val = readl(CLASS_PE0_DEBUG + id * 4);
7507 +               len += sprintf(buf + len, "pc=1%04x ", val & 0xffff);
7508 +
7509 +               len += display_pe_status(buf + len, id, CLASS_DM_PESTATUS,
7510 +                                               class_do_clear);
7511 +       }
7512 +       len += sprintf(buf + len, "aggregate load=%d%%\n\n",
7513 +                       cpumon->class_usage_pct);
7514 +
7515 +       len += sprintf(buf + len, "pe status:   0x%x\n",
7516 +                       readl(CLASS_PE_STATUS));
7517 +       len += sprintf(buf + len, "max buf cnt: 0x%x   afull thres: 0x%x\n",
7518 +                       readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES));
7519 +       len += sprintf(buf + len, "tsq max cnt: 0x%x   tsq fifo thres: 0x%x\n",
7520 +                       readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES));
7521 +       len += sprintf(buf + len, "state:       0x%x\n", readl(CLASS_STATE));
7522 +
7523 +       len += class_phy_stats(buf + len, 0);
7524 +       len += class_phy_stats(buf + len, 1);
7525 +       len += class_phy_stats(buf + len, 2);
7526 +       len += class_phy_stats(buf + len, 3);
7527 +
7528 +       return len;
7529 +}
7530 +
7531 +static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr,
7532 +                          const char *buf, size_t count)
7533 +{
7534 +       tmu_do_clear = simple_strtoul(buf, NULL, 0);
7535 +       return count;
7536 +}
7537 +
7538 +static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr,
7539 +                           char *buf)
7540 +{
7541 +       ssize_t len = 0;
7542 +       int id;
7543 +       u32 val;
7544 +
7545 +       len += block_version(buf + len, TMU_VERSION);
7546 +
7547 +       for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
7548 +               if (id == TMU2_ID)
7549 +                       continue;
7550 +               len += sprintf(buf + len, "%d: ", id - TMU0_ID);
7551 +
7552 +               len += display_pe_status(buf + len, id, TMU_DM_PESTATUS,
7553 +                                               tmu_do_clear);
7554 +       }
7555 +
7556 +       len += sprintf(buf + len, "pe status:    %x\n", readl(TMU_PE_STATUS));
7557 +       len += sprintf(buf + len, "inq fifo cnt: %x\n",
7558 +                       readl(TMU_PHY_INQ_FIFO_CNT));
7559 +       val = readl(TMU_INQ_STAT);
7560 +       len += sprintf(buf + len, "inq wr ptr:     %x\n", val & 0x3ff);
7561 +       len += sprintf(buf + len, "inq rd ptr:     %x\n", val >> 10);
7562 +
7563 +       return len;
7564 +}
7565 +
7566 +static unsigned long drops_do_clear;
7567 +static u32 class_drop_counter[CLASS_NUM_DROP_COUNTERS];
7568 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
7569 +static u32 util_drop_counter[UTIL_NUM_DROP_COUNTERS];
7570 +#endif
7571 +
7572 +char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = {
7573 +       "ICC",
7574 +       "Host Pkt Error",
7575 +       "Rx Error",
7576 +       "IPsec Outbound",
7577 +       "IPsec Inbound",
7578 +       "EXPT IPsec Error",
7579 +       "Reassembly",
7580 +       "Fragmenter",
7581 +       "NAT-T",
7582 +       "Socket",
7583 +       "Multicast",
7584 +       "NAT-PT",
7585 +       "Tx Disabled",
7586 +};
7587 +
7588 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
7589 +char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = {
7590 +       "IPsec Outbound",
7591 +       "IPsec Inbound",
7592 +       "IPsec Rate Limiter",
7593 +       "Fragmenter",
7594 +       "Socket",
7595 +       "Tx Disabled",
7596 +       "Rx Error",
7597 +};
7598 +#endif
7599 +
7600 +static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr,
7601 +                            const char *buf, size_t count)
7602 +{
+       if (kstrtoul(buf, 0, &drops_do_clear))
+               return -EINVAL;
+
+       return count;
7605 +}
7606 +
7607 +static u32 tmu_drops[4][16];
7608 +static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr,
7609 +                             char *buf)
7610 +{
7611 +       ssize_t len = 0;
7612 +       int id, dropnum;
7613 +       int tmu, queue;
7614 +       u32 val;
7615 +       u32 dmem_addr;
7616 +       int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0;
7617 +       struct pfe_ctrl *ctrl = &pfe->ctrl;
7618 +
7619 +       memset(class_drop_counter, 0, sizeof(class_drop_counter));
7620 +       for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
7621 +               if (drops_do_clear)
7622 +                       pe_sync_stop(ctrl, (1 << id));
7623 +               for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
7624 +                       dropnum++) {
7625 +                       dmem_addr = CLASS_DM_DROP_CNTR;
7626 +                       val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
7627 +                       class_drop_counter[dropnum] += val;
7628 +                       num_class_drops += val;
7629 +                       if (drops_do_clear)
7630 +                               pe_dmem_write(id, 0, dmem_addr, 4);
7631 +               }
7632 +               if (drops_do_clear)
7633 +                       pe_start(ctrl, (1 << id));
7634 +       }
7635 +
7636 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
7637 +       if (drops_do_clear)
7638 +               pe_sync_stop(ctrl, (1 << UTIL_ID));
7639 +       for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
7640 +               dmem_addr = UTIL_DM_DROP_CNTR;
7641 +               val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
7642 +               util_drop_counter[dropnum] = val;
7643 +               num_util_drops += val;
7644 +               if (drops_do_clear)
7645 +                       pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
7646 +       }
7647 +       if (drops_do_clear)
7648 +               pe_start(ctrl, (1 << UTIL_ID));
7649 +#endif
7650 +       for (tmu = 0; tmu < 4; tmu++) {
7651 +               for (queue = 0; queue < 16; queue++) {
7652 +                       qm_read_drop_stat(tmu, queue, &tmu_drops[tmu][queue],
7653 +                                         drops_do_clear);
7654 +                       num_tmu_drops += tmu_drops[tmu][queue];
7655 +               }
7656 +       }
7657 +
7658 +       if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0)
7659 +               len += sprintf(buf + len, "No PE drops\n\n");
7660 +
7661 +       if (num_class_drops > 0) {
7662 +               len += sprintf(buf + len, "Class PE drops --\n");
7663 +               for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
7664 +                       dropnum++) {
7665 +                       if (class_drop_counter[dropnum] > 0)
7666 +                               len += sprintf(buf + len, "  %s: %d\n",
7667 +                                       class_drop_description[dropnum],
7668 +                                       class_drop_counter[dropnum]);
7669 +               }
7670 +               len += sprintf(buf + len, "\n");
7671 +       }
7672 +
7673 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
7674 +       if (num_util_drops > 0) {
7675 +               len += sprintf(buf + len, "Util PE drops --\n");
7676 +               for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
7677 +                       if (util_drop_counter[dropnum] > 0)
7678 +                               len += sprintf(buf + len, "  %s: %d\n",
7679 +                                       util_drop_description[dropnum],
7680 +                                       util_drop_counter[dropnum]);
7681 +               }
7682 +               len += sprintf(buf + len, "\n");
7683 +       }
7684 +#endif
7685 +       if (num_tmu_drops > 0) {
7686 +               len += sprintf(buf + len, "TMU drops --\n");
7687 +               for (tmu = 0; tmu < 4; tmu++) {
7688 +                       for (queue = 0; queue < 16; queue++) {
7689 +                               if (tmu_drops[tmu][queue] > 0)
+                                       len += sprintf(buf + len,
+                                               "  TMU%d-Q%d: %d\n",
+                                               tmu, queue,
+                                               tmu_drops[tmu][queue]);
7693 +                       }
7694 +               }
7695 +               len += sprintf(buf + len, "\n");
7696 +       }
7697 +
7698 +       return len;
7699 +}
7700 +
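+/* The per-TMU queue dumps simply delegate to tmu_queues() with the TMU index. */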
7701 +static ssize_t pfe_show_tmu0_queues(struct device *dev, struct device_attribute
7702 +                                       *attr, char *buf)
7703 +{
7704 +       return tmu_queues(buf, 0);
7705 +}
7706 +
7707 +static ssize_t pfe_show_tmu1_queues(struct device *dev, struct device_attribute
7708 +                                       *attr, char *buf)
7709 +{
7710 +       return tmu_queues(buf, 1);
7711 +}
7712 +
7713 +static ssize_t pfe_show_tmu2_queues(struct device *dev, struct device_attribute
7714 +                                       *attr, char *buf)
7715 +{
7716 +       return tmu_queues(buf, 2);
7717 +}
7718 +
7719 +static ssize_t pfe_show_tmu3_queues(struct device *dev, struct device_attribute
7720 +                                       *attr, char *buf)
7721 +{
7722 +       return tmu_queues(buf, 3);
7723 +}
7724 +
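+/*
+ * "util" attribute (only when the UTIL PE is enabled): the PE is synced and
+ * stopped while its status is read so the DMEM snapshot is consistent, then
+ * restarted.
+ */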
7725 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
7726 +static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr,
7727 +                           const char *buf, size_t count)
7728 +{
+       if (kstrtoul(buf, 0, &util_do_clear))
+               return -EINVAL;
+
+       return count;
7731 +}
7732 +
7733 +static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr,
7734 +                            char *buf)
7735 +{
7736 +       ssize_t len = 0;
7737 +       struct pfe_ctrl *ctrl = &pfe->ctrl;
7738 +
7739 +       len += block_version(buf + len, UTIL_VERSION);
7740 +
7741 +       pe_sync_stop(ctrl, (1 << UTIL_ID));
7742 +       len += display_pe_status(buf + len, UTIL_ID, UTIL_DM_PESTATUS,
7743 +                                       util_do_clear);
7744 +       pe_start(ctrl, (1 << UTIL_ID));
7745 +
7746 +       len += sprintf(buf + len, "pe status:   %x\n", readl(UTIL_PE_STATUS));
7747 +       len += sprintf(buf + len, "max buf cnt: %x\n",
7748 +                       readl(UTIL_MAX_BUF_CNT));
7749 +       len += sprintf(buf + len, "tsq max cnt: %x\n",
7750 +                       readl(UTIL_TSQ_MAX_CNT));
7751 +
7752 +       return len;
7753 +}
7754 +#endif
7755 +
7756 +static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr,
7757 +                           char *buf)
7758 +{
7759 +       ssize_t len = 0;
7760 +
7761 +       len += bmu(buf + len, 1, BMU1_BASE_ADDR);
7762 +       len += bmu(buf + len, 2, BMU2_BASE_ADDR);
7763 +
7764 +       return len;
7765 +}
7766 +
7767 +static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr,
7768 +                           char *buf)
7769 +{
7770 +       ssize_t len = 0;
7771 +
7772 +       len += sprintf(buf + len, "hif:\n  ");
7773 +       len += block_version(buf + len, HIF_VERSION);
7774 +
7775 +       len += sprintf(buf + len, "  tx curr bd:    %x\n",
7776 +                       readl(HIF_TX_CURR_BD_ADDR));
7777 +       len += sprintf(buf + len, "  tx status:     %x\n",
7778 +                       readl(HIF_TX_STATUS));
7779 +       len += sprintf(buf + len, "  tx dma status: %x\n",
7780 +                       readl(HIF_TX_DMA_STATUS));
7781 +
7782 +       len += sprintf(buf + len, "  rx curr bd:    %x\n",
7783 +                       readl(HIF_RX_CURR_BD_ADDR));
7784 +       len += sprintf(buf + len, "  rx status:     %x\n",
7785 +                       readl(HIF_RX_STATUS));
7786 +       len += sprintf(buf + len, "  rx dma status: %x\n",
7787 +                       readl(HIF_RX_DMA_STATUS));
7788 +
7789 +       len += sprintf(buf + len, "hif nocopy:\n  ");
7790 +       len += block_version(buf + len, HIF_NOCPY_VERSION);
7791 +
7792 +       len += sprintf(buf + len, "  tx curr bd:    %x\n",
7793 +                       readl(HIF_NOCPY_TX_CURR_BD_ADDR));
7794 +       len += sprintf(buf + len, "  tx status:     %x\n",
7795 +                       readl(HIF_NOCPY_TX_STATUS));
7796 +       len += sprintf(buf + len, "  tx dma status: %x\n",
7797 +                       readl(HIF_NOCPY_TX_DMA_STATUS));
7798 +
7799 +       len += sprintf(buf + len, "  rx curr bd:    %x\n",
7800 +                       readl(HIF_NOCPY_RX_CURR_BD_ADDR));
7801 +       len += sprintf(buf + len, "  rx status:     %x\n",
7802 +                       readl(HIF_NOCPY_RX_STATUS));
7803 +       len += sprintf(buf + len, "  rx dma status: %x\n",
7804 +                       readl(HIF_NOCPY_RX_DMA_STATUS));
7805 +
7806 +       return len;
7807 +}
7808 +
7809 +static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr,
7810 +                           char *buf)
7811 +{
7812 +       ssize_t len = 0;
7813 +
7814 +       len += gpi(buf + len, 0, EGPI1_BASE_ADDR);
7815 +       len += gpi(buf + len, 1, EGPI2_BASE_ADDR);
7816 +       len += gpi(buf + len, 3, HGPI_BASE_ADDR);
7817 +
7818 +       return len;
7819 +}
7820 +
7821 +static ssize_t pfe_show_pfemem(struct device *dev, struct device_attribute
7822 +                               *attr, char *buf)
7823 +{
7824 +       ssize_t len = 0;
7825 +       struct pfe_memmon *memmon = &pfe->memmon;
7826 +
7827 +       len += sprintf(buf + len, "Kernel Memory: %d Bytes (%d KB)\n",
7828 +               memmon->kernel_memory_allocated,
7829 +               (memmon->kernel_memory_allocated + 1023) / 1024);
7830 +
7831 +       return len;
7832 +}
7833 +
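+/*
+ * Optional HIF NAPI statistics (compiled in only with HIF_NAPI_STATS):
+ * reading prints the NAPI counters, writing anything resets them.
+ */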
7834 +#ifdef HIF_NAPI_STATS
7835 +static ssize_t pfe_show_hif_napi_stats(struct device *dev,
7836 +                                      struct device_attribute *attr,
7837 +                                      char *buf)
7838 +{
7839 +       struct platform_device *pdev = to_platform_device(dev);
7840 +       struct pfe *pfe = platform_get_drvdata(pdev);
7841 +       ssize_t len = 0;
7842 +
7843 +       len += sprintf(buf + len, "sched:  %u\n",
7844 +                       pfe->hif.napi_counters[NAPI_SCHED_COUNT]);
7845 +       len += sprintf(buf + len, "poll:   %u\n",
7846 +                       pfe->hif.napi_counters[NAPI_POLL_COUNT]);
7847 +       len += sprintf(buf + len, "packet: %u\n",
7848 +                       pfe->hif.napi_counters[NAPI_PACKET_COUNT]);
7849 +       len += sprintf(buf + len, "budget: %u\n",
7850 +                       pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]);
7851 +       len += sprintf(buf + len, "desc:   %u\n",
7852 +                       pfe->hif.napi_counters[NAPI_DESC_COUNT]);
7853 +       len += sprintf(buf + len, "full:   %u\n",
7854 +                       pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]);
7855 +
7856 +       return len;
7857 +}
7858 +
7859 +static ssize_t pfe_set_hif_napi_stats(struct device *dev,
7860 +                                     struct device_attribute *attr,
7861 +                                       const char *buf, size_t count)
7862 +{
7863 +       struct platform_device *pdev = to_platform_device(dev);
7864 +       struct pfe *pfe = platform_get_drvdata(pdev);
7865 +
7866 +       memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters));
7867 +
7868 +       return count;
7869 +}
7870 +
7871 +static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats,
7872 +                       pfe_set_hif_napi_stats);
7873 +#endif
7874 +
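+/* Read/write attributes are 0644; dump-only attributes are 0444. */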
7875 +static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class);
7876 +static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu);
7877 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
7878 +static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util);
7879 +#endif
7880 +static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL);
7881 +static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL);
7882 +static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL);
7883 +static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops);
7884 +static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL);
7885 +static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL);
7886 +static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL);
7887 +static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL);
7888 +static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL);
7889 +
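+/*
+ * Create the sysfs files under the PFE platform device; on any failure the
+ * files created so far are removed again in reverse order.
+ */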
7890 +int pfe_sysfs_init(struct pfe *pfe)
7891 +{
7892 +       if (device_create_file(pfe->dev, &dev_attr_class))
7893 +               goto err_class;
7894 +
7895 +       if (device_create_file(pfe->dev, &dev_attr_tmu))
7896 +               goto err_tmu;
7897 +
7898 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
7899 +       if (device_create_file(pfe->dev, &dev_attr_util))
7900 +               goto err_util;
7901 +#endif
7902 +
7903 +       if (device_create_file(pfe->dev, &dev_attr_bmu))
7904 +               goto err_bmu;
7905 +
7906 +       if (device_create_file(pfe->dev, &dev_attr_hif))
7907 +               goto err_hif;
7908 +
7909 +       if (device_create_file(pfe->dev, &dev_attr_gpi))
7910 +               goto err_gpi;
7911 +
7912 +       if (device_create_file(pfe->dev, &dev_attr_drops))
7913 +               goto err_drops;
7914 +
7915 +       if (device_create_file(pfe->dev, &dev_attr_tmu0_queues))
7916 +               goto err_tmu0_queues;
7917 +
7918 +       if (device_create_file(pfe->dev, &dev_attr_tmu1_queues))
7919 +               goto err_tmu1_queues;
7920 +
7921 +       if (device_create_file(pfe->dev, &dev_attr_tmu2_queues))
7922 +               goto err_tmu2_queues;
7923 +
7924 +       if (device_create_file(pfe->dev, &dev_attr_tmu3_queues))
7925 +               goto err_tmu3_queues;
7926 +
7927 +       if (device_create_file(pfe->dev, &dev_attr_pfemem))
7928 +               goto err_pfemem;
7929 +
7930 +#ifdef HIF_NAPI_STATS
7931 +       if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats))
7932 +               goto err_hif_napi_stats;
7933 +#endif
7934 +
7935 +       return 0;
7936 +
7937 +#ifdef HIF_NAPI_STATS
7938 +err_hif_napi_stats:
7939 +       device_remove_file(pfe->dev, &dev_attr_pfemem);
7940 +#endif
7941 +
7942 +err_pfemem:
7943 +       device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
7944 +
7945 +err_tmu3_queues:
7946 +       device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
7947 +
7948 +err_tmu2_queues:
7949 +       device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
7950 +
7951 +err_tmu1_queues:
7952 +       device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
7953 +
7954 +err_tmu0_queues:
7955 +       device_remove_file(pfe->dev, &dev_attr_drops);
7956 +
7957 +err_drops:
7958 +       device_remove_file(pfe->dev, &dev_attr_gpi);
7959 +
7960 +err_gpi:
7961 +       device_remove_file(pfe->dev, &dev_attr_hif);
7962 +
7963 +err_hif:
7964 +       device_remove_file(pfe->dev, &dev_attr_bmu);
7965 +
7966 +err_bmu:
7967 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
7968 +       device_remove_file(pfe->dev, &dev_attr_util);
7969 +
7970 +err_util:
7971 +#endif
7972 +       device_remove_file(pfe->dev, &dev_attr_tmu);
7973 +
7974 +err_tmu:
7975 +       device_remove_file(pfe->dev, &dev_attr_class);
7976 +
7977 +err_class:
7978 +       return -1;
7979 +}
7980 +
7981 +void pfe_sysfs_exit(struct pfe *pfe)
7982 +{
7983 +#ifdef HIF_NAPI_STATS
7984 +       device_remove_file(pfe->dev, &dev_attr_hif_napi_stats);
7985 +#endif
7986 +       device_remove_file(pfe->dev, &dev_attr_pfemem);
7987 +       device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
7988 +       device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
7989 +       device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
7990 +       device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
7991 +       device_remove_file(pfe->dev, &dev_attr_drops);
7992 +       device_remove_file(pfe->dev, &dev_attr_gpi);
7993 +       device_remove_file(pfe->dev, &dev_attr_hif);
7994 +       device_remove_file(pfe->dev, &dev_attr_bmu);
7995 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
7996 +       device_remove_file(pfe->dev, &dev_attr_util);
7997 +#endif
7998 +       device_remove_file(pfe->dev, &dev_attr_tmu);
7999 +       device_remove_file(pfe->dev, &dev_attr_class);
8000 +}