kernel: bump 4.14 to 4.14.125 (FS#2305 FS#2297)
[oweals/openwrt.git] target/linux/layerscape/patches-4.14/802-dma-support-layerscape.patch
1 From 5cb4bc977d933323429050033da9c701b24df43e Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Wed, 17 Apr 2019 18:58:23 +0800
4 Subject: [PATCH] dma: support layerscape
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 This is an integrated patch of DMA support for Layerscape
10
11 Signed-off-by: Biwen Li <biwen.li@nxp.com>
12 Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
13 Signed-off-by: Changming Huang <jerry.huang@nxp.com>
14 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
15 Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
16 Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com>
17 Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
18 Signed-off-by: Peng Ma <peng.ma@nxp.com>
19 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
20 Signed-off-by: Rajiv Vishwakarma <rajiv.vishwakarma@nxp.com>
21 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
22 Signed-off-by: Wen He <wen.he_1@nxp.com>
23 Signed-off-by: Yuan Yao <yao.yuan@nxp.com>
24 ---
25  drivers/dma/Kconfig                     |   33 +-
26  drivers/dma/Makefile                    |    3 +
27  drivers/dma/caam_dma.c                  |  462 ++++++++
28  drivers/dma/dpaa2-qdma/Kconfig          |    8 +
29  drivers/dma/dpaa2-qdma/Makefile         |    8 +
30  drivers/dma/dpaa2-qdma/dpaa2-qdma.c     |  781 ++++++++++++++
31  drivers/dma/dpaa2-qdma/dpaa2-qdma.h     |  181 ++++
32  drivers/dma/dpaa2-qdma/dpdmai.c         |  515 +++++++++
33  drivers/dma/dpaa2-qdma/fsl_dpdmai.h     |  521 +++++++++
34  drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h |  222 ++++
35  drivers/dma/fsl-edma.c                  |   66 +-
36  drivers/dma/fsl-qdma.c                  | 1278 +++++++++++++++++++++++
37  12 files changed, 4073 insertions(+), 5 deletions(-)
38  create mode 100644 drivers/dma/caam_dma.c
39  create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
40  create mode 100644 drivers/dma/dpaa2-qdma/Makefile
41  create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c
42  create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.h
43  create mode 100644 drivers/dma/dpaa2-qdma/dpdmai.c
44  create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai.h
45  create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
46  create mode 100644 drivers/dma/fsl-qdma.c
47
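All of the engines added below register with the generic dmaengine
framework, so clients reach them through the standard DMA_MEMCPY API
rather than any driver-specific entry point. As a rough illustration
only (not part of this patch; demo_memcpy()/demo_cb() are hypothetical
names and error handling is trimmed), a 4.14-era kernel client could
drive such a channel like this:

    #include <linux/completion.h>
    #include <linux/dmaengine.h>

    static void demo_cb(void *arg)
    {
            complete(arg);
    }

    /* Copy @len bytes from @src to @dst using any DMA_MEMCPY channel. */
    static int demo_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
    {
            DECLARE_COMPLETION_ONSTACK(done);
            struct dma_async_tx_descriptor *tx;
            struct dma_chan *chan;
            dma_cookie_t cookie;
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_MEMCPY, mask);
            chan = dma_request_channel(mask, NULL, NULL); /* any memcpy chan */
            if (!chan)
                    return -ENODEV;

            tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
            if (!tx) {
                    dma_release_channel(chan);
                    return -EIO;
            }

            tx->callback = demo_cb;
            tx->callback_param = &done;
            cookie = dmaengine_submit(tx);
            dma_async_issue_pending(chan);

            wait_for_completion(&done);
            dma_release_channel(chan);
            return dma_submit_error(cookie) ? -EIO : 0;
    }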
48 --- a/drivers/dma/Kconfig
49 +++ b/drivers/dma/Kconfig
50 @@ -129,6 +129,24 @@ config COH901318
51         help
52           Enable support for ST-Ericsson COH 901 318 DMA.
53  
54 +config CRYPTO_DEV_FSL_CAAM_DMA
55 +       tristate "CAAM DMA engine support"
56 +       depends on CRYPTO_DEV_FSL_CAAM_JR
57 +       default n
58 +       select DMA_ENGINE
59 +       select ASYNC_CORE
60 +       select ASYNC_TX_ENABLE_CHANNEL_SWITCH
61 +       help
62 +         Selecting this will offload DMA operations for users of the
63 +         scatter-gather memcpy API to the CAAM via job rings. The CAAM
64 +         is a hardware module that provides hardware acceleration for
65 +         cryptographic operations. It has a built-in DMA controller that
66 +         can be programmed to read/write cryptographic data. This module
67 +         defines a DMA driver that uses the DMA capabilities of the CAAM.
68 +
69 +         To compile this as a module, choose M here: the module
70 +         will be called caam_dma.
71 +
72  config DMA_BCM2835
73         tristate "BCM2835 DMA engine support"
74         depends on ARCH_BCM2835
75 @@ -215,6 +233,20 @@ config FSL_EDMA
76           multiplexing capability for DMA request sources(slot).
77           This module can be found on Freescale Vybrid and LS-1 SoCs.
78  
79 +config FSL_QDMA
80 +       tristate "NXP Layerscape qDMA engine support"
81 +       select DMA_ENGINE
82 +       select DMA_VIRTUAL_CHANNELS
83 +       select DMA_ENGINE_RAID
84 +       select ASYNC_TX_ENABLE_CHANNEL_SWITCH
85 +       help
86 +         Support the NXP Layerscape qDMA engine with command queue and legacy mode.
87 +         Channel virtualization is supported through enqueuing of DMA jobs to,
88 +         or dequeuing DMA jobs from, different work queues.
89 +         This module can be found on NXP Layerscape SoCs.
90 +
91 +source drivers/dma/dpaa2-qdma/Kconfig
92 +
93  config FSL_RAID
94          tristate "Freescale RAID engine Support"
95          depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
96 @@ -600,7 +632,6 @@ config ZX_DMA
97         help
98           Support the DMA engine for ZTE ZX family platform devices.
99  
100 -
101  # driver files
102  source "drivers/dma/bestcomm/Kconfig"
103  
104 --- a/drivers/dma/Makefile
105 +++ b/drivers/dma/Makefile
106 @@ -31,7 +31,9 @@ obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
107  obj-$(CONFIG_DW_DMAC_CORE) += dw/
108  obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
109  obj-$(CONFIG_FSL_DMA) += fsldma.o
110 +obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma/
111  obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
112 +obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
113  obj-$(CONFIG_FSL_RAID) += fsl_raid.o
114  obj-$(CONFIG_HSU_DMA) += hsu/
115  obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
116 @@ -71,6 +73,7 @@ obj-$(CONFIG_TI_EDMA) += edma.o
117  obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
118  obj-$(CONFIG_ZX_DMA) += zx_dma.o
119  obj-$(CONFIG_ST_FDMA) += st_fdma.o
120 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_DMA) += caam_dma.o
121  
122  obj-y += qcom/
123  obj-y += xilinx/
124 --- /dev/null
125 +++ b/drivers/dma/caam_dma.c
126 @@ -0,0 +1,462 @@
127 +/*
128 + * caam support for SG DMA
129 + *
130 + * Copyright 2016 Freescale Semiconductor, Inc
131 + * Copyright 2017 NXP
132 + *
133 + * Redistribution and use in source and binary forms, with or without
134 + * modification, are permitted provided that the following conditions are met:
135 + *     * Redistributions of source code must retain the above copyright
136 + *       notice, this list of conditions and the following disclaimer.
137 + *     * Redistributions in binary form must reproduce the above copyright
138 + *       notice, this list of conditions and the following disclaimer in the
139 + *       documentation and/or other materials provided with the distribution.
140 + *     * Neither the names of the above-listed copyright holders nor the
141 + *       names of any contributors may be used to endorse or promote products
142 + *       derived from this software without specific prior written permission.
143 + *
144 + *
145 + * ALTERNATIVELY, this software may be distributed under the terms of the
146 + * GNU General Public License ("GPL") as published by the Free Software
147 + * Foundation, either version 2 of that License or (at your option) any
148 + * later version.
149 + *
150 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
151 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
152 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
153 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
154 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
155 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
156 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
157 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
158 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
159 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
160 + * POSSIBILITY OF SUCH DAMAGE.
161 + */
162 +
163 +#include <linux/dma-mapping.h>
164 +#include <linux/dmaengine.h>
165 +#include <linux/module.h>
166 +#include <linux/platform_device.h>
167 +#include <linux/slab.h>
168 +
169 +#include "dmaengine.h"
170 +
171 +#include "../crypto/caam/regs.h"
172 +#include "../crypto/caam/jr.h"
173 +#include "../crypto/caam/error.h"
174 +#include "../crypto/caam/desc_constr.h"
175 +
176 +#define DESC_DMA_MEMCPY_LEN    ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / \
177 +                                CAAM_CMD_SZ)
178 +
179 +/*
180 + * Maximum chunk size of a DMA transfer. If a buffer is larger than this
181 + * value, it is internally broken into chunks of at most CAAM_DMA_CHUNK_SIZE
182 + * bytes and a DMA transfer request is issued for each chunk.
183 + * This value is the largest 16-bit number that is a multiple of 256 bytes
184 + * (the largest configurable CAAM DMA burst size): 0xFF00 = 255 * 256.
185 + */
186 +#define CAAM_DMA_CHUNK_SIZE    65280
187 +
188 +struct caam_dma_sh_desc {
189 +       u32 desc[DESC_DMA_MEMCPY_LEN] ____cacheline_aligned;
190 +       dma_addr_t desc_dma;
191 +};
192 +
193 +/* caam dma extended descriptor */
194 +struct caam_dma_edesc {
195 +       struct dma_async_tx_descriptor async_tx;
196 +       struct list_head node;
197 +       struct caam_dma_ctx *ctx;
198 +       dma_addr_t src_dma;
199 +       dma_addr_t dst_dma;
200 +       unsigned int src_len;
201 +       unsigned int dst_len;
202 +       u32 jd[] ____cacheline_aligned;
203 +};
204 +
205 +/*
206 + * caam_dma_ctx - per jr/channel context
207 + * @chan: dma channel used by async_tx API
208 + * @node: list_head used to attach to the global dma_ctx_list
209 + * @jrdev: Job Ring device
210 + * @pending_q: queue of pending (submitted, but not enqueued) jobs
211 + * @done_not_acked: jobs that have been completed by jr, but maybe not acked
212 + * @edesc_lock: protects the pending_q and done_not_acked lists
213 + */
214 +struct caam_dma_ctx {
215 +       struct dma_chan chan;
216 +       struct list_head node;
217 +       struct device *jrdev;
218 +       struct list_head pending_q;
219 +       struct list_head done_not_acked;
220 +       spinlock_t edesc_lock;
221 +};
222 +
223 +static struct dma_device *dma_dev;
224 +static struct caam_dma_sh_desc *dma_sh_desc;
225 +static LIST_HEAD(dma_ctx_list);
226 +
227 +static dma_cookie_t caam_dma_tx_submit(struct dma_async_tx_descriptor *tx)
228 +{
229 +       struct caam_dma_edesc *edesc = NULL;
230 +       struct caam_dma_ctx *ctx = NULL;
231 +       dma_cookie_t cookie;
232 +
233 +       edesc = container_of(tx, struct caam_dma_edesc, async_tx);
234 +       ctx = container_of(tx->chan, struct caam_dma_ctx, chan);
235 +
236 +       spin_lock_bh(&ctx->edesc_lock);
237 +
238 +       cookie = dma_cookie_assign(tx);
239 +       list_add_tail(&edesc->node, &ctx->pending_q);
240 +
241 +       spin_unlock_bh(&ctx->edesc_lock);
242 +
243 +       return cookie;
244 +}
245 +
246 +static void caam_jr_chan_free_edesc(struct caam_dma_edesc *edesc)
247 +{
248 +       struct caam_dma_ctx *ctx = edesc->ctx;
249 +       struct caam_dma_edesc *_edesc = NULL;
250 +
251 +       spin_lock_bh(&ctx->edesc_lock);
252 +
253 +       list_add_tail(&edesc->node, &ctx->done_not_acked);
254 +       list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
255 +               if (async_tx_test_ack(&edesc->async_tx)) {
256 +                       list_del(&edesc->node);
257 +                       kfree(edesc);
258 +               }
259 +       }
260 +
261 +       spin_unlock_bh(&ctx->edesc_lock);
262 +}
263 +
264 +static void caam_dma_done(struct device *dev, u32 *hwdesc, u32 err,
265 +                         void *context)
266 +{
267 +       struct caam_dma_edesc *edesc = context;
268 +       struct caam_dma_ctx *ctx = edesc->ctx;
269 +       dma_async_tx_callback callback;
270 +       void *callback_param;
271 +
272 +       if (err)
273 +               caam_jr_strstatus(ctx->jrdev, err);
274 +
275 +       dma_run_dependencies(&edesc->async_tx);
276 +
277 +       spin_lock_bh(&ctx->edesc_lock);
278 +       dma_cookie_complete(&edesc->async_tx);
279 +       spin_unlock_bh(&ctx->edesc_lock);
280 +
281 +       callback = edesc->async_tx.callback;
282 +       callback_param = edesc->async_tx.callback_param;
283 +
284 +       dma_descriptor_unmap(&edesc->async_tx);
285 +
286 +       caam_jr_chan_free_edesc(edesc);
287 +
288 +       if (callback)
289 +               callback(callback_param);
290 +}
291 +
292 +static void caam_dma_memcpy_init_job_desc(struct caam_dma_edesc *edesc)
293 +{
294 +       u32 *jd = edesc->jd;
295 +       u32 *sh_desc = dma_sh_desc->desc;
296 +       dma_addr_t desc_dma = dma_sh_desc->desc_dma;
297 +
298 +       /* init the job descriptor */
299 +       init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
300 +
301 +       /* set SEQIN PTR */
302 +       append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, 0);
303 +
304 +       /* set SEQOUT PTR */
305 +       append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, 0);
306 +
307 +       print_hex_dump_debug("caam dma desc@" __stringify(__LINE__) ": ",
308 +                            DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
309 +}
310 +
311 +static struct dma_async_tx_descriptor *
312 +caam_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
313 +                    size_t len, unsigned long flags)
314 +{
315 +       struct caam_dma_edesc *edesc;
316 +       struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
317 +                                               chan);
318 +
319 +       edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | GFP_NOWAIT);
320 +       if (!edesc)
321 +               return ERR_PTR(-ENOMEM);
322 +
323 +       dma_async_tx_descriptor_init(&edesc->async_tx, chan);
324 +       edesc->async_tx.tx_submit = caam_dma_tx_submit;
325 +       edesc->async_tx.flags = flags;
326 +       edesc->async_tx.cookie = -EBUSY;
327 +
328 +       edesc->src_dma = src;
329 +       edesc->src_len = len;
330 +       edesc->dst_dma = dst;
331 +       edesc->dst_len = len;
332 +       edesc->ctx = ctx;
333 +
334 +       caam_dma_memcpy_init_job_desc(edesc);
335 +
336 +       return &edesc->async_tx;
337 +}
338 +
339 +/* This function can be called in an interrupt context */
340 +static void caam_dma_issue_pending(struct dma_chan *chan)
341 +{
342 +       struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
343 +                                               chan);
344 +       struct caam_dma_edesc *edesc, *_edesc;
345 +
346 +       spin_lock_bh(&ctx->edesc_lock);
347 +       list_for_each_entry_safe(edesc, _edesc, &ctx->pending_q, node) {
348 +               if (caam_jr_enqueue(ctx->jrdev, edesc->jd,
349 +                                   caam_dma_done, edesc) < 0)
350 +                       break;
351 +               list_del(&edesc->node);
352 +       }
353 +       spin_unlock_bh(&ctx->edesc_lock);
354 +}
355 +
356 +static void caam_dma_free_chan_resources(struct dma_chan *chan)
357 +{
358 +       struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
359 +                                               chan);
360 +       struct caam_dma_edesc *edesc, *_edesc;
361 +
362 +       spin_lock_bh(&ctx->edesc_lock);
363 +       list_for_each_entry_safe(edesc, _edesc, &ctx->pending_q, node) {
364 +               list_del(&edesc->node);
365 +               kfree(edesc);
366 +       }
367 +       list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
368 +               list_del(&edesc->node);
369 +               kfree(edesc);
370 +       }
371 +       spin_unlock_bh(&ctx->edesc_lock);
372 +}
373 +
374 +static int caam_dma_jr_chan_bind(void)
375 +{
376 +       struct device *jrdev;
377 +       struct caam_dma_ctx *ctx;
378 +       int bonds = 0;
379 +       int i;
380 +
381 +       for (i = 0; i < caam_jr_driver_probed(); i++) {
382 +               jrdev = caam_jridx_alloc(i);
383 +               if (IS_ERR(jrdev)) {
384 +                       pr_err("job ring device %d allocation failed\n", i);
385 +                       continue;
386 +               }
387 +
388 +               ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
389 +               if (!ctx) {
390 +                       caam_jr_free(jrdev);
391 +                       continue;
392 +               }
393 +
394 +               ctx->chan.device = dma_dev;
395 +               ctx->chan.private = ctx;
396 +
397 +               ctx->jrdev = jrdev;
398 +
399 +               INIT_LIST_HEAD(&ctx->pending_q);
400 +               INIT_LIST_HEAD(&ctx->done_not_acked);
401 +               INIT_LIST_HEAD(&ctx->node);
402 +               spin_lock_init(&ctx->edesc_lock);
403 +
404 +               dma_cookie_init(&ctx->chan);
405 +
406 +               /* add the context of this channel to the context list */
407 +               list_add_tail(&ctx->node, &dma_ctx_list);
408 +
409 +               /* add this channel to the device chan list */
410 +               list_add_tail(&ctx->chan.device_node, &dma_dev->channels);
411 +
412 +               bonds++;
413 +       }
414 +
415 +       return bonds;
416 +}
417 +
418 +static inline void caam_jr_dma_free(struct dma_chan *chan)
419 +{
420 +       struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
421 +                                               chan);
422 +
423 +       list_del(&ctx->node);
424 +       list_del(&chan->device_node);
425 +       caam_jr_free(ctx->jrdev);
426 +       kfree(ctx);
427 +}
428 +
429 +static void set_caam_dma_desc(u32 *desc)
430 +{
431 +       u32 *jmp_cmd;
432 +
433 +       /* dma shared descriptor */
434 +       init_sh_desc(desc, HDR_SHARE_NEVER | (1 << HDR_START_IDX_SHIFT));
435 +
436 +       /* REG1 = CAAM_DMA_CHUNK_SIZE */
437 +       append_math_add_imm_u32(desc, REG1, ZERO, IMM, CAAM_DMA_CHUNK_SIZE);
438 +
439 +       /* REG0 = SEQINLEN - CAAM_DMA_CHUNK_SIZE */
440 +       append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, CAAM_DMA_CHUNK_SIZE);
441 +
442 +       /*
443 +        * if (REG0 > 0)
444 +        *      jmp to LABEL1
445 +        */
446 +       jmp_cmd = append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
447 +                             JUMP_COND_MATH_Z);
448 +
449 +       /* REG1 = SEQINLEN */
450 +       append_math_sub(desc, REG1, SEQINLEN, ZERO, CAAM_CMD_SZ);
451 +
452 +       /* LABEL1 */
453 +       set_jump_tgt_here(desc, jmp_cmd);
454 +
455 +       /* VARSEQINLEN = REG1 */
456 +       append_math_add(desc, VARSEQINLEN, REG1, ZERO, CAAM_CMD_SZ);
457 +
458 +       /* VARSEQOUTLEN = REG1 */
459 +       append_math_add(desc, VARSEQOUTLEN, REG1, ZERO, CAAM_CMD_SZ);
460 +
461 +       /* do FIFO STORE */
462 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_METADATA | LDST_VLF);
463 +
464 +       /* do FIFO LOAD */
465 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
466 +                            FIFOLD_TYPE_IFIFO | LDST_VLF);
467 +
468 +       /*
469 +        * if (REG0 > 0)
470 +        *      jmp 0xF8 (after shared desc header)
471 +        */
472 +       append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
473 +                   JUMP_COND_MATH_Z | 0xF8);
474 +
475 +       print_hex_dump_debug("caam dma shdesc@" __stringify(__LINE__) ": ",
476 +                            DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
477 +                            1);
478 +}
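+
+/*
+ * Taken together, the commands above implement roughly the following
+ * per-job loop (an illustrative C sketch, not part of the descriptor):
+ *
+ *     remaining = SEQINLEN;
+ *     do {
+ *             chunk = min(remaining, CAAM_DMA_CHUNK_SIZE);
+ *             seq_fifo_store(chunk);
+ *             seq_fifo_load(chunk);
+ *             remaining -= chunk;
+ *     } while (remaining > 0);     (the backward jump to offset 0xF8)
+ */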
479 +
480 +static int __init caam_dma_probe(struct platform_device *pdev)
481 +{
482 +       struct device *dev = &pdev->dev;
483 +       struct device *ctrldev = dev->parent;
484 +       struct dma_chan *chan, *_chan;
485 +       u32 *sh_desc;
486 +       int err = -ENOMEM;
487 +       int bonds;
488 +
489 +       if (!caam_jr_driver_probed()) {
490 +               dev_info(dev, "Deferring probe until the CAAM JR driver has probed\n");
491 +               return -EPROBE_DEFER;
492 +       }
493 +
494 +       dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL);
495 +       if (!dma_dev)
496 +               return -ENOMEM;
497 +
498 +       dma_sh_desc = kzalloc(sizeof(*dma_sh_desc), GFP_KERNEL | GFP_DMA);
499 +       if (!dma_sh_desc)
500 +               goto desc_err;
501 +
502 +       sh_desc = dma_sh_desc->desc;
503 +       set_caam_dma_desc(sh_desc);
504 +       dma_sh_desc->desc_dma = dma_map_single(ctrldev, sh_desc,
505 +                                              desc_bytes(sh_desc),
506 +                                              DMA_TO_DEVICE);
507 +       if (dma_mapping_error(ctrldev, dma_sh_desc->desc_dma)) {
508 +               dev_err(dev, "unable to map dma descriptor\n");
509 +               goto map_err;
510 +       }
511 +
512 +       INIT_LIST_HEAD(&dma_dev->channels);
513 +
514 +       bonds = caam_dma_jr_chan_bind();
515 +       if (!bonds) {
516 +               err = -ENODEV;
517 +               goto jr_bind_err;
518 +       }
519 +
520 +       dma_dev->dev = dev;
521 +       dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
522 +       dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
523 +       dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
524 +       dma_dev->device_tx_status = dma_cookie_status;
525 +       dma_dev->device_issue_pending = caam_dma_issue_pending;
526 +       dma_dev->device_prep_dma_memcpy = caam_dma_prep_memcpy;
527 +       dma_dev->device_free_chan_resources = caam_dma_free_chan_resources;
528 +
529 +       err = dma_async_device_register(dma_dev);
530 +       if (err) {
531 +               dev_err(dev, "Failed to register CAAM DMA engine\n");
532 +               goto jr_bind_err;
533 +       }
534 +
535 +       dev_info(dev, "caam dma support with %d job rings\n", bonds);
536 +
537 +       return err;
538 +
539 +jr_bind_err:
540 +       list_for_each_entry_safe(chan, _chan, &dma_dev->channels, device_node)
541 +               caam_jr_dma_free(chan);
542 +
543 +       dma_unmap_single(ctrldev, dma_sh_desc->desc_dma, desc_bytes(sh_desc),
544 +                        DMA_TO_DEVICE);
545 +map_err:
546 +       kfree(dma_sh_desc);
547 +desc_err:
548 +       kfree(dma_dev);
549 +       return err;
550 +}
551 +
552 +static int caam_dma_remove(struct platform_device *pdev)
553 +{
554 +       struct device *dev = &pdev->dev;
555 +       struct device *ctrldev = dev->parent;
556 +       struct caam_dma_ctx *ctx, *_ctx;
557 +
558 +       dma_async_device_unregister(dma_dev);
559 +
560 +       list_for_each_entry_safe(ctx, _ctx, &dma_ctx_list, node) {
561 +               list_del(&ctx->node);
562 +               caam_jr_free(ctx->jrdev);
563 +               kfree(ctx);
564 +       }
565 +
566 +       dma_unmap_single(ctrldev, dma_sh_desc->desc_dma,
567 +                        desc_bytes(dma_sh_desc->desc), DMA_TO_DEVICE);
568 +
569 +       kfree(dma_sh_desc);
570 +       kfree(dma_dev);
571 +
572 +       dev_info(dev, "caam dma support disabled\n");
573 +       return 0;
574 +}
575 +
576 +static struct platform_driver caam_dma_driver = {
577 +       .driver = {
578 +               .name = "caam-dma",
579 +       },
580 +       .probe  = caam_dma_probe,
581 +       .remove = caam_dma_remove,
582 +};
583 +module_platform_driver(caam_dma_driver);
584 +
585 +MODULE_LICENSE("Dual BSD/GPL");
586 +MODULE_DESCRIPTION("NXP CAAM support for DMA engine");
587 +MODULE_AUTHOR("NXP Semiconductors");
588 +MODULE_ALIAS("platform:caam-dma");
589 --- /dev/null
590 +++ b/drivers/dma/dpaa2-qdma/Kconfig
591 @@ -0,0 +1,8 @@
592 +menuconfig FSL_DPAA2_QDMA
593 +       tristate "NXP DPAA2 QDMA"
594 +       depends on FSL_MC_BUS && FSL_MC_DPIO
595 +       select DMA_ENGINE
596 +       select DMA_VIRTUAL_CHANNELS
597 +       ---help---
598 +         NXP Data Path Acceleration Architecture 2 QDMA driver,
599 +         using the NXP MC bus driver.
600 --- /dev/null
601 +++ b/drivers/dma/dpaa2-qdma/Makefile
602 @@ -0,0 +1,8 @@
603 +#
604 +# Makefile for the NXP DPAA2 qDMA controllers
605 +#
606 +ccflags-y += -DVERSION=\"\"
607 +
608 +obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o
609 +
610 +fsl-dpaa2-qdma-objs    := dpaa2-qdma.o dpdmai.o
611 --- /dev/null
612 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
613 @@ -0,0 +1,781 @@
614 +/*
615 + * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
616 + *
617 + * Copyright 2015-2017 NXP Semiconductor, Inc.
618 + * Author: Changming Huang <jerry.huang@nxp.com>
619 + *
620 + * Driver for the NXP QDMA engine in QMan mode.
621 + * Channel virtualization is supported through enqueuing of DMA jobs to,
622 + * or dequeuing DMA jobs from, different work queues via the QMan portal.
623 + * This module can be found on NXP LS2 SoCs.
624 + *
625 + * This program is free software; you can redistribute  it and/or modify it
626 + * under  the terms of  the GNU General  Public License as published by the
627 + * Free Software Foundation;  either version 2 of the  License, or (at your
628 + * option) any later version.
629 + */
630 +
631 +#include <linux/init.h>
632 +#include <linux/module.h>
633 +#include <linux/interrupt.h>
634 +#include <linux/clk.h>
635 +#include <linux/dma-mapping.h>
636 +#include <linux/dmapool.h>
637 +#include <linux/slab.h>
638 +#include <linux/spinlock.h>
639 +#include <linux/of.h>
640 +#include <linux/of_device.h>
641 +#include <linux/of_address.h>
642 +#include <linux/of_irq.h>
643 +#include <linux/of_dma.h>
644 +#include <linux/types.h>
645 +#include <linux/delay.h>
646 +#include <linux/iommu.h>
647 +#include <linux/sys_soc.h>
648 +
649 +#include "../virt-dma.h"
650 +
651 +#include <linux/fsl/mc.h>
652 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
653 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
654 +#include "fsl_dpdmai_cmd.h"
655 +#include "fsl_dpdmai.h"
656 +#include "dpaa2-qdma.h"
657 +
658 +static bool smmu_disable = true;
659 +
660 +static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
661 +{
662 +       return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
663 +}
664 +
665 +static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
666 +{
667 +       return container_of(vd, struct dpaa2_qdma_comp, vdesc);
668 +}
669 +
670 +static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
671 +{
672 +       return 0;
673 +}
674 +
675 +static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
676 +{
677 +       struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
678 +       unsigned long flags;
679 +       LIST_HEAD(head);
680 +
681 +       spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
682 +       vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
683 +       spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
684 +
685 +       vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
686 +}
687 +
688 +/*
689 + * Request a command descriptor for enqueue.
690 + */
691 +static struct dpaa2_qdma_comp *
692 +dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
693 +{
694 +       struct dpaa2_qdma_comp *comp_temp = NULL;
695 +       unsigned long flags;
696 +
697 +       spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
698 +       if (list_empty(&dpaa2_chan->comp_free)) {
699 +               spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
700 +               comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
701 +               if (!comp_temp)
702 +                       goto err;
703 +               comp_temp->fd_virt_addr = dma_pool_alloc(dpaa2_chan->fd_pool,
704 +                               GFP_NOWAIT, &comp_temp->fd_bus_addr);
705 +               if (!comp_temp->fd_virt_addr)
706 +                       goto err;
707 +
708 +               comp_temp->fl_virt_addr =
709 +                       (void *)((struct dpaa2_fd *)
710 +                               comp_temp->fd_virt_addr + 1);
711 +               comp_temp->fl_bus_addr = comp_temp->fd_bus_addr +
712 +                                       sizeof(struct dpaa2_fd);
713 +               comp_temp->desc_virt_addr =
714 +                       (void *)((struct dpaa2_fl_entry *)
715 +                               comp_temp->fl_virt_addr + 3);
716 +               comp_temp->desc_bus_addr = comp_temp->fl_bus_addr +
717 +                               sizeof(struct dpaa2_fl_entry) * 3;
718 +
719 +               comp_temp->qchan = dpaa2_chan;
720 +               return comp_temp;
721 +       }
722 +       comp_temp = list_first_entry(&dpaa2_chan->comp_free,
723 +                       struct dpaa2_qdma_comp, list);
724 +       list_del(&comp_temp->list);
725 +       spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
726 +
727 +       comp_temp->qchan = dpaa2_chan;
728 +err:
729 +       return comp_temp;
730 +}
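+
+/*
+ * Layout of one fd_pool entry as carved up by the pointer arithmetic
+ * above (a sketch; the exact sizes come from the dpaa2 headers):
+ *
+ *     base                          struct dpaa2_fd (frame descriptor)
+ *     base + sizeof(dpaa2_fd)       struct dpaa2_fl_entry[3] (frame list)
+ *     ... + 3 * sizeof(fl_entry)    struct dpaa2_qdma_sd_d[2] (src/dst
+ *                                   descriptors)
+ */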
731 +
732 +static void dpaa2_qdma_populate_fd(uint32_t format,
733 +               struct dpaa2_qdma_comp *dpaa2_comp)
734 +{
735 +       struct dpaa2_fd *fd;
736 +
737 +       fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
738 +       memset(fd, 0, sizeof(struct dpaa2_fd));
739 +
740 +       /* fd populated */
741 +       dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);
742 +       /* Bypass memory translation, Frame list format, short length disable */
743 +       /* BMT must be disabled when fsl-mc uses IOVA addresses */
744 +       if (smmu_disable)
745 +               dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
746 +       dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);
747 +
748 +       dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
749 +}
750 +
751 +/* first frame list for descriptor buffer */
752 +static void dpaa2_qdma_populate_first_framel(
753 +               struct dpaa2_fl_entry *f_list,
754 +               struct dpaa2_qdma_comp *dpaa2_comp,
755 +               bool wrt_changed)
756 +{
757 +       struct dpaa2_qdma_sd_d *sdd;
758 +
759 +       sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr;
760 +       memset(sdd, 0, 2 * (sizeof(*sdd)));
761 +       /* source and destination descriptor */
762 +       sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT); /* source descriptor CMD */
763 +       sdd++;
764 +
765 +       /* dest descriptor CMD */
766 +       if (wrt_changed)
767 +               sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
768 +       else
769 +               sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);
770 +
771 +       memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
772 +
773 +       /* first frame list to source descriptor */
774 +       dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
775 +       dpaa2_fl_set_len(f_list, 0x20);
776 +       dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);
777 +
778 +       if (smmu_disable)
779 +               f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
780 +}
781 +
782 +/* source and destination frame list */
783 +static void dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
784 +               dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt)
785 +{
786 +       /* source frame list to source buffer */
787 +       memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
788 +
789 +
790 +       dpaa2_fl_set_addr(f_list, src);
791 +       dpaa2_fl_set_len(f_list, len);
792 +       dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG)); /* single buffer frame or scatter gather frame */
793 +       if (smmu_disable)
794 +               f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
795 +
796 +       f_list++;
797 +       /* destination frame list to destination buffer */
798 +       memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
799 +
800 +       dpaa2_fl_set_addr(f_list, dst);
801 +       dpaa2_fl_set_len(f_list, len);
802 +       dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
803 +       dpaa2_fl_set_final(f_list, QDMA_FL_F); /* single buffer frame or scatter gather frame */
804 +       if (smmu_disable)
805 +               f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
806 +}
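+
+/*
+ * Resulting long-format job layout (sketch): the frame descriptor points
+ * at a three-entry frame list; entry 0 references the source/destination
+ * command descriptors filled in by dpaa2_qdma_populate_first_framel(),
+ * entry 1 the source buffer, and entry 2 (marked final) the destination
+ * buffer.
+ */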
807 +
808 +static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy(
809 +               struct dma_chan *chan, dma_addr_t dst,
810 +               dma_addr_t src, size_t len, unsigned long flags)
811 +{
812 +       struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
813 +       struct dpaa2_qdma_engine *dpaa2_qdma;
814 +       struct dpaa2_qdma_comp *dpaa2_comp;
815 +       struct dpaa2_fl_entry *f_list;
816 +       bool    wrt_changed;
817 +       uint32_t format;
818 +
819 +       dpaa2_qdma = dpaa2_chan->qdma;
820 +       dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
821 +       wrt_changed = dpaa2_qdma->qdma_wrtype_fixup;
822 +
823 +#ifdef LONG_FORMAT
824 +       format = QDMA_FD_LONG_FORMAT;
825 +#else
826 +       format = QDMA_FD_SHORT_FORMAT;
827 +#endif
828 +       /* populate Frame descriptor */
829 +       dpaa2_qdma_populate_fd(format, dpaa2_comp);
830 +
831 +       f_list = (struct dpaa2_fl_entry *)dpaa2_comp->fl_virt_addr;
832 +
833 +#ifdef LONG_FORMAT
834 +       /* first frame list for descriptor buffer (long format) */
835 +       dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);
836 +
837 +       f_list++;
838 +#endif
839 +
840 +       dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
841 +
842 +       return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
843 +}
844 +
845 +static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
846 +               dma_cookie_t cookie, struct dma_tx_state *txstate)
847 +{
848 +       return dma_cookie_status(chan, cookie, txstate);
849 +}
850 +
851 +static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
852 +{
853 +}
854 +
855 +static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
856 +{
857 +       struct dpaa2_qdma_comp *dpaa2_comp;
858 +       struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
859 +       struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
860 +       struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
861 +       struct virt_dma_desc *vdesc;
862 +       struct dpaa2_fd *fd;
863 +       int err;
864 +       unsigned long flags;
865 +
866 +       spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
867 +       spin_lock(&dpaa2_chan->vchan.lock);
868 +       if (vchan_issue_pending(&dpaa2_chan->vchan)) {
869 +               vdesc = vchan_next_desc(&dpaa2_chan->vchan);
870 +               if (!vdesc)
871 +                       goto err_enqueue;
872 +               dpaa2_comp = to_fsl_qdma_comp(vdesc);
873 +
874 +               fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
875 +
876 +               list_del(&vdesc->node);
877 +               list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
878 +
879 +               /* TODO: priority hard-coded to zero */
880 +               err = dpaa2_io_service_enqueue_fq(NULL,
881 +                               priv->tx_queue_attr[0].fqid, fd);
882 +               if (err) {
883 +                       list_del(&dpaa2_comp->list);
884 +                       list_add_tail(&dpaa2_comp->list,
885 +                               &dpaa2_chan->comp_free);
886 +               }
887 +
888 +       }
889 +err_enqueue:
890 +       spin_unlock(&dpaa2_chan->vchan.lock);
891 +       spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
892 +}
893 +
894 +static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
895 +{
896 +       struct device *dev = &ls_dev->dev;
897 +       struct dpaa2_qdma_priv *priv;
898 +       struct dpaa2_qdma_priv_per_prio *ppriv;
899 +       uint8_t prio_def = DPDMAI_PRIO_NUM;
900 +       int err;
901 +       int i;
902 +
903 +       priv = dev_get_drvdata(dev);
904 +
905 +       priv->dev = dev;
906 +       priv->dpqdma_id = ls_dev->obj_desc.id;
907 +
908 +       /* Get the handle for the DPDMAI this interface is associated with */
909 +       err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
910 +       if (err) {
911 +               dev_err(dev, "dpdmai_open() failed\n");
912 +               return err;
913 +       }
914 +       dev_info(dev, "Opened dpdmai object successfully\n");
915 +
916 +       err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
917 +                               &priv->dpdmai_attr);
918 +       if (err) {
919 +               dev_err(dev, "dpdmai_get_attributes() failed\n");
920 +               return err;
921 +       }
922 +
923 +       if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
924 +               dev_err(dev, "DPDMAI major version mismatch\n"
925 +                            "Found %u.%u, supported version is %u.%u\n",
926 +                               priv->dpdmai_attr.version.major,
927 +                               priv->dpdmai_attr.version.minor,
928 +                               DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
929 +       }
930 +
931 +       if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
932 +               dev_err(dev, "DPDMAI minor version mismatch\n"
933 +                            "Found %u.%u, supported version is %u.%u\n",
934 +                               priv->dpdmai_attr.version.major,
935 +                               priv->dpdmai_attr.version.minor,
936 +                               DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
937 +       }
938 +
939 +       priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
940 +       ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
941 +       if (!ppriv) {
942 +               dev_err(dev, "kzalloc for ppriv failed\n");
943 +               return -1;
944 +       }
945 +       priv->ppriv = ppriv;
946 +
947 +       for (i = 0; i < priv->num_pairs; i++) {
948 +               err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
949 +                               i, &priv->rx_queue_attr[i]);
950 +               if (err) {
951 +                       dev_err(dev, "dpdmai_get_rx_queue() failed\n");
952 +                       return err;
953 +               }
954 +               ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
955 +
956 +               err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
957 +                               i, &priv->tx_queue_attr[i]);
958 +               if (err) {
959 +                       dev_err(dev, "dpdmai_get_tx_queue() failed\n");
960 +                       return err;
961 +               }
962 +               ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
963 +               ppriv->prio = i;
964 +               ppriv->priv = priv;
965 +               ppriv++;
966 +       }
967 +
968 +       return 0;
969 +}
970 +
971 +static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
972 +{
973 +       struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
974 +                       struct dpaa2_qdma_priv_per_prio, nctx);
975 +       struct dpaa2_qdma_priv *priv = ppriv->priv;
976 +       struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
977 +       struct dpaa2_qdma_chan *qchan;
978 +       const struct dpaa2_fd *fd;
979 +       const struct dpaa2_fd *fd_eq;
980 +       struct dpaa2_dq *dq;
981 +       int err;
982 +       int is_last = 0;
983 +       uint8_t status;
984 +       int i;
985 +       int found;
986 +       uint32_t n_chans = priv->dpaa2_qdma->n_chans;
987 +
988 +       do {
989 +               err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
990 +                                               ppriv->store);
991 +       } while (err);
992 +
993 +       while (!is_last) {
994 +               do {
995 +                       dq = dpaa2_io_store_next(ppriv->store, &is_last);
996 +               } while (!is_last && !dq);
997 +               if (!dq) {
998 +                       dev_err(priv->dev, "FQID returned no valid frames!\n");
999 +                       continue;
1000 +               }
1001 +
1002 +               /* obtain FD and process the error */
1003 +               fd = dpaa2_dq_fd(dq);
1004 +
1005 +               status = dpaa2_fd_get_ctrl(fd) & 0xff;
1006 +               if (status)
1007 +                       dev_err(priv->dev, "FD error occurred\n");
1008 +               found = 0;
1009 +               for (i = 0; i < n_chans; i++) {
1010 +                       qchan = &priv->dpaa2_qdma->chans[i];
1011 +                       spin_lock(&qchan->queue_lock);
1012 +                       if (list_empty(&qchan->comp_used)) {
1013 +                               spin_unlock(&qchan->queue_lock);
1014 +                               continue;
1015 +                       }
1016 +                       list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
1017 +                               &qchan->comp_used, list) {
1018 +                               fd_eq = (struct dpaa2_fd *)
1019 +                                       dpaa2_comp->fd_virt_addr;
1020 +
1021 +                               if (le64_to_cpu(fd_eq->simple.addr) ==
1022 +                                               le64_to_cpu(fd->simple.addr)) {
1023 +
1024 +                                       list_del(&dpaa2_comp->list);
1025 +                                       list_add_tail(&dpaa2_comp->list,
1026 +                                                       &qchan->comp_free);
1027 +
1028 +                                       spin_lock(&qchan->vchan.lock);
1029 +                                       vchan_cookie_complete(
1030 +                                               &dpaa2_comp->vdesc);
1031 +                                       spin_unlock(&qchan->vchan.lock);
1032 +                                       found = 1;
1033 +                                       break;
1034 +                               }
1035 +                       }
1036 +                       spin_unlock(&qchan->queue_lock);
1037 +                       if (found)
1038 +                               break;
1039 +               }
1040 +       }
1041 +
1042 +       dpaa2_io_service_rearm(NULL, ctx);
1043 +}
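+
+/*
+ * In short (a sketch of the flow above): pull frames from the response
+ * FQ into the per-priority store, match each dequeued FD back to its
+ * in-flight job by frame address, move that job to the free list and
+ * complete its cookie, then rearm the FQDAN notification.
+ */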
1044 +
1045 +static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
1046 +{
1047 +       int err, i, num;
1048 +       struct device *dev = priv->dev;
1049 +       struct dpaa2_qdma_priv_per_prio *ppriv;
1050 +
1051 +       num = priv->num_pairs;
1052 +       ppriv = priv->ppriv;
1053 +       for (i = 0; i < num; i++) {
1054 +               ppriv->nctx.is_cdan = 0;
1055 +               ppriv->nctx.desired_cpu = 1;
1056 +               ppriv->nctx.id = ppriv->rsp_fqid;
1057 +               ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
1058 +               err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
1059 +               if (err) {
1060 +                       dev_err(dev, "Notification register failed\n");
1061 +                       goto err_service;
1062 +               }
1063 +
1064 +               ppriv->store = dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE,
1065 +                                               dev);
1066 +               if (!ppriv->store) {
1067 +                       dev_err(dev, "dpaa2_io_store_create() failed\n");
1068 +                       goto err_store;
1069 +               }
1070 +
1071 +               ppriv++;
1072 +       }
1073 +       return 0;
1074 +
1075 +err_store:
1076 +       dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
1077 +err_service:
1078 +       ppriv--;
1079 +       while (ppriv >= priv->ppriv) {
1080 +               dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
1081 +               dpaa2_io_store_destroy(ppriv->store);
1082 +               ppriv--;
1083 +       }
1084 +       return err ? err : -ENOMEM;
1085 +}
1086 +
1087 +static void __cold dpaa2_dpdmai_store_free(struct dpaa2_qdma_priv *priv)
1088 +{
1089 +       struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1090 +       int i;
1091 +
1092 +       for (i = 0; i < priv->num_pairs; i++) {
1093 +               dpaa2_io_store_destroy(ppriv->store);
1094 +               ppriv++;
1095 +       }
1096 +}
1097 +
1098 +static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
1099 +{
1100 +       struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1101 +       struct device *dev = priv->dev;
1102 +       int i;
1103 +
1104 +       for (i = 0; i < priv->num_pairs; i++) {
1105 +               dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
1106 +               ppriv++;
1107 +       }
1108 +}
1109 +
1110 +static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
1111 +{
1112 +       int err;
1113 +       struct dpdmai_rx_queue_cfg rx_queue_cfg;
1114 +       struct device *dev = priv->dev;
1115 +       struct dpaa2_qdma_priv_per_prio *ppriv;
1116 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
1117 +       int i, num;
1118 +
1119 +       num = priv->num_pairs;
1120 +       ppriv = priv->ppriv;
1121 +       for (i = 0; i < num; i++) {
1122 +               rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
1123 +                                       DPDMAI_QUEUE_OPT_DEST;
1124 +               rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
1125 +               rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
1126 +               rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
1127 +               rx_queue_cfg.dest_cfg.priority = ppriv->prio;
1128 +               err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1129 +                               rx_queue_cfg.dest_cfg.priority, &rx_queue_cfg);
1130 +               if (err) {
1131 +                       dev_err(dev, "dpdmai_set_rx_queue() failed\n");
1132 +                       return err;
1133 +               }
1134 +
1135 +               ppriv++;
1136 +       }
1137 +
1138 +       return 0;
1139 +}
1140 +
1141 +static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
1142 +{
1143 +       int err = 0;
1144 +       struct device *dev = priv->dev;
1145 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
1146 +       struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1147 +       int i;
1148 +
1149 +       for (i = 0; i < priv->num_pairs; i++) {
1150 +               ppriv->nctx.qman64 = 0;
1151 +               ppriv->nctx.dpio_id = 0;
1152 +               ppriv++;
1153 +       }
1154 +
1155 +       err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
1156 +       if (err)
1157 +               dev_err(dev, "dpdmai_reset() failed\n");
1158 +
1159 +       return err;
1160 +}
1161 +
1162 +static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
1163 +                                               struct list_head *head)
1164 +{
1165 +       struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
1166 +       /* free the QDMA comp resource */
1167 +       list_for_each_entry_safe(comp_tmp, _comp_tmp,
1168 +                       head, list) {
1169 +               dma_pool_free(qchan->fd_pool,
1170 +                       comp_tmp->fd_virt_addr,
1171 +                       comp_tmp->fd_bus_addr);
1172 +               list_del(&comp_tmp->list);
1173 +               kfree(comp_tmp);
1174 +       }
1175 +
1176 +}
1177 +
1178 +static void __cold dpaa2_dpdmai_free_channels(
1179 +               struct dpaa2_qdma_engine *dpaa2_qdma)
1180 +{
1181 +       struct dpaa2_qdma_chan *qchan;
1182 +       int num, i;
1183 +
1184 +       num = dpaa2_qdma->n_chans;
1185 +       for (i = 0; i < num; i++) {
1186 +               qchan = &dpaa2_qdma->chans[i];
1187 +               dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
1188 +               dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
1189 +               dma_pool_destroy(qchan->fd_pool);
1190 +       }
1191 +}
1192 +
1193 +static int dpaa2_dpdmai_alloc_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
1194 +{
1195 +       struct dpaa2_qdma_chan *dpaa2_chan;
1196 +       struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
1197 +       int i;
1198 +
1199 +       INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
1200 +       for (i = 0; i < dpaa2_qdma->n_chans; i++) {
1201 +               dpaa2_chan = &dpaa2_qdma->chans[i];
1202 +               dpaa2_chan->qdma = dpaa2_qdma;
1203 +               dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
1204 +               vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
1205 +
1206 +               dpaa2_chan->fd_pool = dma_pool_create("fd_pool",
1207 +                                       dev, FD_POOL_SIZE, 32, 0);
1208 +               if (!dpaa2_chan->fd_pool)
1209 +                       return -ENOMEM;
1210 +
1211 +               spin_lock_init(&dpaa2_chan->queue_lock);
1212 +               INIT_LIST_HEAD(&dpaa2_chan->comp_used);
1213 +               INIT_LIST_HEAD(&dpaa2_chan->comp_free);
1214 +       }
1215 +       return 0;
1216 +}
1217 +
1218 +static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
1219 +{
1220 +       struct dpaa2_qdma_priv *priv;
1221 +       struct device *dev = &dpdmai_dev->dev;
1222 +       struct dpaa2_qdma_engine *dpaa2_qdma;
1223 +       int err;
1224 +
1225 +       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1226 +       if (!priv)
1227 +               return -ENOMEM;
1228 +       dev_set_drvdata(dev, priv);
1229 +       priv->dpdmai_dev = dpdmai_dev;
1230 +
1231 +       priv->iommu_domain = iommu_get_domain_for_dev(dev);
1232 +       if (priv->iommu_domain)
1233 +               smmu_disable = false;
1234 +
1235 +       /* obtain a MC portal */
1236 +       err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
1237 +       if (err) {
1238 +               if (err == -ENXIO)
1239 +                       err = -EPROBE_DEFER;
1240 +               else
1241 +                       dev_err(dev, "MC portal allocation failed\n");
1242 +               goto err_mcportal;
1243 +       }
1244 +
1245 +       /* DPDMAI initialization */
1246 +       err = dpaa2_qdma_setup(dpdmai_dev);
1247 +       if (err) {
1248 +               dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
1249 +               goto err_dpdmai_setup;
1250 +       }
1251 +
1252 +       /* DPIO */
1253 +       err = dpaa2_qdma_dpio_setup(priv);
1254 +       if (err) {
1255 +               dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
1256 +               goto err_dpio_setup;
1257 +       }
1258 +
1259 +       /* DPDMAI binding to DPIO */
1260 +       err = dpaa2_dpdmai_bind(priv);
1261 +       if (err) {
1262 +               dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
1263 +               goto err_bind;
1264 +       }
1265 +
1266 +       /* DPDMAI enable */
1267 +       err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
1268 +       if (err) {
1269 +               dev_err(dev, "dpdmai_enable() faile\n");
1270 +               goto err_enable;
1271 +       }
1272 +
1273 +       dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
1274 +       if (!dpaa2_qdma) {
1275 +               err = -ENOMEM;
1276 +               goto err_eng;
1277 +       }
1278 +
1279 +       priv->dpaa2_qdma = dpaa2_qdma;
1280 +       dpaa2_qdma->priv = priv;
1281 +
1282 +       dpaa2_qdma->n_chans = NUM_CH;
1283 +
1284 +       err = dpaa2_dpdmai_alloc_channels(dpaa2_qdma);
1285 +       if (err) {
1286 +               dev_err(dev, "QDMA alloc channels faile\n");
1287 +               goto err_reg;
1288 +       }
1289 +
1290 +       if (soc_device_match(soc_fixup_tuning))
1291 +               dpaa2_qdma->qdma_wrtype_fixup = true;
1292 +       else
1293 +               dpaa2_qdma->qdma_wrtype_fixup = false;
1294 +
1295 +       dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
1296 +       dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
1297 +       dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
1298 +
1299 +       dpaa2_qdma->dma_dev.dev = dev;
1300 +       dpaa2_qdma->dma_dev.device_alloc_chan_resources
1301 +               = dpaa2_qdma_alloc_chan_resources;
1302 +       dpaa2_qdma->dma_dev.device_free_chan_resources
1303 +               = dpaa2_qdma_free_chan_resources;
1304 +       dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status;
1305 +       dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
1306 +       dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
1307 +
1308 +       err = dma_async_device_register(&dpaa2_qdma->dma_dev);
1309 +       if (err) {
1310 +               dev_err(dev, "Can't register NXP QDMA engine.\n");
1311 +               goto err_reg;
1312 +       }
1313 +
1314 +       return 0;
1315 +
1316 +err_reg:
1317 +       dpaa2_dpdmai_free_channels(dpaa2_qdma);
1318 +       kfree(dpaa2_qdma);
1319 +err_eng:
1320 +       dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
1321 +err_enable:
1322 +       dpaa2_dpdmai_dpio_unbind(priv);
1323 +err_bind:
1324 +       dpaa2_dpdmai_store_free(priv);
1325 +       dpaa2_dpdmai_dpio_free(priv);
1326 +err_dpio_setup:
1327 +       dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
1328 +err_dpdmai_setup:
1329 +       fsl_mc_portal_free(priv->mc_io);
1330 +err_mcportal:
1331 +       kfree(priv->ppriv);
1332 +       kfree(priv);
1333 +       dev_set_drvdata(dev, NULL);
1334 +       return err;
1335 +}
1336 +
1337 +static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
1338 +{
1339 +       struct device *dev;
1340 +       struct dpaa2_qdma_priv *priv;
1341 +       struct dpaa2_qdma_engine *dpaa2_qdma;
1342 +
1343 +       dev = &ls_dev->dev;
1344 +       priv = dev_get_drvdata(dev);
1345 +       dpaa2_qdma = priv->dpaa2_qdma;
1346 +
1347 +       dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
1348 +       dpaa2_dpdmai_dpio_unbind(priv);
1349 +       dpaa2_dpdmai_store_free(priv);
1350 +       dpaa2_dpdmai_dpio_free(priv);
1351 +       dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
1352 +       fsl_mc_portal_free(priv->mc_io);
1353 +       dev_set_drvdata(dev, NULL);
1354 +       dpaa2_dpdmai_free_channels(dpaa2_qdma);
1355 +
1356 +       dma_async_device_unregister(&dpaa2_qdma->dma_dev);
1357 +       kfree(priv);
1358 +       kfree(dpaa2_qdma);
1359 +
1360 +       return 0;
1361 +}
1362 +
1363 +static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
1364 +       {
1365 +               .vendor = FSL_MC_VENDOR_FREESCALE,
1366 +               .obj_type = "dpdmai",
1367 +       },
1368 +       { .vendor = 0x0 }
1369 +};
1370 +
1371 +static struct fsl_mc_driver dpaa2_qdma_driver = {
1372 +       .driver         = {
1373 +               .name   = "dpaa2-qdma",
1374 +               .owner  = THIS_MODULE,
1375 +       },
1376 +       .probe          = dpaa2_qdma_probe,
1377 +       .remove         = dpaa2_qdma_remove,
1378 +       .match_id_table = dpaa2_qdma_id_table
1379 +};
1380 +
1381 +static int __init dpaa2_qdma_driver_init(void)
1382 +{
1383 +       return fsl_mc_driver_register(&(dpaa2_qdma_driver));
1384 +}
1385 +late_initcall(dpaa2_qdma_driver_init);
1386 +
1387 +static void __exit fsl_qdma_exit(void)
1388 +{
1389 +       fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
1390 +}
1391 +module_exit(fsl_qdma_exit);
1392 +
1393 +MODULE_DESCRIPTION("NXP DPAA2 qDMA driver");
1394 +MODULE_LICENSE("Dual BSD/GPL");
1395 --- /dev/null
1396 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
1397 @@ -0,0 +1,181 @@
1398 +/* Copyright 2015 NXP Semiconductor Inc.
1399 + *
1400 + * Redistribution and use in source and binary forms, with or without
1401 + * modification, are permitted provided that the following conditions are met:
1402 + *     * Redistributions of source code must retain the above copyright
1403 + *     notice, this list of conditions and the following disclaimer.
1404 + *     * Redistributions in binary form must reproduce the above copyright
1405 + *      notice, this list of conditions and the following disclaimer in the
1406 + *      documentation and/or other materials provided with the distribution.
1407 + *     * Neither the name of NXP Semiconductor nor the
1408 + *      names of its contributors may be used to endorse or promote products
1409 + *      derived from this software without specific prior written permission.
1410 + *
1411 + *
1412 + * ALTERNATIVELY, this software may be distributed under the terms of the
1413 + * GNU General Public License ("GPL") as published by the Free Software
1414 + * Foundation, either version 2 of that License or (at your option) any
1415 + * later version.
1416 + *
1417 + * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
1418 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1419 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1420 + * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
1421 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1422 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1423 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1424 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1425 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1426 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1427 + */
1428 +
1429 +#ifndef __DPAA2_QDMA_H
1430 +#define __DPAA2_QDMA_H
1431 +
1432 +#define LONG_FORMAT 1
1433 +
1434 +#define DPAA2_QDMA_STORE_SIZE 16
1435 +#define NUM_CH 8
1436 +
1437 +#define QDMA_DMR_OFFSET        0x0
1438 +#define QDMA_DQ_EN (0 << 30)
1439 +#define QDMA_DQ_DIS (1 << 30)
1440 +
1441 +#define QDMA_DSR_M_OFFSET 0x10004
1442 +
1443 +struct dpaa2_qdma_sd_d {
1444 +       uint32_t rsv:32;
1445 +       union {
1446 +               struct {
1447 +                       uint32_t ssd:12; /* source stride distance */
1448 +                       uint32_t sss:12; /* source stride size */
1449 +                       uint32_t rsv1:8;
1450 +               } sdf;
1451 +               struct {
1452 +                       uint32_t dsd:12; /* Destination stride distance */
1453 +                       uint32_t dss:12; /* Destination stride size */
1454 +                       uint32_t rsv2:8;
1455 +               } ddf;
1456 +       } df;
1457 +       uint32_t rbpcmd;        /* Route-by-port command */
1458 +       uint32_t cmd;
1459 +} __attribute__((__packed__));
1460 +/* Source descriptor command read transaction type for RBP=0:
1461 + * coherent copy of cacheable memory */
1462 +#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
1463 +/* Destination descriptor command write transaction type for RBP=0:
1464 + * coherent copy of cacheable memory */
1465 +#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
1466 +#define LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT (0xb << 28)
1467 +
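+/*
+ * Illustrative sketch, not part of the original patch: for a plain coherent
+ * memcpy, the source/destination descriptor 'cmd' fields above would be set as
+ *
+ *     sd->cmd = QDMA_SD_CMD_RDTTYPE_COHERENT;
+ *     dd->cmd = QDMA_DD_CMD_WRTTYPE_COHERENT;
+ *
+ * with LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT used instead when the engine's
+ * qdma_wrtype_fixup flag (struct dpaa2_qdma_engine below) is set.
+ */
+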
1468 +#define QMAN_FD_FMT_ENABLE (1) /* frame list table enable */
1469 +#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
1470 +#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */
1471 +#define QMAN_FD_SL_DISABLE (0 << 14) /* short length disabled */
1472 +#define QMAN_FD_SL_ENABLE (1 << 14) /* short length enabled */
1473 +
1474 +#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */
1475 +#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */
1476 +
1477 +#define QDMA_FD_SHORT_FORMAT (1 << 11) /* short format */
1478 +#define QDMA_FD_LONG_FORMAT (0 << 11) /* long format */
1479 +#define QDMA_SER_DISABLE (0 << 8) /* no notification */
1480 +#define QDMA_SER_CTX (1 << 8) /* notification by FQD_CTX[fqid] */
1481 +#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
1482 +#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
1483 +#define QDMA_FD_SPF_ENALBE (1 << 30) /* source prefetch enable */
1484 +
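+/*
+ * Illustrative sketch, not part of the original patch: a long-format frame
+ * descriptor control word requesting completion notification through
+ * FQD_CTX[fqid] could be composed from the flags above as
+ *
+ *     u32 fd_ctrl = QDMA_FD_LONG_FORMAT | QDMA_SER_CTX | QDMA_FD_SPF_ENALBE;
+ */
+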
1485 +#define QMAN_FD_VA_ENABLE (1 << 14)  /* Address used is virtual address */
1486 +#define QMAN_FD_VA_DISABLE (0 << 14) /* Address used is a real address */
1487 +#define QMAN_FD_CBMT_ENABLE (1 << 15) /* Flow Context: 49bit physical address */
1488 +#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */
1489 +#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */
1490 +
1491 +#define QDMA_FL_FMT_SBF (0x0) /* Single buffer frame */
1492 +#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */
1493 +#define QDMA_FL_BMT_ENABLE (0x1 << 15) /* enable bypass memory translation */
1494 +#define QDMA_FL_BMT_DISABLE 0x0 /* disable bypass memory translation */
1495 +#define QDMA_FL_SL_LONG (0x0 << 2) /* long length */
1496 +#define QDMA_FL_SL_SHORT 0x1 /* short length */
1497 +#define QDMA_FL_F (0x1) /* last frame list bit */
1498 +/* Description of frame list table structure */
1499 +
1500 +struct dpaa2_qdma_chan {
1501 +       struct virt_dma_chan            vchan;
1502 +       struct virt_dma_desc            vdesc;
1503 +       enum dma_status                 status;
1504 +       struct dpaa2_qdma_engine        *qdma;
1505 +
1506 +       struct mutex            dpaa2_queue_mutex;
1507 +       spinlock_t              queue_lock;
1508 +       struct dma_pool         *fd_pool;
1509 +
1510 +       struct list_head        comp_used;
1511 +       struct list_head        comp_free;
1513 +};
1514 +
1515 +struct dpaa2_qdma_comp {
1516 +       dma_addr_t              fd_bus_addr;
1517 +       dma_addr_t              fl_bus_addr;
1518 +       dma_addr_t              desc_bus_addr;
1519 +       void                    *fd_virt_addr;
1520 +       void                    *fl_virt_addr;
1521 +       void                    *desc_virt_addr;
1522 +       struct dpaa2_qdma_chan  *qchan;
1523 +       struct virt_dma_desc    vdesc;
1524 +       struct list_head        list;
1525 +};
1526 +
1527 +struct dpaa2_qdma_engine {
1528 +       struct dma_device       dma_dev;
1529 +       u32                     n_chans;
1530 +       struct dpaa2_qdma_chan  chans[NUM_CH];
1531 +       bool                    qdma_wrtype_fixup;
1532 +
1533 +       struct dpaa2_qdma_priv *priv;
1534 +};
1535 +
1536 +/*
1537 + * dpaa2_qdma_priv - driver private data
1538 + */
1539 +struct dpaa2_qdma_priv {
1540 +       int dpqdma_id;
1541 +
1542 +       struct iommu_domain *iommu_domain;
1543 +       struct dpdmai_attr dpdmai_attr;
1544 +       struct device *dev;
1545 +       struct fsl_mc_io *mc_io;
1546 +       struct fsl_mc_device *dpdmai_dev;
1547 +
1548 +       struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
1549 +       struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_PRIO_NUM];
1550 +
1551 +       uint8_t num_pairs;
1552 +
1553 +       struct dpaa2_qdma_engine *dpaa2_qdma;
1554 +       struct dpaa2_qdma_priv_per_prio *ppriv;
1555 +};
1556 +
1557 +struct dpaa2_qdma_priv_per_prio {
1558 +       int req_fqid;
1559 +       int rsp_fqid;
1560 +       int prio;
1561 +
1562 +       struct dpaa2_io_store *store;
1563 +       struct dpaa2_io_notification_ctx nctx;
1564 +
1565 +       struct dpaa2_qdma_priv *priv;
1566 +};
1567 +
1568 +static struct soc_device_attribute soc_fixup_tuning[] = {
1569 +       { .family = "QorIQ LX2160A"},
1570 +       { },
1571 +};
1572 +
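+/*
+ * Illustrative note, not part of the original patch: soc_fixup_tuning[] is
+ * meant for soc_device_match(); a probe path could apply the LX2160A
+ * write-type fixup roughly as
+ *
+ *     if (soc_device_match(soc_fixup_tuning))
+ *             dpaa2_qdma->qdma_wrtype_fixup = true;
+ */
+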
1573 +/* FD pool size: one FD + 3 frame list entries + 2 source/destination descriptors */
1574 +#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
1575 +               sizeof(struct dpaa2_fl_entry) * 3 + \
1576 +               sizeof(struct dpaa2_qdma_sd_d) * 2)
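+
+/*
+ * Illustrative layout sketch, not part of the original patch: one FD pool
+ * allocation is assumed to be carved to back the three virt/bus address
+ * pairs of struct dpaa2_qdma_comp above:
+ *
+ *     fd_virt_addr   = dma_pool_alloc(fd_pool, GFP_NOWAIT, &fd_bus_addr);
+ *     fl_virt_addr   = fd_virt_addr + sizeof(struct dpaa2_fd);
+ *     desc_virt_addr = fl_virt_addr + 3 * sizeof(struct dpaa2_fl_entry);
+ */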
1577 +
1578 +#endif /* __DPAA2_QDMA_H */
1579 --- /dev/null
1580 +++ b/drivers/dma/dpaa2-qdma/dpdmai.c
1581 @@ -0,0 +1,515 @@
1582 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
1583 + *
1584 + * Redistribution and use in source and binary forms, with or without
1585 + * modification, are permitted provided that the following conditions are met:
1586 + * * Redistributions of source code must retain the above copyright
1587 + * notice, this list of conditions and the following disclaimer.
1588 + * * Redistributions in binary form must reproduce the above copyright
1589 + * notice, this list of conditions and the following disclaimer in the
1590 + * documentation and/or other materials provided with the distribution.
1591 + * * Neither the name of the above-listed copyright holders nor the
1592 + * names of any contributors may be used to endorse or promote products
1593 + * derived from this software without specific prior written permission.
1594 + *
1595 + *
1596 + * ALTERNATIVELY, this software may be distributed under the terms of the
1597 + * GNU General Public License ("GPL") as published by the Free Software
1598 + * Foundation, either version 2 of that License or (at your option) any
1599 + * later version.
1600 + *
1601 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1602 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1603 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1604 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
1605 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1606 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1607 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1608 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1609 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1610 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1611 + * POSSIBILITY OF SUCH DAMAGE.
1612 + */
1613 +#include <linux/types.h>
1614 +#include <linux/io.h>
1615 +#include "fsl_dpdmai.h"
1616 +#include "fsl_dpdmai_cmd.h"
1617 +#include <linux/fsl/mc.h>
1618 +
1619 +struct dpdmai_cmd_open {
1620 +       __le32 dpdmai_id;
1621 +};
1622 +
1623 +struct dpdmai_rsp_get_attributes {
1624 +       __le32 id;
1625 +       u8 num_of_priorities;
1626 +       u8 pad0[3];
1627 +       __le16 major;
1628 +       __le16 minor;
1629 +};
1630 +
1632 +struct dpdmai_cmd_queue {
1633 +       __le32 dest_id;
1634 +       u8 priority;
1635 +       u8 queue;
1636 +       u8 dest_type;
1637 +       u8 pad;
1638 +       __le64 user_ctx;
1639 +       union {
1640 +               __le32 options;
1641 +               __le32 fqid;
1642 +       };
1643 +};
1644 +
1645 +struct dpdmai_rsp_get_tx_queue {
1646 +       __le64 pad;
1647 +       __le32 fqid;
1648 +};
1649 +
1651 +int dpdmai_open(struct fsl_mc_io *mc_io,
1652 +               uint32_t cmd_flags,
1653 +               int dpdmai_id,
1654 +               uint16_t *token)
1655 +{
1656 +       struct fsl_mc_command cmd = { 0 };
1657 +       struct dpdmai_cmd_open *cmd_params;
1658 +       int err;
1659 +
1660 +       /* prepare command */
1661 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
1662 +                                         cmd_flags,
1663 +                                         0);
1664 +
1665 +       cmd_params = (struct dpdmai_cmd_open *)cmd.params;
1666 +       cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
1667 +
1668 +       /* send command to mc*/
1669 +       err = mc_send_command(mc_io, &cmd);
1670 +       if (err)
1671 +               return err;
1672 +
1673 +       /* retrieve response parameters */
1674 +       *token = mc_cmd_hdr_read_token(&cmd);
1675 +       return 0;
1676 +}
1677 +
1678 +int dpdmai_close(struct fsl_mc_io *mc_io,
1679 +                uint32_t cmd_flags,
1680 +                uint16_t token)
1681 +{
1682 +       struct fsl_mc_command cmd = { 0 };
1683 +
1684 +       /* prepare command */
1685 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
1686 +                                         cmd_flags, token);
1687 +
1688 +       /* send command to mc*/
1689 +       return mc_send_command(mc_io, &cmd);
1690 +}
1691 +
1692 +int dpdmai_create(struct fsl_mc_io *mc_io,
1693 +                 uint32_t cmd_flags,
1694 +                 const struct dpdmai_cfg *cfg,
1695 +                 uint16_t *token)
1696 +{
1697 +       struct fsl_mc_command cmd = { 0 };
1698 +       int err;
1699 +
1700 +       /* prepare command */
1701 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
1702 +                                         cmd_flags,
1703 +                                         0);
1704 +       DPDMAI_CMD_CREATE(cmd, cfg);
1705 +
1706 +       /* send command to mc*/
1707 +       err = mc_send_command(mc_io, &cmd);
1708 +       if (err)
1709 +               return err;
1710 +
1711 +       /* retrieve response parameters */
1712 +       *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
1713 +
1714 +       return 0;
1715 +}
1716 +
1717 +int dpdmai_destroy(struct fsl_mc_io *mc_io,
1718 +                  uint32_t cmd_flags,
1719 +                  uint16_t token)
1720 +{
1721 +       struct fsl_mc_command cmd = { 0 };
1722 +
1723 +       /* prepare command */
1724 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
1725 +                                         cmd_flags,
1726 +                                         token);
1727 +
1728 +       /* send command to mc*/
1729 +       return mc_send_command(mc_io, &cmd);
1730 +}
1731 +
1732 +int dpdmai_enable(struct fsl_mc_io *mc_io,
1733 +                 uint32_t cmd_flags,
1734 +                 uint16_t token)
1735 +{
1736 +       struct fsl_mc_command cmd = { 0 };
1737 +
1738 +       /* prepare command */
1739 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
1740 +                                         cmd_flags,
1741 +                                         token);
1742 +
1743 +       /* send command to mc*/
1744 +       return mc_send_command(mc_io, &cmd);
1745 +}
1746 +
1747 +int dpdmai_disable(struct fsl_mc_io *mc_io,
1748 +                  uint32_t cmd_flags,
1749 +                  uint16_t token)
1750 +{
1751 +       struct fsl_mc_command cmd = { 0 };
1752 +
1753 +       /* prepare command */
1754 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
1755 +                                         cmd_flags,
1756 +                                         token);
1757 +
1758 +       /* send command to mc*/
1759 +       return mc_send_command(mc_io, &cmd);
1760 +}
1761 +
1762 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
1763 +                     uint32_t cmd_flags,
1764 +                     uint16_t token,
1765 +                     int *en)
1766 +{
1767 +       struct fsl_mc_command cmd = { 0 };
1768 +       int err;
1769 +       /* prepare command */
1770 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
1771 +                                         cmd_flags,
1772 +                                         token);
1773 +
1774 +       /* send command to mc*/
1775 +       err = mc_send_command(mc_io, &cmd);
1776 +       if (err)
1777 +               return err;
1778 +
1779 +       /* retrieve response parameters */
1780 +       DPDMAI_RSP_IS_ENABLED(cmd, *en);
1781 +
1782 +       return 0;
1783 +}
1784 +
1785 +int dpdmai_reset(struct fsl_mc_io *mc_io,
1786 +                uint32_t cmd_flags,
1787 +                uint16_t token)
1788 +{
1789 +       struct fsl_mc_command cmd = { 0 };
1790 +
1791 +       /* prepare command */
1792 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
1793 +                                         cmd_flags,
1794 +                                         token);
1795 +
1796 +       /* send command to mc*/
1797 +       return mc_send_command(mc_io, &cmd);
1798 +}
1799 +
1800 +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
1801 +                  uint32_t cmd_flags,
1802 +                  uint16_t token,
1803 +                  uint8_t irq_index,
1804 +                  int *type,
1805 +                  struct dpdmai_irq_cfg        *irq_cfg)
1806 +{
1807 +       struct fsl_mc_command cmd = { 0 };
1808 +       int err;
1809 +
1810 +       /* prepare command */
1811 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ,
1812 +                                         cmd_flags,
1813 +                                         token);
1814 +       DPDMAI_CMD_GET_IRQ(cmd, irq_index);
1815 +
1816 +       /* send command to mc*/
1817 +       err = mc_send_command(mc_io, &cmd);
1818 +       if (err)
1819 +               return err;
1820 +
1821 +       /* retrieve response parameters */
1822 +       DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg);
1823 +
1824 +       return 0;
1825 +}
1826 +
1827 +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
1828 +                  uint32_t cmd_flags,
1829 +                  uint16_t token,
1830 +                  uint8_t irq_index,
1831 +                  struct dpdmai_irq_cfg *irq_cfg)
1832 +{
1833 +       struct fsl_mc_command cmd = { 0 };
1834 +
1835 +       /* prepare command */
1836 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
1837 +                                         cmd_flags,
1838 +                                         token);
1839 +       DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
1840 +
1841 +       /* send command to mc*/
1842 +       return mc_send_command(mc_io, &cmd);
1843 +}
1844 +
1845 +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
1846 +                         uint32_t cmd_flags,
1847 +                         uint16_t token,
1848 +                         uint8_t irq_index,
1849 +                         uint8_t *en)
1850 +{
1851 +       struct fsl_mc_command cmd = { 0 };
1852 +       int err;
1853 +
1854 +       /* prepare command */
1855 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE,
1856 +                                         cmd_flags,
1857 +                                         token);
1858 +       DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
1859 +
1860 +       /* send command to mc*/
1861 +       err = mc_send_command(mc_io, &cmd);
1862 +       if (err)
1863 +               return err;
1864 +
1865 +       /* retrieve response parameters */
1866 +       DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en);
1867 +
1868 +       return 0;
1869 +}
1870 +
1871 +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
1872 +                         uint32_t cmd_flags,
1873 +                         uint16_t token,
1874 +                         uint8_t irq_index,
1875 +                         uint8_t en)
1876 +{
1877 +       struct fsl_mc_command cmd = { 0 };
1878 +
1879 +       /* prepare command */
1880 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
1881 +                                         cmd_flags,
1882 +                                         token);
1883 +       DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
1884 +
1885 +       /* send command to mc*/
1886 +       return mc_send_command(mc_io, &cmd);
1887 +}
1888 +
1889 +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
1890 +                       uint32_t cmd_flags,
1891 +                       uint16_t token,
1892 +                       uint8_t irq_index,
1893 +                       uint32_t *mask)
1894 +{
1895 +       struct fsl_mc_command cmd = { 0 };
1896 +       int err;
1897 +
1898 +       /* prepare command */
1899 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK,
1900 +                                         cmd_flags,
1901 +                                         token);
1902 +       DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index);
1903 +
1904 +       /* send command to mc*/
1905 +       err = mc_send_command(mc_io, &cmd);
1906 +       if (err)
1907 +               return err;
1908 +
1909 +       /* retrieve response parameters */
1910 +       DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask);
1911 +
1912 +       return 0;
1913 +}
1914 +
1915 +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
1916 +                       uint32_t cmd_flags,
1917 +                       uint16_t token,
1918 +                       uint8_t irq_index,
1919 +                       uint32_t mask)
1920 +{
1921 +       struct fsl_mc_command cmd = { 0 };
1922 +
1923 +       /* prepare command */
1924 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
1925 +                                         cmd_flags,
1926 +                                         token);
1927 +       DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
1928 +
1929 +       /* send command to mc*/
1930 +       return mc_send_command(mc_io, &cmd);
1931 +}
1932 +
1933 +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
1934 +                         uint32_t cmd_flags,
1935 +                         uint16_t token,
1936 +                         uint8_t irq_index,
1937 +                         uint32_t *status)
1938 +{
1939 +       struct fsl_mc_command cmd = { 0 };
1940 +       int err;
1941 +
1942 +       /* prepare command */
1943 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS,
1944 +                                         cmd_flags,
1945 +                                         token);
1946 +       DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
1947 +
1948 +       /* send command to mc*/
1949 +       err = mc_send_command(mc_io, &cmd);
1950 +       if (err)
1951 +               return err;
1952 +
1953 +       /* retrieve response parameters */
1954 +       DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status);
1955 +
1956 +       return 0;
1957 +}
1958 +
1959 +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
1960 +                           uint32_t cmd_flags,
1961 +                           uint16_t token,
1962 +                           uint8_t irq_index,
1963 +                           uint32_t status)
1964 +{
1965 +       struct fsl_mc_command cmd = { 0 };
1966 +
1967 +       /* prepare command */
1968 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
1969 +                                         cmd_flags,
1970 +                                         token);
1971 +       DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
1972 +
1973 +       /* send command to mc*/
1974 +       return mc_send_command(mc_io, &cmd);
1975 +}
1976 +
1977 +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
1978 +                         uint32_t cmd_flags,
1979 +                         uint16_t token,
1980 +                         struct dpdmai_attr *attr)
1981 +{
1982 +       struct fsl_mc_command cmd = { 0 };
1983 +       int err;
1984 +       struct dpdmai_rsp_get_attributes *rsp_params;
1985 +
1986 +       /* prepare command */
1987 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
1988 +                                         cmd_flags,
1989 +                                         token);
1990 +
1991 +       /* send command to mc*/
1992 +       err = mc_send_command(mc_io, &cmd);
1993 +       if (err)
1994 +               return err;
1995 +
1996 +       /* retrieve response parameters */
1997 +       rsp_params = (struct dpdmai_rsp_get_attributes *)cmd.params;
1998 +       attr->id = le32_to_cpu(rsp_params->id);
1999 +       attr->version.major = le16_to_cpu(rsp_params->major);
2000 +       attr->version.minor = le16_to_cpu(rsp_params->minor);
2001 +       attr->num_of_priorities = rsp_params->num_of_priorities;
2002 +
2004 +       return 0;
2005 +}
2006 +
2007 +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
2008 +                       uint32_t cmd_flags,
2009 +                       uint16_t token,
2010 +                       uint8_t priority,
2011 +                       const struct dpdmai_rx_queue_cfg *cfg)
2012 +{
2013 +       struct fsl_mc_command cmd = { 0 };
2014 +       struct dpdmai_cmd_queue *cmd_params;
2015 +
2016 +       /* prepare command */
2017 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
2018 +                                         cmd_flags,
2019 +                                         token);
2020 +
2021 +       cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
2022 +       cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
2023 +       cmd_params->priority = cfg->dest_cfg.priority;
2024 +       cmd_params->queue = priority;
2025 +       cmd_params->dest_type = cfg->dest_cfg.dest_type;
2026 +       cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
2027 +       cmd_params->options = cpu_to_le32(cfg->options);
2028 +
2030 +       /* send command to mc*/
2031 +       return mc_send_command(mc_io, &cmd);
2032 +}
2033 +
2034 +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
2035 +                       uint32_t cmd_flags,
2036 +                       uint16_t token,
2037 +                       uint8_t priority, struct dpdmai_rx_queue_attr *attr)
2038 +{
2039 +       struct fsl_mc_command cmd = { 0 };
2040 +       struct dpdmai_cmd_queue *cmd_params;
2041 +       int err;
2042 +
2043 +       /* prepare command */
2044 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
2045 +                                         cmd_flags,
2046 +                                         token);
2047 +
2048 +       cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
2049 +       cmd_params->queue = priority;
2050 +
2051 +       /* send command to mc*/
2052 +       err = mc_send_command(mc_io, &cmd);
2053 +       if (err)
2054 +               return err;
2055 +
2056 +       /* retrieve response parameters */
2057 +       attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
2058 +       attr->dest_cfg.priority = cmd_params->priority;
2059 +       attr->dest_cfg.dest_type = cmd_params->dest_type;
2060 +       attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
2061 +       attr->fqid = le32_to_cpu(cmd_params->fqid);
2062 +
2063 +       return 0;
2064 +}
2065 +
2066 +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
2067 +                       uint32_t cmd_flags,
2068 +                       uint16_t token,
2069 +                       uint8_t priority,
2070 +                       struct dpdmai_tx_queue_attr *attr)
2071 +{
2072 +       struct fsl_mc_command cmd = { 0 };
2073 +       struct dpdmai_cmd_queue *cmd_params;
2074 +       struct dpdmai_rsp_get_tx_queue *rsp_params;
2075 +       int err;
2076 +
2077 +       /* prepare command */
2078 +       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
2079 +                                         cmd_flags,
2080 +                                         token);
2081 +
2082 +       cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
2083 +       cmd_params->queue = priority;
2084 +
2085 +       /* send command to mc*/
2086 +       err = mc_send_command(mc_io, &cmd);
2087 +       if (err)
2088 +               return err;
2089 +
2090 +       /* retrieve response parameters */
2092 +       rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
2093 +       attr->fqid = le32_to_cpu(rsp_params->fqid);
2094 +
2095 +       return 0;
2096 +}
2097 --- /dev/null
2098 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
2099 @@ -0,0 +1,521 @@
2100 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
2101 + *
2102 + * Redistribution and use in source and binary forms, with or without
2103 + * modification, are permitted provided that the following conditions are met:
2104 + * * Redistributions of source code must retain the above copyright
2105 + * notice, this list of conditions and the following disclaimer.
2106 + * * Redistributions in binary form must reproduce the above copyright
2107 + * notice, this list of conditions and the following disclaimer in the
2108 + * documentation and/or other materials provided with the distribution.
2109 + * * Neither the name of the above-listed copyright holders nor the
2110 + * names of any contributors may be used to endorse or promote products
2111 + * derived from this software without specific prior written permission.
2112 + *
2113 + *
2114 + * ALTERNATIVELY, this software may be distributed under the terms of the
2115 + * GNU General Public License ("GPL") as published by the Free Software
2116 + * Foundation, either version 2 of that License or (at your option) any
2117 + * later version.
2118 + *
2119 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2120 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2121 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2122 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
2123 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2124 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2125 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2126 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2127 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2128 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2129 + * POSSIBILITY OF SUCH DAMAGE.
2130 + */
2131 +#ifndef __FSL_DPDMAI_H
2132 +#define __FSL_DPDMAI_H
2133 +
2134 +struct fsl_mc_io;
2135 +
2136 +/* Data Path DMA Interface API
2137 + * Contains initialization APIs and runtime control APIs for DPDMAI
2138 + */
2139 +
2140 +/* General DPDMAI macros */
2141 +
2142 +/**
2143 + * Maximum number of Tx/Rx priorities per DPDMAI object
2144 + */
2145 +#define DPDMAI_PRIO_NUM                2
2146 +
2147 +/**
2148 + * All queues considered; see dpdmai_set_rx_queue()
2149 + */
2150 +#define DPDMAI_ALL_QUEUES      (uint8_t)(-1)
2151 +
2152 +/**
2153 + * dpdmai_open() - Open a control session for the specified object
2154 + * @mc_io:     Pointer to MC portal's I/O object
2155 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2156 + * @dpdmai_id: DPDMAI unique ID
2157 + * @token:     Returned token; use in subsequent API calls
2158 + *
2159 + * This function can be used to open a control session for an
2160 + * already created object; an object may have been declared in
2161 + * the DPL or by calling the dpdmai_create() function.
2162 + * This function returns a unique authentication token,
2163 + * associated with the specific object ID and the specific MC
2164 + * portal; this token must be used in all subsequent commands for
2165 + * this specific object.
2166 + *
2167 + * Return:     '0' on Success; Error code otherwise.
2168 + */
2169 +int dpdmai_open(struct fsl_mc_io       *mc_io,
2170 +               uint32_t                cmd_flags,
2171 +               int                     dpdmai_id,
2172 +               uint16_t                *token);
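+
+/*
+ * Illustrative usage sketch, not part of the original patch; error handling
+ * is abbreviated and 'mc_io'/'dpdmai_id' are assumed to come from the fsl-mc
+ * bus probe, as in dpaa2_qdma_probe():
+ *
+ *     uint16_t token;
+ *     int err;
+ *
+ *     err = dpdmai_open(mc_io, 0, dpdmai_id, &token);
+ *     if (err)
+ *             return err;
+ *     ...
+ *     dpdmai_close(mc_io, 0, token);
+ */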
2173 +
2174 +/**
2175 + * dpdmai_close() - Close the control session of the object
2176 + * @mc_io:     Pointer to MC portal's I/O object
2177 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2178 + * @token:     Token of DPDMAI object
2179 + *
2180 + * After this function is called, no further operations are
2181 + * allowed on the object without opening a new control session.
2182 + *
2183 + * Return:     '0' on Success; Error code otherwise.
2184 + */
2185 +int dpdmai_close(struct fsl_mc_io      *mc_io,
2186 +                uint32_t               cmd_flags,
2187 +                uint16_t               token);
2188 +
2189 +/**
2190 + * struct dpdmai_cfg - Structure representing DPDMAI configuration
2191 + * @priorities: Priorities for the DMA hardware processing; valid priorities are
2192 + *     configured with values 1-8; the entry following the last valid entry
2193 + *     should be configured with 0
2194 + */
2195 +struct dpdmai_cfg {
2196 +       uint8_t priorities[DPDMAI_PRIO_NUM];
2197 +};
2198 +
2199 +/**
2200 + * dpdmai_create() - Create the DPDMAI object
2201 + * @mc_io:     Pointer to MC portal's I/O object
2202 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2203 + * @cfg:       Configuration structure
2204 + * @token:     Returned token; use in subsequent API calls
2205 + *
2206 + * Create the DPDMAI object, allocate required resources and
2207 + * perform required initialization.
2208 + *
2209 + * The object can be created either by declaring it in the
2210 + * DPL file, or by calling this function.
2211 + *
2212 + * This function returns a unique authentication token,
2213 + * associated with the specific object ID and the specific MC
2214 + * portal; this token must be used in all subsequent calls to
2215 + * this specific object. For objects that are created using the
2216 + * DPL file, call dpdmai_open() function to get an authentication
2217 + * token first.
2218 + *
2219 + * Return:     '0' on Success; Error code otherwise.
2220 + */
2221 +int dpdmai_create(struct fsl_mc_io             *mc_io,
2222 +                 uint32_t                      cmd_flags,
2223 +                 const struct dpdmai_cfg       *cfg,
2224 +                 uint16_t                      *token);
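+
+/*
+ * Illustrative usage sketch, not part of the original patch: create a DPDMAI
+ * object with two valid priorities (valid values are 1-8):
+ *
+ *     struct dpdmai_cfg cfg = { .priorities = { 1, 2 } };
+ *     uint16_t token;
+ *
+ *     err = dpdmai_create(mc_io, 0, &cfg, &token);
+ */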
2225 +
2226 +/**
2227 + * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
2228 + * @mc_io:     Pointer to MC portal's I/O object
2229 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2230 + * @token:     Token of DPDMAI object
2231 + *
2232 + * Return:     '0' on Success; error code otherwise.
2233 + */
2234 +int dpdmai_destroy(struct fsl_mc_io    *mc_io,
2235 +                  uint32_t             cmd_flags,
2236 +                  uint16_t             token);
2237 +
2238 +/**
2239 + * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
2240 + * @mc_io:     Pointer to MC portal's I/O object
2241 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2242 + * @token:     Token of DPDMAI object
2243 + *
2244 + * Return:     '0' on Success; Error code otherwise.
2245 + */
2246 +int dpdmai_enable(struct fsl_mc_io     *mc_io,
2247 +                 uint32_t              cmd_flags,
2248 +                 uint16_t              token);
2249 +
2250 +/**
2251 + * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
2252 + * @mc_io:     Pointer to MC portal's I/O object
2253 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2254 + * @token:     Token of DPDMAI object
2255 + *
2256 + * Return:     '0' on Success; Error code otherwise.
2257 + */
2258 +int dpdmai_disable(struct fsl_mc_io    *mc_io,
2259 +                  uint32_t             cmd_flags,
2260 +                  uint16_t             token);
2261 +
2262 +/**
2263 + * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
2264 + * @mc_io:     Pointer to MC portal's I/O object
2265 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2266 + * @token:     Token of DPDMAI object
2267 + * @en:                Returns '1' if object is enabled; '0' otherwise
2268 + *
2269 + * Return:     '0' on Success; Error code otherwise.
2270 + */
2271 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
2272 +                     uint32_t          cmd_flags,
2273 +                     uint16_t          token,
2274 +                     int               *en);
2275 +
2276 +/**
2277 + * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
2278 + * @mc_io:     Pointer to MC portal's I/O object
2279 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2280 + * @token:     Token of DPDMAI object
2281 + *
2282 + * Return:     '0' on Success; Error code otherwise.
2283 + */
2284 +int dpdmai_reset(struct fsl_mc_io      *mc_io,
2285 +                uint32_t               cmd_flags,
2286 +                uint16_t               token);
2287 +
2288 +/**
2289 + * struct dpdmai_irq_cfg - IRQ configuration
2290 + * @addr:      Address that must be written to signal a message-based interrupt
2291 + * @val:       Value to write into irq_addr address
2292 + * @irq_num: A user-defined number associated with this IRQ
2293 + */
2294 +struct dpdmai_irq_cfg {
2295 +       uint64_t        addr;
2296 +       uint32_t        val;
2297 +       int             irq_num;
2298 +};
2299 +
2300 +/**
2301 + * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt.
2302 + * @mc_io:     Pointer to MC portal's I/O object
2303 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2304 + * @token:     Token of DPDMAI object
2305 + * @irq_index: Identifies the interrupt index to configure
2306 + * @irq_cfg:   IRQ configuration
2307 + *
2308 + * Return:     '0' on Success; Error code otherwise.
2309 + */
2310 +int dpdmai_set_irq(struct fsl_mc_io            *mc_io,
2311 +                  uint32_t                     cmd_flags,
2312 +                  uint16_t                     token,
2313 +                  uint8_t                      irq_index,
2314 +                  struct dpdmai_irq_cfg        *irq_cfg);
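+
+/*
+ * Illustrative sketch, not part of the original patch: program a message
+ * interrupt; 'msi_addr' and 'msi_data' are hypothetical values that would
+ * normally come from the fsl-mc allocated interrupt:
+ *
+ *     struct dpdmai_irq_cfg irq_cfg = {
+ *             .addr    = msi_addr,
+ *             .val     = msi_data,
+ *             .irq_num = 0,
+ *     };
+ *
+ *     err = dpdmai_set_irq(mc_io, 0, token, 0, &irq_cfg);
+ */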
2315 +
2316 +/**
2317 + * dpdmai_get_irq() - Get IRQ information from the DPDMAI
2318 + *
2319 + * @mc_io:     Pointer to MC portal's I/O object
2320 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2321 + * @token:     Token of DPDMAI object
2322 + * @irq_index: The interrupt index to configure
2323 + * @type:      Interrupt type: 0 represents message interrupt
2324 + *             type (both irq_addr and irq_val are valid)
2325 + * @irq_cfg:   IRQ attributes
2326 + *
2327 + * Return:     '0' on Success; Error code otherwise.
2328 + */
2329 +int dpdmai_get_irq(struct fsl_mc_io            *mc_io,
2330 +                  uint32_t                     cmd_flags,
2331 +                  uint16_t                     token,
2332 +                  uint8_t                      irq_index,
2333 +                  int                          *type,
2334 +                  struct dpdmai_irq_cfg        *irq_cfg);
2335 +
2336 +/**
2337 + * dpdmai_set_irq_enable() - Set overall interrupt state.
2338 + * @mc_io:     Pointer to MC portal's I/O object
2339 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2340 + * @token:             Token of DPDMAI object
2341 + * @irq_index: The interrupt index to configure
2342 + * @en:                        Interrupt state - enable = 1, disable = 0
2343 + *
2344 + * Allows GPP software to control when interrupts are generated.
2345 + * Each interrupt can have up to 32 causes. The enable/disable controls the
2346 + * overall interrupt state. If the interrupt is disabled, no cause will
2347 + * trigger an interrupt.
2348 + *
2349 + * Return:     '0' on Success; Error code otherwise.
2350 + */
2351 +int dpdmai_set_irq_enable(struct fsl_mc_io     *mc_io,
2352 +                         uint32_t              cmd_flags,
2353 +                         uint16_t              token,
2354 +                         uint8_t               irq_index,
2355 +                         uint8_t               en);
2356 +
2357 +/**
2358 + * dpdmai_get_irq_enable() - Get overall interrupt state
2359 + * @mc_io:     Pointer to MC portal's I/O object
2360 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2361 + * @token:             Token of DPDMAI object
2362 + * @irq_index: The interrupt index to configure
2363 + * @en:                        Returned interrupt state - enable = 1, disable = 0
2364 + *
2365 + * Return:     '0' on Success; Error code otherwise.
2366 + */
2367 +int dpdmai_get_irq_enable(struct fsl_mc_io     *mc_io,
2368 +                         uint32_t              cmd_flags,
2369 +                         uint16_t              token,
2370 +                         uint8_t               irq_index,
2371 +                         uint8_t               *en);
2372 +
2373 +/**
2374 + * dpdmai_set_irq_mask() - Set interrupt mask.
2375 + * @mc_io:     Pointer to MC portal's I/O object
2376 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2377 + * @token:             Token of DPDMAI object
2378 + * @irq_index: The interrupt index to configure
2379 + * @mask:              event mask to trigger interrupt;
2380 + *                             each bit:
2381 + *                                     0 = ignore event
2382 + *                                     1 = consider event for asserting IRQ
2383 + *
2384 + * Every interrupt can have up to 32 causes and the interrupt model supports
2385 + * masking/unmasking each cause independently.
2386 + *
2387 + * Return:     '0' on Success; Error code otherwise.
2388 + */
2389 +int dpdmai_set_irq_mask(struct fsl_mc_io       *mc_io,
2390 +                       uint32_t                cmd_flags,
2391 +                       uint16_t                token,
2392 +                       uint8_t         irq_index,
2393 +                       uint32_t                mask);
2394 +
2395 +/**
2396 + * dpdmai_get_irq_mask() - Get interrupt mask.
2397 + * @mc_io:     Pointer to MC portal's I/O object
2398 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2399 + * @token:             Token of DPDMAI object
2400 + * @irq_index: The interrupt index to configure
2401 + * @mask:              Returned event mask to trigger interrupt
2402 + *
2403 + * Every interrupt can have up to 32 causes and the interrupt model supports
2404 + * masking/unmasking each cause independently.
2405 + *
2406 + * Return:     '0' on Success; Error code otherwise.
2407 + */
2408 +int dpdmai_get_irq_mask(struct fsl_mc_io       *mc_io,
2409 +                       uint32_t                cmd_flags,
2410 +                       uint16_t                token,
2411 +                       uint8_t         irq_index,
2412 +                       uint32_t                *mask);
2413 +
2414 +/**
2415 + * dpdmai_get_irq_status() - Get the current status of any pending interrupts
2416 + * @mc_io:     Pointer to MC portal's I/O object
2417 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2418 + * @token:             Token of DPDMAI object
2419 + * @irq_index: The interrupt index to configure
2420 + * @status:            Returned interrupt status - one bit per cause:
2421 + *                                     0 = no interrupt pending
2422 + *                                     1 = interrupt pending
2423 + *
2424 + * Return:     '0' on Success; Error code otherwise.
2425 + */
2426 +int dpdmai_get_irq_status(struct fsl_mc_io     *mc_io,
2427 +                         uint32_t              cmd_flags,
2428 +                         uint16_t              token,
2429 +                         uint8_t               irq_index,
2430 +                         uint32_t              *status);
2431 +
2432 +/**
2433 + * dpdmai_clear_irq_status() - Clear a pending interrupt's status
2434 + * @mc_io:     Pointer to MC portal's I/O object
2435 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2436 + * @token:     Token of DPDMAI object
2437 + * @irq_index: The interrupt index to configure
2438 + * @status:    bits to clear (W1C) - one bit per cause:
2439 + *                     0 = don't change
2440 + *                     1 = clear status bit
2441 + *
2442 + * Return:     '0' on Success; Error code otherwise.
2443 + */
2444 +int dpdmai_clear_irq_status(struct fsl_mc_io   *mc_io,
2445 +                           uint32_t            cmd_flags,
2446 +                           uint16_t            token,
2447 +                           uint8_t             irq_index,
2448 +                           uint32_t            status);
2449 +
2450 +/**
2451 + * struct dpdmai_attr - Structure representing DPDMAI attributes
2452 + * @id: DPDMAI object ID
2453 + * @version: DPDMAI version
2454 + * @num_of_priorities: number of priorities
2455 + */
2456 +struct dpdmai_attr {
2457 +       int     id;
2458 +       /**
2459 +        * struct version - DPDMAI version
2460 +        * @major: DPDMAI major version
2461 +        * @minor: DPDMAI minor version
2462 +        */
2463 +       struct {
2464 +               uint16_t major;
2465 +               uint16_t minor;
2466 +       } version;
2467 +       uint8_t num_of_priorities;
2468 +};
2469 +
2470 +/**
2471 + * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
2472 + * @mc_io:     Pointer to MC portal's I/O object
2473 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2474 + * @token:     Token of DPDMAI object
2475 + * @attr:      Returned object's attributes
2476 + *
2477 + * Return:     '0' on Success; Error code otherwise.
2478 + */
2479 +int dpdmai_get_attributes(struct fsl_mc_io     *mc_io,
2480 +                         uint32_t              cmd_flags,
2481 +                         uint16_t              token,
2482 +                         struct dpdmai_attr    *attr);
2483 +
2484 +/**
2485 + * enum dpdmai_dest - DPDMAI destination types
2486 + * @DPDMAI_DEST_NONE: Unassigned destination; the queue is set in parked mode
2487 + *     and does not generate FQDAN notifications; user is expected to dequeue
2488 + *     from the queue based on polling or other user-defined method
2489 + * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
2490 + *     notifications to the specified DPIO; user is expected to dequeue
2491 + *     from the queue only after notification is received
2492 + * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
2493 + *     FQDAN notifications, but is connected to the specified DPCON object;
2494 + *     user is expected to dequeue from the DPCON channel
2495 + */
2496 +enum dpdmai_dest {
2497 +       DPDMAI_DEST_NONE = 0,
2498 +       DPDMAI_DEST_DPIO = 1,
2499 +       DPDMAI_DEST_DPCON = 2
2500 +};
2501 +
2502 +/**
2503 + * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
2504 + * @dest_type: Destination type
2505 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
2506 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
2507 + *     are 0-1 or 0-7, depending on the number of priorities in that
2508 + *     channel; not relevant for 'DPDMAI_DEST_NONE' option
2509 + */
2510 +struct dpdmai_dest_cfg {
2511 +       enum dpdmai_dest        dest_type;
2512 +       int                     dest_id;
2513 +       uint8_t                 priority;
2514 +};
2515 +
2516 +/* DPDMAI queue modification options */
2517 +
2518 +/**
2519 + * Select to modify the user's context associated with the queue
2520 + */
2521 +#define DPDMAI_QUEUE_OPT_USER_CTX      0x00000001
2522 +
2523 +/**
2524 + * Select to modify the queue's destination
2525 + */
2526 +#define DPDMAI_QUEUE_OPT_DEST          0x00000002
2527 +
2528 +/**
2529 + * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
2530 + * @options: Flags representing the suggested modifications to the queue;
2531 + *     Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
2532 + * @user_ctx: User context value provided in the frame descriptor of each
2533 + *     dequeued frame;
2534 + *     valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
2535 + * @dest_cfg: Queue destination parameters;
2536 + *     valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
2537 + */
2538 +struct dpdmai_rx_queue_cfg {
2539 +       uint32_t                options;
2540 +       uint64_t                user_ctx;
2541 +       struct dpdmai_dest_cfg  dest_cfg;
2543 +};
2544 +
2545 +/**
2546 + * dpdmai_set_rx_queue() - Set Rx queue configuration
2547 + * @mc_io:     Pointer to MC portal's I/O object
2548 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2549 + * @token:     Token of DPDMAI object
2550 + * @priority:  Select the queue relative to number of
2551 + *                     priorities configured at DPDMAI creation; use
2552 + *                     DPDMAI_ALL_QUEUES to configure all Rx queues
2553 + *                     identically.
2554 + * @cfg:       Rx queue configuration
2555 + *
2556 + * Return:     '0' on Success; Error code otherwise.
2557 + */
2558 +int dpdmai_set_rx_queue(struct fsl_mc_io                       *mc_io,
2559 +                       uint32_t                                cmd_flags,
2560 +                       uint16_t                                token,
2561 +                       uint8_t                                 priority,
2562 +                       const struct dpdmai_rx_queue_cfg        *cfg);
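+
+/*
+ * Illustrative usage sketch, not part of the original patch: steer Rx
+ * completions of priority 0 to a DPIO so that FQDAN notifications are
+ * generated; 'dpio_id' and 'my_ctx' are hypothetical values:
+ *
+ *     struct dpdmai_rx_queue_cfg rx_cfg = {
+ *             .options  = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST,
+ *             .user_ctx = my_ctx,
+ *             .dest_cfg = {
+ *                     .dest_type = DPDMAI_DEST_DPIO,
+ *                     .dest_id   = dpio_id,
+ *                     .priority  = 0,
+ *             },
+ *     };
+ *
+ *     err = dpdmai_set_rx_queue(mc_io, 0, token, 0, &rx_cfg);
+ */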
2563 +
2564 +/**
2565 + * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
2566 + * @user_ctx:  User context value provided in the frame descriptor of each
2567 + *      dequeued frame
2568 + * @dest_cfg: Queue destination configuration
2569 + * @fqid: Virtual FQID value to be used for dequeue operations
2570 + */
2571 +struct dpdmai_rx_queue_attr {
2572 +       uint64_t                user_ctx;
2573 +       struct dpdmai_dest_cfg  dest_cfg;
2574 +       uint32_t                fqid;
2575 +};
2576 +
2577 +/**
2578 + * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
2579 + * @mc_io:     Pointer to MC portal's I/O object
2580 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2581 + * @token:     Token of DPDMAI object
2582 + * @priority:  Select the queue relative to number of
2583 + *                             priorities configured at DPDMAI creation
2584 + * @attr:      Returned Rx queue attributes
2585 + *
2586 + * Return:     '0' on Success; Error code otherwise.
2587 + */
2588 +int dpdmai_get_rx_queue(struct fsl_mc_io               *mc_io,
2589 +                       uint32_t                        cmd_flags,
2590 +                       uint16_t                        token,
2591 +                       uint8_t                         priority,
2592 +                       struct dpdmai_rx_queue_attr     *attr);
2593 +
2594 +/**
2595 + * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
2596 + * @fqid: Virtual FQID to be used for sending frames to DMA hardware
2597 + */
2599 +struct dpdmai_tx_queue_attr {
2600 +       uint32_t fqid;
2601 +};
2602 +
2603 +/**
2604 + * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
2605 + * @mc_io:     Pointer to MC portal's I/O object
2606 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2607 + * @token:     Token of DPDMAI object
2608 + * @priority:  Select the queue relative to number of
2609 + *                     priorities configured at DPDMAI creation
2610 + * @attr:      Returned Tx queue attributes
2611 + *
2612 + * Return:     '0' on Success; Error code otherwise.
2613 + */
2614 +int dpdmai_get_tx_queue(struct fsl_mc_io               *mc_io,
2615 +                       uint32_t                        cmd_flags,
2616 +                       uint16_t                        token,
2617 +                       uint8_t                         priority,
2618 +                       struct dpdmai_tx_queue_attr     *attr);
2619 +
2620 +#endif /* __FSL_DPDMAI_H */
2621 --- /dev/null
2622 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
2623 @@ -0,0 +1,222 @@
2624 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
2625 + *
2626 + * Redistribution and use in source and binary forms, with or without
2627 + * modification, are permitted provided that the following conditions are met:
2628 + * * Redistributions of source code must retain the above copyright
2629 + * notice, this list of conditions and the following disclaimer.
2630 + * * Redistributions in binary form must reproduce the above copyright
2631 + * notice, this list of conditions and the following disclaimer in the
2632 + * documentation and/or other materials provided with the distribution.
2633 + * * Neither the name of the above-listed copyright holders nor the
2634 + * names of any contributors may be used to endorse or promote products
2635 + * derived from this software without specific prior written permission.
2636 + *
2637 + *
2638 + * ALTERNATIVELY, this software may be distributed under the terms of the
2639 + * GNU General Public License ("GPL") as published by the Free Software
2640 + * Foundation, either version 2 of that License or (at your option) any
2641 + * later version.
2642 + *
2643 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2644 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2645 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2646 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
2647 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2648 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2649 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2650 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2651 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2652 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2653 + * POSSIBILITY OF SUCH DAMAGE.
2654 + */
2655 +#ifndef _FSL_DPDMAI_CMD_H
2656 +#define _FSL_DPDMAI_CMD_H
2657 +
2658 +/* DPDMAI Version */
2659 +#define DPDMAI_VER_MAJOR                               2
2660 +#define DPDMAI_VER_MINOR                               2
2661 +
2662 +#define DPDMAI_CMD_BASE_VERSION                        0
2663 +#define DPDMAI_CMD_ID_OFFSET                           4
2664 +
2665 +/* Command IDs */
2666 +#define DPDMAI_CMDID_CLOSE                           ((0x800 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2667 +#define DPDMAI_CMDID_OPEN                            ((0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2668 +#define DPDMAI_CMDID_CREATE                          ((0x90E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2669 +#define DPDMAI_CMDID_DESTROY                         ((0x900 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2670 +
2671 +#define DPDMAI_CMDID_ENABLE                          ((0x002 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2672 +#define DPDMAI_CMDID_DISABLE                         ((0x003 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2673 +#define DPDMAI_CMDID_GET_ATTR                        ((0x004 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2674 +#define DPDMAI_CMDID_RESET                           ((0x005 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2675 +#define DPDMAI_CMDID_IS_ENABLED                      ((0x006 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2676 +
2677 +#define DPDMAI_CMDID_SET_IRQ                         ((0x010 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2678 +#define DPDMAI_CMDID_GET_IRQ                         ((0x011 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2679 +#define DPDMAI_CMDID_SET_IRQ_ENABLE                  ((0x012 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2680 +#define DPDMAI_CMDID_GET_IRQ_ENABLE                  ((0x013 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2681 +#define DPDMAI_CMDID_SET_IRQ_MASK                    ((0x014 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2682 +#define DPDMAI_CMDID_GET_IRQ_MASK                    ((0x015 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2683 +#define DPDMAI_CMDID_GET_IRQ_STATUS                  ((0x016 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2684 +#define DPDMAI_CMDID_CLEAR_IRQ_STATUS                ((0x017 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2685 +
2686 +#define DPDMAI_CMDID_SET_RX_QUEUE                    ((0x1A0 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2687 +#define DPDMAI_CMDID_GET_RX_QUEUE                    ((0x1A1 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2688 +#define DPDMAI_CMDID_GET_TX_QUEUE                    ((0x1A2 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2689 +
2691 +#define MC_CMD_HDR_TOKEN_O 32  /* Token field offset */
2692 +#define MC_CMD_HDR_TOKEN_S 16  /* Token field size */
2693 +
2695 +#define MAKE_UMASK64(_width) \
2696 +       ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \
2697 +       (uint64_t)-1))
2698 +
2699 +static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
2700 +{
2701 +       return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
2702 +}
2703 +
2704 +static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
2705 +{
2706 +       return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
2707 +}
2708 +
2709 +#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
2710 +       ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
2711 +
2712 +#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
2713 +       (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
2714 +
2715 +#define MC_CMD_HDR_READ_TOKEN(_hdr) \
2716 +       ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
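+
+/*
+ * Illustrative example, not part of the original patch: mc_enc()/mc_dec()
+ * pack and unpack bit fields of one 64-bit command parameter, and the
+ * MC_CMD_OP()/MC_RSP_OP() wrappers above apply them to cmd.params[]:
+ *
+ *     uint64_t p = mc_enc(32, 8, 0x5);        - 8-bit field at bit 32
+ *     uint8_t v = (uint8_t)mc_dec(p, 32, 8);  - v == 0x5
+ */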
2717 +
2718 +/*                cmd, param, offset, width, type, arg_name */
2719 +#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \
2720 +       MC_CMD_OP(cmd, 0, 0,  32, int,      dpdmai_id)
2721 +
2722 +/*                cmd, param, offset, width, type, arg_name */
2723 +#define DPDMAI_CMD_CREATE(cmd, cfg) \
2724 +do { \
2725 +       MC_CMD_OP(cmd, 0, 8,  8,  uint8_t,  cfg->priorities[0]);\
2726 +       MC_CMD_OP(cmd, 0, 16, 8,  uint8_t,  cfg->priorities[1]);\
2727 +} while (0)
2728 +
2729 +/*                cmd, param, offset, width, type, arg_name */
2730 +#define DPDMAI_RSP_IS_ENABLED(cmd, en) \
2731 +       MC_RSP_OP(cmd, 0, 0,  1,  int,      en)
2732 +
2733 +/*                cmd, param, offset, width, type, arg_name */
2734 +#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
2735 +do { \
2736 +       MC_CMD_OP(cmd, 0, 0,  8,  uint8_t,  irq_index);\
2737 +       MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
2738 +       MC_CMD_OP(cmd, 1, 0,  64, uint64_t, irq_cfg->addr);\
2739 +       MC_CMD_OP(cmd, 2, 0,  32, int,      irq_cfg->irq_num); \
2740 +} while (0)
2741 +
2742 +/*                cmd, param, offset, width, type, arg_name */
2743 +#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \
2744 +       MC_CMD_OP(cmd, 0, 32, 8,  uint8_t,  irq_index)
2745 +
2746 +/*                cmd, param, offset, width, type, arg_name */
2747 +#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \
2748 +do { \
2749 +       MC_RSP_OP(cmd, 0, 0,  32, uint32_t, irq_cfg->val); \
2750 +       MC_RSP_OP(cmd, 1, 0,  64, uint64_t, irq_cfg->addr);\
2751 +       MC_RSP_OP(cmd, 2, 0,  32, int,      irq_cfg->irq_num); \
2752 +       MC_RSP_OP(cmd, 2, 32, 32, int,      type); \
2753 +} while (0)
2754 +
2755 +/*                cmd, param, offset, width, type, arg_name */
2756 +#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
2757 +do { \
2758 +       MC_CMD_OP(cmd, 0, 0,  8,  uint8_t,  enable_state); \
2759 +       MC_CMD_OP(cmd, 0, 32, 8,  uint8_t,  irq_index); \
2760 +} while (0)
2761 +
2762 +/*                cmd, param, offset, width, type, arg_name */
2763 +#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
2764 +       MC_CMD_OP(cmd, 0, 32, 8,  uint8_t,  irq_index)
2765 +
2766 +/*                cmd, param, offset, width, type, arg_name */
2767 +#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
2768 +       MC_RSP_OP(cmd, 0, 0,  8,  uint8_t,  enable_state)
2769 +
2770 +/*                cmd, param, offset, width, type, arg_name */
2771 +#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
2772 +do { \
2773 +       MC_CMD_OP(cmd, 0, 0,  32, uint32_t, mask); \
2774 +       MC_CMD_OP(cmd, 0, 32, 8,  uint8_t,  irq_index); \
2775 +} while (0)
2776 +
2777 +/*                cmd, param, offset, width, type, arg_name */
2778 +#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \
2779 +       MC_CMD_OP(cmd, 0, 32, 8,  uint8_t,  irq_index)
2780 +
2781 +/*                cmd, param, offset, width, type, arg_name */
2782 +#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \
2783 +       MC_RSP_OP(cmd, 0, 0,  32, uint32_t, mask)
2784 +
2785 +/*                cmd, param, offset, width, type, arg_name */
2786 +#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
2787 +do { \
2788 +       MC_CMD_OP(cmd, 0, 0,  32, uint32_t, status);\
2789 +       MC_CMD_OP(cmd, 0, 32, 8,  uint8_t,  irq_index);\
2790 +} while (0)
2791 +
2792 +/*                cmd, param, offset, width, type, arg_name */
2793 +#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \
2794 +       MC_RSP_OP(cmd, 0, 0,  32, uint32_t,  status)
2795 +
2796 +/*                cmd, param, offset, width, type, arg_name */
2797 +#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
2798 +do { \
2799 +       MC_CMD_OP(cmd, 0, 0,  32, uint32_t, status); \
2800 +       MC_CMD_OP(cmd, 0, 32, 8,  uint8_t,  irq_index); \
2801 +} while (0)
2802 +
2803 +/*                cmd, param, offset, width, type, arg_name */
2804 +#define DPDMAI_RSP_GET_ATTR(cmd, attr) \
2805 +do { \
2806 +       MC_RSP_OP(cmd, 0, 0,  32, int,      attr->id); \
2807 +       MC_RSP_OP(cmd, 0, 32,  8,  uint8_t,  attr->num_of_priorities); \
2808 +       MC_RSP_OP(cmd, 1, 0,  16, uint16_t, attr->version.major);\
2809 +       MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
2810 +} while (0)
2811 +
2812 +/*                cmd, param, offset, width, type, arg_name */
2813 +#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
2814 +do { \
2815 +       MC_CMD_OP(cmd, 0, 0,  32, int,      cfg->dest_cfg.dest_id); \
2816 +       MC_CMD_OP(cmd, 0, 32, 8,  uint8_t,  cfg->dest_cfg.priority); \
2817 +       MC_CMD_OP(cmd, 0, 40, 8,  uint8_t,  priority); \
2818 +       MC_CMD_OP(cmd, 0, 48, 4,  enum dpdmai_dest, cfg->dest_cfg.dest_type); \
2819 +       MC_CMD_OP(cmd, 1, 0,  64, uint64_t, cfg->user_ctx); \
2820 +       MC_CMD_OP(cmd, 2, 0,  32, uint32_t, cfg->options);\
2821 +} while (0)
2822 +
2823 +/*                cmd, param, offset, width, type, arg_name */
2824 +#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \
2825 +       MC_CMD_OP(cmd, 0, 40, 8,  uint8_t,  priority)
2826 +
2827 +/*                cmd, param, offset, width, type, arg_name */
2828 +#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \
2829 +do { \
2830 +       MC_RSP_OP(cmd, 0, 0,  32, int,      attr->dest_cfg.dest_id);\
2831 +       MC_RSP_OP(cmd, 0, 32, 8,  uint8_t,  attr->dest_cfg.priority);\
2832 +       MC_RSP_OP(cmd, 0, 48, 4,  enum dpdmai_dest, attr->dest_cfg.dest_type);\
2833 +       MC_RSP_OP(cmd, 1, 0,  64, uint64_t,  attr->user_ctx);\
2834 +       MC_RSP_OP(cmd, 2, 0,  32, uint32_t,  attr->fqid);\
2835 +} while (0)
2836 +
2837 +/*                cmd, param, offset, width, type, arg_name */
2838 +#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \
2839 +       MC_CMD_OP(cmd, 0, 40, 8,  uint8_t,  priority)
2840 +
2841 +/*                cmd, param, offset, width, type, arg_name */
2842 +#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \
2843 +       MC_RSP_OP(cmd, 1, 0,  32, uint32_t,  attr->fqid)
2844 +
2845 +#endif /* _FSL_DPDMAI_CMD_H */
2846 --- a/drivers/dma/fsl-edma.c
2847 +++ b/drivers/dma/fsl-edma.c
2848 @@ -146,6 +146,8 @@ struct fsl_edma_slave_config {
2849         u32                             dev_addr;
2850         u32                             burst;
2851         u32                             attr;
2852 +       dma_addr_t                      dma_dev_addr;
2853 +       enum dma_data_direction         dma_dir;
2854  };
2855  
2856  struct fsl_edma_chan {
2857 @@ -342,6 +344,53 @@ static int fsl_edma_resume(struct dma_ch
2858         return 0;
2859  }
2860  
2861 +static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
2862 +{
2863 +       if (fsl_chan->fsc.dma_dir != DMA_NONE)
2864 +               dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
2865 +                                  fsl_chan->fsc.dma_dev_addr,
2866 +                                  fsl_chan->fsc.burst, fsl_chan->fsc.dma_dir, 0);
2867 +       fsl_chan->fsc.dma_dir = DMA_NONE;
2868 +}
2869 +
2870 +static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
2871 +                                   enum dma_transfer_direction dir)
2872 +{
2873 +       struct device *dev = fsl_chan->vchan.chan.device->dev;
2874 +       enum dma_data_direction dma_dir;
2875 +
2876 +       switch (dir) {
2877 +       case DMA_MEM_TO_DEV:
2878 +               dma_dir = DMA_FROM_DEVICE;
2879 +               break;
2880 +       case DMA_DEV_TO_MEM:
2881 +               dma_dir = DMA_TO_DEVICE;
2882 +               break;
2883 +       case DMA_DEV_TO_DEV:
2884 +               dma_dir = DMA_BIDIRECTIONAL;
2885 +               break;
2886 +       default:
2887 +               dma_dir = DMA_NONE;
2888 +               break;
2889 +       }
2890 +
2891 +       /* Already mapped for this config? */
2892 +       if (fsl_chan->fsc.dma_dir == dma_dir)
2893 +               return true;
2894 +
2895 +       fsl_edma_unprep_slave_dma(fsl_chan);
2896 +       fsl_chan->fsc.dma_dev_addr = dma_map_resource(dev,
2897 +                                                     fsl_chan->fsc.dev_addr,
2898 +                                                     fsl_chan->fsc.burst,
2899 +                                                     dma_dir, 0);
2900 +       if (dma_mapping_error(dev, fsl_chan->fsc.dma_dev_addr))
2901 +               return false;
2902 +
2903 +       fsl_chan->fsc.dma_dir = dma_dir;
2904 +
2905 +       return true;
2906 +}
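+
+/*
+ * Note: the inversion above (DMA_MEM_TO_DEV mapped as DMA_FROM_DEVICE
+ * and vice versa) appears intentional: the direction passed to
+ * dma_map_resource() is expressed relative to the device FIFO being
+ * mapped, not to system memory.  The resulting mapping is cached in
+ * fsc.dma_dev_addr/fsc.dma_dir and reused until the direction changes
+ * or fsl_edma_slave_config() installs a new config, which calls
+ * fsl_edma_unprep_slave_dma() to drop the stale mapping.
+ */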
2907 +
2908  static int fsl_edma_slave_config(struct dma_chan *chan,
2909                                  struct dma_slave_config *cfg)
2910  {
2911 @@ -361,6 +410,7 @@ static int fsl_edma_slave_config(struct
2912         } else {
2913                         return -EINVAL;
2914         }
2915 +       fsl_edma_unprep_slave_dma(fsl_chan);
2916         return 0;
2917  }
2918  
2919 @@ -553,6 +603,9 @@ static struct dma_async_tx_descriptor *f
2920         if (!is_slave_direction(fsl_chan->fsc.dir))
2921                 return NULL;
2922  
2923 +       if (!fsl_edma_prep_slave_dma(fsl_chan, fsl_chan->fsc.dir))
2924 +               return NULL;
2925 +
2926         sg_len = buf_len / period_len;
2927         fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
2928         if (!fsl_desc)
2929 @@ -572,11 +625,11 @@ static struct dma_async_tx_descriptor *f
2930  
2931                 if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
2932                         src_addr = dma_buf_next;
2933 -                       dst_addr = fsl_chan->fsc.dev_addr;
2934 +                       dst_addr = fsl_chan->fsc.dma_dev_addr;
2935                         soff = fsl_chan->fsc.addr_width;
2936                         doff = 0;
2937                 } else {
2938 -                       src_addr = fsl_chan->fsc.dev_addr;
2939 +                       src_addr = fsl_chan->fsc.dma_dev_addr;
2940                         dst_addr = dma_buf_next;
2941                         soff = 0;
2942                         doff = fsl_chan->fsc.addr_width;
2943 @@ -606,6 +659,9 @@ static struct dma_async_tx_descriptor *f
2944         if (!is_slave_direction(fsl_chan->fsc.dir))
2945                 return NULL;
2946  
2947 +       if (!fsl_edma_prep_slave_dma(fsl_chan, fsl_chan->fsc.dir))
2948 +               return NULL;
2949 +
2950         fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
2951         if (!fsl_desc)
2952                 return NULL;
2953 @@ -618,11 +674,11 @@ static struct dma_async_tx_descriptor *f
2954  
2955                 if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
2956                         src_addr = sg_dma_address(sg);
2957 -                       dst_addr = fsl_chan->fsc.dev_addr;
2958 +                       dst_addr = fsl_chan->fsc.dma_dev_addr;
2959                         soff = fsl_chan->fsc.addr_width;
2960                         doff = 0;
2961                 } else {
2962 -                       src_addr = fsl_chan->fsc.dev_addr;
2963 +                       src_addr = fsl_chan->fsc.dma_dev_addr;
2964                         dst_addr = sg_dma_address(sg);
2965                         soff = 0;
2966                         doff = fsl_chan->fsc.addr_width;
2967 @@ -802,6 +858,7 @@ static void fsl_edma_free_chan_resources
2968         fsl_edma_chan_mux(fsl_chan, 0, false);
2969         fsl_chan->edesc = NULL;
2970         vchan_get_all_descriptors(&fsl_chan->vchan, &head);
2971 +       fsl_edma_unprep_slave_dma(fsl_chan);
2972         spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
2973  
2974         vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
2975 @@ -937,6 +994,7 @@ static int fsl_edma_probe(struct platfor
2976                 fsl_chan->slave_id = 0;
2977                 fsl_chan->idle = true;
2978                 fsl_chan->vchan.desc_free = fsl_edma_free_desc;
2979 +               fsl_chan->fsc.dma_dir = DMA_NONE;
2980                 vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
2981  
2982                 edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
2983 --- /dev/null
2984 +++ b/drivers/dma/fsl-qdma.c
2985 @@ -0,0 +1,1278 @@
2986 +/*
2987 + * Driver for NXP Layerscape Queue direct memory access controller (qDMA)
2988 + *
2989 + * Copyright 2017 NXP
2990 + *
2991 + * Author:
2992 + *  Jiaheng Fan <jiaheng.fan@nxp.com>
2993 + *  Wen He <wen.he_1@nxp.com>
2994 + *
2995 + * SPDX-License-Identifier: GPL-2.0+
2996 + */
2997 +
2998 +#include <linux/interrupt.h>
2999 +#include <linux/module.h>
3000 +#include <linux/delay.h>
3001 +#include <linux/of_irq.h>
3002 +#include <linux/of_address.h>
3003 +#include <linux/of_platform.h>
3004 +#include <linux/of_dma.h>
3005 +#include <linux/dma-mapping.h>
3006 +#include <linux/dmapool.h>
3007 +#include <linux/dmaengine.h>
3008 +#include <linux/slab.h>
3009 +#include <linux/spinlock.h>
3010 +
3011 +#include "virt-dma.h"
3012 +
3013 +#define FSL_QDMA_DMR                   0x0
3014 +#define FSL_QDMA_DSR                   0x4
3015 +#define FSL_QDMA_DEIER                 0xe00
3016 +#define FSL_QDMA_DEDR                  0xe04
3017 +#define FSL_QDMA_DECFDW0R              0xe10
3018 +#define FSL_QDMA_DECFDW1R              0xe14
3019 +#define FSL_QDMA_DECFDW2R              0xe18
3020 +#define FSL_QDMA_DECFDW3R              0xe1c
3021 +#define FSL_QDMA_DECFQIDR              0xe30
3022 +#define FSL_QDMA_DECBR                 0xe34
3023 +
3024 +#define FSL_QDMA_BCQMR(x)              (0xc0 + 0x100 * (x))
3025 +#define FSL_QDMA_BCQSR(x)              (0xc4 + 0x100 * (x))
3026 +#define FSL_QDMA_BCQEDPA_SADDR(x)      (0xc8 + 0x100 * (x))
3027 +#define FSL_QDMA_BCQDPA_SADDR(x)       (0xcc + 0x100 * (x))
3028 +#define FSL_QDMA_BCQEEPA_SADDR(x)      (0xd0 + 0x100 * (x))
3029 +#define FSL_QDMA_BCQEPA_SADDR(x)       (0xd4 + 0x100 * (x))
3030 +#define FSL_QDMA_BCQIER(x)             (0xe0 + 0x100 * (x))
3031 +#define FSL_QDMA_BCQIDR(x)             (0xe4 + 0x100 * (x))
3032 +
3033 +#define FSL_QDMA_SQDPAR                        0x80c
3034 +#define FSL_QDMA_SQEPAR                        0x814
3035 +#define FSL_QDMA_BSQMR                 0x800
3036 +#define FSL_QDMA_BSQSR                 0x804
3037 +#define FSL_QDMA_BSQICR                        0x828
3038 +#define FSL_QDMA_CQMR                  0xa00
3039 +#define FSL_QDMA_CQDSCR1               0xa08
3040 +#define FSL_QDMA_CQDSCR2                0xa0c
3041 +#define FSL_QDMA_CQIER                 0xa10
3042 +#define FSL_QDMA_CQEDR                 0xa14
3043 +#define FSL_QDMA_SQCCMR                        0xa20
3044 +
3045 +#define FSL_QDMA_SQICR_ICEN
3046 +
3047 +#define FSL_QDMA_CQIDR_CQT             0xff000000
3048 +#define FSL_QDMA_CQIDR_SQPE            0x800000
3049 +#define FSL_QDMA_CQIDR_SQT             0x8000
3050 +
3051 +#define FSL_QDMA_BCQIER_CQTIE          0x8000
3052 +#define FSL_QDMA_BCQIER_CQPEIE         0x800000
3053 +#define FSL_QDMA_BSQICR_ICEN           0x80000000
3054 +#define FSL_QDMA_BSQICR_ICST(x)                ((x) << 16)
3055 +#define FSL_QDMA_CQIER_MEIE            0x80000000
3056 +#define FSL_QDMA_CQIER_TEIE            0x1
3057 +#define FSL_QDMA_SQCCMR_ENTER_WM       0x200000
3058 +
3059 +#define FSL_QDMA_QUEUE_MAX             8
3060 +
3061 +#define FSL_QDMA_BCQMR_EN              0x80000000
3062 +#define FSL_QDMA_BCQMR_EI              0x40000000
3063 +#define FSL_QDMA_BCQMR_CD_THLD(x)      ((x) << 20)
3064 +#define FSL_QDMA_BCQMR_CQ_SIZE(x)      ((x) << 16)
3065 +
3066 +#define FSL_QDMA_BCQSR_QF              0x10000
3067 +#define FSL_QDMA_BCQSR_XOFF            0x1
3068 +
3069 +#define FSL_QDMA_BSQMR_EN              0x80000000
3070 +#define FSL_QDMA_BSQMR_DI              0x40000000
3071 +#define FSL_QDMA_BSQMR_CQ_SIZE(x)      ((x) << 16)
3072 +
3073 +#define FSL_QDMA_BSQSR_QE              0x20000
3074 +
3075 +#define FSL_QDMA_DMR_DQD               0x40000000
3076 +#define FSL_QDMA_DSR_DB                        0x80000000
3077 +
3078 +#define FSL_QDMA_COMMAND_BUFFER_SIZE   64
3079 +#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
3080 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN        64
3081 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX        16384
3082 +#define FSL_QDMA_QUEUE_NUM_MAX         8
3083 +
3084 +#define FSL_QDMA_CMD_RWTTYPE           0x4
3085 +#define FSL_QDMA_CMD_LWC                0x2
3086 +
3087 +#define FSL_QDMA_CMD_RWTTYPE_OFFSET    28
3088 +#define FSL_QDMA_CMD_NS_OFFSET         27
3089 +#define FSL_QDMA_CMD_DQOS_OFFSET       24
3090 +#define FSL_QDMA_CMD_WTHROTL_OFFSET    20
3091 +#define FSL_QDMA_CMD_DSEN_OFFSET       19
3092 +#define FSL_QDMA_CMD_LWC_OFFSET                16
3093 +
3094 +#define QDMA_CCDF_STATUS               20
3095 +#define QDMA_CCDF_OFFSET               20
3096 +#define QDMA_CCDF_MASK                 GENMASK(28, 20)
3097 +#define QDMA_CCDF_FORMAT               BIT(29)
3098 +#define QDMA_CCDF_SER                  BIT(30)
3099 +
3100 +#define QDMA_SG_FIN                    BIT(30)
3101 +#define QDMA_SG_EXT                    BIT(31)
3102 +#define QDMA_SG_LEN_MASK               GENMASK(29, 0)
3103 +
3104 +#define QDMA_BIG_ENDIAN                        0x00000001
3105 +#define COMP_TIMEOUT                   1000
3106 +#define COMMAND_QUEUE_OVERFLOW         10
3107 +
3108 +#define QDMA_IN(fsl_qdma_engine, addr)                                 \
3109 +       (((fsl_qdma_engine)->big_endian & QDMA_BIG_ENDIAN) ?            \
3110 +               ioread32be(addr) : ioread32(addr))
3111 +#define QDMA_OUT(fsl_qdma_engine, addr, val)                           \
3112 +       (((fsl_qdma_engine)->big_endian & QDMA_BIG_ENDIAN) ?            \
3113 +               iowrite32be(val, addr) : iowrite32(val, addr))
3114 +
3115 +#define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x)                 \
3116 +       (((fsl_qdma_engine)->block_offset) * (x))
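+
+/*
+ * QDMA_IN()/QDMA_OUT() pick big- or little-endian MMIO accessors at run
+ * time based on the device-tree "big-endian" property (read in
+ * fsl_qdma_probe() below); all register access goes through the
+ * qdma_readl()/qdma_writel() wrappers.
+ */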
3117 +
3118 +static DEFINE_PER_CPU(u64, pre_addr);
3119 +static DEFINE_PER_CPU(u64, pre_queue);
3120 +
3121 +/* qDMA Command Descriptor Formats */
3122 +
3123 +struct fsl_qdma_format {
3124 +       __le32 status; /* ser, status */
3125 +       __le32 cfg;     /* format, offset */
3126 +       union {
3127 +               struct {
3128 +                       __le32 addr_lo; /* low 32-bits of 40-bit address */
3129 +                       u8 addr_hi;     /* high 8-bits of 40-bit address */
3130 +                       u8 __reserved1[2];
3131 +                       u8 cfg8b_w1; /* dd, queue */
3132 +               } __packed;
3133 +               __le64 data;
3134 +       };
3135 +} __packed;
3136 +
3137 +static inline u64
3138 +qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
3139 +{
3140 +       return le64_to_cpu(ccdf->data) & 0xffffffffffLLU;
3141 +}
3142 +
3143 +static inline void
3144 +qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
3145 +{
3146 +       ccdf->addr_hi = upper_32_bits(addr);
3147 +       ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
3148 +}
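+
+/*
+ * Descriptor addresses are 40 bits wide: the low 32 bits are stored in
+ * addr_lo and the high 8 bits in addr_hi, so qdma_ccdf_addr_get64()
+ * above recovers the address by masking the union's 64-bit view with
+ * 0xffffffffffULL.
+ */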
3149 +
3150 +static inline u64
3151 +qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
3152 +{
3153 +       return ccdf->cfg8b_w1 & 0xff;
3154 +}
3155 +
3156 +static inline int
3157 +qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
3158 +{
3159 +       return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
3160 +}
3161 +
3162 +static inline void
3163 +qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
3164 +{
3165 +       ccdf->cfg = cpu_to_le32(QDMA_CCDF_FORMAT | offset);
3166 +}
3167 +
3168 +static inline int
3169 +qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
3170 +{
3171 +       return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
3172 +}
3173 +
3174 +static inline void
3175 +qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
3176 +{
3177 +       ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
3178 +}
3179 +
3180 +static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
3181 +{
3182 +       csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
3183 +}
3184 +
3185 +static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
3186 +{
3187 +       csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
3188 +}
3189 +
3190 +static inline void qdma_csgf_set_e(struct fsl_qdma_format *csgf, int len)
3191 +{
3192 +       csgf->cfg = cpu_to_le32(QDMA_SG_EXT | (len & QDMA_SG_LEN_MASK));
3193 +}
3194 +
3195 +/* qDMA Source Descriptor Format */
3196 +struct fsl_qdma_sdf {
3197 +       __le32 rev3;
3198 +       __le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
3199 +       __le32 rev5;
3200 +       __le32 cmd;
3201 +} __packed;
3202 +
3203 +/* qDMA Destination Descriptor Format */
3204 +struct fsl_qdma_ddf {
3205 +       __le32 rev1;
3206 +       __le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
3207 +       __le32 rev3;
3208 +       __le32 cmd;
3209 +} __packed;
3210 +
3211 +struct fsl_qdma_chan {
3212 +       struct virt_dma_chan            vchan;
3213 +       struct virt_dma_desc            vdesc;
3214 +       enum dma_status                 status;
3215 +       struct fsl_qdma_engine          *qdma;
3216 +       struct fsl_qdma_queue           *queue;
3217 +};
3218 +
3219 +struct fsl_qdma_queue {
3220 +       struct fsl_qdma_format  *virt_head;
3221 +       struct fsl_qdma_format  *virt_tail;
3222 +       struct list_head        comp_used;
3223 +       struct list_head        comp_free;
3224 +       struct dma_pool         *comp_pool;
3225 +       struct dma_pool         *desc_pool;
3226 +       spinlock_t              queue_lock;
3227 +       dma_addr_t              bus_addr;
3228 +       u32                     n_cq;
3229 +       u32                     id;
3230 +       struct fsl_qdma_format  *cq;
3231 +       void __iomem            *block_base;
3232 +};
3233 +
3234 +struct fsl_qdma_comp {
3235 +       dma_addr_t              bus_addr;
3236 +       dma_addr_t              desc_bus_addr;
3237 +       void                    *virt_addr;
3238 +       void                    *desc_virt_addr;
3239 +       struct fsl_qdma_chan    *qchan;
3240 +       struct virt_dma_desc    vdesc;
3241 +       struct list_head        list;
3242 +};
3243 +
3244 +struct fsl_qdma_engine {
3245 +       struct dma_device       dma_dev;
3246 +       void __iomem            *ctrl_base;
3247 +       void __iomem            *status_base;
3248 +       void __iomem            *block_base;
3249 +       u32                     n_chans;
3250 +       u32                     n_queues;
3251 +       struct mutex            fsl_qdma_mutex;
3252 +       int                     error_irq;
3253 +       int                     *queue_irq;
3254 +       bool                    big_endian;
3255 +       struct fsl_qdma_queue   *queue;
3256 +       struct fsl_qdma_queue   **status;
3257 +       struct fsl_qdma_chan    *chans;
3258 +       int                     block_number;
3259 +       int                     block_offset;
3260 +       int                     irq_base;
3261 +       int                     desc_allocated;
3262 +
3263 +};
3264 +
3265 +static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
3266 +{
3267 +       return QDMA_IN(qdma, addr);
3268 +}
3269 +
3270 +static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
3271 +                                               void __iomem *addr)
3272 +{
3273 +       QDMA_OUT(qdma, addr, val);
3274 +}
3275 +
3276 +static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
3277 +{
3278 +       return container_of(chan, struct fsl_qdma_chan, vchan.chan);
3279 +}
3280 +
3281 +static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
3282 +{
3283 +       return container_of(vd, struct fsl_qdma_comp, vdesc);
3284 +}
3285 +
3286 +static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
3287 +{
3288 +       struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
3289 +       struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
3290 +       struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
3291 +       struct fsl_qdma_comp *comp_temp, *_comp_temp;
3292 +       unsigned long flags;
3293 +       LIST_HEAD(head);
3294 +
3295 +       spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
3296 +       vchan_get_all_descriptors(&fsl_chan->vchan, &head);
3297 +       spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
3298 +
3299 +       vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
3300 +
3301 +       if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
3302 +               return;
3303 +
3304 +       list_for_each_entry_safe(comp_temp, _comp_temp,
3305 +                               &fsl_queue->comp_used,  list) {
3306 +               dma_pool_free(fsl_queue->comp_pool,
3307 +                                        comp_temp->virt_addr,
3308 +                                        comp_temp->bus_addr);
3309 +               dma_pool_free(fsl_queue->desc_pool,
3310 +                               comp_temp->desc_virt_addr,
3311 +                               comp_temp->desc_bus_addr);
3312 +               list_del(&comp_temp->list);
3313 +               kfree(comp_temp);
3314 +       }
3315 +
3316 +       list_for_each_entry_safe(comp_temp, _comp_temp,
3317 +                               &fsl_queue->comp_free, list) {
3318 +               dma_pool_free(fsl_queue->comp_pool,
3319 +                                        comp_temp->virt_addr,
3320 +                                        comp_temp->bus_addr);
3321 +               dma_pool_free(fsl_queue->desc_pool,
3322 +                               comp_temp->desc_virt_addr,
3323 +                               comp_temp->desc_bus_addr);
3324 +               list_del(&comp_temp->list);
3325 +               kfree(comp_temp);
3326 +       }
3327 +
3328 +       dma_pool_destroy(fsl_queue->comp_pool);
3329 +       dma_pool_destroy(fsl_queue->desc_pool);
3330 +
3331 +       fsl_qdma->desc_allocated--;
3332 +       fsl_queue->comp_pool = NULL;
3333 +       fsl_queue->desc_pool = NULL;
3334 +}
3335 +
3336 +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
3337 +                                       dma_addr_t dst, dma_addr_t src, u32 len)
3338 +{
3339 +       struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
3340 +       struct fsl_qdma_sdf *sdf;
3341 +       struct fsl_qdma_ddf *ddf;
3342 +
3343 +       ccdf = (struct fsl_qdma_format *)fsl_comp->virt_addr;
3344 +       csgf_desc = (struct fsl_qdma_format *)fsl_comp->virt_addr + 1;
3345 +       csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
3346 +       csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
3347 +       sdf = (struct fsl_qdma_sdf *)fsl_comp->desc_virt_addr;
3348 +       ddf = (struct fsl_qdma_ddf *)fsl_comp->desc_virt_addr + 1;
3349 +
3350 +       memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
3351 +       memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
3352 +       /* Head Command Descriptor (Frame Descriptor) */
3353 +       qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
3354 +       qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
3355 +       qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
3356 +       /* Status notification is enqueued to status queue. */
3357 +       /* Compound Command Descriptor (Frame List Table) */
3358 +       qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
3359 +       /* Length must be 32: the size of the compound S/G (SD/DD) buffer */
3360 +       qdma_csgf_set_len(csgf_desc, 32);
3361 +       qdma_desc_addr_set64(csgf_src, src);
3362 +       qdma_csgf_set_len(csgf_src, len);
3363 +       qdma_desc_addr_set64(csgf_dest, dst);
3364 +       qdma_csgf_set_len(csgf_dest, len);
3365 +       /* This entry is the last entry. */
3366 +       qdma_csgf_set_f(csgf_dest, len);
3367 +       /* Descriptor Buffer */
3368 +       sdf->cmd = cpu_to_le32(
3369 +                       FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
3370 +       ddf->cmd = cpu_to_le32(
3371 +                       FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
3372 +       ddf->cmd |= cpu_to_le32(
3373 +                       FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
3374 +}
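+
+/*
+ * Sketch of the compound frame built above (each entry is one 16-byte
+ * struct fsl_qdma_format):
+ *
+ *   virt_addr / bus_addr:
+ *     [0] ccdf      - frame descriptor, points at bus_addr + 16
+ *     [1] csgf_desc - compound S/G entry for the SD/DD buffer (len 32)
+ *     [2] csgf_src  - source buffer entry
+ *     [3] csgf_dest - destination buffer entry, marked final (F bit)
+ *
+ *   desc_virt_addr / desc_bus_addr:
+ *     [0] sdf - source descriptor (read transaction command)
+ *     [1] ddf - destination descriptor (write command, LWC set)
+ */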
3375 +
3376 +/*
3377 + * Pre-request command descriptor and compound S/G for enqueue.
3378 + */
3379 +static int fsl_qdma_pre_request_enqueue_comp_desc(struct fsl_qdma_queue *queue)
3380 +{
3381 +       struct fsl_qdma_comp *comp_temp;
3382 +       int i;
3383 +
3384 +       for (i = 0; i < queue->n_cq + COMMAND_QUEUE_OVERFLOW; i++) {
3385 +               comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
3386 +               if (!comp_temp)
3387 +                       return -ENOMEM;
3388 +               comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
3389 +                                                     GFP_KERNEL,
3390 +                                                     &comp_temp->bus_addr);
3391 +
3392 +               if (!comp_temp->virt_addr) {
3393 +                       kfree(comp_temp);
3394 +                       return -ENOMEM;
3395 +               }
3396 +
3397 +               list_add_tail(&comp_temp->list, &queue->comp_free);
3398 +       }
3399 +
3400 +       return 0;
3401 +}
3402 +
3403 +/*
3404 + * Pre-request source and destination descriptor for enqueue.
3405 + */
3406 +static int fsl_qdma_pre_request_enqueue_sd_desc(struct fsl_qdma_queue *queue)
3407 +{
3408 +       struct fsl_qdma_comp *comp_temp, *_comp_temp;
3409 +
3410 +       list_for_each_entry_safe(comp_temp, _comp_temp,
3411 +                               &queue->comp_free, list) {
3412 +               comp_temp->desc_virt_addr = dma_pool_alloc(queue->desc_pool,
3413 +                                               GFP_KERNEL,
3414 +                                               &comp_temp->desc_bus_addr);
3415 +               if (!comp_temp->desc_virt_addr)
3416 +                       return -ENOMEM;
3417 +       }
3418 +
3419 +       return 0;
3420 +}
3421 +
3422 +/*
3423 + * Request a command descriptor for enqueue.
3424 + */
3425 +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
3426 +                                       struct fsl_qdma_chan *fsl_chan)
3427 +{
3428 +       struct fsl_qdma_comp *comp_temp;
3429 +       struct fsl_qdma_queue *queue = fsl_chan->queue;
3430 +       unsigned long flags;
3431 +       int timeout = COMP_TIMEOUT;
3432 +
3433 +       while (timeout) {
3434 +               spin_lock_irqsave(&queue->queue_lock, flags);
3435 +               if (!list_empty(&queue->comp_free)) {
3436 +                       comp_temp = list_first_entry(&queue->comp_free,
3437 +                                            struct fsl_qdma_comp,
3438 +                                            list);
3439 +                       list_del(&comp_temp->list);
3440 +
3441 +                       spin_unlock_irqrestore(&queue->queue_lock, flags);
3442 +                       comp_temp->qchan = fsl_chan;
3443 +                       return comp_temp;
3444 +               }
3445 +               spin_unlock_irqrestore(&queue->queue_lock, flags);
3446 +               udelay(1);
3447 +               timeout--;
3448 +       }
3449 +
3450 +       return NULL;
3451 +}
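+
+/*
+ * Completion descriptors are pre-allocated (n_cq plus a
+ * COMMAND_QUEUE_OVERFLOW margin per queue), so the list_empty() check
+ * above normally succeeds on the first pass; the udelay(1) loop only
+ * spins, for at most COMP_TIMEOUT microseconds, when every descriptor
+ * is in flight.
+ */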
3452 +
3453 +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
3454 +                                       struct platform_device *pdev,
3455 +                                       struct fsl_qdma_engine *fsl_qdma)
3456 +{
3457 +       struct fsl_qdma_queue *queue_head, *queue_temp;
3458 +       int ret, len, i, j;
3459 +       unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
3460 +       int queue_num;
3461 +       int block_number;
3462 +
3463 +       queue_num = fsl_qdma->n_queues;
3464 +       block_number = fsl_qdma->block_number;
3465 +
3466 +       if (queue_num > FSL_QDMA_QUEUE_MAX)
3467 +               queue_num = FSL_QDMA_QUEUE_MAX;
3468 +       len = sizeof(*queue_head) * queue_num * block_number;
3469 +       queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
3470 +       if (!queue_head)
3471 +               return NULL;
3472 +
3473 +       ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
3474 +                                       queue_size, queue_num);
3475 +       if (ret) {
3476 +               dev_err(&pdev->dev, "Can't get queue-sizes.\n");
3477 +               return NULL;
3478 +       }
3479 +       for (j = 0; j < block_number; j++) {
3480 +               for (i = 0; i < queue_num; i++) {
3481 +                       if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
3482 +                          queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3483 +                               dev_err(&pdev->dev,
3484 +                                      "Get wrong queue-sizes.\n");
3485 +                               return NULL;
3486 +                       }
3487 +                       queue_temp = queue_head + i + (j * queue_num);
3488 +
3489 +                       queue_temp->cq =
3490 +                       dma_alloc_coherent(&pdev->dev,
3491 +                                       sizeof(struct fsl_qdma_format) *
3492 +                                       queue_size[i],
3493 +                                       &queue_temp->bus_addr,
3494 +                                       GFP_KERNEL);
3495 +                       if (!queue_temp->cq)
3496 +                               return NULL;
3497 +                       queue_temp->block_base = fsl_qdma->block_base +
3498 +                               FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3499 +                       queue_temp->n_cq = queue_size[i];
3500 +                       queue_temp->id = i;
3501 +                       queue_temp->virt_head = queue_temp->cq;
3502 +                       queue_temp->virt_tail = queue_temp->cq;
3503 +                       /*
3504 +                        * List for queue command buffer
3505 +                        */
3506 +                       INIT_LIST_HEAD(&queue_temp->comp_used);
3507 +                       spin_lock_init(&queue_temp->queue_lock);
3508 +               }
3509 +       }
3510 +       return queue_head;
3511 +}
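+
+/*
+ * The queue array is laid out block-major: queue i of block j lives at
+ * queue_head[i + j * queue_num].  fsl_qdma_probe() below assigns
+ * channel k the queue at index k % (n_queues * block_number), spreading
+ * the channels round-robin over every queue of every block.
+ */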
3512 +
3513 +static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
3514 +                                               struct platform_device *pdev)
3515 +{
3516 +       struct device_node *np = pdev->dev.of_node;
3517 +       struct fsl_qdma_queue *status_head;
3518 +       unsigned int status_size;
3519 +       int ret;
3520 +
3521 +       ret = of_property_read_u32(np, "status-sizes", &status_size);
3522 +       if (ret) {
3523 +               dev_err(&pdev->dev, "Can't get status-sizes.\n");
3524 +               return NULL;
3525 +       }
3526 +       if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
3527 +                       || status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3528 +               dev_err(&pdev->dev, "Get wrong status_size.\n");
3529 +               return NULL;
3530 +       }
3531 +       status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
3532 +                                                               GFP_KERNEL);
3533 +       if (!status_head)
3534 +               return NULL;
3535 +
3536 +       /*
3537 +        * Buffer for queue command
3538 +        */
3539 +       status_head->cq = dma_alloc_coherent(&pdev->dev,
3540 +                                               sizeof(struct fsl_qdma_format) *
3541 +                                               status_size,
3542 +                                               &status_head->bus_addr,
3543 +                                               GFP_KERNEL);
3544 +       if (!status_head->cq)
3545 +               return NULL;
3546 +       status_head->n_cq = status_size;
3547 +       status_head->virt_head = status_head->cq;
3548 +       status_head->virt_tail = status_head->cq;
3549 +       status_head->comp_pool = NULL;
3550 +
3551 +       return status_head;
3552 +}
3553 +
3554 +static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
3555 +{
3556 +       void __iomem *ctrl = fsl_qdma->ctrl_base;
3557 +       void __iomem *block;
3558 +       int i, count = 5;
3559 +       int j;
3560 +       u32 reg;
3561 +
3562 +       /* Disable the command queue and wait for idle state. */
3563 +       reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3564 +       reg |= FSL_QDMA_DMR_DQD;
3565 +       qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3566 +       for (j = 0; j < fsl_qdma->block_number; j++) {
3567 +               block = fsl_qdma->block_base +
3568 +                       FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3569 +               for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
3570 +                       qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
3571 +       }
3572 +       while (1) {
3573 +               reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
3574 +               if (!(reg & FSL_QDMA_DSR_DB))
3575 +                       break;
3576 +               if (count-- < 0)
3577 +                       return -EBUSY;
3578 +               udelay(100);
3579 +       }
3580 +
3581 +       for (j = 0; j < fsl_qdma->block_number; j++) {
3582 +
3583 +               block = fsl_qdma->block_base +
3584 +                       FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3585 +
3586 +               /* Disable status queue. */
3587 +               qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
3588 +
3589 +               /*
3590 +                * clear the command queue interrupt detect register for
3591 +                * all queues.
3592 +                */
3593 +               qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3594 +       }
3595 +
3596 +       return 0;
3597 +}
3598 +
3599 +static int fsl_qdma_queue_transfer_complete(
3600 +                               struct fsl_qdma_engine *fsl_qdma,
3601 +                               void *block,
3602 +                               int id)
3603 +{
3604 +       struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
3605 +       struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
3606 +       struct fsl_qdma_queue *temp_queue;
3607 +       struct fsl_qdma_format *status_addr;
3608 +       struct fsl_qdma_comp *fsl_comp = NULL;
3609 +       u32 reg, i;
3610 +       bool duplicate, duplicate_handle;
3611 +
3612 +       while (1) {
3613 +               duplicate = 0;
3614 +               duplicate_handle = 0;
3615 +               reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
3616 +               if (reg & FSL_QDMA_BSQSR_QE)
3617 +                       return 0;
3618 +
3619 +               status_addr = fsl_status->virt_head;
3620 +
3621 +               if (qdma_ccdf_get_queue(status_addr) ==
3622 +                  __this_cpu_read(pre_queue) &&
3623 +                       qdma_ccdf_addr_get64(status_addr) ==
3624 +                       __this_cpu_read(pre_addr))
3625 +                       duplicate = 1;
3626 +               i = qdma_ccdf_get_queue(status_addr) +
3627 +                       id * fsl_qdma->n_queues;
3628 +               __this_cpu_write(pre_addr, qdma_ccdf_addr_get64(status_addr));
3629 +               __this_cpu_write(pre_queue, qdma_ccdf_get_queue(status_addr));
3630 +               temp_queue = fsl_queue + i;
3631 +
3632 +               spin_lock(&temp_queue->queue_lock);
3633 +               if (list_empty(&temp_queue->comp_used)) {
3634 +                       if (duplicate)
3635 +                               duplicate_handle = 1;
3636 +                       else {
3637 +                               spin_unlock(&temp_queue->queue_lock);
3638 +                               return -1;
3639 +                       }
3640 +               } else {
3641 +                       fsl_comp = list_first_entry(&temp_queue->comp_used,
3642 +                                                       struct fsl_qdma_comp,
3643 +                                                       list);
3644 +                       if (fsl_comp->bus_addr + 16 !=
3645 +                               __this_cpu_read(pre_addr)) {
3646 +                               if (duplicate)
3647 +                                       duplicate_handle = 1;
3648 +                               else {
3649 +                                       spin_unlock(&temp_queue->queue_lock);
3650 +                                       return -1;
3651 +                               }
3652 +                       }
3653 +
3654 +               }
3655 +
3656 +               if (duplicate_handle) {
3657 +                       reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3658 +                       reg |= FSL_QDMA_BSQMR_DI;
3659 +                       qdma_desc_addr_set64(status_addr, 0x0);
3660 +                       fsl_status->virt_head++;
3661 +                       if (fsl_status->virt_head == fsl_status->cq
3662 +                                                  + fsl_status->n_cq)
3663 +                               fsl_status->virt_head = fsl_status->cq;
3664 +                       qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3665 +                       spin_unlock(&temp_queue->queue_lock);
3666 +                       continue;
3667 +               }
3668 +               list_del(&fsl_comp->list);
3669 +
3670 +               reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3671 +               reg |= FSL_QDMA_BSQMR_DI;
3672 +               qdma_desc_addr_set64(status_addr, 0x0);
3673 +               fsl_status->virt_head++;
3674 +               if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
3675 +                       fsl_status->virt_head = fsl_status->cq;
3676 +               qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3677 +               spin_unlock(&temp_queue->queue_lock);
3678 +
3679 +               spin_lock(&fsl_comp->qchan->vchan.lock);
3680 +               vchan_cookie_complete(&fsl_comp->vdesc);
3681 +               fsl_comp->qchan->status = DMA_COMPLETE;
3682 +               spin_unlock(&fsl_comp->qchan->vchan.lock);
3683 +       }
3684 +       return 0;
3685 +}
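+
+/*
+ * The per-CPU pre_addr/pre_queue pair above records the last status
+ * entry consumed on this CPU.  When the hardware reports the same
+ * (queue, address) again, the entry is treated as a duplicate: only the
+ * status ring slot is recycled and no descriptor is completed twice.
+ */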
3686 +
3687 +static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
3688 +{
3689 +       struct fsl_qdma_engine *fsl_qdma = dev_id;
3690 +       unsigned int intr;
3691 +       void __iomem *status = fsl_qdma->status_base;
3692 +
3693 +       intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
3694 +
3695 +       if (intr)
3696 +               dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
3697 +
3698 +       qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
3699 +       return IRQ_HANDLED;
3700 +}
3701 +
3702 +static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
3703 +{
3704 +       struct fsl_qdma_engine *fsl_qdma = dev_id;
3705 +       unsigned int intr, reg;
3706 +       void __iomem *ctrl = fsl_qdma->ctrl_base;
3707 +       void __iomem *block;
3708 +       int id;
3709 +
3710 +       id = irq - fsl_qdma->irq_base;
3711 +       if (id < 0 && id > fsl_qdma->block_number) {
3712 +               dev_err(fsl_qdma->dma_dev.dev,
3713 +                       "irq %d is wrong irq_base is %d\n",
3714 +                       irq, fsl_qdma->irq_base);
3715 +       }
3716 +
3717 +       block = fsl_qdma->block_base +
3718 +               FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
3719 +
3720 +       intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
3721 +
3722 +       if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
3723 +               intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);
3724 +
3725 +       if (intr != 0) {
3726 +               reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3727 +               reg |= FSL_QDMA_DMR_DQD;
3728 +               qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3729 +               qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
3730 +               dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
3731 +       }
3732 +
3733 +       qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3734 +
3735 +       return IRQ_HANDLED;
3736 +}
3737 +
3738 +static int
3739 +fsl_qdma_irq_init(struct platform_device *pdev,
3740 +                 struct fsl_qdma_engine *fsl_qdma)
3741 +{
3742 +       char irq_name[20];
3743 +       int i;
3744 +       int cpu;
3745 +       int ret;
3746 +
3747 +       fsl_qdma->error_irq = platform_get_irq_byname(pdev,
3748 +                                                       "qdma-error");
3749 +       if (fsl_qdma->error_irq < 0) {
3750 +               dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
3751 +               return fsl_qdma->error_irq;
3752 +       }
3753 +
3754 +       ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
3755 +                       fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
3756 +       if (ret) {
3757 +               dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
3758 +               return  ret;
3759 +       }
3760 +
3761 +       for (i = 0; i < fsl_qdma->block_number; i++) {
3762 +               sprintf(irq_name, "qdma-queue%d", i);
3763 +               fsl_qdma->queue_irq[i] = platform_get_irq_byname(pdev,
3764 +                                                               irq_name);
3765 +
3766 +               if (fsl_qdma->queue_irq[i] < 0) {
3767 +                       dev_err(&pdev->dev,
3768 +                                  "Can't get qdma queue %d irq.\n",
3769 +                                  i);
3770 +                       return fsl_qdma->queue_irq[i];
3771 +               }
3772 +
3773 +               ret = devm_request_irq(&pdev->dev,
3774 +                                     fsl_qdma->queue_irq[i],
3775 +                                     fsl_qdma_queue_handler,
3776 +                                     0,
3777 +                                     "qDMA queue",
3778 +                                     fsl_qdma);
3779 +               if (ret) {
3780 +                       dev_err(&pdev->dev,
3781 +                              "Can't register qDMA queue IRQ.\n");
3782 +                       return  ret;
3783 +               }
3784 +
3785 +               cpu = i % num_online_cpus();
3786 +               ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
3787 +                                          get_cpu_mask(cpu));
3788 +               if (ret) {
3789 +                       dev_err(&pdev->dev,
3790 +                              "Can't set cpu %d affinity to IRQ %d.\n",
3791 +                               cpu,
3792 +                               fsl_qdma->queue_irq[i]);
3793 +                       return  ret;
3794 +               }
3795 +
3796 +       }
3797 +
3798 +       return 0;
3799 +}
3800 +
3801 +static void fsl_qdma_irq_exit(
3802 +               struct platform_device *pdev, struct fsl_qdma_engine *fsl_qdma)
3803 +{
3804 +       if (fsl_qdma->queue_irq[0] == fsl_qdma->error_irq) {
3805 +               devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[0], fsl_qdma);
3806 +       } else {
3807 +               devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[0], fsl_qdma);
3808 +               devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
3809 +       }
3810 +}
3811 +
3812 +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
3813 +{
3814 +       struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
3815 +       struct fsl_qdma_queue *temp;
3816 +       void __iomem *ctrl = fsl_qdma->ctrl_base;
3817 +       void __iomem *status = fsl_qdma->status_base;
3818 +       void __iomem *block;
3819 +       int i, j, ret;
3820 +       u32 reg;
3821 +
3822 +       /* Try to halt the qDMA engine first. */
3823 +       ret = fsl_qdma_halt(fsl_qdma);
3824 +       if (ret) {
3825 +               dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
3826 +               return ret;
3827 +       }
3828 +
3829 +       for (i = 0; i < fsl_qdma->block_number; i++) {
3830 +               /*
3831 +                * Clear the command queue interrupt detect register for
3832 +                * all queues.
3833 +                */
3834 +
3835 +               block = fsl_qdma->block_base +
3836 +                       FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
3837 +               qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3838 +       }
3839 +
3840 +       for (j = 0; j < fsl_qdma->block_number; j++) {
3841 +               block = fsl_qdma->block_base +
3842 +                       FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3843 +               for (i = 0; i < fsl_qdma->n_queues; i++) {
3844 +                       temp = fsl_queue + i + (j * fsl_qdma->n_queues);
3845 +                       /*
3846 +                        * Initialize Command Queue registers to
3847 +                        * point to the first
3848 +                        * command descriptor in memory.
3849 +                        * Dequeue Pointer Address Registers
3850 +                        * Enqueue Pointer Address Registers
3851 +                        */
3852 +
3853 +                       qdma_writel(fsl_qdma, temp->bus_addr,
3854 +                                          block + FSL_QDMA_BCQDPA_SADDR(i));
3855 +                       qdma_writel(fsl_qdma, temp->bus_addr,
3856 +                                          block + FSL_QDMA_BCQEPA_SADDR(i));
3857 +
3858 +                       /* Initialize the queue mode. */
3859 +                       reg = FSL_QDMA_BCQMR_EN;
3860 +                       reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
3861 +                       reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
3862 +                       qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
3863 +               }
3864 +
3865 +               /*
3866 +                * Workaround for erratum: ERR010812.
3867 +                * We must enable XOFF to avoid enqueue rejections.
3868 +                * Setting SQCCMR ENTER_WM to 0x20.
3869 +                */
3870 +
3871 +               qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
3872 +                                  block + FSL_QDMA_SQCCMR);
3873 +
3874 +               /*
3875 +                * Initialize status queue registers to point to the first
3876 +                * command descriptor in memory.
3877 +                * Dequeue Pointer Address Registers
3878 +                * Enqueue Pointer Address Registers
3879 +                */
3880 +
3881 +               qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
3882 +                                  block + FSL_QDMA_SQEPAR);
3883 +               qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
3884 +                                  block + FSL_QDMA_SQDPAR);
3885 +               /* Initialize status queue interrupt. */
3886 +               qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
3887 +                                  block + FSL_QDMA_BCQIER(0));
3888 +               qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
3889 +                                  FSL_QDMA_BSQICR_ICST(5) | 0x8000,
3890 +                                  block + FSL_QDMA_BSQICR);
3891 +               qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
3892 +                                  FSL_QDMA_CQIER_TEIE,
3893 +                                  block + FSL_QDMA_CQIER);
3894 +
3895 +               /* Initialize the status queue mode. */
3896 +               reg = FSL_QDMA_BSQMR_EN;
3897 +               reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(
3898 +                       fsl_qdma->status[j]->n_cq) - 6);
3899 +
3900 +               qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3901 +               reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3902 +
3903 +       }
3904 +
3905 +       /* Initialize controller interrupt register. */
3906 +       qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
3907 +       qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
3908 +
3909 +       reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3910 +       reg &= ~FSL_QDMA_DMR_DQD;
3911 +       qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3912 +
3913 +       return 0;
3914 +}
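+
+/*
+ * Size encoding used above: BCQMR/BSQMR take log2(entries) - 6, i.e.
+ * entries = 64 << field, which is why queue sizes must fall between
+ * FSL_QDMA_CIRCULAR_DESC_SIZE_MIN (64, field 0) and
+ * FSL_QDMA_CIRCULAR_DESC_SIZE_MAX (16384, field 8).  The trailing
+ * qdma_readl() of BSQMR looks like a read-back to flush the posted
+ * write before the engine is un-halted.
+ */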
3915 +
3916 +static struct dma_async_tx_descriptor *
3917 +fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
3918 +               dma_addr_t src, size_t len, unsigned long flags)
3919 +{
3920 +       struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
3921 +       struct fsl_qdma_comp *fsl_comp;
3922 +
3923 +       fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);
3924 +
3925 +       if (!fsl_comp)
3926 +               return NULL;
3927 +
3928 +       fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
3929 +
3930 +       return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
3931 +}
3932 +
3933 +static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
3934 +{
3935 +       struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
3936 +       struct fsl_qdma_comp *fsl_comp;
3937 +       struct virt_dma_desc *vdesc;
3938 +       void __iomem *block = fsl_queue->block_base;
3939 +       u32 reg;
3940 +
3941 +       reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
3942 +       if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
3943 +               return;
3944 +       vdesc = vchan_next_desc(&fsl_chan->vchan);
3945 +       if (!vdesc)
3946 +               return;
3947 +       list_del(&vdesc->node);
3948 +       fsl_comp = to_fsl_qdma_comp(vdesc);
3949 +
3950 +       memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
3951 +       if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
3952 +               fsl_queue->virt_head = fsl_queue->cq;
3953 +
3954 +       list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
3955 +       barrier();
3956 +       reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
3957 +       reg |= FSL_QDMA_BCQMR_EI;
3958 +       qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
3959 +       fsl_chan->status = DMA_IN_PROGRESS;
3960 +}
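+
+/*
+ * Enqueue copies only the 16-byte frame descriptor (the first
+ * fsl_qdma_format entry of the completion) into the command ring at
+ * virt_head, wraps the head pointer at n_cq entries, and then sets
+ * BCQMR[EI] as a doorbell so the engine rescans the queue.
+ */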
3961 +
3962 +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
3963 +               dma_cookie_t cookie, struct dma_tx_state *txstate)
3964 +{
3965 +       return dma_cookie_status(chan, cookie, txstate);
3966 +}
3967 +
3968 +static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
3969 +{
3970 +       struct fsl_qdma_comp *fsl_comp;
3971 +       struct fsl_qdma_queue *fsl_queue;
3972 +       unsigned long flags;
3973 +
3974 +       fsl_comp = to_fsl_qdma_comp(vdesc);
3975 +       fsl_queue = fsl_comp->qchan->queue;
3976 +
3977 +       spin_lock_irqsave(&fsl_queue->queue_lock, flags);
3978 +       list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
3979 +       spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
3980 +}
3981 +
3982 +static void fsl_qdma_issue_pending(struct dma_chan *chan)
3983 +{
3984 +       struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
3985 +       struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
3986 +       unsigned long flags;
3987 +
3988 +       spin_lock_irqsave(&fsl_queue->queue_lock, flags);
3989 +       spin_lock(&fsl_chan->vchan.lock);
3990 +       if (vchan_issue_pending(&fsl_chan->vchan))
3991 +               fsl_qdma_enqueue_desc(fsl_chan);
3992 +       spin_unlock(&fsl_chan->vchan.lock);
3993 +       spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
3994 +}
3995 +
3996 +static void fsl_qdma_synchronize(struct dma_chan *chan)
3997 +{
3998 +       struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
3999 +
4000 +       vchan_synchronize(&fsl_chan->vchan);
4001 +}
4002 +
4003 +static int fsl_qdma_terminate_all(struct dma_chan *chan)
4004 +{
4005 +       struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4006 +       unsigned long flags;
4007 +       LIST_HEAD(head);
4008 +
4009 +       spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
4010 +       vchan_get_all_descriptors(&fsl_chan->vchan, &head);
4011 +       spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
4012 +       vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
4013 +       return 0;
4014 +}
4015 +
4016 +static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
4017 +{
4018 +       struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4019 +       struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
4020 +       struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
4021 +       int ret;
4022 +
4023 +       if (fsl_queue->comp_pool && fsl_queue->desc_pool)
4024 +               return fsl_qdma->desc_allocated;
4025 +
4026 +       INIT_LIST_HEAD(&fsl_queue->comp_free);
4027 +
4028 +       /*
4029 +        * The dma pool for queue command buffer
4030 +        */
4031 +       fsl_queue->comp_pool =
4032 +       dma_pool_create("comp_pool",
4033 +                      chan->device->dev,
4034 +                      FSL_QDMA_COMMAND_BUFFER_SIZE,
4035 +                      64, 0);
4036 +       if (!fsl_queue->comp_pool)
4037 +               return -ENOMEM;
4038 +
4039 +       /*
4040 +        * The dma pool for Descriptor(SD/DD) buffer
4041 +        */
4042 +       fsl_queue->desc_pool =
4043 +       dma_pool_create("desc_pool",
4044 +                      chan->device->dev,
4045 +                      FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
4046 +                      32, 0);
4047 +       if (!fsl_queue->desc_pool)
4048 +               goto err_desc_pool;
4049 +
4050 +       ret = fsl_qdma_pre_request_enqueue_comp_desc(fsl_queue);
4051 +       if (ret) {
4052 +               dev_err(chan->device->dev, "failed to alloc dma buffer for "
4053 +                               "comp S/G descriptor\n");
4054 +               goto err_mem;
4055 +       }
4056 +
4057 +       ret = fsl_qdma_pre_request_enqueue_sd_desc(fsl_queue);
4058 +       if (ret) {
4059 +               dev_err(chan->device->dev, "failed to alloc dma buffer for "
4060 +                               "S/D descriptor\n");
4061 +               goto err_mem;
4062 +       }
4063 +
4064 +       fsl_qdma->desc_allocated++;
4065 +       return fsl_qdma->desc_allocated;
4066 +
4067 +err_mem:
4068 +       dma_pool_destroy(fsl_queue->desc_pool);
4069 +err_desc_pool:
4070 +       dma_pool_destroy(fsl_queue->comp_pool);
4071 +       return -ENOMEM;
4072 +}
4073 +
4074 +static int fsl_qdma_probe(struct platform_device *pdev)
4075 +{
4076 +       struct device_node *np = pdev->dev.of_node;
4077 +       struct fsl_qdma_engine *fsl_qdma;
4078 +       struct fsl_qdma_chan *fsl_chan;
4079 +       struct resource *res;
4080 +       unsigned int len, chans, queues;
4081 +       int ret, i;
4082 +       int blk_num;
4083 +       int blk_off;
4084 +
4085 +       ret = of_property_read_u32(np, "channels", &chans);
4086 +       if (ret) {
4087 +               dev_err(&pdev->dev, "Can't get channels.\n");
4088 +               return ret;
4089 +       }
4090 +
4091 +       ret = of_property_read_u32(np, "block-offset", &blk_off);
4092 +       if (ret) {
4093 +               dev_err(&pdev->dev, "Can't get block-offset.\n");
4094 +               return ret;
4095 +       }
4096 +
4097 +       ret = of_property_read_u32(np, "block-number", &blk_num);
4098 +       if (ret) {
4099 +               dev_err(&pdev->dev, "Can't get block-number.\n");
4100 +               return ret;
4101 +       }
4102 +
4103 +       blk_num = min_t(int, blk_num, num_online_cpus());
4104 +
4105 +       len = sizeof(*fsl_qdma);
4106 +       fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4107 +       if (!fsl_qdma)
4108 +               return -ENOMEM;
4109 +
4110 +       len = sizeof(*fsl_chan) * chans;
4111 +       fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4112 +       if (!fsl_qdma->chans)
4113 +               return -ENOMEM;
4114 +
4115 +       len = sizeof(struct fsl_qdma_queue *) * blk_num;
4116 +       fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4117 +       if (!fsl_qdma->status)
4118 +               return -ENOMEM;
4119 +
4120 +       len = sizeof(int) * blk_num;
4121 +       fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4122 +       if (!fsl_qdma->queue_irq)
4123 +               return -ENOMEM;
4124 +
4125 +       ret = of_property_read_u32(np, "queues", &queues);
4126 +       if (ret) {
4127 +               dev_err(&pdev->dev, "Can't get queues.\n");
4128 +               return ret;
4129 +       }
4130 +
4131 +       fsl_qdma->desc_allocated = 0;
4132 +       fsl_qdma->n_chans = chans;
4133 +       fsl_qdma->n_queues = queues;
4134 +       fsl_qdma->block_number = blk_num;
4135 +       fsl_qdma->block_offset = blk_off;
4136 +
4137 +       mutex_init(&fsl_qdma->fsl_qdma_mutex);
4138 +
4139 +       for (i = 0; i < fsl_qdma->block_number; i++) {
4140 +               fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
4141 +               if (!fsl_qdma->status[i])
4142 +                       return -ENOMEM;
4143 +       }
4144 +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4145 +       fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
4146 +       if (IS_ERR(fsl_qdma->ctrl_base))
4147 +               return PTR_ERR(fsl_qdma->ctrl_base);
4148 +
4149 +       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4150 +       fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
4151 +       if (IS_ERR(fsl_qdma->status_base))
4152 +               return PTR_ERR(fsl_qdma->status_base);
4153 +
4154 +       res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
4155 +       fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
4156 +       if (IS_ERR(fsl_qdma->block_base))
4157 +               return PTR_ERR(fsl_qdma->block_base);
4158 +       fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
4159 +       if (!fsl_qdma->queue)
4160 +               return -ENOMEM;
4161 +
4162 +       ret = fsl_qdma_irq_init(pdev, fsl_qdma);
4163 +       if (ret)
4164 +               return ret;
4165 +
4166 +       fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
4167 +       fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
4168 +       INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
4169 +
4170 +       for (i = 0; i < fsl_qdma->n_chans; i++) {
4171 +               struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
4172 +
4173 +               fsl_chan->qdma = fsl_qdma;
4174 +               fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
4175 +                                                       fsl_qdma->block_number);
4176 +               fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
4177 +               vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
4178 +       }
4179 +
4180 +       dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
4181 +
4182 +       fsl_qdma->dma_dev.dev = &pdev->dev;
4183 +       fsl_qdma->dma_dev.device_free_chan_resources =
4184 +               fsl_qdma_free_chan_resources;
4185 +       fsl_qdma->dma_dev.device_alloc_chan_resources =
4186 +               fsl_qdma_alloc_chan_resources;
4187 +       fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
4188 +       fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
4189 +       fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
4190 +       fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
4191 +       fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;
4192 +
4193 +       dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
4194 +
4195 +       platform_set_drvdata(pdev, fsl_qdma);
4196 +
4197 +       ret = dma_async_device_register(&fsl_qdma->dma_dev);
4198 +       if (ret) {
4199 +               dev_err(&pdev->dev,
4200 +                       "Can't register the NXP Layerscape qDMA engine.\n");
4201 +               return ret;
4202 +       }
4203 +
4204 +       ret = fsl_qdma_reg_init(fsl_qdma);
4205 +       if (ret) {
4206 +               dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
4207 +               return ret;
4208 +       }
4209 +
4210 +       return 0;
4211 +}
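One probe detail worth unpacking is the channel-to-queue binding in the loop
above: channel i is attached to queue i % (n_queues * block_number), so the
channels are spread round-robin across every queue of every block. With
hypothetical figures of "queues" = 2 and two blocks, eight channels land on
the four queue instances as 0, 1, 2, 3, 0, 1, 2, 3; a standalone check of the
arithmetic:

        #include <linux/printk.h>

        /* Hypothetical figures: 2 queues per block, 2 blocks, 8 channels */
        static void example_chan_to_queue_map(void)
        {
                unsigned int n_queues = 2, block_number = 2, i;

                for (i = 0; i < 8; i++)
                        pr_info("chan %u -> queue %u\n",
                                i, i % (n_queues * block_number));
        }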
4212 +
4213 +static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
4214 +{
4215 +       struct fsl_qdma_chan *chan, *_chan;
4216 +
4217 +       list_for_each_entry_safe(chan, _chan,
4218 +                               &dmadev->channels, vchan.chan.device_node) {
4219 +               list_del(&chan->vchan.chan.device_node);
4220 +               tasklet_kill(&chan->vchan.task);
4221 +       }
4222 +}
4223 +
4224 +static int fsl_qdma_remove(struct platform_device *pdev)
4225 +{
4226 +       struct device_node *np = pdev->dev.of_node;
4227 +       struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
4228 +       struct fsl_qdma_queue *status;
4229 +       int i;
4230 +
4231 +       fsl_qdma_irq_exit(pdev, fsl_qdma);
4232 +       fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
4233 +       of_dma_controller_free(np);
4234 +       dma_async_device_unregister(&fsl_qdma->dma_dev);
4235 +
4236 +       for (i = 0; i < fsl_qdma->block_number; i++) {
4237 +               status = fsl_qdma->status[i];
4238 +               dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
4239 +                               status->n_cq, status->cq, status->bus_addr);
4240 +       }
4241 +       return 0;
4242 +}
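fsl_qdma_remove() only frees the coherent status rings by hand because every
other probe-time allocation is device-managed: devm_kzalloc() and
devm_ioremap_resource() resources are released automatically when the device
unbinds. The same pattern in isolation (a minimal sketch; driver and struct
names are illustrative):

        #include <linux/err.h>
        #include <linux/io.h>
        #include <linux/platform_device.h>

        struct example_priv {
                void __iomem *base;
        };

        static int example_probe(struct platform_device *pdev)
        {
                struct example_priv *priv;
                struct resource *res;

                /* Freed by the driver core when the device is unbound */
                priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
                if (!priv)
                        return -ENOMEM;

                /* Request + ioremap of the register window, device-managed */
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                priv->base = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(priv->base))
                        return PTR_ERR(priv->base);

                platform_set_drvdata(pdev, priv);
                return 0;       /* no matching frees needed in remove() */
        }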
4243 +
4244 +static const struct of_device_id fsl_qdma_dt_ids[] = {
4245 +       { .compatible = "fsl,ls1021a-qdma", },
4246 +       { /* sentinel */ }
4247 +};
4248 +MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
4249 +
4250 +static struct platform_driver fsl_qdma_driver = {
4251 +       .driver         = {
4252 +               .name   = "fsl-qdma",
4253 +               .of_match_table = fsl_qdma_dt_ids,
4254 +       },
4255 +       .probe          = fsl_qdma_probe,
4256 +       .remove         = fsl_qdma_remove,
4257 +};
4258 +
4259 +module_platform_driver(fsl_qdma_driver);
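module_platform_driver() merely generates the registration boilerplate; it is
equivalent to open-coding the following (function names illustrative):

        static int __init fsl_qdma_init(void)
        {
                return platform_driver_register(&fsl_qdma_driver);
        }
        module_init(fsl_qdma_init);

        static void __exit fsl_qdma_exit(void)
        {
                platform_driver_unregister(&fsl_qdma_driver);
        }
        module_exit(fsl_qdma_exit);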
4260 +
4261 +MODULE_ALIAS("platform:fsl-qdma");
4262 +MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
4263 +MODULE_LICENSE("GPL v2");