target/linux/ramips/patches-4.4/0047-DMA-ralink-add-rt2880-dma-engine.patch
1 From f1c4d9e622c800e1f38b3818f933ec7597d1ccfb Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Sun, 27 Jul 2014 09:29:51 +0100
4 Subject: [PATCH 47/53] DMA: ralink: add rt2880 dma engine
5
6 Signed-off-by: John Crispin <blogic@openwrt.org>
7 ---
 drivers/dma/Kconfig       |   16 +
 drivers/dma/Makefile      |    2 +
 drivers/dma/mtk-hsdma.c   |  781 ++++++++++++++++++++++++++++++++++++++++
 drivers/dma/ralink-gdma.c |  943 +++++++++++++++++++++++++++++++++++++++++++++
 include/linux/dmaengine.h |    1 +
 5 files changed, 1743 insertions(+)
 create mode 100644 drivers/dma/mtk-hsdma.c
 create mode 100644 drivers/dma/ralink-gdma.c
14
15 --- a/drivers/dma/Kconfig
16 +++ b/drivers/dma/Kconfig
17 @@ -40,6 +40,22 @@ config ASYNC_TX_ENABLE_CHANNEL_SWITCH
18  config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
19         bool
20  
21 +config DMA_RALINK
22 +       tristate "RALINK DMA support"
23 +       depends on RALINK && !SOC_RT288X
24 +       select DMA_ENGINE
25 +       select DMA_VIRTUAL_CHANNELS
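+       help
+         DMA engine driver for the Ralink RT305x/RT3883 family of SoCs.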
26 +
27 +config MTK_HSDMA
28 +       tristate "MTK HSDMA support"
29 +       depends on RALINK && SOC_MT7621
30 +       select DMA_ENGINE
31 +       select DMA_VIRTUAL_CHANNELS
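+       help
+         Driver for the high-speed DMA engine on the MediaTek MT7621 SoC.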
32 +
33  config DMA_ENGINE
34         bool
35  
36 --- a/drivers/dma/Makefile
37 +++ b/drivers/dma/Makefile
38 @@ -65,5 +65,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-
39  obj-$(CONFIG_TI_EDMA) += edma.o
40  obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
41  obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
42 +obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o
43 +obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
44  
45  obj-y += xilinx/
46 --- /dev/null
47 +++ b/drivers/dma/ralink-gdma.c
48 @@ -0,0 +1,943 @@
49 +/*
50 + *  Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
51 + *  GDMA4740 DMAC support
52 + *
53 + *  This program is free software; you can redistribute it and/or modify it
54 + *  under the terms of the GNU General Public License as published by the
55 + *  Free Software Foundation; either version 2 of the License, or (at your
56 + *  option) any later version.
57 + *
58 + */
59 +
60 +#include <linux/dmaengine.h>
61 +#include <linux/dma-mapping.h>
62 +#include <linux/err.h>
63 +#include <linux/init.h>
64 +#include <linux/list.h>
65 +#include <linux/module.h>
66 +#include <linux/platform_device.h>
67 +#include <linux/slab.h>
68 +#include <linux/spinlock.h>
69 +#include <linux/irq.h>
70 +#include <linux/of_dma.h>
71 +#include <linux/reset.h>
72 +#include <linux/of_device.h>
73 +
74 +#include "virt-dma.h"
75 +
76 +#define GDMA_REG_SRC_ADDR(x)           (0x00 + (x) * 0x10)
77 +#define GDMA_REG_DST_ADDR(x)           (0x04 + (x) * 0x10)
78 +
79 +#define GDMA_REG_CTRL0(x)              (0x08 + (x) * 0x10)
80 +#define GDMA_REG_CTRL0_TX_MASK         0xffff
81 +#define GDMA_REG_CTRL0_TX_SHIFT                16
82 +#define GDMA_REG_CTRL0_CURR_MASK       0xff
83 +#define GDMA_REG_CTRL0_CURR_SHIFT      8
84 +#define GDMA_REG_CTRL0_SRC_ADDR_FIXED  BIT(7)
85 +#define GDMA_REG_CTRL0_DST_ADDR_FIXED  BIT(6)
86 +#define GDMA_REG_CTRL0_BURST_MASK      0x7
87 +#define GDMA_REG_CTRL0_BURST_SHIFT     3
88 +#define GDMA_REG_CTRL0_DONE_INT                BIT(2)
89 +#define GDMA_REG_CTRL0_ENABLE          BIT(1)
90 +#define GDMA_REG_CTRL0_SW_MODE          BIT(0)
91 +
92 +#define GDMA_REG_CTRL1(x)              (0x0c + (x) * 0x10)
93 +#define GDMA_REG_CTRL1_SEG_MASK                0xf
94 +#define GDMA_REG_CTRL1_SEG_SHIFT       22
95 +#define GDMA_REG_CTRL1_REQ_MASK                0x3f
96 +#define GDMA_REG_CTRL1_SRC_REQ_SHIFT   16
97 +#define GDMA_REG_CTRL1_DST_REQ_SHIFT   8
98 +#define GDMA_REG_CTRL1_CONTINUOUS      BIT(14)
99 +#define GDMA_REG_CTRL1_NEXT_MASK       0x1f
100 +#define GDMA_REG_CTRL1_NEXT_SHIFT      3
101 +#define GDMA_REG_CTRL1_COHERENT                BIT(2)
102 +#define GDMA_REG_CTRL1_FAIL            BIT(1)
103 +#define GDMA_REG_CTRL1_MASK            BIT(0)
104 +
105 +#define GDMA_REG_UNMASK_INT            0x200
106 +#define GDMA_REG_DONE_INT              0x204
107 +
108 +#define GDMA_REG_GCT                   0x220
109 +#define GDMA_REG_GCT_CHAN_MASK         0x3
110 +#define GDMA_REG_GCT_CHAN_SHIFT                3
111 +#define GDMA_REG_GCT_VER_MASK          0x3
112 +#define GDMA_REG_GCT_VER_SHIFT         1
113 +#define GDMA_REG_GCT_ARBIT_RR          BIT(0)
114 +
115 +#define GDMA_REG_REQSTS                        0x2a0
116 +#define GDMA_REG_ACKSTS                        0x2a4
117 +#define GDMA_REG_FINSTS                        0x2a8
118 +
119 +/* for RT305X gdma registers */
120 +#define GDMA_RT305X_CTRL0_REQ_MASK     0xf
121 +#define GDMA_RT305X_CTRL0_SRC_REQ_SHIFT        12
122 +#define GDMA_RT305X_CTRL0_DST_REQ_SHIFT        8
123 +
124 +#define GDMA_RT305X_CTRL1_FAIL         BIT(4)
125 +#define GDMA_RT305X_CTRL1_NEXT_MASK    0x7
126 +#define GDMA_RT305X_CTRL1_NEXT_SHIFT   1
127 +
128 +#define GDMA_RT305X_STATUS_INT         0x80
129 +#define GDMA_RT305X_STATUS_SIGNAL      0x84
130 +#define GDMA_RT305X_GCT                        0x88
131 +
132 +/* for MT7621 gdma registers */
133 +#define GDMA_REG_PERF_START(x)         (0x230 + (x) * 0x8)
134 +#define GDMA_REG_PERF_END(x)           (0x234 + (x) * 0x8)
135 +
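+/*
+ * Burst size encoding programmed into the CTRL0 burst field: each
+ * step doubles the burst, from 4 bytes (0) up to 64 bytes (4).
+ */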
136 +enum gdma_dma_transfer_size {
137 +       GDMA_TRANSFER_SIZE_4BYTE        = 0,
138 +       GDMA_TRANSFER_SIZE_8BYTE        = 1,
139 +       GDMA_TRANSFER_SIZE_16BYTE       = 2,
140 +       GDMA_TRANSFER_SIZE_32BYTE       = 3,
141 +       GDMA_TRANSFER_SIZE_64BYTE       = 4,
142 +};
143 +
144 +struct gdma_dma_sg {
145 +       dma_addr_t src_addr;
146 +       dma_addr_t dst_addr;
147 +       u32 len;
148 +};
149 +
150 +struct gdma_dma_desc {
151 +       struct virt_dma_desc vdesc;
152 +
153 +       enum dma_transfer_direction direction;
154 +       bool cyclic;
155 +
156 +       u32 residue;
157 +       unsigned int num_sgs;
158 +       struct gdma_dma_sg sg[];
159 +};
160 +
161 +struct gdma_dmaengine_chan {
162 +       struct virt_dma_chan vchan;
163 +       unsigned int id;
164 +       unsigned int slave_id;
165 +
166 +       dma_addr_t fifo_addr;
167 +       enum gdma_dma_transfer_size burst_size;
168 +
169 +       struct gdma_dma_desc *desc;
170 +       unsigned int next_sg;
171 +};
172 +
173 +struct gdma_dma_dev {
174 +       struct dma_device ddev;
175 +       struct device_dma_parameters dma_parms;
176 +       struct gdma_data *data;
177 +       void __iomem *base;
178 +       struct tasklet_struct task;
179 +       volatile unsigned long chan_issued;
180 +       atomic_t cnt;
181 +
182 +       struct gdma_dmaengine_chan chan[];
183 +};
184 +
185 +struct gdma_data
186 +{
187 +       int chancnt;
188 +       u32 done_int_reg;
189 +       void (*init)(struct gdma_dma_dev *dma_dev);
190 +       int (*start_transfer)(struct gdma_dmaengine_chan *chan);
191 +};
192 +
193 +static struct gdma_dma_dev *gdma_dma_chan_get_dev(
194 +       struct gdma_dmaengine_chan *chan)
195 +{
196 +       return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
197 +               ddev);
198 +}
199 +
200 +static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
201 +{
202 +       return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
203 +}
204 +
205 +static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
206 +{
207 +       return container_of(vdesc, struct gdma_dma_desc, vdesc);
208 +}
209 +
210 +static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
211 +       unsigned int reg)
212 +{
213 +       return readl(dma_dev->base + reg);
214 +}
215 +
216 +static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
217 +       unsigned reg, uint32_t val)
218 +{
219 +       writel(val, dma_dev->base + reg);
220 +}
221 +
222 +static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs)
223 +{
224 +       return kzalloc(sizeof(struct gdma_dma_desc) +
225 +               sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC);
226 +}
227 +
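+/*
+ * Map a dma_slave_config maxburst count (in bus words) onto the
+ * largest hardware burst size that does not exceed it.
+ */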
228 +static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
229 +{
230 +       if (maxburst < 2)
231 +               return GDMA_TRANSFER_SIZE_4BYTE;
232 +       else if (maxburst < 4)
233 +               return GDMA_TRANSFER_SIZE_8BYTE;
234 +       else if (maxburst < 8)
235 +               return GDMA_TRANSFER_SIZE_16BYTE;
236 +       else if (maxburst < 16)
237 +               return GDMA_TRANSFER_SIZE_32BYTE;
238 +       else
239 +               return GDMA_TRANSFER_SIZE_64BYTE;
240 +}
241 +
242 +static int gdma_dma_config(struct dma_chan *c,
243 +               struct dma_slave_config *config)
244 +{
245 +       struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
246 +       struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
247 +
248 +       if (config->device_fc) {
249 +               dev_err(dma_dev->ddev.dev, "flow controller is not supported\n");
250 +               return -EINVAL;
251 +       }
252 +
253 +       switch (config->direction) {
254 +       case DMA_MEM_TO_DEV:
255 +               if (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
256 +                       dev_err(dma_dev->ddev.dev, "only 4 byte bus width is supported\n");
257 +                       return -EINVAL;
258 +               }
259 +               chan->slave_id = config->slave_id;
260 +               chan->fifo_addr = config->dst_addr;
261 +               chan->burst_size = gdma_dma_maxburst(config->dst_maxburst);
262 +               break;
263 +       case DMA_DEV_TO_MEM:
264 +               if (config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
265 +                       dev_err(dma_dev->ddev.dev, "only support 4 byte buswidth\n");
266 +                       return -EINVAL;
267 +               }
268 +               chan->slave_id = config->slave_id;
269 +               chan->fifo_addr = config->src_addr;
270 +               chan->burst_size = gdma_dma_maxburst(config->src_maxburst);
271 +               break;
272 +       default:
273 +               dev_err(dma_dev->ddev.dev, "invalid direction type %d\n",
274 +                               config->direction);
275 +               return -EINVAL;
276 +       }
277 +
278 +       return 0;
279 +}
280 +
281 +static int gdma_dma_terminate_all(struct dma_chan *c)
282 +{
283 +       struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
284 +       struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
285 +       unsigned long flags, timeout;
286 +       LIST_HEAD(head);
287 +       int i = 0;
288 +
289 +       spin_lock_irqsave(&chan->vchan.lock, flags);
290 +       chan->desc = NULL;
291 +       clear_bit(chan->id, &dma_dev->chan_issued);
292 +       vchan_get_all_descriptors(&chan->vchan, &head);
293 +       spin_unlock_irqrestore(&chan->vchan.lock, flags);
294 +
295 +       vchan_dma_desc_free_list(&chan->vchan, &head);
296 +
297 +       /* wait for the dma transfer to complete */
298 +       timeout = jiffies + msecs_to_jiffies(5000);
299 +       while (gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)) &
300 +                       GDMA_REG_CTRL0_ENABLE) {
301 +               if (time_after_eq(jiffies, timeout)) {
302 +                       dev_err(dma_dev->ddev.dev, "chan %d wait timeout\n",
303 +                                       chan->id);
304 +                       /* restore to init value */
305 +                       gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), 0);
306 +                       break;
307 +               }
308 +               cpu_relax();
309 +               i++;
310 +       }
311 +
312 +       if (i)
313 +               dev_dbg(dma_dev->ddev.dev, "terminate chan %d loops %d\n",
314 +                               chan->id, i);
315 +
316 +       return 0;
317 +}
318 +
319 +static void rt305x_dump_reg(struct gdma_dma_dev *dma_dev, int id)
320 +{
321 +       dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, " \
322 +                       "ctr1 %08x, intr %08x, signal %08x\n", id,
323 +                       gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
324 +                       gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
325 +                       gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
326 +                       gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
327 +                       gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_INT),
328 +                       gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_SIGNAL));
329 +}
330 +
331 +static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
332 +{
333 +       struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
334 +       dma_addr_t src_addr, dst_addr;
335 +       struct gdma_dma_sg *sg;
336 +       uint32_t ctrl0, ctrl1;
337 +
338 +       /* verify that the chan is stopped */
339 +       ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
340 +       if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
341 +               dev_err(dma_dev->ddev.dev, "chan %d is already started (%08x)\n",
342 +                               chan->id, ctrl0);
343 +               rt305x_dump_reg(dma_dev, chan->id);
344 +               return -EINVAL;
345 +       }
346 +
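+       /*
+        * The peripheral end of a slave transfer uses the fixed FIFO
+        * address and the chan->slave_id request line; the memory end
+        * uses request number 8.
+        */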
347 +       sg = &chan->desc->sg[chan->next_sg];
348 +       if (chan->desc->direction == DMA_MEM_TO_DEV) {
349 +               src_addr = sg->src_addr;
350 +               dst_addr = chan->fifo_addr;
351 +               ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED | \
352 +                       (8 << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) | \
353 +                       (chan->slave_id << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
354 +       } else if (chan->desc->direction == DMA_DEV_TO_MEM) {
355 +               src_addr = chan->fifo_addr;
356 +               dst_addr = sg->dst_addr;
357 +               ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED | \
358 +                       (chan->slave_id << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) | \
359 +                       (8 << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
360 +       } else if (chan->desc->direction == DMA_MEM_TO_MEM) {
361 +               /*
362 +                * TODO: the memcpy mode has bugs: it sometimes copies 8
363 +                * bytes more than requested when verifying with dmatest.
364 +                */
365 +               src_addr = sg->src_addr;
366 +               dst_addr = sg->dst_addr;
367 +               ctrl0 = GDMA_REG_CTRL0_SW_MODE | \
368 +                       (8 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
369 +                       (8 << GDMA_REG_CTRL1_DST_REQ_SHIFT);
370 +       } else {
371 +               dev_err(dma_dev->ddev.dev, "invalid direction type %d\n",
372 +                               chan->desc->direction);
373 +               return -EINVAL;
374 +       }
375 +
376 +       ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) | \
377 +                (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) | \
378 +                GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
379 +       ctrl1 = chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
380 +
381 +       chan->next_sg++;
382 +       gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
383 +       gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
384 +       gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
385 +
386 +       /* make sure next_sg is updated */
387 +       wmb();
388 +       gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
389 +
390 +       return 0;
391 +}
392 +
393 +static void rt3883_dump_reg(struct gdma_dma_dev *dma_dev, int id)
394 +{
395 +       dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, " \
396 +                       "ctr1 %08x, unmask %08x, done %08x, " \
397 +                       "req %08x, ack %08x, fin %08x\n", id,
398 +                       gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
399 +                       gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
400 +                       gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
401 +                       gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
402 +                       gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT),
403 +                       gdma_dma_read(dma_dev, GDMA_REG_DONE_INT),
404 +                       gdma_dma_read(dma_dev, GDMA_REG_REQSTS),
405 +                       gdma_dma_read(dma_dev, GDMA_REG_ACKSTS),
406 +                       gdma_dma_read(dma_dev, GDMA_REG_FINSTS));
407 +}
408 +
409 +static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
410 +{
411 +       struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
412 +       dma_addr_t src_addr, dst_addr;
413 +       struct gdma_dma_sg *sg;
414 +       uint32_t ctrl0, ctrl1;
415 +
416 +       /* verify that the chan is stopped */
417 +       ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
418 +       if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
419 +               dev_err(dma_dev->ddev.dev, "chan %d is already started (%08x)\n",
420 +                               chan->id, ctrl0);
421 +               rt3883_dump_reg(dma_dev, chan->id);
422 +               return -EINVAL;
423 +       }
424 +
425 +       sg = &chan->desc->sg[chan->next_sg];
426 +       if (chan->desc->direction == DMA_MEM_TO_DEV) {
427 +               src_addr = sg->src_addr;
428 +               dst_addr = chan->fifo_addr;
429 +               ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED;
430 +               ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
431 +                       (chan->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT);
432 +       } else if (chan->desc->direction == DMA_DEV_TO_MEM) {
433 +               src_addr = chan->fifo_addr;
434 +               dst_addr = sg->dst_addr;
435 +               ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
436 +               ctrl1 = (chan->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
437 +                       (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) | \
438 +                       GDMA_REG_CTRL1_COHERENT;
439 +       } else if (chan->desc->direction == DMA_MEM_TO_MEM) {
440 +               src_addr = sg->src_addr;
441 +               dst_addr = sg->dst_addr;
442 +               ctrl0 = GDMA_REG_CTRL0_SW_MODE;
443 +               ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
444 +                       (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) | \
445 +                       GDMA_REG_CTRL1_COHERENT;
446 +       } else {
447 +               dev_err(dma_dev->ddev.dev, "invalid direction type %d\n",
448 +                               chan->desc->direction);
449 +               return -EINVAL;
450 +       }
451 +
452 +       ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) | \
453 +                (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) | \
454 +                GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
455 +       ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
456 +
457 +       chan->next_sg++;
458 +       gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
459 +       gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
460 +       gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
461 +
462 +       /* make sure next_sg is updated */
463 +       wmb();
464 +       gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
465 +
466 +       return 0;
467 +}
468 +
469 +static inline int gdma_start_transfer(struct gdma_dma_dev *dma_dev,
470 +               struct gdma_dmaengine_chan *chan)
471 +{
472 +       return dma_dev->data->start_transfer(chan);
473 +}
474 +
475 +static int gdma_next_desc(struct gdma_dmaengine_chan *chan)
476 +{
477 +       struct virt_dma_desc *vdesc;
478 +
479 +       vdesc = vchan_next_desc(&chan->vchan);
480 +       if (!vdesc) {
481 +               chan->desc = NULL;
482 +               return 0;
483 +       }
484 +       chan->desc = to_gdma_dma_desc(vdesc);
485 +       chan->next_sg = 0;
486 +
487 +       return 1;
488 +}
489 +
490 +static void gdma_dma_chan_irq(struct gdma_dma_dev *dma_dev,
491 +               struct gdma_dmaengine_chan *chan)
492 +{
493 +       struct gdma_dma_desc *desc;
494 +       unsigned long flags;
495 +       int chan_issued;
496 +
497 +       chan_issued = 0;
498 +       spin_lock_irqsave(&chan->vchan.lock, flags);
499 +       desc = chan->desc;
500 +       if (desc) {
501 +               if (desc->cyclic) {
502 +                       vchan_cyclic_callback(&desc->vdesc);
503 +                       if (chan->next_sg == desc->num_sgs)
504 +                               chan->next_sg = 0;
505 +                       chan_issued = 1;
506 +               } else {
507 +                       desc->residue -= desc->sg[chan->next_sg - 1].len;
508 +                       if (chan->next_sg == desc->num_sgs) {
509 +                               list_del(&desc->vdesc.node);
510 +                               vchan_cookie_complete(&desc->vdesc);
511 +                               chan_issued = gdma_next_desc(chan);
512 +                       } else
513 +                               chan_issued = 1;
514 +               }
515 +       } else
516 +               dev_dbg(dma_dev->ddev.dev, "chan %d no desc to complete\n",
517 +                               chan->id);
518 +       if (chan_issued)
519 +               set_bit(chan->id, &dma_dev->chan_issued);
520 +       spin_unlock_irqrestore(&chan->vchan.lock, flags);
521 +}
522 +
523 +static irqreturn_t gdma_dma_irq(int irq, void *devid)
524 +{
525 +       struct gdma_dma_dev *dma_dev = devid;
526 +       u32 done, done_reg;
527 +       unsigned int i;
528 +
529 +       done_reg = dma_dev->data->done_int_reg;
530 +       done = gdma_dma_read(dma_dev, done_reg);
531 +       if (unlikely(!done))
532 +               return IRQ_NONE;
533 +
534 +       /* clear the done bits */
535 +       gdma_dma_write(dma_dev, done_reg, done);
536 +
537 +       i = 0;
538 +       while (done) {
539 +               if (done & 0x1) {
540 +                       gdma_dma_chan_irq(dma_dev, &dma_dev->chan[i]);
541 +                       atomic_dec(&dma_dev->cnt);
542 +               }
543 +               done >>= 1;
544 +               i++;
545 +       }
546 +
547 +       /* schedule the tasklet only if there is work to do */
548 +       if (dma_dev->chan_issued)
549 +               tasklet_schedule(&dma_dev->task);
550 +
551 +       return IRQ_HANDLED;
552 +}
553 +
554 +static void gdma_dma_issue_pending(struct dma_chan *c)
555 +{
556 +       struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
557 +       struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
558 +       unsigned long flags;
559 +
560 +       spin_lock_irqsave(&chan->vchan.lock, flags);
561 +       if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
562 +               if (gdma_next_desc(chan)) {
563 +                       set_bit(chan->id, &dma_dev->chan_issued);
564 +                       tasklet_schedule(&dma_dev->task);
565 +               } else
566 +                       dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n",
567 +                                       chan->id);
568 +       }
569 +       spin_unlock_irqrestore(&chan->vchan.lock, flags);
570 +}
571 +
572 +static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
573 +               struct dma_chan *c, struct scatterlist *sgl,
574 +               unsigned int sg_len, enum dma_transfer_direction direction,
575 +               unsigned long flags, void *context)
576 +{
577 +       struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
578 +       struct gdma_dma_desc *desc;
579 +       struct scatterlist *sg;
580 +       unsigned int i;
581 +
582 +       desc = gdma_dma_alloc_desc(sg_len);
583 +       if (!desc) {
584 +               dev_err(c->device->dev, "failed to allocate an sg desc\n");
585 +               return NULL;
586 +       }
587 +       desc->residue = 0;
588 +
589 +       for_each_sg(sgl, sg, sg_len, i) {
590 +               if (direction == DMA_MEM_TO_DEV)
591 +                       desc->sg[i].src_addr = sg_dma_address(sg);
592 +               else if (direction == DMA_DEV_TO_MEM)
593 +                       desc->sg[i].dst_addr = sg_dma_address(sg);
594 +               else {
595 +                       dev_err(c->device->dev, "invalid direction type %d\n",
596 +                                       direction);
597 +                       goto free_desc;
598 +               }
599 +
600 +               if (unlikely(sg_dma_len(sg) > GDMA_REG_CTRL0_TX_MASK)) {
601 +                       dev_err(c->device->dev, "sg len too large %d\n",
602 +                                       sg_dma_len(sg));
603 +                       goto free_desc;
604 +               }
605 +               desc->sg[i].len = sg_dma_len(sg);
606 +               desc->residue += sg_dma_len(sg);
607 +       }
608 +
609 +       desc->num_sgs = sg_len;
610 +       desc->direction = direction;
611 +       desc->cyclic = false;
612 +
613 +       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
614 +
615 +free_desc:
616 +       kfree(desc);
617 +       return NULL;
618 +}
619 +
620 +static struct dma_async_tx_descriptor *gdma_dma_prep_dma_memcpy(
621 +               struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
622 +               size_t len, unsigned long flags)
623 +{
624 +       struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
625 +       struct gdma_dma_desc *desc;
626 +       unsigned int num_periods, i;
627 +       size_t xfer_count;
628 +
629 +       if (len <= 0)
630 +               return NULL;
631 +
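+       /* pick a burst size from the length, converted from bytes to words */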
632 +       chan->burst_size = gdma_dma_maxburst(len >> 2);
633 +
634 +       xfer_count = GDMA_REG_CTRL0_TX_MASK;
635 +       num_periods = DIV_ROUND_UP(len, xfer_count);
636 +
637 +       desc = gdma_dma_alloc_desc(num_periods);
638 +       if (!desc) {
639 +               dev_err(c->device->dev, "failed to allocate a memcpy desc\n");
640 +               return NULL;
641 +       }
642 +       desc->residue = len;
643 +
644 +       for (i = 0; i < num_periods; i++) {
645 +               desc->sg[i].src_addr = src;
646 +               desc->sg[i].dst_addr = dest;
647 +               if (len > xfer_count) {
648 +                       desc->sg[i].len = xfer_count;
649 +               } else {
650 +                       desc->sg[i].len = len;
651 +               }
652 +               src += desc->sg[i].len;
653 +               dest += desc->sg[i].len;
654 +               len -= desc->sg[i].len;
655 +       }
656 +
657 +       desc->num_sgs = num_periods;
658 +       desc->direction = DMA_MEM_TO_MEM;
659 +       desc->cyclic = false;
660 +
661 +       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
662 +}
663 +
664 +static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
665 +       struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
666 +       size_t period_len, enum dma_transfer_direction direction,
667 +       unsigned long flags)
668 +{
669 +       struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
670 +       struct gdma_dma_desc *desc;
671 +       unsigned int num_periods, i;
672 +
673 +       if (buf_len % period_len)
674 +               return NULL;
675 +
676 +       if (period_len > GDMA_REG_CTRL0_TX_MASK) {
677 +               dev_err(c->device->dev, "cyclic len too large %zu\n",
678 +                               period_len);
679 +               return NULL;
680 +       }
681 +
682 +       num_periods = buf_len / period_len;
683 +       desc = gdma_dma_alloc_desc(num_periods);
684 +       if (!desc) {
685 +               dev_err(c->device->dev, "failed to allocate a cyclic desc\n");
686 +               return NULL;
687 +       }
688 +       desc->residue = buf_len;
689 +
690 +       for (i = 0; i < num_periods; i++) {
691 +               if (direction == DMA_MEM_TO_DEV)
692 +                       desc->sg[i].src_addr = buf_addr;
693 +               else if (direction == DMA_DEV_TO_MEM)
694 +                       desc->sg[i].dst_addr = buf_addr;
695 +               else {
696 +                       dev_err(c->device->dev, "invalid direction type %d\n",
697 +                                       direction);
698 +                       goto free_desc;
699 +               }
700 +               desc->sg[i].len = period_len;
701 +               buf_addr += period_len;
702 +       }
703 +
704 +       desc->num_sgs = num_periods;
705 +       desc->direction = direction;
706 +       desc->cyclic = true;
707 +
708 +       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
709 +
710 +free_desc:
711 +       kfree(desc);
712 +       return NULL;
713 +}
714 +
715 +static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
716 +       dma_cookie_t cookie, struct dma_tx_state *state)
717 +{
718 +       struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
719 +       struct virt_dma_desc *vdesc;
720 +       enum dma_status status;
721 +       unsigned long flags;
722 +       struct gdma_dma_desc *desc;
723 +
724 +       status = dma_cookie_status(c, cookie, state);
725 +       if (status == DMA_COMPLETE || !state)
726 +               return status;
727 +
728 +       spin_lock_irqsave(&chan->vchan.lock, flags);
729 +       desc = chan->desc;
730 +       if (desc && (cookie == desc->vdesc.tx.cookie)) {
731 +               /*
732 +                * We never update desc->residue in the cyclic case, so we
733 +                * can only report the remaining room to the end of the
734 +                * circular buffer.
735 +                */
736 +               if (desc->cyclic)
737 +                       state->residue = desc->residue -
738 +                               ((chan->next_sg - 1) * desc->sg[0].len);
739 +               else
740 +                       state->residue = desc->residue;
741 +       } else if ((vdesc = vchan_find_desc(&chan->vchan, cookie)))
742 +               state->residue = to_gdma_dma_desc(vdesc)->residue;
743 +       spin_unlock_irqrestore(&chan->vchan.lock, flags);
744 +
745 +       dev_dbg(c->device->dev, "tx residue %d bytes\n", state->residue);
746 +
747 +       return status;
748 +}
749 +
750 +static void gdma_dma_free_chan_resources(struct dma_chan *c)
751 +{
752 +       vchan_free_chan_resources(to_virt_chan(c));
753 +}
754 +
755 +static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
756 +{
757 +       kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
758 +}
759 +
760 +static void gdma_dma_tasklet(unsigned long arg)
761 +{
762 +       struct gdma_dma_dev *dma_dev = (struct gdma_dma_dev *)arg;
763 +       struct gdma_dmaengine_chan *chan;
764 +       static unsigned int last_chan;
765 +       unsigned int i, chan_mask;
766 +
767 +       /* start from the last chan so all chans are served round-robin */
768 +       i = last_chan;
769 +       chan_mask = dma_dev->data->chancnt - 1;
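+       /* chancnt is a power of two (8 or 16), so the mask wraps the index */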
770 +       do {
771 +               /*
772 +                * On mt7621, when verifying with dmatest with all
773 +                * channels enabled, we need to limit it so that only
774 +                * two channels work at the same time; otherwise the
775 +                * data gets corrupted.
776 +                */
777 +               if (atomic_read(&dma_dev->cnt) >= 2) {
778 +                       last_chan = i;
779 +                       break;
780 +               }
781 +
782 +               if (test_and_clear_bit(i, &dma_dev->chan_issued)) {
783 +                       chan = &dma_dev->chan[i];
784 +                       if (chan->desc) {
785 +                               atomic_inc(&dma_dev->cnt);
786 +                               gdma_start_transfer(dma_dev, chan);
787 +                       } else
788 +                               dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n", chan->id);
789 +
790 +                       if (!dma_dev->chan_issued)
791 +                               break;
792 +               }
793 +
794 +               i = (i + 1) & chan_mask;
795 +       } while (i != last_chan);
796 +}
797 +
798 +static void rt305x_gdma_init(struct gdma_dma_dev *dma_dev)
799 +{
800 +       uint32_t gct;
801 +
802 +       /* all chans round robin */
803 +       gdma_dma_write(dma_dev, GDMA_RT305X_GCT, GDMA_REG_GCT_ARBIT_RR);
804 +
805 +       gct = gdma_dma_read(dma_dev, GDMA_RT305X_GCT);
806 +       dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
807 +                       (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
808 +                       8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
809 +                               GDMA_REG_GCT_CHAN_MASK));
810 +}
811 +
812 +static void rt3883_gdma_init(struct gdma_dma_dev *dma_dev)
813 +{
814 +       uint32_t gct;
815 +
816 +       /* all chans round robin */
817 +       gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);
818 +
819 +       gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
820 +       dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
821 +                       (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
822 +                       8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
823 +                               GDMA_REG_GCT_CHAN_MASK));
824 +}
825 +
826 +static struct gdma_data rt305x_gdma_data = {
827 +       .chancnt = 8,
828 +       .done_int_reg = GDMA_RT305X_STATUS_INT,
829 +       .init = rt305x_gdma_init,
830 +       .start_transfer = rt305x_gdma_start_transfer,
831 +};
832 +
833 +static struct gdma_data rt3883_gdma_data = {
834 +       .chancnt = 16,
835 +       .done_int_reg = GDMA_REG_DONE_INT,
836 +       .init = rt3883_gdma_init,
837 +       .start_transfer = rt3883_gdma_start_transfer,
838 +};
839 +
840 +static const struct of_device_id gdma_of_match_table[] = {
841 +       { .compatible = "ralink,rt305x-gdma", .data = &rt305x_gdma_data },
842 +       { .compatible = "ralink,rt3883-gdma", .data = &rt3883_gdma_data },
843 +       { },
844 +};
845 +
846 +static int gdma_dma_probe(struct platform_device *pdev)
847 +{
848 +       const struct of_device_id *match;
849 +       struct gdma_dmaengine_chan *chan;
850 +       struct gdma_dma_dev *dma_dev;
851 +       struct dma_device *dd;
852 +       unsigned int i;
853 +       struct resource *res;
854 +       int ret;
855 +       int irq;
856 +       void __iomem *base;
857 +       struct gdma_data *data;
858 +
859 +       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
860 +       if (ret)
861 +               return ret;
862 +
863 +       match = of_match_device(gdma_of_match_table, &pdev->dev);
864 +       if (!match)
865 +               return -EINVAL;
866 +       data = (struct gdma_data *) match->data;
867 +
868 +       dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev) +
869 +                       (sizeof(struct gdma_dmaengine_chan) * data->chancnt),
870 +                       GFP_KERNEL);
871 +       if (!dma_dev) {
872 +               dev_err(&pdev->dev, "alloc dma device failed\n");
873 +               return -ENOMEM;
874 +       }
875 +       dma_dev->data = data;
876 +
877 +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
878 +       base = devm_ioremap_resource(&pdev->dev, res);
879 +       if (IS_ERR(base))
880 +               return PTR_ERR(base);
881 +       dma_dev->base = base;
882 +       tasklet_init(&dma_dev->task, gdma_dma_tasklet, (unsigned long)dma_dev);
883 +
884 +       irq = platform_get_irq(pdev, 0);
885 +       if (irq < 0) {
886 +               dev_err(&pdev->dev, "failed to get irq\n");
887 +               return -EINVAL;
888 +       }
889 +       ret = devm_request_irq(&pdev->dev, irq, gdma_dma_irq,
890 +                       0, dev_name(&pdev->dev), dma_dev);
891 +       if (ret) {
892 +               dev_err(&pdev->dev, "failed to request irq\n");
893 +               return ret;
894 +       }
895 +
896 +       device_reset(&pdev->dev);
897 +
898 +       dd = &dma_dev->ddev;
899 +       dma_cap_set(DMA_MEMCPY, dd->cap_mask);
900 +       dma_cap_set(DMA_SLAVE, dd->cap_mask);
901 +       dma_cap_set(DMA_CYCLIC, dd->cap_mask);
902 +       dd->device_free_chan_resources = gdma_dma_free_chan_resources;
903 +       dd->device_prep_dma_memcpy = gdma_dma_prep_dma_memcpy;
904 +       dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
905 +       dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
906 +       dd->device_config = gdma_dma_config;
907 +       dd->device_terminate_all = gdma_dma_terminate_all;
908 +       dd->device_tx_status = gdma_dma_tx_status;
909 +       dd->device_issue_pending = gdma_dma_issue_pending;
910 +
911 +       dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
912 +       dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
913 +       dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
914 +       dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
915 +
916 +       dd->dev = &pdev->dev;
917 +       dd->dev->dma_parms = &dma_dev->dma_parms;
918 +       dma_set_max_seg_size(dd->dev, GDMA_REG_CTRL0_TX_MASK);
919 +       INIT_LIST_HEAD(&dd->channels);
920 +
921 +       for (i = 0; i < data->chancnt; i++) {
922 +               chan = &dma_dev->chan[i];
923 +               chan->id = i;
924 +               chan->vchan.desc_free = gdma_dma_desc_free;
925 +               vchan_init(&chan->vchan, dd);
926 +       }
927 +
928 +       /* init hardware */
929 +       data->init(dma_dev);
930 +
931 +       ret = dma_async_device_register(dd);
932 +       if (ret) {
933 +               dev_err(&pdev->dev, "failed to register dma device\n");
934 +               return ret;
935 +       }
936 +
937 +       ret = of_dma_controller_register(pdev->dev.of_node,
938 +               of_dma_xlate_by_chan_id, dma_dev);
939 +       if (ret) {
940 +               dev_err(&pdev->dev, "failed to register of dma controller\n");
941 +               goto err_unregister;
942 +       }
943 +
944 +       platform_set_drvdata(pdev, dma_dev);
945 +
946 +       return 0;
947 +
948 +err_unregister:
949 +       dma_async_device_unregister(dd);
950 +       return ret;
951 +}
952 +
953 +static int gdma_dma_remove(struct platform_device *pdev)
954 +{
955 +       struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);
956 +
957 +       tasklet_kill(&dma_dev->task);
958 +       of_dma_controller_free(pdev->dev.of_node);
959 +       dma_async_device_unregister(&dma_dev->ddev);
960 +
961 +       return 0;
962 +}
963 +
964 +static struct platform_driver gdma_dma_driver = {
965 +       .probe = gdma_dma_probe,
966 +       .remove = gdma_dma_remove,
967 +       .driver = {
968 +               .name = "gdma-rt2880",
969 +               .of_match_table = gdma_of_match_table,
970 +       },
971 +};
972 +module_platform_driver(gdma_dma_driver);
973 +
974 +MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
975 +MODULE_DESCRIPTION("Ralink/MTK DMA driver");
976 +MODULE_LICENSE("GPL v2");
977 --- a/include/linux/dmaengine.h
978 +++ b/include/linux/dmaengine.h
979 @@ -496,6 +496,7 @@ static inline void dma_set_unmap(struct
980  struct dmaengine_unmap_data *
981  dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
982  void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
983 +struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
984  #else
985  static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
986                                  struct dmaengine_unmap_data *unmap)
987 --- /dev/null
988 +++ b/drivers/dma/mtk-hsdma.c
989 @@ -0,0 +1,781 @@
990 +/*
991 + *  Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
992 + *  MTK HSDMA support
993 + *
994 + *  This program is free software; you can redistribute it and/or modify it
995 + *  under the terms of the GNU General Public License as published by the
996 + *  Free Software Foundation; either version 2 of the License, or (at your
997 + *  option) any later version.
998 + *
999 + */
1000 +
1001 +#include <linux/dmaengine.h>
1002 +#include <linux/dma-mapping.h>
1003 +#include <linux/err.h>
1004 +#include <linux/init.h>
1005 +#include <linux/list.h>
1006 +#include <linux/module.h>
1007 +#include <linux/platform_device.h>
1008 +#include <linux/slab.h>
1009 +#include <linux/spinlock.h>
1010 +#include <linux/irq.h>
1011 +#include <linux/of_dma.h>
1012 +#include <linux/reset.h>
1013 +#include <linux/of_device.h>
1014 +
1015 +#include "virt-dma.h"
1016 +
1017 +#define HSDMA_BASE_OFFSET              0x800
1018 +
1019 +#define HSDMA_REG_TX_BASE              0x00
1020 +#define HSDMA_REG_TX_CNT               0x04
1021 +#define HSDMA_REG_TX_CTX               0x08
1022 +#define HSDMA_REG_TX_DTX               0x0c
1023 +#define HSDMA_REG_RX_BASE              0x100
1024 +#define HSDMA_REG_RX_CNT               0x104
1025 +#define HSDMA_REG_RX_CRX               0x108
1026 +#define HSDMA_REG_RX_DRX               0x10c
1027 +#define HSDMA_REG_INFO                 0x200
1028 +#define HSDMA_REG_GLO_CFG              0x204
1029 +#define HSDMA_REG_RST_CFG              0x208
1030 +#define HSDMA_REG_DELAY_INT            0x20c
1031 +#define HSDMA_REG_FREEQ_THRES          0x210
1032 +#define HSDMA_REG_INT_STATUS           0x220
1033 +#define HSDMA_REG_INT_MASK             0x228
1034 +#define HSDMA_REG_SCH_Q01              0x280
1035 +#define HSDMA_REG_SCH_Q23              0x284
1036 +
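+/*
+ * Each ring holds HSDMA_DESCS_NUM (a power of two) descriptors;
+ * HSDMA_NEXT_DESC() advances an index around the ring.
+ */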
1037 +#define HSDMA_DESCS_MAX                        0xfff
1038 +#define HSDMA_DESCS_NUM                        8
1039 +#define HSDMA_DESCS_MASK               (HSDMA_DESCS_NUM - 1)
1040 +#define HSDMA_NEXT_DESC(x)             (((x) + 1) & HSDMA_DESCS_MASK)
1041 +
1042 +/* HSDMA_REG_INFO */
1043 +#define HSDMA_INFO_INDEX_MASK          0xf
1044 +#define HSDMA_INFO_INDEX_SHIFT         24
1045 +#define HSDMA_INFO_BASE_MASK           0xff
1046 +#define HSDMA_INFO_BASE_SHIFT          16
1047 +#define HSDMA_INFO_RX_MASK             0xff
1048 +#define HSDMA_INFO_RX_SHIFT            8
1049 +#define HSDMA_INFO_TX_MASK             0xff
1050 +#define HSDMA_INFO_TX_SHIFT            0
1051 +
1052 +/* HSDMA_REG_GLO_CFG */
1053 +#define HSDMA_GLO_TX_2B_OFFSET         BIT(31)
1054 +#define HSDMA_GLO_CLK_GATE             BIT(30)
1055 +#define HSDMA_GLO_BYTE_SWAP            BIT(29)
1056 +#define HSDMA_GLO_MULTI_DMA            BIT(10)
1057 +#define HSDMA_GLO_TWO_BUF              BIT(9)
1058 +#define HSDMA_GLO_32B_DESC             BIT(8)
1059 +#define HSDMA_GLO_BIG_ENDIAN           BIT(7)
1060 +#define HSDMA_GLO_TX_DONE              BIT(6)
1061 +#define HSDMA_GLO_BT_MASK              0x3
1062 +#define HSDMA_GLO_BT_SHIFT             4
1063 +#define HSDMA_GLO_RX_BUSY              BIT(3)
1064 +#define HSDMA_GLO_RX_DMA               BIT(2)
1065 +#define HSDMA_GLO_TX_BUSY              BIT(1)
1066 +#define HSDMA_GLO_TX_DMA               BIT(0)
1067 +
1068 +#define HSDMA_BT_SIZE_16BYTES          (0 << HSDMA_GLO_BT_SHIFT)
1069 +#define HSDMA_BT_SIZE_32BYTES          (1 << HSDMA_GLO_BT_SHIFT)
1070 +#define HSDMA_BT_SIZE_64BYTES          (2 << HSDMA_GLO_BT_SHIFT)
1071 +#define HSDMA_BT_SIZE_128BYTES         (3 << HSDMA_GLO_BT_SHIFT)
1072 +
1073 +#define HSDMA_GLO_DEFAULT              (HSDMA_GLO_MULTI_DMA | \
1074 +               HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
1075 +
1076 +/* HSDMA_REG_RST_CFG */
1077 +#define HSDMA_RST_RX_SHIFT             16
1078 +#define HSDMA_RST_TX_SHIFT             0
1079 +
1080 +/* HSDMA_REG_DELAY_INT */
1081 +#define HSDMA_DELAY_INT_EN             BIT(15)
1082 +#define HSDMA_DELAY_PEND_OFFSET                8
1083 +#define HSDMA_DELAY_TIME_OFFSET                0
1084 +#define HSDMA_DELAY_TX_OFFSET          16
1085 +#define HSDMA_DELAY_RX_OFFSET          0
1086 +
1087 +#define HSDMA_DELAY_INIT(x)            (HSDMA_DELAY_INT_EN | \
1088 +               ((x) << HSDMA_DELAY_PEND_OFFSET))
1089 +#define HSDMA_DELAY(x)                 ((HSDMA_DELAY_INIT(x) << \
1090 +               HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
1091 +
1092 +/* HSDMA_REG_INT_STATUS */
1093 +#define HSDMA_INT_DELAY_RX_COH         BIT(31)
1094 +#define HSDMA_INT_DELAY_RX_INT         BIT(30)
1095 +#define HSDMA_INT_DELAY_TX_COH         BIT(29)
1096 +#define HSDMA_INT_DELAY_TX_INT         BIT(28)
1097 +#define HSDMA_INT_RX_MASK              0x3
1098 +#define HSDMA_INT_RX_SHIFT             16
1099 +#define HSDMA_INT_RX_Q0                        BIT(16)
1100 +#define HSDMA_INT_TX_MASK              0xf
1101 +#define HSDMA_INT_TX_SHIFT             0
1102 +#define HSDMA_INT_TX_Q0                        BIT(0)
1103 +
1104 +/* tx/rx dma desc flags */
1105 +#define HSDMA_PLEN_MASK                        0x3fff
1106 +#define HSDMA_DESC_DONE                        BIT(31)
1107 +#define HSDMA_DESC_LS0                 BIT(30)
1108 +#define HSDMA_DESC_PLEN0(_x)           (((_x) & HSDMA_PLEN_MASK) << 16)
1109 +#define HSDMA_DESC_TAG                 BIT(15)
1110 +#define HSDMA_DESC_LS1                 BIT(14)
1111 +#define HSDMA_DESC_PLEN1(_x)           ((_x) & HSDMA_PLEN_MASK)
1112 +
1113 +/* align to 4 bytes */
1114 +#define HSDMA_ALIGN_SIZE               3
1115 +/* align size to 128 bytes */
1116 +#define HSDMA_MAX_PLEN                 0x3f80
1117 +
1118 +struct hsdma_desc {
1119 +       u32 addr0;
1120 +       u32 flags;
1121 +       u32 addr1;
1122 +       u32 unused;
1123 +};
1124 +
1125 +struct mtk_hsdma_sg {
1126 +       dma_addr_t src_addr;
1127 +       dma_addr_t dst_addr;
1128 +       u32 len;
1129 +};
1130 +
1131 +struct mtk_hsdma_desc {
1132 +       struct virt_dma_desc vdesc;
1133 +       unsigned int num_sgs;
1134 +       struct mtk_hsdma_sg sg[1];
1135 +};
1136 +
1137 +struct mtk_hsdma_chan {
1138 +       struct virt_dma_chan vchan;
1139 +       unsigned int id;
1140 +       dma_addr_t desc_addr;
1141 +       int tx_idx;
1142 +       int rx_idx;
1143 +       struct hsdma_desc *tx_ring;
1144 +       struct hsdma_desc *rx_ring;
1145 +       struct mtk_hsdma_desc *desc;
1146 +       unsigned int next_sg;
1147 +};
1148 +
1149 +struct mtk_hsdam_engine {
1150 +       struct dma_device ddev;
1151 +       struct device_dma_parameters dma_parms;
1152 +       void __iomem *base;
1153 +       struct tasklet_struct task;
1154 +       volatile unsigned long chan_issued;
1155 +
1156 +       struct mtk_hsdma_chan chan[1];
1157 +};
1158 +
1159 +static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(
1160 +               struct mtk_hsdma_chan *chan)
1161 +{
1162 +       return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
1163 +                       ddev);
1164 +}
1165 +
1166 +static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
1167 +{
1168 +       return container_of(c, struct mtk_hsdma_chan, vchan.chan);
1169 +}
1170 +
1171 +static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(
1172 +               struct virt_dma_desc *vdesc)
1173 +{
1174 +       return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
1175 +}
1176 +
1177 +static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
1178 +{
1179 +       return readl(hsdma->base + reg);
1180 +}
1181 +
1182 +static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
1183 +               unsigned reg, u32 val)
1184 +{
1185 +       writel(val, hsdma->base + reg);
1186 +}
1187 +
1188 +static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
1189 +               struct mtk_hsdma_chan *chan)
1190 +{
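+       /*
+        * tx_idx is the next tx slot the cpu will fill; rx_idx trails
+        * the hardware rx pointer, so it starts at the last ring slot.
+        */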
1191 +       chan->tx_idx = 0;
1192 +       chan->rx_idx = HSDMA_DESCS_NUM - 1;
1193 +
1194 +       mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
1195 +       mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
1196 +
1197 +       mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
1198 +                       0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
1199 +       mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
1200 +                       0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
1201 +}
1202 +
1203 +static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
1204 +{
1205 +       dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, " \
1206 +                       "tctx %08x, tdtx: %08x, rbase %08x, " \
1207 +                       "rcnt %08x, rctx %08x, rdtx %08x\n",
1208 +                       mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
1209 +                       mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
1210 +                       mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
1211 +                       mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
1212 +                       mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
1213 +                       mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
1214 +                       mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
1215 +                       mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
1216 +
1217 +       dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, " \
1218 +                       "intr_stat %08x, intr_mask %08x\n",
1219 +                       mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
1220 +                       mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
1221 +                       mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
1222 +                       mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
1223 +                       mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
1224 +}
1225 +
1226 +static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
1227 +               struct mtk_hsdma_chan *chan)
1228 +{
1229 +       struct hsdma_desc *tx_desc;
1230 +       struct hsdma_desc *rx_desc;
1231 +       int i;
1232 +
1233 +       dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
1234 +                       chan->tx_idx, chan->rx_idx);
1235 +
1236 +       for (i = 0; i < HSDMA_DESCS_NUM; i++) {
1237 +               tx_desc = &chan->tx_ring[i];
1238 +               rx_desc = &chan->rx_ring[i];
1239 +
1240 +               dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, " \
1241 +                               "tx addr1: %08x, rx addr0 %08x, flags %08x\n",
1242 +                               i, tx_desc->addr0, tx_desc->flags, \
1243 +                               tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
1244 +       }
1245 +}
1246 +
1247 +static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
1248 +               struct mtk_hsdma_chan *chan)
1249 +{
1250 +       int i;
1251 +
1252 +       /* disable dma */
1253 +       mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
1254 +
1255 +       /* disable intr */
1256 +       mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
1257 +
1258 +       /* init desc value */
1259 +       for (i = 0; i < HSDMA_DESCS_NUM; i++) {
1260 +               chan->tx_ring[i].addr0 = 0;
1261 +               chan->tx_ring[i].flags = HSDMA_DESC_LS0 |
1262 +                       HSDMA_DESC_DONE;
1263 +       }
1264 +       for (i = 0; i < HSDMA_DESCS_NUM; i++) {
1265 +               chan->rx_ring[i].addr0 = 0;
1266 +               chan->rx_ring[i].flags = 0;
1267 +       }
1268 +
1269 +       /* reset */
1270 +       mtk_hsdma_reset_chan(hsdma, chan);
1271 +
1272 +       /* enable intr */
1273 +       mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
1274 +
1275 +       /* enable dma */
1276 +       mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
1277 +}
1278 +
1279 +static int mtk_hsdma_terminate_all(struct dma_chan *c)
1280 +{
1281 +       struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
1282 +       struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
1283 +       unsigned long timeout;
1284 +       LIST_HEAD(head);
1285 +
1286 +       spin_lock_bh(&chan->vchan.lock);
1287 +       chan->desc = NULL;
1288 +       clear_bit(chan->id, &hsdma->chan_issued);
1289 +       vchan_get_all_descriptors(&chan->vchan, &head);
1290 +       spin_unlock_bh(&chan->vchan.lock);
1291 +
1292 +       vchan_dma_desc_free_list(&chan->vchan, &head);
1293 +
1294 +       /* wait for the dma transfer to complete */
1295 +       timeout = jiffies + msecs_to_jiffies(2000);
1296 +       while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
1297 +                       (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
1298 +               if (time_after_eq(jiffies, timeout)) {
1299 +                       hsdma_dump_desc(hsdma, chan);
1300 +                       mtk_hsdma_reset(hsdma, chan);
1301 +                       dev_err(hsdma->ddev.dev, "timeout, resetting the engine\n");
1302 +                       break;
1303 +               }
1304 +               cpu_relax();
1305 +       }
1306 +
1307 +       return 0;
1308 +}
1309 +
1310 +static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
1311 +               struct mtk_hsdma_chan *chan)
1312 +{
1313 +       dma_addr_t src, dst;
1314 +       size_t len, tlen;
1315 +       struct hsdma_desc *tx_desc, *rx_desc;
1316 +       struct mtk_hsdma_sg *sg;
1317 +       unsigned int i;
1318 +       int rx_idx;
1319 +
1320 +       sg = &chan->desc->sg[0];
1321 +       len = sg->len;
1322 +       chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);
1323 +
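+       /*
+        * Each tx descriptor carries up to two segments: even segments
+        * fill addr0/PLEN0, odd segments addr1/PLEN1, and LS0/LS1 marks
+        * whichever slot holds the last segment.
+        */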
1324 +       /* tx desc */
1325 +       src = sg->src_addr;
1326 +       for (i = 0; i < chan->desc->num_sgs; i++) {
1327 +               if (len > HSDMA_MAX_PLEN)
1328 +                       tlen = HSDMA_MAX_PLEN;
1329 +               else
1330 +                       tlen = len;
1331 +
1332 +               if (i & 0x1) {
1333 +                       tx_desc->addr1 = src;
1334 +                       tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
1335 +               } else {
1336 +                       tx_desc = &chan->tx_ring[chan->tx_idx];
1337 +                       tx_desc->addr0 = src;
1338 +                       tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
1339 +
1340 +                       /* update index */
1341 +                       chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
1342 +               }
1343 +
1344 +               src += tlen;
1345 +               len -= tlen;
1346 +       }
1347 +       if (i & 0x1)
1348 +               tx_desc->flags |= HSDMA_DESC_LS0;
1349 +       else
1350 +               tx_desc->flags |= HSDMA_DESC_LS1;
1351 +
1352 +       /* rx desc */
1353 +       rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
1354 +       len = sg->len;
1355 +       dst = sg->dst_addr;
1356 +       for (i = 0; i < chan->desc->num_sgs; i++) {
1357 +               rx_desc = &chan->rx_ring[rx_idx];
1358 +               if (len > HSDMA_MAX_PLEN)
1359 +                       tlen = HSDMA_MAX_PLEN;
1360 +               else
1361 +                       tlen = len;
1362 +
1363 +               rx_desc->addr0 = dst;
1364 +               rx_desc->flags = HSDMA_DESC_PLEN0(tlen);
1365 +
1366 +               dst += tlen;
1367 +               len -= tlen;
1368 +
1369 +               /* update index */
1370 +               rx_idx = HSDMA_NEXT_DESC(rx_idx);
1371 +       }
1372 +
1373 +       /* make sure the desc and index are all up to date */
1374 +       wmb();
1375 +       mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
1376 +
1377 +       return 0;
1378 +}
1379 +
1380 +static int gdma_next_desc(struct mtk_hsdma_chan *chan)
1381 +{
1382 +       struct virt_dma_desc *vdesc;
1383 +
1384 +       vdesc = vchan_next_desc(&chan->vchan);
1385 +       if (!vdesc) {
1386 +               chan->desc = NULL;
1387 +               return 0;
1388 +       }
1389 +       chan->desc = to_mtk_hsdma_desc(vdesc);
1390 +       chan->next_sg = 0;
1391 +
1392 +       return 1;
1393 +}
1394 +
1395 +static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
1396 +               struct mtk_hsdma_chan *chan)
1397 +{
1398 +       struct mtk_hsdma_desc *desc;
1399 +       int chan_issued;
1400 +
1401 +       chan_issued = 0;
1402 +       spin_lock_bh(&chan->vchan.lock);
1403 +       desc = chan->desc;
1404 +       if (likely(desc)) {
1405 +               if (chan->next_sg == desc->num_sgs) {
1406 +                       list_del(&desc->vdesc.node);
1407 +                       vchan_cookie_complete(&desc->vdesc);
1408 +                       chan_issued = gdma_next_desc(chan);
1409 +               }
1410 +       } else
1411 +               dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
1412 +
1413 +       if (chan_issued)
1414 +               set_bit(chan->id, &hsdma->chan_issued);
1415 +       spin_unlock_bh(&chan->vchan.lock);
1416 +}
1417 +
1418 +static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
1419 +{
1420 +       struct mtk_hsdam_engine *hsdma = devid;
1421 +       u32 status;
1422 +
1423 +       status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
1424 +       if (unlikely(!status))
1425 +               return IRQ_NONE;
1426 +
1427 +       if (likely(status & HSDMA_INT_RX_Q0))
1428 +               tasklet_schedule(&hsdma->task);
1429 +       else
1430 +               dev_dbg(hsdma->ddev.dev, "unhandled irq status %08x\n",
1431 +                               status);
1432 +       /* clear the intr bits */
1433 +       mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
1434 +
1435 +       return IRQ_HANDLED;
1436 +}
1437 +
1438 +static void mtk_hsdma_issue_pending(struct dma_chan *c)
1439 +{
1440 +       struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
1441 +       struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
1442 +
1443 +       spin_lock_bh(&chan->vchan.lock);
1444 +       if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
1445 +               if (gdma_next_desc(chan)) {
1446 +                       set_bit(chan->id, &hsdma->chan_issued);
1447 +                       tasklet_schedule(&hsdma->task);
1448 +               } else
1449 +                       dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
1450 +       }
1451 +       spin_unlock_bh(&chan->vchan.lock);
1452 +}
1453 +
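+/*
+ * Only DMA_MEMCPY is advertised, so a client would use the usual dmaengine
+ * sequence, roughly (a sketch, assuming the channel was obtained through
+ * the standard dmaengine request API):
+ *
+ *     tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, flags);
+ *     cookie = dmaengine_submit(tx);
+ *     dma_async_issue_pending(chan);
+ *
+ * The single scatter entry recorded here is split into HSDMA_MAX_PLEN-sized
+ * chunks when the transfer is actually started.
+ */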
1454 +static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy(
1455 +               struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
1456 +               size_t len, unsigned long flags)
1457 +{
1458 +       struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
1459 +       struct mtk_hsdma_desc *desc;
1460 +
1461 +       if (!len)
1462 +               return NULL;
1463 +
1464 +       desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
1465 +       if (!desc) {
1466 +               dev_err(c->device->dev, "alloc memcpy desc error\n");
1467 +               return NULL;
1468 +       }
1469 +
1470 +       desc->sg[0].src_addr = src;
1471 +       desc->sg[0].dst_addr = dest;
1472 +       desc->sg[0].len = len;
1473 +
1474 +       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
1475 +}
1476 +
1477 +static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
1478 +               dma_cookie_t cookie, struct dma_tx_state *state)
1479 +{
1480 +       return dma_cookie_status(c, cookie, state);
1481 +}
1482 +
1483 +static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
1484 +{
1485 +       vchan_free_chan_resources(to_virt_chan(c));
1486 +}
1487 +
1488 +static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
1489 +{
1490 +       kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc));
1491 +}
1492 +
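+/*
+ * TX side of the tasklet: the engine exposes a single channel (id 0), so
+ * only bit 0 of chan_issued is ever checked before starting the next
+ * transfer.
+ */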
1493 +static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
1494 +{
1495 +       struct mtk_hsdma_chan *chan;
1496 +
1497 +       if (test_and_clear_bit(0, &hsdma->chan_issued)) {
1498 +               chan = &hsdma->chan[0];
1499 +               if (chan->desc)
1500 +                       mtk_hsdma_start_transfer(hsdma, chan);
1501 +               else
1502 +                       dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n");
1503 +       }
1504 +}
1505 +
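+/*
+ * RX side of the tasklet: reclaim descriptors the hardware has finished
+ * with. DRX is where the hardware will write next, so the ring-distance
+ * from our next expected index gives the completed count; e.g. assuming
+ * HSDMA_DESCS_MASK == HSDMA_DESCS_NUM - 1 on a 64-entry ring, next_idx = 62
+ * and drx_idx = 2 yield cnt = (2 - 62) & 63 = 4.
+ */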
1506 +static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
1507 +{
1508 +       struct mtk_hsdma_chan *chan;
1509 +       int next_idx, drx_idx, cnt;
1510 +
1511 +       chan = &hsdma->chan[0];
1512 +       next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
1513 +       drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);
1514 +
1515 +       cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
1516 +       if (!cnt)
1517 +               return;
1518 +
1519 +       chan->next_sg += cnt;
1520 +       chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;
1521 +
1522 +       /* update rx crx */
1523 +       wmb();
1524 +       mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
1525 +
1526 +       mtk_hsdma_chan_done(hsdma, chan);
1527 +}
1528 +
1529 +static void mtk_hsdma_tasklet(unsigned long arg)
1530 +{
1531 +       struct mtk_hsdam_engine *hsdma = (struct mtk_hsdam_engine *)arg;
1532 +
1533 +       mtk_hsdma_rx(hsdma);
1534 +       mtk_hsdma_tx(hsdma);
1535 +}
1536 +
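+/*
+ * Both rings live in one coherent allocation: HSDMA_DESCS_NUM TX
+ * descriptors followed by HSDMA_DESCS_NUM RX descriptors. TX slots start
+ * out flagged LS0 | DONE, presumably so the engine treats them as empty.
+ */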
1537 +static int mtk_hsdma_alloc_desc(struct mtk_hsdam_engine *hsdma,
1538 +               struct mtk_hsdma_chan *chan)
1539 +{
1540 +       int i;
1541 +
1542 +       chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
1543 +                       2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
1544 +                       &chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
1545 +       if (!chan->tx_ring)
1546 +               goto no_mem;
1547 +
1548 +       chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
1549 +
1550 +       /* init tx ring value */
1551 +       for (i = 0; i < HSDMA_DESCS_NUM; i++)
1552 +               chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
1553 +
1554 +       return 0;
1555 +no_mem:
1556 +       return -ENOMEM;
1557 +}
1558 +
1559 +static void mtk_hsdma_free_desc(struct mtk_hsdam_engine *hsdma,
1560 +               struct mtk_hsdma_chan *chan)
1561 +{
1562 +       if (chan->tx_ring) {
1563 +               dma_free_coherent(hsdma->ddev.dev,
1564 +                               2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
1565 +                               chan->tx_ring, chan->desc_addr);
1566 +               chan->tx_ring = NULL;
1567 +               chan->rx_ring = NULL;
1568 +       }
1569 +}
1570 +
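+/*
+ * Bring the engine up: allocate the rings, program their base addresses
+ * and sizes, reset the channel, then unmask the RX interrupt and enable
+ * DMA with the default global config.
+ */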
1571 +static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
1572 +{
1573 +       struct mtk_hsdma_chan *chan;
1574 +       int ret;
1575 +       u32 reg;
1576 +
1577 +       /* init desc */
1578 +       chan = &hsdma->chan[0];
1579 +       ret = mtk_hsdma_alloc_desc(hsdma, chan);
1580 +       if (ret)
1581 +               return ret;
1582 +
1583 +       /* tx */
1584 +       mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
1585 +       mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
1586 +       /* rx */
1587 +       mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
1588 +                       (sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
1589 +       mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
1590 +       /* reset */
1591 +       mtk_hsdma_reset_chan(hsdma, chan);
1592 +
1593 +       /* enable rx intr */
1594 +       mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
1595 +
1596 +       /* enable dma */
1597 +       mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
1598 +
1599 +       /* hardware info */
1600 +       reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
1601 +       dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
1602 +                       (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
1603 +                       (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);
1604 +
1605 +       hsdma_dump_reg(hsdma);
1606 +
1607 +       return 0;
1608 +}
1609 +
1610 +static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
1611 +{
1612 +       struct mtk_hsdma_chan *chan;
1613 +
1614 +       /* disable dma */
1615 +       mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
1616 +
1617 +       /* disable intr */
1618 +       mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
1619 +
1620 +       /* free desc */
1621 +       chan = &hsdma->chan[0];
1622 +       mtk_hsdma_free_desc(hsdma, chan);
1623 +
1624 +       /* tx */
1625 +       mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
1626 +       mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
1627 +       /* rx */
1628 +       mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
1629 +       mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
1630 +       /* reset */
1631 +       mtk_hsdma_reset_chan(hsdma, chan);
1632 +}
1633 +
1634 +static const struct of_device_id mtk_hsdma_of_match[] = {
1635 +       { .compatible = "mediatek,mt7621-hsdma" },
1636 +       { },
1637 +};
1638 +
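+/*
+ * Probe order: 32-bit DMA mask, MMIO and IRQ resources, device reset, then
+ * dmaengine registration (memcpy only) followed by the OF DMA controller
+ * hookup so clients can reference the channel from the devicetree.
+ */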
1639 +static int mtk_hsdma_probe(struct platform_device *pdev)
1640 +{
1641 +       const struct of_device_id *match;
1642 +       struct mtk_hsdma_chan *chan;
1643 +       struct mtk_hsdam_engine *hsdma;
1644 +       struct dma_device *dd;
1645 +       struct resource *res;
1646 +       int ret;
1647 +       int irq;
1648 +       void __iomem *base;
1649 +
1650 +       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1651 +       if (ret)
1652 +               return ret;
1653 +
1654 +       match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
1655 +       if (!match)
1656 +               return -EINVAL;
1657 +
1658 +       hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
1659 +       if (!hsdma) {
1660 +               dev_err(&pdev->dev, "alloc dma device failed\n");
1661 +               return -ENOMEM;
1662 +       }
1663 +
1664 +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1665 +       base = devm_ioremap_resource(&pdev->dev, res);
1666 +       if (IS_ERR(base))
1667 +               return PTR_ERR(base);
1668 +       hsdma->base = base + HSDMA_BASE_OFFSET;
1669 +       tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma);
1670 +
1671 +       irq = platform_get_irq(pdev, 0);
1672 +       if (irq < 0) {
1673 +               dev_err(&pdev->dev, "failed to get irq\n");
1674 +               return irq;
1675 +       }
1676 +       ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
1677 +                       0, dev_name(&pdev->dev), hsdma);
1678 +       if (ret) {
1679 +               dev_err(&pdev->dev, "failed to request irq\n");
1680 +               return ret;
1681 +       }
1682 +
1683 +       device_reset(&pdev->dev);
1684 +
1685 +       dd = &hsdma->ddev;
1686 +       dma_cap_set(DMA_MEMCPY, dd->cap_mask);
1687 +       dd->copy_align = HSDMA_ALIGN_SIZE;
1688 +       dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
1689 +       dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
1690 +       dd->device_terminate_all = mtk_hsdma_terminate_all;
1691 +       dd->device_tx_status = mtk_hsdma_tx_status;
1692 +       dd->device_issue_pending = mtk_hsdma_issue_pending;
1693 +       dd->dev = &pdev->dev;
1694 +       dd->dev->dma_parms = &hsdma->dma_parms;
1695 +       dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
1696 +       INIT_LIST_HEAD(&dd->channels);
1697 +
1698 +       chan = &hsdma->chan[0];
1699 +       chan->id = 0;
1700 +       chan->vchan.desc_free = mtk_hsdma_desc_free;
1701 +       vchan_init(&chan->vchan, dd);
1702 +
1703 +       /* init hardware */
1704 +       ret = mtk_hsdma_init(hsdma);
1705 +       if (ret) {
1706 +               dev_err(&pdev->dev, "failed to alloc ring descs\n");
1707 +               return ret;
1708 +       }
1709 +
1710 +       ret = dma_async_device_register(dd);
1711 +       if (ret) {
1712 +               dev_err(&pdev->dev, "failed to register dma device\n");
1713 +               return ret;
1714 +       }
1715 +
1716 +       ret = of_dma_controller_register(pdev->dev.of_node,
1717 +                       of_dma_xlate_by_chan_id, hsdma);
1718 +       if (ret) {
1719 +               dev_err(&pdev->dev, "failed to register of dma controller\n");
1720 +               goto err_unregister;
1721 +       }
1722 +
1723 +       platform_set_drvdata(pdev, hsdma);
1724 +
1725 +       return 0;
1726 +
1727 +err_unregister:
1728 +       dma_async_device_unregister(dd);
1729 +       return ret;
1730 +}
1731 +
1732 +static int mtk_hsdma_remove(struct platform_device *pdev)
1733 +{
1734 +       struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);
1735 +
1736 +       mtk_hsdma_uninit(hsdma);
1737 +
1738 +       of_dma_controller_free(pdev->dev.of_node);
1739 +       dma_async_device_unregister(&hsdma->ddev);
1740 +
1741 +       return 0;
1742 +}
1743 +
1744 +static struct platform_driver mtk_hsdma_driver = {
1745 +       .probe = mtk_hsdma_probe,
1746 +       .remove = mtk_hsdma_remove,
1747 +       .driver = {
1748 +               .name = "hsdma-mt7621",
1749 +               .of_match_table = mtk_hsdma_of_match,
1750 +       },
1751 +};
1752 +module_platform_driver(mtk_hsdma_driver);
1753 +
1754 +MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>");
1755 +MODULE_DESCRIPTION("MTK HSDMA driver");
1756 +MODULE_LICENSE("GPL v2");