/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
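/*
 * Example: channels 6 and 7 both give D40_CHAN_POS() == 6; each even/odd
 * channel pair shares a bit position, with the even channel's 2-bit status
 * kept in D40_DREG_ACTIVE and the odd channel's in D40_DREG_ACTIVO (see
 * __d40_execute_command_phy()).
 */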

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500
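/* Combined with the 3 us poll delay used below, this bounds the wait to ~1.5 ms */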

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY 100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
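/*
 * Each physical channel owns D40_LCLA_LINK_PER_EVENT_GRP LCLA entries; since
 * src and dst are always allocated as a pair, d40_lcla_alloc_one() hands out
 * at most half that many slots per channel.
 */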

/* Max number of logical channels per physical channel */
#define D40_MAX_LOG_CHAN_PER_PHY 32

/* Attempts before giving up on trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE          BIT(31)
#define D40_ALLOC_PHY           BIT(30)
#define D40_ALLOC_LOG_FREE      0

#define D40_MEMCPY_MAX_CHANS    8

/* Reserved event lines for memcpy only. */
#define DB8500_DMA_MEMCPY_EV_0  51
#define DB8500_DMA_MEMCPY_EV_1  56
#define DB8500_DMA_MEMCPY_EV_2  57
#define DB8500_DMA_MEMCPY_EV_3  58
#define DB8500_DMA_MEMCPY_EV_4  59
#define DB8500_DMA_MEMCPY_EV_5  60

static int dma40_memcpy_channels[] = {
        DB8500_DMA_MEMCPY_EV_0,
        DB8500_DMA_MEMCPY_EV_1,
        DB8500_DMA_MEMCPY_EV_2,
        DB8500_DMA_MEMCPY_EV_3,
        DB8500_DMA_MEMCPY_EV_4,
        DB8500_DMA_MEMCPY_EV_5,
};

/* Default configuration for physical memcpy */
static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
        .mode = STEDMA40_MODE_PHYSICAL,
        .dir = DMA_MEM_TO_MEM,

        .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
        .src_info.psize = STEDMA40_PSIZE_PHY_1,
        .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

        .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
        .dst_info.psize = STEDMA40_PSIZE_PHY_1,
        .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/* Default configuration for logical memcpy */
static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
        .mode = STEDMA40_MODE_LOGICAL,
        .dir = DMA_MEM_TO_MEM,

        .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
        .src_info.psize = STEDMA40_PSIZE_LOG_1,
        .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

        .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
        .dst_info.psize = STEDMA40_PSIZE_LOG_1,
        .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};
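/*
 * Both defaults use a 1-byte element width and single-element bursts, so a
 * memcpy of any length and alignment is legal (d40_size_2_dmalen() rejects
 * sizes that are not a multiple of the widest configured data width).
 */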

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
        D40_DMA_STOP            = 0,
        D40_DMA_RUN             = 1,
        D40_DMA_SUSPEND_REQ     = 2,
        D40_DMA_SUSPENDED       = 3
};

/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */

enum d40_events {
        D40_DEACTIVATE_EVENTLINE        = 0,
        D40_ACTIVATE_EVENTLINE          = 1,
        D40_SUSPEND_REQ_EVENTLINE       = 2,
        D40_ROUND_EVENTLINE             = 3
};

/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static u32 d40_backup_regs[] = {
        D40_DREG_LCPA,
        D40_DREG_LCLA,
        D40_DREG_PRMSE,
        D40_DREG_PRMSO,
        D40_DREG_PRMOE,
        D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/*
 * Since the 9540 and 8540 have the same HW revision,
 * use v4a for 9540 or earlier
 * use v4b for 8540 or later
 * HW revision:
 * DB8500ed has revision 0
 * DB8500v1 has revision 2
 * DB8500v2 has revision 3
 * AP9540v1 has revision 4
 * DB8540v1 has revision 4
 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
 */
static u32 d40_backup_regs_v4a[] = {
        D40_DREG_PSEG1,
        D40_DREG_PSEG2,
        D40_DREG_PSEG3,
        D40_DREG_PSEG4,
        D40_DREG_PCEG1,
        D40_DREG_PCEG2,
        D40_DREG_PCEG3,
        D40_DREG_PCEG4,
        D40_DREG_RSEG1,
        D40_DREG_RSEG2,
        D40_DREG_RSEG3,
        D40_DREG_RSEG4,
        D40_DREG_RCEG1,
        D40_DREG_RCEG2,
        D40_DREG_RCEG3,
        D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)

static u32 d40_backup_regs_v4b[] = {
        D40_DREG_CPSEG1,
        D40_DREG_CPSEG2,
        D40_DREG_CPSEG3,
        D40_DREG_CPSEG4,
        D40_DREG_CPSEG5,
        D40_DREG_CPCEG1,
        D40_DREG_CPCEG2,
        D40_DREG_CPCEG3,
        D40_DREG_CPCEG4,
        D40_DREG_CPCEG5,
        D40_DREG_CRSEG1,
        D40_DREG_CRSEG2,
        D40_DREG_CRSEG3,
        D40_DREG_CRSEG4,
        D40_DREG_CRSEG5,
        D40_DREG_CRCEG1,
        D40_DREG_CRCEG2,
        D40_DREG_CRCEG3,
        D40_DREG_CRCEG4,
        D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

static u32 d40_backup_regs_chan[] = {
        D40_CHAN_REG_SSCFG,
        D40_CHAN_REG_SSELT,
        D40_CHAN_REG_SSPTR,
        D40_CHAN_REG_SSLNK,
        D40_CHAN_REG_SDCFG,
        D40_CHAN_REG_SDELT,
        D40_CHAN_REG_SDPTR,
        D40_CHAN_REG_SDLNK,
};

#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
                             BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
        u32 src;
        u32 clr;
        bool is_error;
        int offset;
};

static struct d40_interrupt_lookup il_v4a[] = {
        {D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
        {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
        {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
        {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
        {D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
        {D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
        {D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
        {D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
        {D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
        {D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
};

static struct d40_interrupt_lookup il_v4b[] = {
        {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,  0},
        {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
        {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
        {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
        {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
        {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,   0},
        {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,  32},
        {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,  64},
        {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,  96},
        {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
        {D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
        {D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
        unsigned int reg;
        unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
        /* Clock every part of the DMA block from start */
        { .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

        /* Interrupts on all logical channels */
        { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};

static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
        /* Clock every part of the DMA block from start */
        { .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

        /* Interrupts on all logical channels */
        { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
        void    *base;
        int      size;
        dma_addr_t      dma_addr;
        /* Space for dst and src, plus an extra for padding */
        u8       pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool: to base if lli_len > 1, or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
        /* LLI physical */
        struct d40_phy_lli_bidir         lli_phy;
        /* LLI logical */
        struct d40_log_lli_bidir         lli_log;

        struct d40_lli_pool              lli_pool;
        int                              lli_len;
        int                              lli_current;
        int                              lcla_alloc;

        struct dma_async_tx_descriptor   txd;
        struct list_head                 node;

        bool                             is_in_client_list;
        bool                             cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map of which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
        void            *base;
        dma_addr_t      dma_addr;
        void            *base_unaligned;
        int              pages;
        spinlock_t       lock;
        struct d40_desc **alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 * @use_soft_lli: To mark if the linked lists of the channel are managed by SW.
 */
struct d40_phy_res {
        spinlock_t lock;
        bool       reserved;
        int        num;
        u32        allocated_src;
        u32        allocated_dst;
        bool       use_soft_lli;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 * @active: Active descriptor.
 * @done: Completed jobs
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
        spinlock_t                       lock;
        int                              log_num;
        int                              pending_tx;
        bool                             busy;
        struct d40_phy_res              *phy_chan;
        struct dma_chan                  chan;
        struct tasklet_struct            tasklet;
        struct list_head                 client;
        struct list_head                 pending_queue;
        struct list_head                 active;
        struct list_head                 done;
        struct list_head                 queue;
        struct list_head                 prepare_queue;
        struct stedma40_chan_cfg         dma_cfg;
        bool                             configured;
        struct d40_base                 *base;
        /* Default register configurations */
        u32                              src_def_cfg;
        u32                              dst_def_cfg;
        struct d40_def_lcsp              log_def;
        struct d40_log_lli_full         *lcpa;
        /* Runtime reconfiguration */
        dma_addr_t                      runtime_addr;
        enum dma_transfer_direction     runtime_direction;
};

/**
 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 * controller
 *
 * @backup: the pointer to the registers address array for backup
 * @backup_size: the size of the registers address array for backup
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to struct d40_interrupt_lookup
 * @il_size: the size of d40_interrupt_lookup array
 * @init_reg: the pointer to the struct d40_reg_val
 * @init_reg_size: the size of d40_reg_val array
 */
struct d40_gen_dmac {
        u32                             *backup;
        u32                              backup_size;
        u32                              realtime_en;
        u32                              realtime_clear;
        u32                              high_prio_en;
        u32                              high_prio_clear;
        u32                              interrupt_en;
        u32                              interrupt_clear;
        struct d40_interrupt_lookup     *il;
        u32                              il_size;
        struct d40_reg_val              *init_reg;
        u32                              init_reg_size;
};

/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
 * transfers).
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 * later.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @gen_dmac: the struct for generic registers values to represent u8500/8540
 * DMA controller
 */
struct d40_base {
        spinlock_t                       interrupt_lock;
        spinlock_t                       execmd_lock;
        struct device                    *dev;
        void __iomem                     *virtbase;
        u8                                rev:4;
        struct clk                       *clk;
        phys_addr_t                       phy_start;
        resource_size_t                   phy_size;
        int                               irq;
        int                               num_memcpy_chans;
        int                               num_phy_chans;
        int                               num_log_chans;
        struct device_dma_parameters      dma_parms;
        struct dma_device                 dma_both;
        struct dma_device                 dma_slave;
        struct dma_device                 dma_memcpy;
        struct d40_chan                  *phy_chans;
        struct d40_chan                  *log_chans;
        struct d40_chan                 **lookup_log_chans;
        struct d40_chan                 **lookup_phy_chans;
        struct stedma40_platform_data    *plat_data;
        struct regulator                 *lcpa_regulator;
        /* Physical half channels */
        struct d40_phy_res               *phy_res;
        struct d40_lcla_pool              lcla_pool;
        void                             *lcpa_base;
        dma_addr_t                        phy_lcpa;
        resource_size_t                   lcpa_size;
        struct kmem_cache                *desc_slab;
        u32                               reg_val_backup[BACKUP_REGS_SZ];
        u32                               reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
        u32                              *reg_val_backup_chan;
        u16                               gcc_pwr_off_mask;
        struct d40_gen_dmac               gen_dmac;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
        return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
        return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
        return !chan_is_physical(chan);
}

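/*
 * Resolve a channel's register window; e.g.
 * writel(0, chan_base(chan) + D40_CHAN_REG_SSLNK) clears that channel's
 * source link register, as d40_config_write() does.
 */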
static void __iomem *chan_base(struct d40_chan *chan)
{
        return chan->base->virtbase + D40_DREG_PCBASE +
               chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...)            \
        dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...)          \
        d40_err(chan2dev(d40c), format, ## arg)

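/*
 * The common case (lli_len == 1, one src/dst pair) is served from the
 * descriptor's embedded pre_alloc_lli area without any allocation; longer
 * jobs fall back to a kmalloc'd area, which for physical channels is also
 * DMA-mapped so the hardware can follow the links.
 */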
static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
                              int lli_len)
{
        bool is_log = chan_is_logical(d40c);
        u32 align;
        void *base;

        if (is_log)
                align = sizeof(struct d40_log_lli);
        else
                align = sizeof(struct d40_phy_lli);

        if (lli_len == 1) {
                base = d40d->lli_pool.pre_alloc_lli;
                d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
                d40d->lli_pool.base = NULL;
        } else {
                d40d->lli_pool.size = lli_len * 2 * align;

                base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
                d40d->lli_pool.base = base;

                if (d40d->lli_pool.base == NULL)
                        return -ENOMEM;
        }

        if (is_log) {
                d40d->lli_log.src = PTR_ALIGN(base, align);
                d40d->lli_log.dst = d40d->lli_log.src + lli_len;

                d40d->lli_pool.dma_addr = 0;
        } else {
                d40d->lli_phy.src = PTR_ALIGN(base, align);
                d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

                d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
                                                         d40d->lli_phy.src,
                                                         d40d->lli_pool.size,
                                                         DMA_TO_DEVICE);

                if (dma_mapping_error(d40c->base->dev,
                                      d40d->lli_pool.dma_addr)) {
                        kfree(d40d->lli_pool.base);
                        d40d->lli_pool.base = NULL;
                        d40d->lli_pool.dma_addr = 0;
                        return -ENOMEM;
                }
        }

        return 0;
}

static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
        if (d40d->lli_pool.dma_addr)
                dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
                                 d40d->lli_pool.size, DMA_TO_DEVICE);

        kfree(d40d->lli_pool.base);
        d40d->lli_pool.base = NULL;
        d40d->lli_pool.size = 0;
        d40d->lli_log.src = NULL;
        d40d->lli_log.dst = NULL;
        d40d->lli_phy.src = NULL;
        d40d->lli_phy.dst = NULL;
}

static int d40_lcla_alloc_one(struct d40_chan *d40c,
                              struct d40_desc *d40d)
{
        unsigned long flags;
        int i;
        int ret = -EINVAL;

        spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

        /*
         * Allocate both src and dst at the same time; therefore start at 1,
         * since 0 can't be used as an index: zero is the end marker.
         */
        for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
                int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

                if (!d40c->base->lcla_pool.alloc_map[idx]) {
                        d40c->base->lcla_pool.alloc_map[idx] = d40d;
                        d40d->lcla_alloc++;
                        ret = i;
                        break;
                }
        }

        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

        return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
                             struct d40_desc *d40d)
{
        unsigned long flags;
        int i;
        int ret = -EINVAL;

        if (chan_is_physical(d40c))
                return 0;

        spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

        for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
                int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

                if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
                        d40c->base->lcla_pool.alloc_map[idx] = NULL;
                        d40d->lcla_alloc--;
                        if (d40d->lcla_alloc == 0) {
                                ret = 0;
                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

        return ret;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
        list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
        struct d40_desc *desc = NULL;

        if (!list_empty(&d40c->client)) {
                struct d40_desc *d;
                struct d40_desc *_d;

                list_for_each_entry_safe(d, _d, &d40c->client, node) {
                        if (async_tx_test_ack(&d->txd)) {
                                d40_desc_remove(d);
                                desc = d;
                                memset(desc, 0, sizeof(*desc));
                                break;
                        }
                }
        }

        if (!desc)
                desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

        if (desc)
                INIT_LIST_HEAD(&desc->node);

        return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
        d40_pool_lli_free(d40c, d40d);
        d40_lcla_free_all(d40c, d40d);
        kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
        list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
        struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
        struct d40_phy_lli *lli_src = desc->lli_phy.src;
        void __iomem *base = chan_base(chan);

        writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
        writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
        writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
        writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

        writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
        writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
        writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
        writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}

static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
{
        list_add_tail(&desc->node, &d40c->done);
}

static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
        struct d40_lcla_pool *pool = &chan->base->lcla_pool;
        struct d40_log_lli_bidir *lli = &desc->lli_log;
        int lli_current = desc->lli_current;
        int lli_len = desc->lli_len;
        bool cyclic = desc->cyclic;
        int curr_lcla = -EINVAL;
        int first_lcla = 0;
        bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
        bool linkback;

        /*
         * We may have partially running cyclic transfers, in case we didn't
         * get enough LCLA entries.
         */
        linkback = cyclic && lli_current == 0;

        /*
         * For linkback, we need one LCLA even with only one link, because we
         * can't link back to the one in LCPA space
         */
        if (linkback || (lli_len - lli_current > 1)) {
                /*
                 * If the channel is expected to use only soft_lli don't
                 * allocate a lcla. This is to avoid a HW issue that exists
                 * in some controllers during a peripheral to memory transfer
                 * that uses linked lists.
                 */
                if (!(chan->phy_chan->use_soft_lli &&
                        chan->dma_cfg.dir == DMA_DEV_TO_MEM))
                        curr_lcla = d40_lcla_alloc_one(chan, desc);

                first_lcla = curr_lcla;
        }

        /*
         * For linkback, we normally load the LCPA in the loop since we need to
         * link it to the second LCLA and not the first.  However, if we
         * couldn't even get a first LCLA, then we have to run in LCPA and
         * reload manually.
         */
        if (!linkback || curr_lcla == -EINVAL) {
                unsigned int flags = 0;

                if (curr_lcla == -EINVAL)
                        flags |= LLI_TERM_INT;

                d40_log_lli_lcpa_write(chan->lcpa,
                                       &lli->dst[lli_current],
                                       &lli->src[lli_current],
                                       curr_lcla,
                                       flags);
                lli_current++;
        }

        if (curr_lcla < 0)
                goto out;

        for (; lli_current < lli_len; lli_current++) {
                unsigned int lcla_offset = chan->phy_chan->num * 1024 +
                                           8 * curr_lcla * 2;
                struct d40_log_lli *lcla = pool->base + lcla_offset;
                unsigned int flags = 0;
                int next_lcla;

                if (lli_current + 1 < lli_len)
                        next_lcla = d40_lcla_alloc_one(chan, desc);
                else
                        next_lcla = linkback ? first_lcla : -EINVAL;

                if (cyclic || next_lcla == -EINVAL)
                        flags |= LLI_TERM_INT;

                if (linkback && curr_lcla == first_lcla) {
                        /* First link goes in both LCPA and LCLA */
                        d40_log_lli_lcpa_write(chan->lcpa,
                                               &lli->dst[lli_current],
                                               &lli->src[lli_current],
                                               next_lcla, flags);
                }

                /*
                 * One unused LCLA in the cyclic case if the very first
                 * next_lcla fails...
                 */
                d40_log_lli_lcla_write(lcla,
                                       &lli->dst[lli_current],
                                       &lli->src[lli_current],
                                       next_lcla, flags);

                /*
                 * Cache maintenance is not needed if lcla is
                 * mapped in esram
                 */
                if (!use_esram_lcla) {
                        dma_sync_single_range_for_device(chan->base->dev,
                                                pool->dma_addr, lcla_offset,
                                                2 * sizeof(struct d40_log_lli),
                                                DMA_TO_DEVICE);
                }
                curr_lcla = next_lcla;

                if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
                        lli_current++;
                        break;
                }
        }

out:
        desc->lli_current = lli_current;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
        if (chan_is_physical(d40c)) {
                d40_phy_lli_load(d40c, d40d);
                d40d->lli_current = d40d->lli_len;
        } else
                d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
        struct d40_desc *d;

        if (list_empty(&d40c->active))
                return NULL;

        d = list_first_entry(&d40c->active,
                             struct d40_desc,
                             node);
        return d;
}

/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
        d40_desc_remove(desc);
        desc->is_in_client_list = false;
        list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
        struct d40_desc *d;

        if (list_empty(&d40c->pending_queue))
                return NULL;

        d = list_first_entry(&d40c->pending_queue,
                             struct d40_desc,
                             node);
        return d;
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
        struct d40_desc *d;

        if (list_empty(&d40c->queue))
                return NULL;

        d = list_first_entry(&d40c->queue,
                             struct d40_desc,
                             node);
        return d;
}

static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
        if (list_empty(&d40c->done))
                return NULL;

        return list_first_entry(&d40c->done, struct d40_desc, node);
}

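/*
 * E.g. STEDMA40_PSIZE_PHY_1/STEDMA40_PSIZE_LOG_1 give a burst of one
 * element; any other psize encoding n gives a burst of 2 << n elements.
 */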
static int d40_psize_2_burst_size(bool is_log, int psize)
{
        if (is_log) {
                if (psize == STEDMA40_PSIZE_LOG_1)
                        return 1;
        } else {
                if (psize == STEDMA40_PSIZE_PHY_1)
                        return 1;
        }

        return 2 << psize;
}

/*
 * The DMA only supports transmitting segments up to
 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in bytes.
 *
 * Calculate the total number of dma elements required to send the entire sg list.
 */
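/*
 * Worked example (a sketch, assuming STEDMA40_MAX_SEG_SIZE is 0xffff):
 * for data widths of 1 and 4 bytes, seg_max = ALIGN(65535 * 1, 4) - 4 =
 * 65532 bytes, so a 200000 byte transfer (a multiple of 4) maps to
 * dmalen = DIV_ROUND_UP(200000, 65532) = 4 llis.
 */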
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
        int dmalen;
        u32 max_w = max(data_width1, data_width2);
        u32 min_w = min(data_width1, data_width2);
        u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);

        if (seg_max > STEDMA40_MAX_SEG_SIZE)
                seg_max -= max_w;

        if (!IS_ALIGNED(size, max_w))
                return -EINVAL;

        if (size <= seg_max)
                dmalen = 1;
        else {
                dmalen = size / seg_max;
                if (dmalen * seg_max < size)
                        dmalen++;
        }
        return dmalen;
}

static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
                           u32 data_width1, u32 data_width2)
{
        struct scatterlist *sg;
        int i;
        int len = 0;
        int ret;

        for_each_sg(sgl, sg, sg_len, i) {
                ret = d40_size_2_dmalen(sg_dma_len(sg),
                                        data_width1, data_width2);
                if (ret < 0)
                        return ret;
                len += ret;
        }
        return len;
}

static int __d40_execute_command_phy(struct d40_chan *d40c,
                                     enum d40_command command)
{
        u32 status;
        int i;
        void __iomem *active_reg;
        int ret = 0;
        unsigned long flags;
        u32 wmask;

        if (command == D40_DMA_STOP) {
                ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
                if (ret)
                        return ret;
        }

        spin_lock_irqsave(&d40c->base->execmd_lock, flags);

        if (d40c->phy_chan->num % 2 == 0)
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
        else
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

        if (command == D40_DMA_SUSPEND_REQ) {
                status = (readl(active_reg) &
                          D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                        D40_CHAN_POS(d40c->phy_chan->num);

                if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
                        goto done;
        }

        wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
        writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
               active_reg);

        if (command == D40_DMA_SUSPEND_REQ) {

                for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
                        status = (readl(active_reg) &
                                  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                                D40_CHAN_POS(d40c->phy_chan->num);

                        cpu_relax();
                        /*
                         * Reduce the number of bus accesses while
                         * waiting for the DMA to suspend.
                         */
                        udelay(3);

                        if (status == D40_DMA_STOP ||
                            status == D40_DMA_SUSPENDED)
                                break;
                }

                if (i == D40_SUSPEND_MAX_IT) {
                        chan_err(d40c,
                                "unable to suspend the chl %d (log: %d) status %x\n",
                                d40c->phy_chan->num, d40c->log_num,
                                status);
                        dump_stack();
                        ret = -EBUSY;
                }

        }
done:
        spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
        return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
        struct d40_desc *d40d;
        struct d40_desc *_d;

        /* Release completed descriptors */
        while ((d40d = d40_first_done(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release active descriptors */
        while ((d40d = d40_first_active_get(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release queued descriptors waiting for transfer */
        while ((d40d = d40_first_queued(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release pending descriptors */
        while ((d40d = d40_first_pending(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release client owned descriptors */
        if (!list_empty(&d40c->client))
                list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
                        d40_desc_remove(d40d);
                        d40_desc_free(d40c, d40d);
                }

        /* Release descriptors in prepare queue */
        if (!list_empty(&d40c->prepare_queue))
                list_for_each_entry_safe(d40d, _d,
                                         &d40c->prepare_queue, node) {
                        d40_desc_remove(d40d);
                        d40_desc_free(d40c, d40d);
                }

        d40c->pending_tx = 0;
}

static void __d40_config_set_event(struct d40_chan *d40c,
                                   enum d40_events event_type, u32 event,
                                   int reg)
{
        void __iomem *addr = chan_base(d40c) + reg;
        int tries;
        u32 status;

        switch (event_type) {

        case D40_DEACTIVATE_EVENTLINE:

                writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
                       | ~D40_EVENTLINE_MASK(event), addr);
                break;

        case D40_SUSPEND_REQ_EVENTLINE:
                status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
                          D40_EVENTLINE_POS(event);

                if (status == D40_DEACTIVATE_EVENTLINE ||
                    status == D40_SUSPEND_REQ_EVENTLINE)
                        break;

                writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
                       | ~D40_EVENTLINE_MASK(event), addr);

                for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {

                        status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
                                  D40_EVENTLINE_POS(event);

                        cpu_relax();
                        /*
                         * Reduce the number of bus accesses while
                         * waiting for the DMA to suspend.
                         */
                        udelay(3);

                        if (status == D40_DEACTIVATE_EVENTLINE)
                                break;
                }

                if (tries == D40_SUSPEND_MAX_IT) {
                        chan_err(d40c,
                                "unable to stop the event_line chl %d (log: %d) status %x\n",
                                d40c->phy_chan->num, d40c->log_num, status);
                }
                break;

        case D40_ACTIVATE_EVENTLINE:
                /*
                 * The hardware sometimes doesn't register the enable when src
                 * and dst event lines are active on the same logical channel.
                 * Retry to ensure it does.  Usually only one retry is
                 * sufficient.
                 */
                tries = 100;
                while (--tries) {
                        writel((D40_ACTIVATE_EVENTLINE <<
                                D40_EVENTLINE_POS(event)) |
                                ~D40_EVENTLINE_MASK(event), addr);

                        if (readl(addr) & D40_EVENTLINE_MASK(event))
                                break;
                }

                if (tries != 99)
                        dev_dbg(chan2dev(d40c),
                                "[%s] workaround enable S%cLNK (%d tries)\n",
                                __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
                                100 - tries);

                WARN_ON(!tries);
                break;

        case D40_ROUND_EVENTLINE:
                BUG();
                break;
        }
}

static void d40_config_set_event(struct d40_chan *d40c,
                                 enum d40_events event_type)
{
        u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

        /* Enable event line connected to device (or memcpy) */
        if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
            (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
                __d40_config_set_event(d40c, event_type, event,
                                       D40_CHAN_REG_SSLNK);

        if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
                __d40_config_set_event(d40c, event_type, event,
                                       D40_CHAN_REG_SDLNK);
}

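/*
 * A non-zero SSLNK/SDLNK means at least one event line is still linked;
 * __d40_execute_command_log() uses this to decide when the underlying
 * physical channel may actually be stopped.
 */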
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
        void __iomem *chanbase = chan_base(d40c);
        u32 val;

        val = readl(chanbase + D40_CHAN_REG_SSLNK);
        val |= readl(chanbase + D40_CHAN_REG_SDLNK);

        return val;
}

static int
__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
{
        unsigned long flags;
        int ret = 0;
        u32 active_status;
        void __iomem *active_reg;

        if (d40c->phy_chan->num % 2 == 0)
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
        else
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

        spin_lock_irqsave(&d40c->phy_chan->lock, flags);

        switch (command) {
        case D40_DMA_STOP:
        case D40_DMA_SUSPEND_REQ:

                active_status = (readl(active_reg) &
                                 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                                 D40_CHAN_POS(d40c->phy_chan->num);

                if (active_status == D40_DMA_RUN)
                        d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
                else
                        d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);

                if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
                        ret = __d40_execute_command_phy(d40c, command);

                break;

        case D40_DMA_RUN:

                d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
                ret = __d40_execute_command_phy(d40c, command);
                break;

        case D40_DMA_SUSPENDED:
                BUG();
                break;
        }

        spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
        return ret;
}

static int d40_channel_execute_command(struct d40_chan *d40c,
                                       enum d40_command command)
{
        if (chan_is_logical(d40c))
                return __d40_execute_command_log(d40c, command);
        else
                return __d40_execute_command_phy(d40c, command);
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
        static const unsigned int phy_map[] = {
                [STEDMA40_PCHAN_BASIC_MODE]
                        = D40_DREG_PRMO_PCHAN_BASIC,
                [STEDMA40_PCHAN_MODULO_MODE]
                        = D40_DREG_PRMO_PCHAN_MODULO,
                [STEDMA40_PCHAN_DOUBLE_DST_MODE]
                        = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
        };
        static const unsigned int log_map[] = {
                [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
                        = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
                [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
                        = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
                [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
                        = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
        };

        if (chan_is_physical(d40c))
                return phy_map[d40c->dma_cfg.mode_opt];
        else
                return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
        u32 addr_base;
        u32 var;

        /* Odd addresses are even addresses + 4 */
        addr_base = (d40c->phy_chan->num % 2) * 4;
        /* Setup channel mode to logical or physical */
        var = ((u32)(chan_is_logical(d40c)) + 1) <<
                D40_CHAN_POS(d40c->phy_chan->num);
        writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

        /* Setup operational mode option register */
        var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

        writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

        if (chan_is_logical(d40c)) {
                int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
                           & D40_SREG_ELEM_LOG_LIDX_MASK;
                void __iomem *chanbase = chan_base(d40c);

                /* Set default config for CFG reg */
                writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
                writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

                /* Set LIDX for lcla */
                writel(lidx, chanbase + D40_CHAN_REG_SSELT);
                writel(lidx, chanbase + D40_CHAN_REG_SDELT);

                /* Clear LNK which will be used by d40_chan_has_events() */
                writel(0, chanbase + D40_CHAN_REG_SSLNK);
                writel(0, chanbase + D40_CHAN_REG_SDLNK);
        }
}

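/*
 * E.g. 16 elements still to transfer at a 4 byte element width gives a
 * residue of 64 bytes.
 */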
static u32 d40_residue(struct d40_chan *d40c)
{
        u32 num_elt;

        if (chan_is_logical(d40c))
                num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
                        >> D40_MEM_LCSP2_ECNT_POS;
        else {
                u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
                num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
                          >> D40_SREG_ELEM_PHY_ECNT_POS;
        }

        return num_elt * d40c->dma_cfg.dst_info.data_width;
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
        bool is_link;

        if (chan_is_logical(d40c))
                is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
        else
                is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
                          & D40_SREG_LNK_PHYS_LNK_MASK;

        return is_link;
}

1432 static int d40_pause(struct d40_chan *d40c)
1433 {
1434         int res = 0;
1435         unsigned long flags;
1436
1437         if (!d40c->busy)
1438                 return 0;
1439
1440         spin_lock_irqsave(&d40c->lock, flags);
1441         pm_runtime_get_sync(d40c->base->dev);
1442
1443         res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1444
1445         pm_runtime_mark_last_busy(d40c->base->dev);
1446         pm_runtime_put_autosuspend(d40c->base->dev);
1447         spin_unlock_irqrestore(&d40c->lock, flags);
1448         return res;
1449 }
1450
1451 static int d40_resume(struct d40_chan *d40c)
1452 {
1453         int res = 0;
1454         unsigned long flags;
1455
1456         if (!d40c->busy)
1457                 return 0;
1458
1459         spin_lock_irqsave(&d40c->lock, flags);
1460         pm_runtime_get_sync(d40c->base->dev);
1461
1462         /* If there are bytes left to transfer or a linked tx, resume the job */
1463         if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1464                 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1465
1466         pm_runtime_mark_last_busy(d40c->base->dev);
1467         pm_runtime_put_autosuspend(d40c->base->dev);
1468         spin_unlock_irqrestore(&d40c->lock, flags);
1469         return res;
1470 }
1471
1472 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1473 {
1474         struct d40_chan *d40c = container_of(tx->chan,
1475                                              struct d40_chan,
1476                                              chan);
1477         struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1478         unsigned long flags;
1479         dma_cookie_t cookie;
1480
1481         spin_lock_irqsave(&d40c->lock, flags);
1482         cookie = dma_cookie_assign(tx);
1483         d40_desc_queue(d40c, d40d);
1484         spin_unlock_irqrestore(&d40c->lock, flags);
1485
1486         return cookie;
1487 }
1488
1489 static int d40_start(struct d40_chan *d40c)
1490 {
1491         return d40_channel_execute_command(d40c, D40_DMA_RUN);
1492 }
1493
1494 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1495 {
1496         struct d40_desc *d40d;
1497         int err;
1498
1499         /* Start queued jobs, if any */
1500         d40d = d40_first_queued(d40c);
1501
1502         if (d40d != NULL) {
1503                 if (!d40c->busy) {
1504                         d40c->busy = true;
1505                         pm_runtime_get_sync(d40c->base->dev);
1506                 }
1507
1508                 /* Remove from queue */
1509                 d40_desc_remove(d40d);
1510
1511                 /* Add to active queue */
1512                 d40_desc_submit(d40c, d40d);
1513
1514                 /* Initiate DMA job */
1515                 d40_desc_load(d40c, d40d);
1516
1517                 /* Start dma job */
1518                 err = d40_start(d40c);
1519
1520                 if (err)
1521                         return NULL;
1522         }
1523
1524         return d40d;
1525 }
1526
1527 /* called from interrupt context */
1528 static void dma_tc_handle(struct d40_chan *d40c)
1529 {
1530         struct d40_desc *d40d;
1531
1532         /* Get first active entry from list */
1533         d40d = d40_first_active_get(d40c);
1534
1535         if (d40d == NULL)
1536                 return;
1537
1538         if (d40d->cyclic) {
1539                 /*
1540                  * If this was a partially loaded list, we need to reload
1541                  * it, but only once the whole list has completed.  We must
1542                  * check for completion here because the interrupt fires for
1543                  * every link, not just the last one.
1544                  */
1545                 if (d40d->lli_current < d40d->lli_len
1546                     && !d40_tx_is_linked(d40c)
1547                     && !d40_residue(d40c)) {
1548                         d40_lcla_free_all(d40c, d40d);
1549                         d40_desc_load(d40c, d40d);
1550                         (void) d40_start(d40c);
1551
1552                         if (d40d->lli_current == d40d->lli_len)
1553                                 d40d->lli_current = 0;
1554                 }
1555         } else {
1556                 d40_lcla_free_all(d40c, d40d);
1557
1558                 if (d40d->lli_current < d40d->lli_len) {
1559                         d40_desc_load(d40c, d40d);
1560                         /* Start dma job */
1561                         (void) d40_start(d40c);
1562                         return;
1563                 }
1564
1565                 if (d40_queue_start(d40c) == NULL) {
1566                         d40c->busy = false;
1567
1568                         pm_runtime_mark_last_busy(d40c->base->dev);
1569                         pm_runtime_put_autosuspend(d40c->base->dev);
1570                 }
1571
1572                 d40_desc_remove(d40d);
1573                 d40_desc_done(d40c, d40d);
1574         }
1575
1576         d40c->pending_tx++;
1577         tasklet_schedule(&d40c->tasklet);
1578
1579 }
1580
1581 static void dma_tasklet(unsigned long data)
1582 {
1583         struct d40_chan *d40c = (struct d40_chan *) data;
1584         struct d40_desc *d40d;
1585         unsigned long flags;
1586         bool callback_active;
1587         dma_async_tx_callback callback;
1588         void *callback_param;
1589
1590         spin_lock_irqsave(&d40c->lock, flags);
1591
1592         /* Get first entry from the done list */
1593         d40d = d40_first_done(d40c);
1594         if (d40d == NULL) {
1595         /* Check if we have reached here for a cyclic job */
1596                 d40d = d40_first_active_get(d40c);
1597                 if (d40d == NULL || !d40d->cyclic)
1598                         goto err;
1599         }
1600
1601         if (!d40d->cyclic)
1602                 dma_cookie_complete(&d40d->txd);
1603
1604         /*
1605          * When terminating a channel, pending_tx is set to zero.
1606          * This prevents any finished active jobs from returning to the client.
1607          */
1608         if (d40c->pending_tx == 0) {
1609                 spin_unlock_irqrestore(&d40c->lock, flags);
1610                 return;
1611         }
1612
1613         /* Callback to client */
1614         callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
1615         callback = d40d->txd.callback;
1616         callback_param = d40d->txd.callback_param;
1617
1618         if (!d40d->cyclic) {
1619                 if (async_tx_test_ack(&d40d->txd)) {
1620                         d40_desc_remove(d40d);
1621                         d40_desc_free(d40c, d40d);
1622                 } else if (!d40d->is_in_client_list) {
1623                         d40_desc_remove(d40d);
1624                         d40_lcla_free_all(d40c, d40d);
1625                         list_add_tail(&d40d->node, &d40c->client);
1626                         d40d->is_in_client_list = true;
1627                 }
1628         }
1629
1630         d40c->pending_tx--;
1631
1632         if (d40c->pending_tx)
1633                 tasklet_schedule(&d40c->tasklet);
1634
1635         spin_unlock_irqrestore(&d40c->lock, flags);
1636
1637         if (callback_active && callback)
1638                 callback(callback_param);
1639
1640         return;
1641
1642 err:
1643         /* Rescue manoeuvre if receiving double interrupts */
1644         if (d40c->pending_tx > 0)
1645                 d40c->pending_tx--;
1646         spin_unlock_irqrestore(&d40c->lock, flags);
1647 }
1648
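/*
 * A worked example of the bitmap walk in d40_handle_interrupt() below,
 * assuming 32-bit longs: the il_size status registers are read into
 * regs[] and treated as one long bitmap.  If find_next_bit() returns
 * chan = 37, then row = 37 / 32 = 1 (the second entry of the lookup
 * table) and idx = 37 % 32 = 5 (bit 5 within that register);
 * il[1].offset then decides whether this names physical channel 5 or
 * logical channel il[1].offset + 5.
 */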
1649 static irqreturn_t d40_handle_interrupt(int irq, void *data)
1650 {
1651         int i;
1652         u32 idx;
1653         u32 row;
1654         long chan = -1;
1655         struct d40_chan *d40c;
1656         unsigned long flags;
1657         struct d40_base *base = data;
1658         u32 regs[base->gen_dmac.il_size];
1659         struct d40_interrupt_lookup *il = base->gen_dmac.il;
1660         u32 il_size = base->gen_dmac.il_size;
1661
1662         spin_lock_irqsave(&base->interrupt_lock, flags);
1663
1664         /* Read interrupt status of both logical and physical channels */
1665         for (i = 0; i < il_size; i++)
1666                 regs[i] = readl(base->virtbase + il[i].src);
1667
1668         for (;;) {
1669
1670                 chan = find_next_bit((unsigned long *)regs,
1671                                      BITS_PER_LONG * il_size, chan + 1);
1672
1673                 /* No more set bits found? */
1674                 if (chan == BITS_PER_LONG * il_size)
1675                         break;
1676
1677                 row = chan / BITS_PER_LONG;
1678                 idx = chan & (BITS_PER_LONG - 1);
1679
1680                 if (il[row].offset == D40_PHY_CHAN)
1681                         d40c = base->lookup_phy_chans[idx];
1682                 else
1683                         d40c = base->lookup_log_chans[il[row].offset + idx];
1684
1685                 if (!d40c) {
1686                         /*
1687                          * No error because this can happen if something else
1688                          * in the system is using the channel.
1689                          */
1690                         continue;
1691                 }
1692
1693                 /* ACK interrupt */
1694                 writel(BIT(idx), base->virtbase + il[row].clr);
1695
1696                 spin_lock(&d40c->lock);
1697
1698                 if (!il[row].is_error)
1699                         dma_tc_handle(d40c);
1700                 else
1701                         d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1702                                 chan, il[row].offset, idx);
1703
1704                 spin_unlock(&d40c->lock);
1705         }
1706
1707         spin_unlock_irqrestore(&base->interrupt_lock, flags);
1708
1709         return IRQ_HANDLED;
1710 }
1711
1712 static int d40_validate_conf(struct d40_chan *d40c,
1713                              struct stedma40_chan_cfg *conf)
1714 {
1715         int res = 0;
1716         bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1717
1718         if (!conf->dir) {
1719                 chan_err(d40c, "Invalid direction.\n");
1720                 res = -EINVAL;
1721         }
1722
1723         if ((is_log && conf->dev_type > d40c->base->num_log_chans)  ||
1724             (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1725             (conf->dev_type < 0)) {
1726                 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1727                 res = -EINVAL;
1728         }
1729
1730         if (conf->dir == DMA_DEV_TO_DEV) {
1731                 /*
1732                  * The DMAC hardware supports it.  Support will be added
1733                  * to this driver if any DMA client requires it.
1734                  */
1735                 chan_err(d40c, "periph to periph not supported\n");
1736                 res = -EINVAL;
1737         }
1738
1739         if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1740             conf->src_info.data_width !=
1741             d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1742             conf->dst_info.data_width) {
1743                 /*
1744                  * The DMAC hardware only supports
1745                  * src (burst x width) == dst (burst x width)
1746                  */
1747
1748                 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1749                 res = -EINVAL;
1750         }
1751
1752         return res;
1753 }
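/*
 * An illustrative pairing that passes the (burst x width) check above:
 * a source doing bursts of 4 elements of 2 bytes (8 bytes per burst)
 * matched with a destination doing bursts of 2 elements of 4 bytes
 * (also 8 bytes per burst).  Which psize value yields which burst size
 * is decided by d40_psize_2_burst_size().
 */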
1754
1755 static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1756                                bool is_src, int log_event_line, bool is_log,
1757                                bool *first_user)
1758 {
1759         unsigned long flags;
1760         spin_lock_irqsave(&phy->lock, flags);
1761
1762         *first_user = ((phy->allocated_src | phy->allocated_dst)
1763                         == D40_ALLOC_FREE);
1764
1765         if (!is_log) {
1766                 /* Physical interrupts are masked per physical full channel */
1767                 if (phy->allocated_src == D40_ALLOC_FREE &&
1768                     phy->allocated_dst == D40_ALLOC_FREE) {
1769                         phy->allocated_dst = D40_ALLOC_PHY;
1770                         phy->allocated_src = D40_ALLOC_PHY;
1771                         goto found;
1772                 } else
1773                         goto not_found;
1774         }
1775
1776         /* Logical channel */
1777         if (is_src) {
1778                 if (phy->allocated_src == D40_ALLOC_PHY)
1779                         goto not_found;
1780
1781                 if (phy->allocated_src == D40_ALLOC_FREE)
1782                         phy->allocated_src = D40_ALLOC_LOG_FREE;
1783
1784                 if (!(phy->allocated_src & BIT(log_event_line))) {
1785                         phy->allocated_src |= BIT(log_event_line);
1786                         goto found;
1787                 } else
1788                         goto not_found;
1789         } else {
1790                 if (phy->allocated_dst == D40_ALLOC_PHY)
1791                         goto not_found;
1792
1793                 if (phy->allocated_dst == D40_ALLOC_FREE)
1794                         phy->allocated_dst = D40_ALLOC_LOG_FREE;
1795
1796                 if (!(phy->allocated_dst & BIT(log_event_line))) {
1797                         phy->allocated_dst |= BIT(log_event_line);
1798                         goto found;
1799                 } else
1800                         goto not_found;
1801         }
1802
1803 not_found:
1804         spin_unlock_irqrestore(&phy->lock, flags);
1805         return false;
1806 found:
1807         spin_unlock_irqrestore(&phy->lock, flags);
1808         return true;
1809 }
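/*
 * Sketch of how the allocation masks evolve, based on the bit
 * definitions above: a completely free half channel holds D40_ALLOC_FREE
 * (bit 31).  Claiming the channel as physical replaces that with
 * D40_ALLOC_PHY (bit 30) on both halves; claiming it as logical first
 * clears the mask to D40_ALLOC_LOG_FREE (0) and then sets one bit per
 * allocated event line, so several logical channels can share one
 * physical half as long as their event lines differ.
 */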
1810
1811 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1812                                int log_event_line)
1813 {
1814         unsigned long flags;
1815         bool is_free = false;
1816
1817         spin_lock_irqsave(&phy->lock, flags);
1818         if (!log_event_line) {
1819                 phy->allocated_dst = D40_ALLOC_FREE;
1820                 phy->allocated_src = D40_ALLOC_FREE;
1821                 is_free = true;
1822                 goto out;
1823         }
1824
1825         /* Logical channel */
1826         if (is_src) {
1827                 phy->allocated_src &= ~BIT(log_event_line);
1828                 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1829                         phy->allocated_src = D40_ALLOC_FREE;
1830         } else {
1831                 phy->allocated_dst &= ~BIT(log_event_line);
1832                 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1833                         phy->allocated_dst = D40_ALLOC_FREE;
1834         }
1835
1836         is_free = ((phy->allocated_src | phy->allocated_dst) ==
1837                    D40_ALLOC_FREE);
1838
1839 out:
1840         spin_unlock_irqrestore(&phy->lock, flags);
1841
1842         return is_free;
1843 }
1844
1845 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1846 {
1847         int dev_type = d40c->dma_cfg.dev_type;
1848         int event_group;
1849         int event_line;
1850         struct d40_phy_res *phys;
1851         int i;
1852         int j;
1853         int log_num;
1854         int num_phy_chans;
1855         bool is_src;
1856         bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1857
1858         phys = d40c->base->phy_res;
1859         num_phy_chans = d40c->base->num_phy_chans;
1860
1861         if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
1862                 log_num = 2 * dev_type;
1863                 is_src = true;
1864         } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
1865                    d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1866                 /* dst event lines are used for logical memcpy */
1867                 log_num = 2 * dev_type + 1;
1868                 is_src = false;
1869         } else
1870                 return -EINVAL;
1871
1872         event_group = D40_TYPE_TO_GROUP(dev_type);
1873         event_line = D40_TYPE_TO_EVENT(dev_type);
1874
1875         if (!is_log) {
1876                 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1877                         /* Find physical half channel */
1878                         if (d40c->dma_cfg.use_fixed_channel) {
1879                                 i = d40c->dma_cfg.phy_channel;
1880                                 if (d40_alloc_mask_set(&phys[i], is_src,
1881                                                        0, is_log,
1882                                                        first_phy_user))
1883                                         goto found_phy;
1884                         } else {
1885                                 for (i = 0; i < num_phy_chans; i++) {
1886                                         if (d40_alloc_mask_set(&phys[i], is_src,
1887                                                        0, is_log,
1888                                                        first_phy_user))
1889                                                 goto found_phy;
1890                                 }
1891                         }
1892                 } else
1893                         for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1894                                 int phy_num = j  + event_group * 2;
1895                                 for (i = phy_num; i < phy_num + 2; i++) {
1896                                         if (d40_alloc_mask_set(&phys[i],
1897                                                                is_src,
1898                                                                0,
1899                                                                is_log,
1900                                                                first_phy_user))
1901                                                 goto found_phy;
1902                                 }
1903                         }
1904                 return -EINVAL;
1905 found_phy:
1906                 d40c->phy_chan = &phys[i];
1907                 d40c->log_num = D40_PHY_CHAN;
1908                 goto out;
1909         }
1910         if (dev_type == -1)
1911                 return -EINVAL;
1912
1913         /* Find logical channel */
1914         for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1915                 int phy_num = j + event_group * 2;
1916
1917                 if (d40c->dma_cfg.use_fixed_channel) {
1918                         i = d40c->dma_cfg.phy_channel;
1919
1920                         if ((i != phy_num) && (i != phy_num + 1)) {
1921                                 dev_err(chan2dev(d40c),
1922                                         "invalid fixed phy channel %d\n", i);
1923                                 return -EINVAL;
1924                         }
1925
1926                         if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1927                                                is_log, first_phy_user))
1928                                 goto found_log;
1929
1930                         dev_err(chan2dev(d40c),
1931                                 "could not allocate fixed phy channel %d\n", i);
1932                         return -EINVAL;
1933                 }
1934
1935                 /*
1936                  * Spread logical channels across all available physical
1937                  * channels rather than packing every logical channel onto
1938                  * the first available physical channel.
1939                  */
1940                 if (is_src) {
1941                         for (i = phy_num; i < phy_num + 2; i++) {
1942                                 if (d40_alloc_mask_set(&phys[i], is_src,
1943                                                        event_line, is_log,
1944                                                        first_phy_user))
1945                                         goto found_log;
1946                         }
1947                 } else {
1948                         for (i = phy_num + 1; i >= phy_num; i--) {
1949                                 if (d40_alloc_mask_set(&phys[i], is_src,
1950                                                        event_line, is_log,
1951                                                        first_phy_user))
1952                                         goto found_log;
1953                         }
1954                 }
1955         }
1956         return -EINVAL;
1957
1958 found_log:
1959         d40c->phy_chan = &phys[i];
1960         d40c->log_num = log_num;
1961 out:
1962
1963         if (is_log)
1964                 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1965         else
1966                 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1967
1968         return 0;
1969
1970 }
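/*
 * A sketch of the event-group-to-channel mapping used above: the
 * physical channels are scanned in banks of eight, and event group N
 * owns the pair (8*k + 2*N, 8*k + 2*N + 1) in every bank k.  For event
 * group 2 this means channel pairs 4/5, 12/13, 20/21, ... are tried in
 * turn.
 */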
1971
1972 static int d40_config_memcpy(struct d40_chan *d40c)
1973 {
1974         dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1975
1976         if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1977                 d40c->dma_cfg = dma40_memcpy_conf_log;
1978                 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
1979
1980                 d40_log_cfg(&d40c->dma_cfg,
1981                             &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1982
1983         } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1984                    dma_has_cap(DMA_SLAVE, cap)) {
1985                 d40c->dma_cfg = dma40_memcpy_conf_phy;
1986
1987                 /* Generate interrupt at end of transfer or relink. */
1988                 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
1989
1990                 /* Generate interrupt on error. */
1991                 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1992                 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1993
1994         } else {
1995                 chan_err(d40c, "No memcpy\n");
1996                 return -EINVAL;
1997         }
1998
1999         return 0;
2000 }
2001
2002 static int d40_free_dma(struct d40_chan *d40c)
2003 {
2004
2005         int res = 0;
2006         u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2007         struct d40_phy_res *phy = d40c->phy_chan;
2008         bool is_src;
2009
2010         /* Terminate all queued and active transfers */
2011         d40_term_all(d40c);
2012
2013         if (phy == NULL) {
2014                 chan_err(d40c, "phy == null\n");
2015                 return -EINVAL;
2016         }
2017
2018         if (phy->allocated_src == D40_ALLOC_FREE &&
2019             phy->allocated_dst == D40_ALLOC_FREE) {
2020                 chan_err(d40c, "channel already free\n");
2021                 return -EINVAL;
2022         }
2023
2024         if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2025             d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
2026                 is_src = false;
2027         else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2028                 is_src = true;
2029         else {
2030                 chan_err(d40c, "Unknown direction\n");
2031                 return -EINVAL;
2032         }
2033
2034         pm_runtime_get_sync(d40c->base->dev);
2035         res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2036         if (res) {
2037                 chan_err(d40c, "stop failed\n");
2038                 goto out;
2039         }
2040
2041         d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
2042
2043         if (chan_is_logical(d40c))
2044                 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2045         else
2046                 d40c->base->lookup_phy_chans[phy->num] = NULL;
2047
2048         if (d40c->busy) {
2049                 pm_runtime_mark_last_busy(d40c->base->dev);
2050                 pm_runtime_put_autosuspend(d40c->base->dev);
2051         }
2052
2053         d40c->busy = false;
2054         d40c->phy_chan = NULL;
2055         d40c->configured = false;
2056 out:
2057
2058         pm_runtime_mark_last_busy(d40c->base->dev);
2059         pm_runtime_put_autosuspend(d40c->base->dev);
2060         return res;
2061 }
2062
2063 static bool d40_is_paused(struct d40_chan *d40c)
2064 {
2065         void __iomem *chanbase = chan_base(d40c);
2066         bool is_paused = false;
2067         unsigned long flags;
2068         void __iomem *active_reg;
2069         u32 status;
2070         u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2071
2072         spin_lock_irqsave(&d40c->lock, flags);
2073
2074         if (chan_is_physical(d40c)) {
2075                 if (d40c->phy_chan->num % 2 == 0)
2076                         active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2077                 else
2078                         active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2079
2080                 status = (readl(active_reg) &
2081                           D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2082                         D40_CHAN_POS(d40c->phy_chan->num);
2083                 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
2084                         is_paused = true;
2085
2086                 goto _exit;
2087         }
2088
2089         if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2090             d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
2091                 status = readl(chanbase + D40_CHAN_REG_SDLNK);
2092         } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
2093                 status = readl(chanbase + D40_CHAN_REG_SSLNK);
2094         } else {
2095                 chan_err(d40c, "Unknown direction\n");
2096                 goto _exit;
2097         }
2098
2099         status = (status & D40_EVENTLINE_MASK(event)) >>
2100                 D40_EVENTLINE_POS(event);
2101
2102         if (status != D40_DMA_RUN)
2103                 is_paused = true;
2104 _exit:
2105         spin_unlock_irqrestore(&d40c->lock, flags);
2106         return is_paused;
2107
2108 }
2109
2110 static u32 stedma40_residue(struct dma_chan *chan)
2111 {
2112         struct d40_chan *d40c =
2113                 container_of(chan, struct d40_chan, chan);
2114         u32 bytes_left;
2115         unsigned long flags;
2116
2117         spin_lock_irqsave(&d40c->lock, flags);
2118         bytes_left = d40_residue(d40c);
2119         spin_unlock_irqrestore(&d40c->lock, flags);
2120
2121         return bytes_left;
2122 }
2123
2124 static int
2125 d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
2126                 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2127                 unsigned int sg_len, dma_addr_t src_dev_addr,
2128                 dma_addr_t dst_dev_addr)
2129 {
2130         struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2131         struct stedma40_half_channel_info *src_info = &cfg->src_info;
2132         struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2133         int ret;
2134
2135         ret = d40_log_sg_to_lli(sg_src, sg_len,
2136                                 src_dev_addr,
2137                                 desc->lli_log.src,
2138                                 chan->log_def.lcsp1,
2139                                 src_info->data_width,
2140                                 dst_info->data_width);
2141
2142         ret = d40_log_sg_to_lli(sg_dst, sg_len,
2143                                 dst_dev_addr,
2144                                 desc->lli_log.dst,
2145                                 chan->log_def.lcsp3,
2146                                 dst_info->data_width,
2147                                 src_info->data_width);
2148
2149         return ret < 0 ? ret : 0;
2150 }
2151
2152 static int
2153 d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
2154                 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2155                 unsigned int sg_len, dma_addr_t src_dev_addr,
2156                 dma_addr_t dst_dev_addr)
2157 {
2158         struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2159         struct stedma40_half_channel_info *src_info = &cfg->src_info;
2160         struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2161         unsigned long flags = 0;
2162         int ret;
2163
2164         if (desc->cyclic)
2165                 flags |= LLI_CYCLIC | LLI_TERM_INT;
2166
2167         ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
2168                                 desc->lli_phy.src,
2169                                 virt_to_phys(desc->lli_phy.src),
2170                                 chan->src_def_cfg,
2171                                 src_info, dst_info, flags);
2172
2173         ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
2174                                 desc->lli_phy.dst,
2175                                 virt_to_phys(desc->lli_phy.dst),
2176                                 chan->dst_def_cfg,
2177                                 dst_info, src_info, flags);
2178
2179         dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
2180                                    desc->lli_pool.size, DMA_TO_DEVICE);
2181
2182         return ret < 0 ? ret : 0;
2183 }
2184
2185 static struct d40_desc *
2186 d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
2187               unsigned int sg_len, unsigned long dma_flags)
2188 {
2189         struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2190         struct d40_desc *desc;
2191         int ret;
2192
2193         desc = d40_desc_get(chan);
2194         if (!desc)
2195                 return NULL;
2196
2197         desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
2198                                         cfg->dst_info.data_width);
2199         if (desc->lli_len < 0) {
2200                 chan_err(chan, "Unaligned size\n");
2201                 goto err;
2202         }
2203
2204         ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2205         if (ret < 0) {
2206                 chan_err(chan, "Could not allocate lli\n");
2207                 goto err;
2208         }
2209
2210         desc->lli_current = 0;
2211         desc->txd.flags = dma_flags;
2212         desc->txd.tx_submit = d40_tx_submit;
2213
2214         dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2215
2216         return desc;
2217
2218 err:
2219         d40_desc_free(chan, desc);
2220         return NULL;
2221 }
2222
2223 static struct dma_async_tx_descriptor *
2224 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2225             struct scatterlist *sg_dst, unsigned int sg_len,
2226             enum dma_transfer_direction direction, unsigned long dma_flags)
2227 {
2228         struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
2229         dma_addr_t src_dev_addr = 0;
2230         dma_addr_t dst_dev_addr = 0;
2231         struct d40_desc *desc;
2232         unsigned long flags;
2233         int ret;
2234
2235         if (!chan->phy_chan) {
2236                 chan_err(chan, "Cannot prepare unallocated channel\n");
2237                 return NULL;
2238         }
2239
2240         spin_lock_irqsave(&chan->lock, flags);
2241
2242         desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2243         if (desc == NULL)
2244                 goto err;
2245
2246         if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2247                 desc->cyclic = true;
2248
2249         if (direction == DMA_DEV_TO_MEM)
2250                 src_dev_addr = chan->runtime_addr;
2251         else if (direction == DMA_MEM_TO_DEV)
2252                 dst_dev_addr = chan->runtime_addr;
2253
2254         if (chan_is_logical(chan))
2255                 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
2256                                       sg_len, src_dev_addr, dst_dev_addr);
2257         else
2258                 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
2259                                       sg_len, src_dev_addr, dst_dev_addr);
2260
2261         if (ret) {
2262                 chan_err(chan, "Failed to prepare %s sg job: %d\n",
2263                          chan_is_logical(chan) ? "log" : "phy", ret);
2264                 goto err;
2265         }
2266
2267         /*
2268          * Add the descriptor to the prepare queue so that it can
2269          * be freed later in terminate_all.
2270          */
2271         list_add_tail(&desc->node, &chan->prepare_queue);
2272
2273         spin_unlock_irqrestore(&chan->lock, flags);
2274
2275         return &desc->txd;
2276
2277 err:
2278         if (desc)
2279                 d40_desc_free(chan, desc);
2280         spin_unlock_irqrestore(&chan->lock, flags);
2281         return NULL;
2282 }
2283
2284 bool stedma40_filter(struct dma_chan *chan, void *data)
2285 {
2286         struct stedma40_chan_cfg *info = data;
2287         struct d40_chan *d40c =
2288                 container_of(chan, struct d40_chan, chan);
2289         int err;
2290
2291         if (data) {
2292                 err = d40_validate_conf(d40c, info);
2293                 if (!err)
2294                         d40c->dma_cfg = *info;
2295         } else
2296                 err = d40_config_memcpy(d40c);
2297
2298         if (!err)
2299                 d40c->configured = true;
2300
2301         return err == 0;
2302 }
2303 EXPORT_SYMBOL(stedma40_filter);
2304
2305 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2306 {
2307         bool realtime = d40c->dma_cfg.realtime;
2308         bool highprio = d40c->dma_cfg.high_priority;
2309         u32 rtreg;
2310         u32 event = D40_TYPE_TO_EVENT(dev_type);
2311         u32 group = D40_TYPE_TO_GROUP(dev_type);
2312         u32 bit = BIT(event);
2313         u32 prioreg;
2314         struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2315
2316         rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
2317         /*
2318          * Due to a hardware bug, in some cases a logical channel triggered by
2319          * a high priority destination event line can generate extra packet
2320          * transactions.
2321          *
2322          * The workaround is to not set the high priority level for the
2323          * destination event lines that trigger logical channels.
2324          */
2325         if (!src && chan_is_logical(d40c))
2326                 highprio = false;
2327
2328         prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
2329
2330         /* Destination event lines are stored in the upper halfword */
2331         if (!src)
2332                 bit <<= 16;
2333
2334         writel(bit, d40c->base->virtbase + prioreg + group * 4);
2335         writel(bit, d40c->base->virtbase + rtreg + group * 4);
2336 }
2337
2338 static void d40_set_prio_realtime(struct d40_chan *d40c)
2339 {
2340         if (d40c->base->rev < 3)
2341                 return;
2342
2343         if ((d40c->dma_cfg.dir ==  DMA_DEV_TO_MEM) ||
2344             (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2345                 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2346
2347         if ((d40c->dma_cfg.dir ==  DMA_MEM_TO_DEV) ||
2348             (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2349                 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2350 }
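/*
 * An illustrative write sequence for the function above: a realtime,
 * high-priority *source* event line 9 in event group 1 results in BIT(9)
 * being written to both the realtime-enable and high-priority-enable
 * registers at offset group * 4.  Had it been a destination event line,
 * the bit would first have been shifted into the upper halfword, and for
 * a logical channel the high-priority bit would have gone to the clear
 * register instead, per the workaround above.
 */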
2351
2352 #define D40_DT_FLAGS_MODE(flags)       ((flags >> 0) & 0x1)
2353 #define D40_DT_FLAGS_DIR(flags)        ((flags >> 1) & 0x1)
2354 #define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
2355 #define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
2356 #define D40_DT_FLAGS_HIGH_PRIO(flags)  ((flags >> 4) & 0x1)
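/*
 * A hypothetical consumer node exercising these flags (the exact binding
 * should be checked against the ste-dma40 DT documentation):
 *
 *	dmas = <&dma 13 0 0x10>;
 *
 * args[0] = 13 is the device type, args[1] is ignored because the
 * fixed-channel flag (bit 3) is clear, and args[2] = 0x10 sets only
 * bit 4: a high-priority, logical-mode (bit 0 = 0), mem-to-dev
 * (bit 1 = 0), little-endian channel.
 */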
2357
2358 static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
2359                                   struct of_dma *ofdma)
2360 {
2361         struct stedma40_chan_cfg cfg;
2362         dma_cap_mask_t cap;
2363         u32 flags;
2364
2365         memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
2366
2367         dma_cap_zero(cap);
2368         dma_cap_set(DMA_SLAVE, cap);
2369
2370         cfg.dev_type = dma_spec->args[0];
2371         flags = dma_spec->args[2];
2372
2373         switch (D40_DT_FLAGS_MODE(flags)) {
2374         case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
2375         case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
2376         }
2377
2378         switch (D40_DT_FLAGS_DIR(flags)) {
2379         case 0:
2380                 cfg.dir = DMA_MEM_TO_DEV;
2381                 cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2382                 break;
2383         case 1:
2384                 cfg.dir = DMA_DEV_TO_MEM;
2385                 cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2386                 break;
2387         }
2388
2389         if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
2390                 cfg.phy_channel = dma_spec->args[1];
2391                 cfg.use_fixed_channel = true;
2392         }
2393
2394         if (D40_DT_FLAGS_HIGH_PRIO(flags))
2395                 cfg.high_priority = true;
2396
2397         return dma_request_channel(cap, stedma40_filter, &cfg);
2398 }
2399
2400 /* DMA ENGINE functions */
2401 static int d40_alloc_chan_resources(struct dma_chan *chan)
2402 {
2403         int err;
2404         unsigned long flags;
2405         struct d40_chan *d40c =
2406                 container_of(chan, struct d40_chan, chan);
2407         bool is_free_phy;
2408         spin_lock_irqsave(&d40c->lock, flags);
2409
2410         dma_cookie_init(chan);
2411
2412         /* If no DMA configuration is set, use the default configuration (memcpy) */
2413         if (!d40c->configured) {
2414                 err = d40_config_memcpy(d40c);
2415                 if (err) {
2416                         chan_err(d40c, "Failed to configure memcpy channel\n");
2417                         goto fail;
2418                 }
2419         }
2420
2421         err = d40_allocate_channel(d40c, &is_free_phy);
2422         if (err) {
2423                 chan_err(d40c, "Failed to allocate channel\n");
2424                 d40c->configured = false;
2425                 goto fail;
2426         }
2427
2428         pm_runtime_get_sync(d40c->base->dev);
2429
2430         d40_set_prio_realtime(d40c);
2431
2432         if (chan_is_logical(d40c)) {
2433                 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2434                         d40c->lcpa = d40c->base->lcpa_base +
2435                                 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2436                 else
2437                         d40c->lcpa = d40c->base->lcpa_base +
2438                                 d40c->dma_cfg.dev_type *
2439                                 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2440
2441                 /* Unmask the Global Interrupt Mask. */
2442                 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2443                 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2444         }
2445
2446         dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2447                  chan_is_logical(d40c) ? "logical" : "physical",
2448                  d40c->phy_chan->num,
2449                  d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2450
2451
2452         /*
2453          * Only write channel configuration to the DMA if the physical
2454          * resource is free. In case of multiple logical channels
2455          * on the same physical resource, only the first write is necessary.
2456          */
2457         if (is_free_phy)
2458                 d40_config_write(d40c);
2459 fail:
2460         pm_runtime_mark_last_busy(d40c->base->dev);
2461         pm_runtime_put_autosuspend(d40c->base->dev);
2462         spin_unlock_irqrestore(&d40c->lock, flags);
2463         return err;
2464 }
2465
2466 static void d40_free_chan_resources(struct dma_chan *chan)
2467 {
2468         struct d40_chan *d40c =
2469                 container_of(chan, struct d40_chan, chan);
2470         int err;
2471         unsigned long flags;
2472
2473         if (d40c->phy_chan == NULL) {
2474                 chan_err(d40c, "Cannot free unallocated channel\n");
2475                 return;
2476         }
2477
2478         spin_lock_irqsave(&d40c->lock, flags);
2479
2480         err = d40_free_dma(d40c);
2481
2482         if (err)
2483                 chan_err(d40c, "Failed to free channel\n");
2484         spin_unlock_irqrestore(&d40c->lock, flags);
2485 }
2486
2487 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2488                                                        dma_addr_t dst,
2489                                                        dma_addr_t src,
2490                                                        size_t size,
2491                                                        unsigned long dma_flags)
2492 {
2493         struct scatterlist dst_sg;
2494         struct scatterlist src_sg;
2495
2496         sg_init_table(&dst_sg, 1);
2497         sg_init_table(&src_sg, 1);
2498
2499         sg_dma_address(&dst_sg) = dst;
2500         sg_dma_address(&src_sg) = src;
2501
2502         sg_dma_len(&dst_sg) = size;
2503         sg_dma_len(&src_sg) = size;
2504
2505         return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
2506 }
2507
2508 static struct dma_async_tx_descriptor *
2509 d40_prep_memcpy_sg(struct dma_chan *chan,
2510                    struct scatterlist *dst_sg, unsigned int dst_nents,
2511                    struct scatterlist *src_sg, unsigned int src_nents,
2512                    unsigned long dma_flags)
2513 {
2514         if (dst_nents != src_nents)
2515                 return NULL;
2516
2517         return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
2518 }
2519
2520 static struct dma_async_tx_descriptor *
2521 d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2522                   unsigned int sg_len, enum dma_transfer_direction direction,
2523                   unsigned long dma_flags, void *context)
2524 {
2525         if (!is_slave_direction(direction))
2526                 return NULL;
2527
2528         return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2529 }
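/*
 * A minimal client-side sketch of reaching this path through the generic
 * dmaengine API; variable names are illustrative and error handling is
 * omitted:
 *
 *	struct dma_slave_config conf = {
 *		.direction      = DMA_MEM_TO_DEV,
 *		.dst_addr       = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst   = 8,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &conf);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */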
2530
2531 static struct dma_async_tx_descriptor *
2532 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2533                      size_t buf_len, size_t period_len,
2534                      enum dma_transfer_direction direction, unsigned long flags,
2535                      void *context)
2536 {
2537         unsigned int periods = buf_len / period_len;
2538         struct dma_async_tx_descriptor *txd;
2539         struct scatterlist *sg;
2540         int i;
2541
2542         sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2543         if (!sg)
2544                 return NULL;
2545
2546         for (i = 0; i < periods; i++) {
2547                 sg_dma_address(&sg[i]) = dma_addr;
2548                 sg_dma_len(&sg[i]) = period_len;
2549                 dma_addr += period_len;
2550         }
2551
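        /*
         * Terminate the extra entry and chain it back onto the start of
         * the table (open-coded scatterlist chaining: bit 0 marks a
         * chain entry, bit 1 would mark the end), forming the ring that
         * d40_prep_sg() detects as the cyclic case.
         */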
2552         sg[periods].offset = 0;
2553         sg_dma_len(&sg[periods]) = 0;
2554         sg[periods].page_link =
2555                 ((unsigned long)sg | 0x01) & ~0x02;
2556
2557         txd = d40_prep_sg(chan, sg, sg, periods, direction,
2558                           DMA_PREP_INTERRUPT);
2559
2560         kfree(sg);
2561
2562         return txd;
2563 }
2564
2565 static enum dma_status d40_tx_status(struct dma_chan *chan,
2566                                      dma_cookie_t cookie,
2567                                      struct dma_tx_state *txstate)
2568 {
2569         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2570         enum dma_status ret;
2571
2572         if (d40c->phy_chan == NULL) {
2573                 chan_err(d40c, "Cannot read status of unallocated channel\n");
2574                 return -EINVAL;
2575         }
2576
2577         ret = dma_cookie_status(chan, cookie, txstate);
2578         if (ret != DMA_COMPLETE)
2579                 dma_set_residue(txstate, stedma40_residue(chan));
2580
2581         if (d40_is_paused(d40c))
2582                 ret = DMA_PAUSED;
2583
2584         return ret;
2585 }
2586
2587 static void d40_issue_pending(struct dma_chan *chan)
2588 {
2589         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2590         unsigned long flags;
2591
2592         if (d40c->phy_chan == NULL) {
2593                 chan_err(d40c, "Channel is not allocated!\n");
2594                 return;
2595         }
2596
2597         spin_lock_irqsave(&d40c->lock, flags);
2598
2599         list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2600
2601         /* Busy means that queued jobs are already being processed */
2602         if (!d40c->busy)
2603                 (void) d40_queue_start(d40c);
2604
2605         spin_unlock_irqrestore(&d40c->lock, flags);
2606 }
2607
2608 static void d40_terminate_all(struct dma_chan *chan)
2609 {
2610         unsigned long flags;
2611         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2612         int ret;
2613
2614         spin_lock_irqsave(&d40c->lock, flags);
2615
2616         pm_runtime_get_sync(d40c->base->dev);
2617         ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2618         if (ret)
2619                 chan_err(d40c, "Failed to stop channel\n");
2620
2621         d40_term_all(d40c);
2622         pm_runtime_mark_last_busy(d40c->base->dev);
2623         pm_runtime_put_autosuspend(d40c->base->dev);
2624         if (d40c->busy) {
2625                 pm_runtime_mark_last_busy(d40c->base->dev);
2626                 pm_runtime_put_autosuspend(d40c->base->dev);
2627         }
2628         d40c->busy = false;
2629
2630         spin_unlock_irqrestore(&d40c->lock, flags);
2631 }
2632
2633 static int
2634 dma40_config_to_halfchannel(struct d40_chan *d40c,
2635                             struct stedma40_half_channel_info *info,
2636                             u32 maxburst)
2637 {
2638         int psize;
2639
2640         if (chan_is_logical(d40c)) {
2641                 if (maxburst >= 16)
2642                         psize = STEDMA40_PSIZE_LOG_16;
2643                 else if (maxburst >= 8)
2644                         psize = STEDMA40_PSIZE_LOG_8;
2645                 else if (maxburst >= 4)
2646                         psize = STEDMA40_PSIZE_LOG_4;
2647                 else
2648                         psize = STEDMA40_PSIZE_LOG_1;
2649         } else {
2650                 if (maxburst >= 16)
2651                         psize = STEDMA40_PSIZE_PHY_16;
2652                 else if (maxburst >= 8)
2653                         psize = STEDMA40_PSIZE_PHY_8;
2654                 else if (maxburst >= 4)
2655                         psize = STEDMA40_PSIZE_PHY_4;
2656                 else
2657                         psize = STEDMA40_PSIZE_PHY_1;
2658         }
2659
2660         info->psize = psize;
2661         info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2662
2663         return 0;
2664 }
2665
2666 /* Runtime reconfiguration extension */
2667 static int d40_set_runtime_config(struct dma_chan *chan,
2668                                   struct dma_slave_config *config)
2669 {
2670         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2671         struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2672         enum dma_slave_buswidth src_addr_width, dst_addr_width;
2673         dma_addr_t config_addr;
2674         u32 src_maxburst, dst_maxburst;
2675         int ret;
2676
2677         src_addr_width = config->src_addr_width;
2678         src_maxburst = config->src_maxburst;
2679         dst_addr_width = config->dst_addr_width;
2680         dst_maxburst = config->dst_maxburst;
2681
2682         if (config->direction == DMA_DEV_TO_MEM) {
2683                 config_addr = config->src_addr;
2684
2685                 if (cfg->dir != DMA_DEV_TO_MEM)
2686                         dev_dbg(d40c->base->dev,
2687                                 "channel was not configured for peripheral "
2688                                 "to memory transfer (%d), overriding\n",
2689                                 cfg->dir);
2690                 cfg->dir = DMA_DEV_TO_MEM;
2691
2692                 /* Configure the memory side */
2693                 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2694                         dst_addr_width = src_addr_width;
2695                 if (dst_maxburst == 0)
2696                         dst_maxburst = src_maxburst;
2697
2698         } else if (config->direction == DMA_MEM_TO_DEV) {
2699                 config_addr = config->dst_addr;
2700
2701                 if (cfg->dir != DMA_MEM_TO_DEV)
2702                         dev_dbg(d40c->base->dev,
2703                                 "channel was not configured for memory "
2704                                 "to peripheral transfer (%d), overriding\n",
2705                                 cfg->dir);
2706                 cfg->dir = DMA_MEM_TO_DEV;
2707
2708                 /* Configure the memory side */
2709                 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2710                         src_addr_width = dst_addr_width;
2711                 if (src_maxburst == 0)
2712                         src_maxburst = dst_maxburst;
2713         } else {
2714                 dev_err(d40c->base->dev,
2715                         "unrecognized channel direction %d\n",
2716                         config->direction);
2717                 return -EINVAL;
2718         }
2719
2720         if (config_addr <= 0) {
2721                 dev_err(d40c->base->dev, "no address supplied\n");
2722                 return -EINVAL;
2723         }
2724
2725         if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2726                 dev_err(d40c->base->dev,
2727                         "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2728                         src_maxburst,
2729                         src_addr_width,
2730                         dst_maxburst,
2731                         dst_addr_width);
2732                 return -EINVAL;
2733         }
2734
2735         if (src_maxburst > 16) {
2736                 src_maxburst = 16;
2737                 dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
2738         } else if (dst_maxburst > 16) {
2739                 dst_maxburst = 16;
2740                 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2741         }
2742
2743         /* The only valid widths are 1, 2, 4 and 8. */
2744         if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2745             src_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
2746             dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2747             dst_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
2748             !is_power_of_2(src_addr_width) ||
2749             !is_power_of_2(dst_addr_width))
2750                 return -EINVAL;
2751
2752         cfg->src_info.data_width = src_addr_width;
2753         cfg->dst_info.data_width = dst_addr_width;
2754
2755         ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2756                                           src_maxburst);
2757         if (ret)
2758                 return ret;
2759
2760         ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2761                                           dst_maxburst);
2762         if (ret)
2763                 return ret;
2764
2765         /* Fill in register values */
2766         if (chan_is_logical(d40c))
2767                 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2768         else
2769                 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2770
2771         /* These settings will take precedence later */
2772         d40c->runtime_addr = config_addr;
2773         d40c->runtime_direction = config->direction;
2774         dev_dbg(d40c->base->dev,
2775                 "configured channel %s for %s, data width %d/%d, "
2776                 "maxburst %d/%d elements, LE, no flow control\n",
2777                 dma_chan_name(chan),
2778                 (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
2779                 src_addr_width, dst_addr_width,
2780                 src_maxburst, dst_maxburst);
2781
2782         return 0;
2783 }
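/*
 * A worked example of the burst clamping above: src_maxburst = 32 with
 * 4-byte width and dst_maxburst = 64 with 2-byte width passes the
 * equality check (128 bytes each), then src_maxburst is clamped to 16
 * and dst_maxburst recomputed as 16 * 4 / 2 = 32, preserving
 * src (burst x width) == dst (burst x width) at 64 bytes.
 */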
2784
2785 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2786                        unsigned long arg)
2787 {
2788         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2789
2790         if (d40c->phy_chan == NULL) {
2791                 chan_err(d40c, "Channel is not allocated!\n");
2792                 return -EINVAL;
2793         }
2794
2795         switch (cmd) {
2796         case DMA_TERMINATE_ALL:
2797                 d40_terminate_all(chan);
2798                 return 0;
2799         case DMA_PAUSE:
2800                 return d40_pause(d40c);
2801         case DMA_RESUME:
2802                 return d40_resume(d40c);
2803         case DMA_SLAVE_CONFIG:
2804                 return d40_set_runtime_config(chan,
2805                         (struct dma_slave_config *) arg);
2806         default:
2807                 break;
2808         }
2809
2810         /* Other commands are unimplemented */
2811         return -ENXIO;
2812 }
2813
2814 /* Initialization functions */
2815
2816 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2817                                  struct d40_chan *chans, int offset,
2818                                  int num_chans)
2819 {
2820         int i = 0;
2821         struct d40_chan *d40c;
2822
2823         INIT_LIST_HEAD(&dma->channels);
2824
2825         for (i = offset; i < offset + num_chans; i++) {
2826                 d40c = &chans[i];
2827                 d40c->base = base;
2828                 d40c->chan.device = dma;
2829
2830                 spin_lock_init(&d40c->lock);
2831
2832                 d40c->log_num = D40_PHY_CHAN;
2833
2834                 INIT_LIST_HEAD(&d40c->done);
2835                 INIT_LIST_HEAD(&d40c->active);
2836                 INIT_LIST_HEAD(&d40c->queue);
2837                 INIT_LIST_HEAD(&d40c->pending_queue);
2838                 INIT_LIST_HEAD(&d40c->client);
2839                 INIT_LIST_HEAD(&d40c->prepare_queue);
2840
2841                 tasklet_init(&d40c->tasklet, dma_tasklet,
2842                              (unsigned long) d40c);
2843
2844                 list_add_tail(&d40c->chan.device_node,
2845                               &dma->channels);
2846         }
2847 }
2848
2849 static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2850 {
2851         if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
2852                 dev->device_prep_slave_sg = d40_prep_slave_sg;
2853
2854         if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2855                 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2856
2857                 /*
2858                  * This controller can only access addresses at even
2859                  * 32-bit boundaries, i.e. a 2^2 byte alignment.
2860                  */
2861                 dev->copy_align = 2;
2862         }
2863
2864         if (dma_has_cap(DMA_SG, dev->cap_mask))
2865                 dev->device_prep_dma_sg = d40_prep_memcpy_sg;
2866
2867         if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2868                 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2869
2870         dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2871         dev->device_free_chan_resources = d40_free_chan_resources;
2872         dev->device_issue_pending = d40_issue_pending;
2873         dev->device_tx_status = d40_tx_status;
2874         dev->device_control = d40_control;
2875         dev->dev = base->dev;
2876 }
2877
2878 static int __init d40_dmaengine_init(struct d40_base *base,
2879                                      int num_reserved_chans)
2880 {
2881         int err;
2882
2883         d40_chan_init(base, &base->dma_slave, base->log_chans,
2884                       0, base->num_log_chans);
2885
2886         dma_cap_zero(base->dma_slave.cap_mask);
2887         dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2888         dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2889
2890         d40_ops_init(base, &base->dma_slave);
2891
2892         err = dma_async_device_register(&base->dma_slave);
2893
2894         if (err) {
2895                 d40_err(base->dev, "Failed to register slave channels\n");
2896                 goto failure1;
2897         }
2898
2899         d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2900                       base->num_log_chans, base->num_memcpy_chans);
2901
2902         dma_cap_zero(base->dma_memcpy.cap_mask);
2903         dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2904         dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2905
2906         d40_ops_init(base, &base->dma_memcpy);
2907
2908         err = dma_async_device_register(&base->dma_memcpy);
2909
2910         if (err) {
2911                 d40_err(base->dev,
2912                         "Failed to register memcpy-only channels\n");
2913                 goto failure2;
2914         }
2915
2916         d40_chan_init(base, &base->dma_both, base->phy_chans,
2917                       0, num_reserved_chans);
2918
2919         dma_cap_zero(base->dma_both.cap_mask);
2920         dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2921         dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2922         dma_cap_set(DMA_SG, base->dma_both.cap_mask);
2923         dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2924
2925         d40_ops_init(base, &base->dma_both);
2926         err = dma_async_device_register(&base->dma_both);
2927
2928         if (err) {
2929                 d40_err(base->dev,
2930                         "Failed to register logical and physical capable channels\n");
2931                 goto failure3;
2932         }
2933         return 0;
2934 failure3:
2935         dma_async_device_unregister(&base->dma_memcpy);
2936 failure2:
2937         dma_async_device_unregister(&base->dma_slave);
2938 failure1:
2939         return err;
2940 }
2941
2942 /* Suspend/resume functionality */
2943 #ifdef CONFIG_PM_SLEEP
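/*
 * System sleep: force the controller into runtime suspend and, when
 * the LCPA area lives in ESRAM, disable the regulator powering it.
 */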
2944 static int dma40_suspend(struct device *dev)
2945 {
2946         struct platform_device *pdev = to_platform_device(dev);
2947         struct d40_base *base = platform_get_drvdata(pdev);
2948         int ret;
2949
2950         ret = pm_runtime_force_suspend(dev);
2951         if (ret)
2952                 return ret;
2953
2954         if (base->lcpa_regulator)
2955                 ret = regulator_disable(base->lcpa_regulator);
2956         return ret;
2957 }
2958
2959 static int dma40_resume(struct device *dev)
2960 {
2961         struct platform_device *pdev = to_platform_device(dev);
2962         struct d40_base *base = platform_get_drvdata(pdev);
2963         int ret = 0;
2964
2965         if (base->lcpa_regulator) {
2966                 ret = regulator_enable(base->lcpa_regulator);
2967                 if (ret)
2968                         return ret;
2969         }
2970
2971         return pm_runtime_force_resume(dev);
2972 }
2973 #endif
2974
2975 #ifdef CONFIG_PM
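/*
 * Copy @num registers, listed by offset in @regaddr, between the
 * controller at @baseaddr and the @backup array: registers are read
 * into the array when @save is true and written back otherwise.
 */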
2976 static void dma40_backup(void __iomem *baseaddr, u32 *backup,
2977                          u32 *regaddr, int num, bool save)
2978 {
2979         int i;
2980
2981         for (i = 0; i < num; i++) {
2982                 void __iomem *addr = baseaddr + regaddr[i];
2983
2984                 if (save)
2985                         backup[i] = readl_relaxed(addr);
2986                 else
2987                         writel_relaxed(backup[i], addr);
2988         }
2989 }
2990
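/*
 * Save or restore (per @save) the registers of all non-reserved
 * physical channels, the global registers and, on v3 and later
 * hardware, the extra registers listed in gen_dmac.backup.
 */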
2991 static void d40_save_restore_registers(struct d40_base *base, bool save)
2992 {
2993         int i;
2994
2995         /* Save/Restore channel specific registers */
2996         for (i = 0; i < base->num_phy_chans; i++) {
2997                 void __iomem *addr;
2998                 int idx;
2999
3000                 if (base->phy_res[i].reserved)
3001                         continue;
3002
3003                 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
3004                 idx = i * ARRAY_SIZE(d40_backup_regs_chan);
3005
3006                 dma40_backup(addr, &base->reg_val_backup_chan[idx],
3007                              d40_backup_regs_chan,
3008                              ARRAY_SIZE(d40_backup_regs_chan),
3009                              save);
3010         }
3011
3012         /* Save/Restore global registers */
3013         dma40_backup(base->virtbase, base->reg_val_backup,
3014                      d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
3015                      save);
3016
3017         /* Save/Restore registers that only exist on DMA40 v3 and later */
3018         if (base->gen_dmac.backup)
3019                 dma40_backup(base->virtbase, base->reg_val_backup_v4,
3020                              base->gen_dmac.backup,
3021                              base->gen_dmac.backup_size,
3022                              save);
3023 }
3024
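/*
 * Runtime PM: save the register state and gate the clocks of the
 * unused event groups; v1 hardware is left alone because of its
 * clock-gating bugs.
 */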
3025 static int dma40_runtime_suspend(struct device *dev)
3026 {
3027         struct platform_device *pdev = to_platform_device(dev);
3028         struct d40_base *base = platform_get_drvdata(pdev);
3029
3030         d40_save_restore_registers(base, true);
3031
3032         /* Don't disable/enable clocks for v1 due to HW bugs */
3033         if (base->rev != 1)
3034                 writel_relaxed(base->gcc_pwr_off_mask,
3035                                base->virtbase + D40_DREG_GCC);
3036
3037         return 0;
3038 }
3039
3040 static int dma40_runtime_resume(struct device *dev)
3041 {
3042         struct platform_device *pdev = to_platform_device(dev);
3043         struct d40_base *base = platform_get_drvdata(pdev);
3044
3045         d40_save_restore_registers(base, false);
3046
3047         writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
3048                        base->virtbase + D40_DREG_GCC);
3049         return 0;
3050 }
3051 #endif
3052
3053 static const struct dev_pm_ops dma40_pm_ops = {
3054         SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
3055         SET_PM_RUNTIME_PM_OPS(dma40_runtime_suspend,
3056                                 dma40_runtime_resume,
3057                                 NULL)
3058 };
3059
3060 /* Initialization functions. */
3061
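/*
 * Scan the security configuration and the platform data to mark each
 * physical channel as free, reserved (secure-only) or disabled, and
 * return the number of channels available for allocation.
 */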
3062 static int __init d40_phy_res_init(struct d40_base *base)
3063 {
3064         int i;
3065         int num_phy_chans_avail = 0;
3066         u32 val[2];
3067         int odd_even_bit = -2;
3068         int gcc = D40_DREG_GCC_ENA;
3069
3070         val[0] = readl(base->virtbase + D40_DREG_PRSME);
3071         val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3072
3073         for (i = 0; i < base->num_phy_chans; i++) {
3074                 base->phy_res[i].num = i;
3075                 odd_even_bit += 2 * ((i % 2) == 0);
3076                 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
3077                         /* Mark security only channels as occupied */
3078                         base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3079                         base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
3080                         base->phy_res[i].reserved = true;
3081                         gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3082                                                        D40_DREG_GCC_SRC);
3083                         gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3084                                                        D40_DREG_GCC_DST);
3085
3087                 } else {
3088                         base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3089                         base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
3090                         base->phy_res[i].reserved = false;
3091                         num_phy_chans_avail++;
3092                 }
3093                 spin_lock_init(&base->phy_res[i].lock);
3094         }
3095
3096         /* Mark disabled channels as occupied */
3097         for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
3098                 int chan = base->plat_data->disabled_channels[i];
3099
3100                 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3101                 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
3102                 base->phy_res[chan].reserved = true;
3103                 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3104                                                D40_DREG_GCC_SRC);
3105                 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3106                                                D40_DREG_GCC_DST);
3107                 num_phy_chans_avail--;
3108         }
3109
3110         /* Mark soft_lli channels */
3111         for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3112                 int chan = base->plat_data->soft_lli_chans[i];
3113
3114                 base->phy_res[chan].use_soft_lli = true;
3115         }
3116
3117         dev_info(base->dev, "%d of %d physical DMA channels available\n",
3118                  num_phy_chans_avail, base->num_phy_chans);
3119
3120         /* Verify channel type settings, extended vs standard */
3121         val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3122
3123         for (i = 0; i < base->num_phy_chans; i++) {
3124
3125                 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3126                     (val[0] & 0x3) != 1)
3127                         dev_info(base->dev,
3128                                  "[%s] INFO: channel %d is misconfigured (%d)\n",
3129                                  __func__, i, val[0] & 0x3);
3130
3131                 val[0] = val[0] >> 2;
3132         }
3133
3134         /*
3135          * To keep things simple, enable all clocks initially.
3136          * The clocks will be managed later, after channel allocation.
3137          * The clocks for the event lines on which reserved channels exist
3138          * are not managed here.
3139          */
3140         writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3141         base->gcc_pwr_off_mask = gcc;
3142
3143         return num_phy_chans_avail;
3144 }
3145
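/*
 * Map the controller, validate its AMBA PrimeCell ID and hardware
 * revision, work out the channel counts and allocate the d40_base
 * state that the rest of the probe sequence builds on.
 */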
3146 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3147 {
3148         struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3149         struct clk *clk = NULL;
3150         void __iomem *virtbase = NULL;
3151         struct resource *res = NULL;
3152         struct d40_base *base = NULL;
3153         int num_log_chans = 0;
3154         int num_phy_chans;
3155         int num_memcpy_chans;
3156         int clk_ret = -EINVAL;
3157         int i;
3158         u32 pid;
3159         u32 cid;
3160         u8 rev;
3161
3162         clk = clk_get(&pdev->dev, NULL);
3163         if (IS_ERR(clk)) {
3164                 d40_err(&pdev->dev, "No matching clock found\n");
3165                 goto failure;
3166         }
3167
3168         clk_ret = clk_prepare_enable(clk);
3169         if (clk_ret) {
3170                 d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3171                 goto failure;
3172         }
3173
3174         /* Get IO for DMAC base address */
3175         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3176         if (!res)
3177                 goto failure;
3178
3179         if (request_mem_region(res->start, resource_size(res),
3180                                D40_NAME " I/O base") == NULL)
3181                 goto failure;
3182
3183         virtbase = ioremap(res->start, resource_size(res));
3184         if (!virtbase)
3185                 goto failure;
3186
3187         /* This is just a regular AMBA PrimeCell ID actually */
3188         for (pid = 0, i = 0; i < 4; i++)
3189                 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
3190                         & 255) << (i * 8);
3191         for (cid = 0, i = 0; i < 4; i++)
3192                 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
3193                         & 255) << (i * 8);
3194
3195         if (cid != AMBA_CID) {
3196                 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
3197                 goto failure;
3198         }
3199         if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
3200                 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
3201                         AMBA_MANF_BITS(pid),
3202                         AMBA_VENDOR_ST);
3203                 goto failure;
3204         }
3205         /*
3206          * HW revision:
3207          * DB8500ed has revision 0
3208          * ? has revision 1
3209          * DB8500v1 has revision 2
3210          * DB8500v2 has revision 3
3211          * AP9540v1 has revision 4
3212          * DB8540v1 has revision 4
3213          */
3214         rev = AMBA_REV_BITS(pid);
3215         if (rev < 2) {
3216                 d40_err(&pdev->dev, "hardware revision: %d is not supported\n", rev);
3217                 goto failure;
3218         }
3219
3220         /* The number of physical channels on this HW */
3221         if (plat_data->num_of_phy_chans)
3222                 num_phy_chans = plat_data->num_of_phy_chans;
3223         else
3224                 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3225
3226         /* The number of channels used for memcpy */
3227         if (plat_data->num_of_memcpy_chans)
3228                 num_memcpy_chans = plat_data->num_of_memcpy_chans;
3229         else
3230                 num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
3231
3232         num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
3233
3234         dev_info(&pdev->dev,
3235                  "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
3236                  rev, &res->start, num_phy_chans, num_log_chans);
3237
3238         base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3239                        (num_phy_chans + num_log_chans + num_memcpy_chans) *
3240                        sizeof(struct d40_chan), GFP_KERNEL);
3241
3242         if (base == NULL) {
3243                 d40_err(&pdev->dev, "Out of memory\n");
3244                 goto failure;
3245         }
3246
3247         base->rev = rev;
3248         base->clk = clk;
3249         base->num_memcpy_chans = num_memcpy_chans;
3250         base->num_phy_chans = num_phy_chans;
3251         base->num_log_chans = num_log_chans;
3252         base->phy_start = res->start;
3253         base->phy_size = resource_size(res);
3254         base->virtbase = virtbase;
3255         base->plat_data = plat_data;
3256         base->dev = &pdev->dev;
3257         base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3258         base->log_chans = &base->phy_chans[num_phy_chans];
3259
3260         if (base->plat_data->num_of_phy_chans == 14) {
3261                 base->gen_dmac.backup = d40_backup_regs_v4b;
3262                 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3263                 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3264                 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3265                 base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3266                 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3267                 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3268                 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3269                 base->gen_dmac.il = il_v4b;
3270                 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3271                 base->gen_dmac.init_reg = dma_init_reg_v4b;
3272                 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3273         } else {
3274                 if (base->rev >= 3) {
3275                         base->gen_dmac.backup = d40_backup_regs_v4a;
3276                         base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3277                 }
3278                 base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3279                 base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3280                 base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3281                 base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3282                 base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3283                 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3284                 base->gen_dmac.il = il_v4a;
3285                 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3286                 base->gen_dmac.init_reg = dma_init_reg_v4a;
3287                 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3288         }
3289
3290         base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
3291                                 GFP_KERNEL);
3292         if (!base->phy_res)
3293                 goto failure;
3294
3295         base->lookup_phy_chans = kzalloc(num_phy_chans *
3296                                          sizeof(struct d40_chan *),
3297                                          GFP_KERNEL);
3298         if (!base->lookup_phy_chans)
3299                 goto failure;
3300
3301         base->lookup_log_chans = kzalloc(num_log_chans *
3302                                          sizeof(struct d40_chan *),
3303                                          GFP_KERNEL);
3304         if (!base->lookup_log_chans)
3305                 goto failure;
3306
3307         base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
3308                                             sizeof(d40_backup_regs_chan),
3309                                             GFP_KERNEL);
3310         if (!base->reg_val_backup_chan)
3311                 goto failure;
3312
3313         base->lcla_pool.alloc_map =
3314                 kzalloc(num_phy_chans * sizeof(struct d40_desc *)
3315                         * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
3316         if (!base->lcla_pool.alloc_map)
3317                 goto failure;
3318
3319         base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3320                                             0, SLAB_HWCACHE_ALIGN,
3321                                             NULL);
3322         if (base->desc_slab == NULL)
3323                 goto failure;
3324
3325         return base;
3326
3327 failure:
3328         if (!clk_ret)
3329                 clk_disable_unprepare(clk);
3330         if (!IS_ERR(clk))
3331                 clk_put(clk);
3332         if (virtbase)
3333                 iounmap(virtbase);
3334         if (res)
3335                 release_mem_region(res->start,
3336                                    resource_size(res));
3339
3340         if (base) {
3341                 kfree(base->lcla_pool.alloc_map);
3342                 kfree(base->reg_val_backup_chan);
3343                 kfree(base->lookup_log_chans);
3344                 kfree(base->lookup_phy_chans);
3345                 kfree(base->phy_res);
3346                 kfree(base);
3347         }
3348
3349         return NULL;
3350 }
3351
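/*
 * Write the generation-specific init registers, then put every
 * available physical channel into physical mode with its interrupt
 * unmasked; reserved (secure) channels are marked active instead.
 */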
3352 static void __init d40_hw_init(struct d40_base *base)
3353 {
3355         int i;
3356         u32 prmseo[2] = {0, 0};
3357         u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3358         u32 pcmis = 0;
3359         u32 pcicr = 0;
3360         struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3361         u32 reg_size = base->gen_dmac.init_reg_size;
3362
3363         for (i = 0; i < reg_size; i++)
3364                 writel(dma_init_reg[i].val,
3365                        base->virtbase + dma_init_reg[i].reg);
3366
3367         /* Configure all our dma channels to default settings */
3368         for (i = 0; i < base->num_phy_chans; i++) {
3369
3370                 activeo[i % 2] = activeo[i % 2] << 2;
3371
3372                 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3373                     == D40_ALLOC_PHY) {
3374                         activeo[i % 2] |= 3;
3375                         continue;
3376                 }
3377
3378                 /* Enable interrupt # */
3379                 pcmis = (pcmis << 1) | 1;
3380
3381                 /* Clear interrupt # */
3382                 pcicr = (pcicr << 1) | 1;
3383
3384                 /* Set channel to physical mode */
3385                 prmseo[i % 2] = prmseo[i % 2] << 2;
3386                 prmseo[i % 2] |= 1;
3387
3388         }
3389
3390         writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3391         writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3392         writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3393         writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3394
3395         /* Write which interrupt to enable */
3396         writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
3397
3398         /* Write which interrupt to clear */
3399         writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3400
3401         /* These are __initdata and cannot be accessed after init */
3402         base->gen_dmac.init_reg = NULL;
3403         base->gen_dmac.init_reg_size = 0;
3404 }
3405
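/*
 * Allocate the LCLA area, first by hunting for pages that happen to
 * satisfy the 256 KiB (18-bit) alignment requirement and, failing
 * that, by over-allocating and aligning within the buffer; the
 * result is then DMA-mapped for the controller.
 */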
3406 static int __init d40_lcla_allocate(struct d40_base *base)
3407 {
3408         struct d40_lcla_pool *pool = &base->lcla_pool;
3409         unsigned long *page_list;
3410         int i, j;
3411         int ret = 0;
3412
3413         /*
3414          * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
3415          * To fulfil this hardware requirement without wasting 256 KiB,
3416          * we allocate pages until we get an aligned one.
3417          */
3418         page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
3419                             GFP_KERNEL);
3420
3421         if (!page_list) {
3422                 ret = -ENOMEM;
3423                 goto failure;
3424         }
3425
3426         /* Calculate how many pages are required */
3427         base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3428
3429         for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3430                 page_list[i] = __get_free_pages(GFP_KERNEL,
3431                                                 base->lcla_pool.pages);
3432                 if (!page_list[i]) {
3433
3434                         d40_err(base->dev, "Failed to allocate %d pages.\n",
3435                                 base->lcla_pool.pages);
3436
3437                         for (j = 0; j < i; j++)
3438                                 free_pages(page_list[j], base->lcla_pool.pages);
3439                         goto failure;
3440                 }
3441
3442                 if ((virt_to_phys((void *)page_list[i]) &
3443                      (LCLA_ALIGNMENT - 1)) == 0)
3444                         break;
3445         }
3446
3447         for (j = 0; j < i; j++)
3448                 free_pages(page_list[j], base->lcla_pool.pages);
3449
3450         if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3451                 base->lcla_pool.base = (void *)page_list[i];
3452         } else {
3453                 /*
3454                  * After many attempts with no success finding the correct
3455                  * alignment, fall back to allocating an oversized buffer.
3456                  */
3457                 dev_warn(base->dev,
3458                          "[%s] Failed to get %d pages @ 18 bit align.\n",
3459                          __func__, base->lcla_pool.pages);
3460                 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3461                                                          base->num_phy_chans +
3462                                                          LCLA_ALIGNMENT,
3463                                                          GFP_KERNEL);
3464                 if (!base->lcla_pool.base_unaligned) {
3465                         ret = -ENOMEM;
3466                         goto failure;
3467                 }
3468
3469                 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3470                                                  LCLA_ALIGNMENT);
3471         }
3472
3473         pool->dma_addr = dma_map_single(base->dev, pool->base,
3474                                         SZ_1K * base->num_phy_chans,
3475                                         DMA_TO_DEVICE);
3476         if (dma_mapping_error(base->dev, pool->dma_addr)) {
3477                 pool->dma_addr = 0;
3478                 ret = -ENOMEM;
3479                 goto failure;
3480         }
3481
3482         writel(virt_to_phys(base->lcla_pool.base),
3483                base->virtbase + D40_DREG_LCLA);
3484 failure:
3485         kfree(page_list);
3486         return ret;
3487 }
3488
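/*
 * Build a stedma40_platform_data from the device tree. The node is
 * expected to look roughly like this (a sketch; the channel numbers
 * are illustrative only):
 *
 *	dma-controller {
 *		compatible = "stericsson,dma40";
 *		dma-channels = <8>;
 *		memcpy-channels = <56 57 58 59 60>;
 *		disabled-channels = <12>;
 *	};
 *
 * "dma-channels" is optional (the count is read from hardware when
 * absent), "memcpy-channels" is required and "disabled-channels" is
 * optional.
 */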
3489 static int __init d40_of_probe(struct platform_device *pdev,
3490                                struct device_node *np)
3491 {
3492         struct stedma40_platform_data *pdata;
3493         int num_phy = 0, num_memcpy = 0, num_disabled = 0;
3494         const __be32 *list;
3495
3496         pdata = devm_kzalloc(&pdev->dev,
3497                              sizeof(struct stedma40_platform_data),
3498                              GFP_KERNEL);
3499         if (!pdata)
3500                 return -ENOMEM;
3501
3502         /* If absent, this value will be obtained from h/w. */
3503         of_property_read_u32(np, "dma-channels", &num_phy);
3504         if (num_phy > 0)
3505                 pdata->num_of_phy_chans = num_phy;
3506
3507         list = of_get_property(np, "memcpy-channels", &num_memcpy);
3508         num_memcpy /= sizeof(*list);
3509
3510         if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
3511                 d40_err(&pdev->dev,
3512                         "Invalid number of memcpy channels specified (%d)\n",
3513                         num_memcpy);
3514                 return -EINVAL;
3515         }
3516         pdata->num_of_memcpy_chans = num_memcpy;
3517
3518         of_property_read_u32_array(np, "memcpy-channels",
3519                                    dma40_memcpy_channels,
3520                                    num_memcpy);
3521
3522         list = of_get_property(np, "disabled-channels", &num_disabled);
3523         num_disabled /= sizeof(*list);
3524
3525         if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
3526                 d40_err(&pdev->dev,
3527                         "Invalid number of disabled channels specified (%d)\n",
3528                         num_disabled);
3529                 return -EINVAL;
3530         }
3531
3532         of_property_read_u32_array(np, "disabled-channels",
3533                                    pdata->disabled_channels,
3534                                    num_disabled);
3535         pdata->disabled_channels[num_disabled] = -1;
3536
3537         pdev->dev.platform_data = pdata;
3538
3539         return 0;
3540 }
3541
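/*
 * Main probe: detect the hardware, set up the LCPA and LCLA areas
 * and runtime PM, register the dmaengine devices and, on device tree
 * systems, register as an OF DMA controller.
 */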
3542 static int __init d40_probe(struct platform_device *pdev)
3543 {
3544         struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3545         struct device_node *np = pdev->dev.of_node;
3546         int ret = -ENOENT;
3547         struct d40_base *base = NULL;
3548         struct resource *res = NULL;
3549         int num_reserved_chans;
3550         u32 val;
3551
3552         if (!plat_data) {
3553                 if (np) {
3554                         ret = d40_of_probe(pdev, np);
3555                         if (ret)
3556                                 goto failure;
3558                 } else {
3559                         d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
3560                         goto failure;
3561                 }
3562         }
3563
3564         base = d40_hw_detect_init(pdev);
3565         if (!base)
3566                 goto failure;
3567
3568         num_reserved_chans = d40_phy_res_init(base);
3569
3570         platform_set_drvdata(pdev, base);
3571
3572         spin_lock_init(&base->interrupt_lock);
3573         spin_lock_init(&base->execmd_lock);
3574
3575         /* Get IO for logical channel parameter address */
3576         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3577         if (!res) {
3578                 ret = -ENOENT;
3579                 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
3580                 goto failure;
3581         }
3582         base->lcpa_size = resource_size(res);
3583         base->phy_lcpa = res->start;
3584
3585         if (request_mem_region(res->start, resource_size(res),
3586                                D40_NAME " I/O lcpa") == NULL) {
3587                 ret = -EBUSY;
3588                 d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
3589                 goto failure;
3590         }
3591
3592         /* We make use of ESRAM for this. */
3593         val = readl(base->virtbase + D40_DREG_LCPA);
3594         if (res->start != val && val != 0) {
3595                 dev_warn(&pdev->dev,
3596                          "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
3597                          __func__, val, &res->start);
3598         } else
3599                 writel(res->start, base->virtbase + D40_DREG_LCPA);
3600
3601         base->lcpa_base = ioremap(res->start, resource_size(res));
3602         if (!base->lcpa_base) {
3603                 ret = -ENOMEM;
3604                 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
3605                 goto failure;
3606         }
3607         /* If lcla has to be located in ESRAM we don't need to allocate */
3608         if (base->plat_data->use_esram_lcla) {
3609                 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3610                                                         "lcla_esram");
3611                 if (!res) {
3612                         ret = -ENOENT;
3613                         d40_err(&pdev->dev,
3614                                 "No \"lcla_esram\" memory resource\n");
3615                         goto failure;
3616                 }
3617                 base->lcla_pool.base = ioremap(res->start,
3618                                                 resource_size(res));
3619                 if (!base->lcla_pool.base) {
3620                         ret = -ENOMEM;
3621                         d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3622                         goto failure;
3623                 }
3624                 writel(res->start, base->virtbase + D40_DREG_LCLA);
3625
3626         } else {
3627                 ret = d40_lcla_allocate(base);
3628                 if (ret) {
3629                         d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3630                         goto failure;
3631                 }
3632         }
3633
3634         spin_lock_init(&base->lcla_pool.lock);
3635
3636         base->irq = platform_get_irq(pdev, 0);
3637
3638         ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3639         if (ret) {
3640                 d40_err(&pdev->dev, "Failed to request IRQ\n");
3641                 goto failure;
3642         }
3643
3644         if (base->plat_data->use_esram_lcla) {
3645
3646                 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3647                 if (IS_ERR(base->lcpa_regulator)) {
3648                         d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3649                         ret = PTR_ERR(base->lcpa_regulator);
3650                         base->lcpa_regulator = NULL;
3651                         goto failure;
3652                 }
3653
3654                 ret = regulator_enable(base->lcpa_regulator);
3655                 if (ret) {
3656                         d40_err(&pdev->dev,
3657                                 "Failed to enable lcpa_regulator\n");
3658                         regulator_put(base->lcpa_regulator);
3659                         base->lcpa_regulator = NULL;
3660                         goto failure;
3661                 }
3662         }
3663
3664         writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3665
3666         pm_runtime_irq_safe(base->dev);
3667         pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3668         pm_runtime_use_autosuspend(base->dev);
3669         pm_runtime_mark_last_busy(base->dev);
3670         pm_runtime_set_active(base->dev);
3671         pm_runtime_enable(base->dev);
3672
3673         ret = d40_dmaengine_init(base, num_reserved_chans);
3674         if (ret)
3675                 goto failure;
3676
3677         base->dev->dma_parms = &base->dma_parms;
3678         ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3679         if (ret) {
3680                 d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3681                 goto failure;
3682         }
3683
3684         d40_hw_init(base);
3685
3686         if (np) {
3687                 ret = of_dma_controller_register(np, d40_xlate, NULL);
3688                 if (ret)
3689                         dev_err(&pdev->dev,
3690                                 "could not register of_dma_controller\n");
3691         }
3692
3693         dev_info(base->dev, "initialized\n");
3694         return 0;
3695
3696 failure:
3697         if (base) {
3698                 if (base->desc_slab)
3699                         kmem_cache_destroy(base->desc_slab);
3700                 if (base->virtbase)
3701                         iounmap(base->virtbase);
3702
3703                 if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3704                         iounmap(base->lcla_pool.base);
3705                         base->lcla_pool.base = NULL;
3706                 }
3707
3708                 if (base->lcla_pool.dma_addr)
3709                         dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3710                                          SZ_1K * base->num_phy_chans,
3711                                          DMA_TO_DEVICE);
3712
3713                 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3714                         free_pages((unsigned long)base->lcla_pool.base,
3715                                    base->lcla_pool.pages);
3716
3717                 kfree(base->lcla_pool.base_unaligned);
3718
3719                 if (base->phy_lcpa)
3720                         release_mem_region(base->phy_lcpa,
3721                                            base->lcpa_size);
3722                 if (base->phy_start)
3723                         release_mem_region(base->phy_start,
3724                                            base->phy_size);
3725                 if (base->clk) {
3726                         clk_disable_unprepare(base->clk);
3727                         clk_put(base->clk);
3728                 }
3729
3730                 if (base->lcpa_regulator) {
3731                         regulator_disable(base->lcpa_regulator);
3732                         regulator_put(base->lcpa_regulator);
3733                 }
3734
3735                 kfree(base->lcla_pool.alloc_map);
3736                 kfree(base->lookup_log_chans);
3737                 kfree(base->lookup_phy_chans);
3738                 kfree(base->phy_res);
3739                 kfree(base);
3740         }
3741
3742         d40_err(&pdev->dev, "probe failed\n");
3743         return ret;
3744 }
3745
3746 static const struct of_device_id d40_match[] = {
3747         { .compatible = "stericsson,dma40", },
3748         {}
3749 };
3750
3751 static struct platform_driver d40_driver = {
3752         .driver = {
3753                 .owner = THIS_MODULE,
3754                 .name  = D40_NAME,
3755                 .pm = &dma40_pm_ops,
3756                 .of_match_table = d40_match,
3757         },
3758 };
3759
3760 static int __init stedma40_init(void)
3761 {
3762         return platform_driver_probe(&d40_driver, d40_probe);
3763 }
3764 subsys_initcall(stedma40_init);