1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * DMA driver for Xilinx Video DMA Engine
5 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
7 * Based on the Freescale DMA driver.
10 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
11 * core that provides high-bandwidth direct memory access between memory
12 * and AXI4-Stream type video target peripherals. The core provides efficient
13 * two-dimensional DMA operations with independent asynchronous read (S2MM)
14 * and write (MM2S) channel operation. It can be configured to have either
15 * one channel or two channels. If configured as two channels, one is to
16 * transmit to the video device (MM2S) and another is to receive from the
17 * video device (S2MM). Initialization, status, interrupt and management
18 * registers are accessed through an AXI4-Lite slave interface.
20 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
21 * provides high-bandwidth one-dimensional direct memory access between memory
22 * and AXI4-Stream target peripherals. It supports one receive and one
23 * transmit channel, both of them optional at synthesis time.
25 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
26 * Access (DMA) between a memory-mapped source address and a memory-mapped
27 * destination address.
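/*
 * Illustrative sketch (not part of the original driver): from a client
 * driver's point of view, any of these channels is consumed through the
 * generic dmaengine API. Assuming a device-tree "dma-names" entry of "tx"
 * for an MM2S channel, a DMA-mapped buffer @buf of @len bytes, and a
 * hypothetical completion handler my_done_callback, the flow looks roughly
 * like this:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_async_tx_descriptor *tx;
 *
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_done_callback;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */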
30 #include <linux/bitops.h>
31 #include <linux/dmapool.h>
32 #include <linux/dma/xilinx_dma.h>
33 #include <linux/init.h>
34 #include <linux/interrupt.h>
36 #include <linux/iopoll.h>
37 #include <linux/module.h>
38 #include <linux/of_address.h>
39 #include <linux/of_dma.h>
40 #include <linux/of_platform.h>
41 #include <linux/of_irq.h>
42 #include <linux/slab.h>
43 #include <linux/clk.h>
44 #include <linux/io-64-nonatomic-lo-hi.h>
46 #include "../dmaengine.h"
48 /* Register/Descriptor Offsets */
49 #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
50 #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
51 #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
52 #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
54 /* Control Registers */
55 #define XILINX_DMA_REG_DMACR 0x0000
56 #define XILINX_DMA_DMACR_DELAY_MAX 0xff
57 #define XILINX_DMA_DMACR_DELAY_SHIFT 24
58 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
59 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
60 #define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
61 #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
62 #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
63 #define XILINX_DMA_DMACR_MASTER_SHIFT 8
64 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
65 #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
66 #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
67 #define XILINX_DMA_DMACR_RESET BIT(2)
68 #define XILINX_DMA_DMACR_CIRC_EN BIT(1)
69 #define XILINX_DMA_DMACR_RUNSTOP BIT(0)
70 #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
71 #define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
72 #define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
73 #define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
75 #define XILINX_DMA_REG_DMASR 0x0004
76 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
77 #define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
78 #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
79 #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
80 #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
81 #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
82 #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
83 #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
84 #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
85 #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
86 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
87 #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
88 #define XILINX_DMA_DMASR_SG_MASK BIT(3)
89 #define XILINX_DMA_DMASR_IDLE BIT(1)
90 #define XILINX_DMA_DMASR_HALTED BIT(0)
91 #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
92 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
94 #define XILINX_DMA_REG_CURDESC 0x0008
95 #define XILINX_DMA_REG_TAILDESC 0x0010
96 #define XILINX_DMA_REG_REG_INDEX 0x0014
97 #define XILINX_DMA_REG_FRMSTORE 0x0018
98 #define XILINX_DMA_REG_THRESHOLD 0x001c
99 #define XILINX_DMA_REG_FRMPTR_STS 0x0024
100 #define XILINX_DMA_REG_PARK_PTR 0x0028
101 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
102 #define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
103 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
104 #define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
105 #define XILINX_DMA_REG_VDMA_VERSION 0x002c
107 /* Register Direct Mode Registers */
108 #define XILINX_DMA_REG_VSIZE 0x0000
109 #define XILINX_DMA_REG_HSIZE 0x0004
111 #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
112 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
113 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
115 #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
116 #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
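/*
 * For example, the start address register for frame buffer 2 is at
 * 0x0c + 4 * 2 = 0x14 with 32-bit addressing, and at 0x0c + 8 * 2 = 0x1c
 * when the 64-bit address extension is enabled.
 */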
118 #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
119 #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
121 /* HW specific definitions */
122 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
124 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
125 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
126 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
127 XILINX_DMA_DMASR_ERR_IRQ)
129 #define XILINX_DMA_DMASR_ALL_ERR_MASK \
130 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
131 XILINX_DMA_DMASR_SOF_LATE_ERR | \
132 XILINX_DMA_DMASR_SG_DEC_ERR | \
133 XILINX_DMA_DMASR_SG_SLV_ERR | \
134 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
135 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
136 XILINX_DMA_DMASR_DMA_DEC_ERR | \
137 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
138 XILINX_DMA_DMASR_DMA_INT_ERR)
141 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
142 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
143 * is enabled in the h/w system.
145 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
146 (XILINX_DMA_DMASR_SOF_LATE_ERR | \
147 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
148 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
149 XILINX_DMA_DMASR_DMA_INT_ERR)
151 /* Axi VDMA Flush on Fsync bits */
152 #define XILINX_DMA_FLUSH_S2MM 3
153 #define XILINX_DMA_FLUSH_MM2S 2
154 #define XILINX_DMA_FLUSH_BOTH 1
156 /* Delay loop counter to prevent hardware failure */
157 #define XILINX_DMA_LOOP_COUNT 1000000
159 /* AXI DMA Specific Registers/Offsets */
160 #define XILINX_DMA_REG_SRCDSTADDR 0x18
161 #define XILINX_DMA_REG_BTT 0x28
163 /* AXI DMA Specific Masks/Bit fields */
164 #define XILINX_DMA_MAX_TRANS_LEN_MIN 8
165 #define XILINX_DMA_MAX_TRANS_LEN_MAX 23
166 #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
167 #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
168 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
169 #define XILINX_DMA_CR_COALESCE_SHIFT 16
170 #define XILINX_DMA_BD_SOP BIT(27)
171 #define XILINX_DMA_BD_EOP BIT(26)
172 #define XILINX_DMA_COALESCE_MAX 255
173 #define XILINX_DMA_NUM_DESCS 255
174 #define XILINX_DMA_NUM_APP_WORDS 5
176 /* Multi-Channel DMA Descriptor offsets */
177 #define XILINX_DMA_MCRX_CDESC(x) (0x40 + ((x) - 1) * 0x20)
178 #define XILINX_DMA_MCRX_TDESC(x) (0x48 + ((x) - 1) * 0x20)
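/*
 * For example, the per-channel current/tail descriptor registers for
 * multi-channel channel 2 (as passed via chan->tdest) are at
 * 0x40 + (2 - 1) * 0x20 = 0x60 and 0x48 + (2 - 1) * 0x20 = 0x68 respectively.
 */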
180 /* Multi-Channel DMA Masks/Shifts */
181 #define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0)
182 #define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0)
183 #define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19)
184 #define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0)
185 #define XILINX_DMA_BD_STRIDE_SHIFT 0
186 #define XILINX_DMA_BD_VSIZE_SHIFT 19
188 /* AXI CDMA Specific Registers/Offsets */
189 #define XILINX_CDMA_REG_SRCADDR 0x18
190 #define XILINX_CDMA_REG_DSTADDR 0x20
192 /* AXI CDMA Specific Masks */
193 #define XILINX_CDMA_CR_SGMODE BIT(3)
195 #define xilinx_prep_dma_addr_t(addr) \
196 ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
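/*
 * For example, xilinx_prep_dma_addr_t(hw->src_addr) expands to
 * ((dma_addr_t)((u64)hw->src_addr_msb << 32 | (hw->src_addr))): the token
 * pasting appends "_msb" to the argument so that the matching high word of
 * the split 64-bit address in the hardware descriptor is picked up.
 */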
198 * struct xilinx_vdma_desc_hw - Hardware Descriptor
199 * @next_desc: Next Descriptor Pointer @0x00
200 * @pad1: Reserved @0x04
201 * @buf_addr: Buffer address @0x08
202 * @buf_addr_msb: MSB of Buffer address @0x0C
203 * @vsize: Vertical Size @0x10
204 * @hsize: Horizontal Size @0x14
205 * @stride: Number of bytes between the first
206 * pixels of each horizontal line @0x18
208 struct xilinx_vdma_desc_hw {
219 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
220 * @next_desc: Next Descriptor Pointer @0x00
221 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
222 * @buf_addr: Buffer address @0x08
223 * @buf_addr_msb: MSB of Buffer address @0x0C
224 * @mcdma_control: Control field for mcdma @0x10
225 * @vsize_stride: Vsize and Stride field for mcdma @0x14
226 * @control: Control field @0x18
227 * @status: Status field @0x1C
228 * @app: APP Fields @0x20 - 0x30
230 struct xilinx_axidma_desc_hw {
239 u32 app[XILINX_DMA_NUM_APP_WORDS];
243 * struct xilinx_cdma_desc_hw - Hardware Descriptor
244 * @next_desc: Next Descriptor Pointer @0x00
245 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
246 * @src_addr: Source address @0x08
247 * @src_addr_msb: Source address MSB @0x0C
248 * @dest_addr: Destination address @0x10
249 * @dest_addr_msb: Destination address MSB @0x14
250 * @control: Control field @0x18
251 * @status: Status field @0x1C
253 struct xilinx_cdma_desc_hw {
265 * struct xilinx_vdma_tx_segment - Descriptor segment
266 * @hw: Hardware descriptor
267 * @node: Node in the descriptor segments list
268 * @phys: Physical address of segment
270 struct xilinx_vdma_tx_segment {
271 struct xilinx_vdma_desc_hw hw;
272 struct list_head node;
277 * struct xilinx_axidma_tx_segment - Descriptor segment
278 * @hw: Hardware descriptor
279 * @node: Node in the descriptor segments list
280 * @phys: Physical address of segment
282 struct xilinx_axidma_tx_segment {
283 struct xilinx_axidma_desc_hw hw;
284 struct list_head node;
289 * struct xilinx_cdma_tx_segment - Descriptor segment
290 * @hw: Hardware descriptor
291 * @node: Node in the descriptor segments list
292 * @phys: Physical address of segment
294 struct xilinx_cdma_tx_segment {
295 struct xilinx_cdma_desc_hw hw;
296 struct list_head node;
301 * struct xilinx_dma_tx_descriptor - Per Transaction structure
302 * @async_tx: Async transaction descriptor
303 * @segments: TX segments list
304 * @node: Node in the channel descriptors list
305 * @cyclic: Check for cyclic transfers.
307 struct xilinx_dma_tx_descriptor {
308 struct dma_async_tx_descriptor async_tx;
309 struct list_head segments;
310 struct list_head node;
315 * struct xilinx_dma_chan - Driver specific DMA channel structure
316 * @xdev: Driver specific device structure
317 * @ctrl_offset: Control registers offset
318 * @desc_offset: TX descriptor registers offset
319 * @lock: Descriptor operation lock
320 * @pending_list: Descriptors waiting
321 * @active_list: Descriptors ready to submit
322 * @done_list: Complete descriptors
323 * @free_seg_list: Free descriptors
324 * @common: DMA common channel
325 * @desc_pool: Descriptors pool
326 * @dev: The dma device
329 * @direction: Transfer direction
330 * @num_frms: Number of frames
331 * @has_sg: Support scatter transfers
332 * @cyclic: Check for cyclic transfers.
333 * @genlock: Support genlock mode
334 * @err: Channel has errors
335 * @idle: Check for channel idle
336 * @tasklet: Cleanup work after irq
337 * @config: Device configuration info
338 * @flush_on_fsync: Flush on Frame sync
339 * @desc_pendingcount: Descriptor pending count
340 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
341 * @desc_submitcount: Descriptor h/w submitted count
342 * @residue: Residue for AXI DMA
343 * @seg_v: Statically allocated segments base
344 * @seg_p: Physical allocated segments base
345 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
346 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
347 * @start_transfer: IP-specific callback that starts a transfer
348 * @stop_transfer: IP-specific callback that stops/quiesces a transfer
349 * @tdest: TDEST value for mcdma
350 * @has_vflip: S2MM vertical flip
352 struct xilinx_dma_chan {
353 struct xilinx_dma_device *xdev;
357 struct list_head pending_list;
358 struct list_head active_list;
359 struct list_head done_list;
360 struct list_head free_seg_list;
361 struct dma_chan common;
362 struct dma_pool *desc_pool;
366 enum dma_transfer_direction direction;
373 struct tasklet_struct tasklet;
374 struct xilinx_vdma_config config;
376 u32 desc_pendingcount;
378 u32 desc_submitcount;
380 struct xilinx_axidma_tx_segment *seg_v;
382 struct xilinx_axidma_tx_segment *cyclic_seg_v;
383 dma_addr_t cyclic_seg_p;
384 void (*start_transfer)(struct xilinx_dma_chan *chan);
385 int (*stop_transfer)(struct xilinx_dma_chan *chan);
391 * enum xdma_ip_type - DMA IP type.
393 * @XDMA_TYPE_AXIDMA: Axi dma ip.
394 * @XDMA_TYPE_CDMA: Axi cdma ip.
395 * @XDMA_TYPE_VDMA: Axi vdma ip.
399 XDMA_TYPE_AXIDMA = 0,
404 struct xilinx_dma_config {
405 enum xdma_ip_type dmatype;
406 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
407 struct clk **tx_clk, struct clk **txs_clk,
408 struct clk **rx_clk, struct clk **rxs_clk);
412 * struct xilinx_dma_device - DMA device structure
413 * @regs: I/O mapped base address
414 * @dev: Device Structure
415 * @common: DMA device structure
416 * @chan: Driver specific DMA channel
417 * @mcdma: Specifies whether Multi-Channel is present or not
418 * @flush_on_fsync: Flush on frame sync
419 * @ext_addr: Indicates 64 bit addressing is supported by dma device
420 * @pdev: Platform device structure pointer
421 * @dma_config: DMA config structure
422 * @axi_clk: DMA Axi4-lite interface clock
423 * @tx_clk: DMA mm2s clock
424 * @txs_clk: DMA mm2s stream clock
425 * @rx_clk: DMA s2mm clock
426 * @rxs_clk: DMA s2mm stream clock
427 * @nr_channels: Number of channels DMA device supports
428 * @chan_id: DMA channel identifier
429 * @max_buffer_len: Max buffer length
431 struct xilinx_dma_device {
434 struct dma_device common;
435 struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
439 struct platform_device *pdev;
440 const struct xilinx_dma_config *dma_config;
452 #define to_xilinx_chan(chan) \
453 container_of(chan, struct xilinx_dma_chan, common)
454 #define to_dma_tx_descriptor(tx) \
455 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
456 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
457 readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
458 cond, delay_us, timeout_us)
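/*
 * For example, waiting for a channel to report halted can be written as:
 *
 *	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
 *				      val & XILINX_DMA_DMASR_HALTED, 0,
 *				      XILINX_DMA_LOOP_COUNT);
 *
 * which polls the channel's DMASR until the condition is true or the
 * timeout expires, returning -ETIMEDOUT in the latter case.
 */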
461 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
463 return ioread32(chan->xdev->regs + reg);
466 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
468 iowrite32(value, chan->xdev->regs + reg);
471 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
474 dma_write(chan, chan->desc_offset + reg, value);
477 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
479 return dma_read(chan, chan->ctrl_offset + reg);
482 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
485 dma_write(chan, chan->ctrl_offset + reg, value);
488 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
491 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
494 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
497 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
501 * vdma_desc_write_64 - 64-bit descriptor write
502 * @chan: Driver specific VDMA channel
503 * @reg: Register to write
504 * @value_lsb: lower address of the descriptor.
505 * @value_msb: upper address of the descriptor.
507 * Since the VDMA driver may need to write to a register offset that is not
508 * 64-bit aligned (e.g. 0x5c), the value is written as two separate 32-bit
509 * writes instead of a single 64-bit register write.
511 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
512 u32 value_lsb, u32 value_msb)
514 /* Write the lsb 32 bits */
515 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
517 /* Write the msb 32 bits */
518 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
521 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
523 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
526 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
530 dma_writeq(chan, reg, addr);
532 dma_ctrl_write(chan, reg, addr);
535 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
536 struct xilinx_axidma_desc_hw *hw,
537 dma_addr_t buf_addr, size_t sg_used,
540 if (chan->ext_addr) {
541 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
542 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
545 hw->buf_addr = buf_addr + sg_used + period_len;
549 /* -----------------------------------------------------------------------------
550 * Descriptors and segments alloc and free
554 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
555 * @chan: Driver specific DMA channel
557 * Return: The allocated segment on success and NULL on failure.
559 static struct xilinx_vdma_tx_segment *
560 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
562 struct xilinx_vdma_tx_segment *segment;
565 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
569 segment->phys = phys;
575 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
576 * @chan: Driver specific DMA channel
578 * Return: The allocated segment on success and NULL on failure.
580 static struct xilinx_cdma_tx_segment *
581 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
583 struct xilinx_cdma_tx_segment *segment;
586 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
590 segment->phys = phys;
596 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
597 * @chan: Driver specific DMA channel
599 * Return: The allocated segment on success and NULL on failure.
601 static struct xilinx_axidma_tx_segment *
602 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
604 struct xilinx_axidma_tx_segment *segment = NULL;
607 spin_lock_irqsave(&chan->lock, flags);
608 if (!list_empty(&chan->free_seg_list)) {
609 segment = list_first_entry(&chan->free_seg_list,
610 struct xilinx_axidma_tx_segment,
612 list_del(&segment->node);
614 spin_unlock_irqrestore(&chan->lock, flags);
619 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
621 u32 next_desc = hw->next_desc;
622 u32 next_desc_msb = hw->next_desc_msb;
624 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
626 hw->next_desc = next_desc;
627 hw->next_desc_msb = next_desc_msb;
631 * xilinx_dma_free_tx_segment - Free transaction segment
632 * @chan: Driver specific DMA channel
633 * @segment: DMA transaction segment
635 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
636 struct xilinx_axidma_tx_segment *segment)
638 xilinx_dma_clean_hw_desc(&segment->hw);
640 list_add_tail(&segment->node, &chan->free_seg_list);
644 * xilinx_cdma_free_tx_segment - Free transaction segment
645 * @chan: Driver specific DMA channel
646 * @segment: DMA transaction segment
648 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
649 struct xilinx_cdma_tx_segment *segment)
651 dma_pool_free(chan->desc_pool, segment, segment->phys);
655 * xilinx_vdma_free_tx_segment - Free transaction segment
656 * @chan: Driver specific DMA channel
657 * @segment: DMA transaction segment
659 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
660 struct xilinx_vdma_tx_segment *segment)
662 dma_pool_free(chan->desc_pool, segment, segment->phys);
666 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
667 * @chan: Driver specific DMA channel
669 * Return: The allocated descriptor on success and NULL on failure.
671 static struct xilinx_dma_tx_descriptor *
672 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
674 struct xilinx_dma_tx_descriptor *desc;
676 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
680 INIT_LIST_HEAD(&desc->segments);
686 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
687 * @chan: Driver specific DMA channel
688 * @desc: DMA transaction descriptor
691 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
692 struct xilinx_dma_tx_descriptor *desc)
694 struct xilinx_vdma_tx_segment *segment, *next;
695 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
696 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
701 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
702 list_for_each_entry_safe(segment, next, &desc->segments, node) {
703 list_del(&segment->node);
704 xilinx_vdma_free_tx_segment(chan, segment);
706 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
707 list_for_each_entry_safe(cdma_segment, cdma_next,
708 &desc->segments, node) {
709 list_del(&cdma_segment->node);
710 xilinx_cdma_free_tx_segment(chan, cdma_segment);
713 list_for_each_entry_safe(axidma_segment, axidma_next,
714 &desc->segments, node) {
715 list_del(&axidma_segment->node);
716 xilinx_dma_free_tx_segment(chan, axidma_segment);
723 /* Required functions */
726 * xilinx_dma_free_desc_list - Free descriptors list
727 * @chan: Driver specific DMA channel
728 * @list: List to parse and delete the descriptor
730 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
731 struct list_head *list)
733 struct xilinx_dma_tx_descriptor *desc, *next;
735 list_for_each_entry_safe(desc, next, list, node) {
736 list_del(&desc->node);
737 xilinx_dma_free_tx_descriptor(chan, desc);
742 * xilinx_dma_free_descriptors - Free channel descriptors
743 * @chan: Driver specific DMA channel
745 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
749 spin_lock_irqsave(&chan->lock, flags);
751 xilinx_dma_free_desc_list(chan, &chan->pending_list);
752 xilinx_dma_free_desc_list(chan, &chan->done_list);
753 xilinx_dma_free_desc_list(chan, &chan->active_list);
755 spin_unlock_irqrestore(&chan->lock, flags);
759 * xilinx_dma_free_chan_resources - Free channel resources
760 * @dchan: DMA channel
762 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
764 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
767 dev_dbg(chan->dev, "Free all channel resources.\n");
769 xilinx_dma_free_descriptors(chan);
771 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
772 spin_lock_irqsave(&chan->lock, flags);
773 INIT_LIST_HEAD(&chan->free_seg_list);
774 spin_unlock_irqrestore(&chan->lock, flags);
776 /* Free memory that is allocated for BD */
777 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
778 XILINX_DMA_NUM_DESCS, chan->seg_v,
781 /* Free memory that is allocated for cyclic DMA mode */
782 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
783 chan->cyclic_seg_v, chan->cyclic_seg_p);
786 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
787 dma_pool_destroy(chan->desc_pool);
788 chan->desc_pool = NULL;
793 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
794 * @chan: Driver specific dma channel
795 * @desc: dma transaction descriptor
796 * @flags: flags for spin lock
798 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
799 struct xilinx_dma_tx_descriptor *desc,
800 unsigned long *flags)
802 dma_async_tx_callback callback;
803 void *callback_param;
805 callback = desc->async_tx.callback;
806 callback_param = desc->async_tx.callback_param;
808 spin_unlock_irqrestore(&chan->lock, *flags);
809 callback(callback_param);
810 spin_lock_irqsave(&chan->lock, *flags);
815 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
816 * @chan: Driver specific DMA channel
818 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
820 struct xilinx_dma_tx_descriptor *desc, *next;
823 spin_lock_irqsave(&chan->lock, flags);
825 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
826 struct dmaengine_desc_callback cb;
829 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
833 /* Remove from the list of running transactions */
834 list_del(&desc->node);
836 /* Run the link descriptor callback function */
837 dmaengine_desc_get_callback(&desc->async_tx, &cb);
838 if (dmaengine_desc_callback_valid(&cb)) {
839 spin_unlock_irqrestore(&chan->lock, flags);
840 dmaengine_desc_callback_invoke(&cb, NULL);
841 spin_lock_irqsave(&chan->lock, flags);
844 /* Run any dependencies, then free the descriptor */
845 dma_run_dependencies(&desc->async_tx);
846 xilinx_dma_free_tx_descriptor(chan, desc);
849 spin_unlock_irqrestore(&chan->lock, flags);
853 * xilinx_dma_do_tasklet - Schedule completion tasklet
854 * @data: Pointer to the Xilinx DMA channel structure
856 static void xilinx_dma_do_tasklet(unsigned long data)
858 struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
860 xilinx_dma_chan_desc_cleanup(chan);
864 * xilinx_dma_alloc_chan_resources - Allocate channel resources
865 * @dchan: DMA channel
867 * Return: '0' on success and failure value on error
869 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
871 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
874 /* Has this channel already been allocated? */
879 * We need the descriptor to be aligned to 64 bytes
880 * to meet the Xilinx VDMA specification requirement.
882 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
883 /* Allocate the buffer descriptors. */
884 chan->seg_v = dma_alloc_coherent(chan->dev,
885 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
886 &chan->seg_p, GFP_KERNEL);
889 "unable to allocate channel %d descriptors\n",
894 * For cyclic DMA mode we need to program the tail descriptor
895 * register with a value that is not part of the BD chain, so
896 * allocate a descriptor segment here, at channel allocation time,
897 * for programming the tail descriptor.
899 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
900 sizeof(*chan->cyclic_seg_v),
903 if (!chan->cyclic_seg_v) {
905 "unable to allocate desc segment for cyclic DMA\n");
906 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
907 XILINX_DMA_NUM_DESCS, chan->seg_v,
911 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
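/*
 * The loop below chains the statically allocated buffer descriptors into a
 * ring: descriptor i points at descriptor (i + 1) % XILINX_DMA_NUM_DESCS,
 * so the last descriptor (index 254) links back to descriptor 0.
 */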
913 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
914 chan->seg_v[i].hw.next_desc =
915 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
916 ((i + 1) % XILINX_DMA_NUM_DESCS));
917 chan->seg_v[i].hw.next_desc_msb =
918 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
919 ((i + 1) % XILINX_DMA_NUM_DESCS));
920 chan->seg_v[i].phys = chan->seg_p +
921 sizeof(*chan->seg_v) * i;
922 list_add_tail(&chan->seg_v[i].node,
923 &chan->free_seg_list);
925 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
926 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
928 sizeof(struct xilinx_cdma_tx_segment),
929 __alignof__(struct xilinx_cdma_tx_segment),
932 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
934 sizeof(struct xilinx_vdma_tx_segment),
935 __alignof__(struct xilinx_vdma_tx_segment),
939 if (!chan->desc_pool &&
940 (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
942 "unable to allocate channel %d descriptor pool\n",
947 dma_cookie_init(dchan);
949 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
950 /* For AXI DMA, resetting one channel will reset the
951 * other channel as well, so enable the interrupts here.
953 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
954 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
957 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
958 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
959 XILINX_CDMA_CR_SGMODE);
965 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
966 * @chan: Driver specific DMA channel
967 * @size: Total data that needs to be copied
968 * @done: Amount of data that has been already copied
970 * Return: Amount of data that has to be copied
972 static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
977 copy = min_t(size_t, size - done,
978 chan->xdev->max_buffer_len);
980 if ((copy + done < size) &&
981 chan->xdev->common.copy_align) {
983 * If this is not the last descriptor, make sure
984 * the next one will be properly aligned
986 copy = rounddown(copy,
987 (1 << chan->xdev->common.copy_align));
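/*
 * Worked example (values chosen for illustration only): with size = 20000,
 * done = 0, max_buffer_len = 16383 (a 14-bit length register) and
 * copy_align = 3 (8-byte alignment), copy = min(20000, 16383) = 16383;
 * since another descriptor is still needed, it is rounded down to 16376 so
 * that the next chunk starts on an 8-byte boundary. The final chunk
 * (size - done <= max_buffer_len) is not rounded.
 */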
993 * xilinx_dma_tx_status - Get DMA transaction status
994 * @dchan: DMA channel
995 * @cookie: Transaction identifier
996 * @txstate: Transaction state
998 * Return: DMA transaction status
1000 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
1001 dma_cookie_t cookie,
1002 struct dma_tx_state *txstate)
1004 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1005 struct xilinx_dma_tx_descriptor *desc;
1006 struct xilinx_axidma_tx_segment *segment;
1007 struct xilinx_axidma_desc_hw *hw;
1008 enum dma_status ret;
1009 unsigned long flags;
1012 ret = dma_cookie_status(dchan, cookie, txstate);
1013 if (ret == DMA_COMPLETE || !txstate)
1016 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1017 spin_lock_irqsave(&chan->lock, flags);
1019 desc = list_last_entry(&chan->active_list,
1020 struct xilinx_dma_tx_descriptor, node);
1022 list_for_each_entry(segment, &desc->segments, node) {
1024 residue += (hw->control - hw->status) &
1025 chan->xdev->max_buffer_len;
1028 spin_unlock_irqrestore(&chan->lock, flags);
1030 chan->residue = residue;
1031 dma_set_residue(txstate, chan->residue);
1038 * xilinx_dma_stop_transfer - Halt DMA channel
1039 * @chan: Driver specific DMA channel
1041 * Return: '0' on success and failure value on error
1043 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1047 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1049 /* Wait for the hardware to halt */
1050 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1051 val & XILINX_DMA_DMASR_HALTED, 0,
1052 XILINX_DMA_LOOP_COUNT);
1056 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1057 * @chan: Driver specific DMA channel
1059 * Return: '0' on success and failure value on error
1061 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1065 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1066 val & XILINX_DMA_DMASR_IDLE, 0,
1067 XILINX_DMA_LOOP_COUNT);
1071 * xilinx_dma_start - Start DMA channel
1072 * @chan: Driver specific DMA channel
1074 static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1079 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1081 /* Wait for the hardware to start */
1082 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1083 !(val & XILINX_DMA_DMASR_HALTED), 0,
1084 XILINX_DMA_LOOP_COUNT);
1087 dev_err(chan->dev, "Cannot start channel %p: %x\n",
1088 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1095 * xilinx_vdma_start_transfer - Starts VDMA transfer
1096 * @chan: Driver specific channel struct pointer
1098 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1100 struct xilinx_vdma_config *config = &chan->config;
1101 struct xilinx_dma_tx_descriptor *desc;
1103 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1106 /* This function was invoked with lock held */
1113 if (list_empty(&chan->pending_list))
1116 desc = list_first_entry(&chan->pending_list,
1117 struct xilinx_dma_tx_descriptor, node);
1119 /* Configure the hardware using info in the config structure */
1120 if (chan->has_vflip) {
1121 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1122 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1123 reg |= config->vflip_en;
1124 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1128 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1130 if (config->frm_cnt_en)
1131 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1133 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1135 /* If not parking, enable circular mode */
1137 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1139 reg |= XILINX_DMA_DMACR_CIRC_EN;
1141 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1143 j = chan->desc_submitcount;
1144 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1145 if (chan->direction == DMA_MEM_TO_DEV) {
1146 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1147 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1149 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1150 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1152 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1154 /* Start the hardware */
1155 xilinx_dma_start(chan);
1160 /* Start the transfer */
1161 if (chan->desc_submitcount < chan->num_frms)
1162 i = chan->desc_submitcount;
1164 list_for_each_entry(segment, &desc->segments, node) {
1166 vdma_desc_write_64(chan,
1167 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1168 segment->hw.buf_addr,
1169 segment->hw.buf_addr_msb);
1171 vdma_desc_write(chan,
1172 XILINX_VDMA_REG_START_ADDRESS(i++),
1173 segment->hw.buf_addr);
1181 /* HW expects these parameters to be the same for one transaction */
1182 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1183 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1185 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1187 chan->desc_submitcount++;
1188 chan->desc_pendingcount--;
1189 list_del(&desc->node);
1190 list_add_tail(&desc->node, &chan->active_list);
1191 if (chan->desc_submitcount == chan->num_frms)
1192 chan->desc_submitcount = 0;
1198 * xilinx_cdma_start_transfer - Starts cdma transfer
1199 * @chan: Driver specific channel struct pointer
1201 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1203 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1204 struct xilinx_cdma_tx_segment *tail_segment;
1205 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1213 if (list_empty(&chan->pending_list))
1216 head_desc = list_first_entry(&chan->pending_list,
1217 struct xilinx_dma_tx_descriptor, node);
1218 tail_desc = list_last_entry(&chan->pending_list,
1219 struct xilinx_dma_tx_descriptor, node);
1220 tail_segment = list_last_entry(&tail_desc->segments,
1221 struct xilinx_cdma_tx_segment, node);
1223 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1224 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1225 ctrl_reg |= chan->desc_pendingcount <<
1226 XILINX_DMA_CR_COALESCE_SHIFT;
1227 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1231 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1232 XILINX_CDMA_CR_SGMODE);
1234 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1235 XILINX_CDMA_CR_SGMODE);
1237 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1238 head_desc->async_tx.phys);
1240 /* Update tail ptr register which will start the transfer */
1241 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1242 tail_segment->phys);
1244 /* In simple mode */
1245 struct xilinx_cdma_tx_segment *segment;
1246 struct xilinx_cdma_desc_hw *hw;
1248 segment = list_first_entry(&head_desc->segments,
1249 struct xilinx_cdma_tx_segment,
1254 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1255 xilinx_prep_dma_addr_t(hw->src_addr));
1256 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1257 xilinx_prep_dma_addr_t(hw->dest_addr));
1259 /* Start the transfer */
1260 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1261 hw->control & chan->xdev->max_buffer_len);
1264 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1265 chan->desc_pendingcount = 0;
1270 * xilinx_dma_start_transfer - Starts DMA transfer
1271 * @chan: Driver specific channel struct pointer
1273 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1275 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1276 struct xilinx_axidma_tx_segment *tail_segment;
1282 if (list_empty(&chan->pending_list))
1288 head_desc = list_first_entry(&chan->pending_list,
1289 struct xilinx_dma_tx_descriptor, node);
1290 tail_desc = list_last_entry(&chan->pending_list,
1291 struct xilinx_dma_tx_descriptor, node);
1292 tail_segment = list_last_entry(&tail_desc->segments,
1293 struct xilinx_axidma_tx_segment, node);
1295 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1297 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1298 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1299 reg |= chan->desc_pendingcount <<
1300 XILINX_DMA_CR_COALESCE_SHIFT;
1301 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1304 if (chan->has_sg && !chan->xdev->mcdma)
1305 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1306 head_desc->async_tx.phys);
1308 if (chan->has_sg && chan->xdev->mcdma) {
1309 if (chan->direction == DMA_MEM_TO_DEV) {
1310 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1311 head_desc->async_tx.phys);
1314 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1315 head_desc->async_tx.phys);
1317 dma_ctrl_write(chan,
1318 XILINX_DMA_MCRX_CDESC(chan->tdest),
1319 head_desc->async_tx.phys);
1324 xilinx_dma_start(chan);
1329 /* Start the transfer */
1330 if (chan->has_sg && !chan->xdev->mcdma) {
1332 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1333 chan->cyclic_seg_v->phys);
1335 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1336 tail_segment->phys);
1337 } else if (chan->has_sg && chan->xdev->mcdma) {
1338 if (chan->direction == DMA_MEM_TO_DEV) {
1339 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1340 tail_segment->phys);
1343 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1344 tail_segment->phys);
1346 dma_ctrl_write(chan,
1347 XILINX_DMA_MCRX_TDESC(chan->tdest),
1348 tail_segment->phys);
1352 struct xilinx_axidma_tx_segment *segment;
1353 struct xilinx_axidma_desc_hw *hw;
1355 segment = list_first_entry(&head_desc->segments,
1356 struct xilinx_axidma_tx_segment,
1360 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
1361 xilinx_prep_dma_addr_t(hw->buf_addr));
1363 /* Start the transfer */
1364 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1365 hw->control & chan->xdev->max_buffer_len);
1368 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1369 chan->desc_pendingcount = 0;
1374 * xilinx_dma_issue_pending - Issue pending transactions
1375 * @dchan: DMA channel
1377 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1379 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1380 unsigned long flags;
1382 spin_lock_irqsave(&chan->lock, flags);
1383 chan->start_transfer(chan);
1384 spin_unlock_irqrestore(&chan->lock, flags);
1388 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
1389 * @chan: xilinx DMA channel
1393 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1395 struct xilinx_dma_tx_descriptor *desc, *next;
1397 /* This function was invoked with lock held */
1398 if (list_empty(&chan->active_list))
1401 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1402 list_del(&desc->node);
1404 dma_cookie_complete(&desc->async_tx);
1405 list_add_tail(&desc->node, &chan->done_list);
1410 * xilinx_dma_reset - Reset DMA channel
1411 * @chan: Driver specific DMA channel
1413 * Return: '0' on success and failure value on error
1415 static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1420 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1422 /* Wait for the hardware to finish reset */
1423 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1424 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1425 XILINX_DMA_LOOP_COUNT);
1428 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1429 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1430 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1436 chan->desc_submitcount = 0;
1442 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1443 * @chan: Driver specific DMA channel
1445 * Return: '0' on success and failure value on error
1447 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1452 err = xilinx_dma_reset(chan);
1456 /* Enable interrupts */
1457 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1458 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1464 * xilinx_dma_irq_handler - DMA Interrupt handler
1466 * @data: Pointer to the Xilinx DMA channel structure
1468 * Return: IRQ_HANDLED/IRQ_NONE
1470 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1472 struct xilinx_dma_chan *chan = data;
1475 /* Read the status and ack the interrupts. */
1476 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1477 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1480 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1481 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1483 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
1485 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1486 * error is recoverable, ignore it. Otherwise flag the error.
1488 * Only recoverable errors can be cleared in the DMASR register,
1489 * make sure not to set other error bits to 1.
1491 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1493 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1494 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1496 if (!chan->flush_on_fsync ||
1497 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1499 "Channel %p has errors %x, cdr %x tdr %x\n",
1501 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1502 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1507 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
1509 * Device takes too long to do the transfer when user requires responsiveness.
1512 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1515 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1516 spin_lock(&chan->lock);
1517 xilinx_dma_complete_descriptor(chan);
1519 chan->start_transfer(chan);
1520 spin_unlock(&chan->lock);
1523 tasklet_schedule(&chan->tasklet);
1528 * append_desc_queue - Queue a descriptor
1529 * @chan: Driver specific dma channel
1530 * @desc: dma transaction descriptor
1532 static void append_desc_queue(struct xilinx_dma_chan *chan,
1533 struct xilinx_dma_tx_descriptor *desc)
1535 struct xilinx_vdma_tx_segment *tail_segment;
1536 struct xilinx_dma_tx_descriptor *tail_desc;
1537 struct xilinx_axidma_tx_segment *axidma_tail_segment;
1538 struct xilinx_cdma_tx_segment *cdma_tail_segment;
1540 if (list_empty(&chan->pending_list))
1544 * Add the hardware descriptor to the chain of hardware descriptors
1545 * that already exists in memory.
1547 tail_desc = list_last_entry(&chan->pending_list,
1548 struct xilinx_dma_tx_descriptor, node);
1549 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1550 tail_segment = list_last_entry(&tail_desc->segments,
1551 struct xilinx_vdma_tx_segment,
1553 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1554 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1555 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1556 struct xilinx_cdma_tx_segment,
1558 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1560 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1561 struct xilinx_axidma_tx_segment,
1563 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1567 * Add the software descriptor and all children to the list
1568 * of pending transactions
1571 list_add_tail(&desc->node, &chan->pending_list);
1572 chan->desc_pendingcount++;
1574 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1575 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1576 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1577 chan->desc_pendingcount = chan->num_frms;
1582 * xilinx_dma_tx_submit - Submit DMA transaction
1583 * @tx: Async transaction descriptor
1585 * Return: cookie value on success and failure value on error
1587 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1589 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1590 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1591 dma_cookie_t cookie;
1592 unsigned long flags;
1596 xilinx_dma_free_tx_descriptor(chan, desc);
1602 * If reset fails, need to hard reset the system.
1603 * Channel is no longer functional
1605 err = xilinx_dma_chan_reset(chan);
1610 spin_lock_irqsave(&chan->lock, flags);
1612 cookie = dma_cookie_assign(tx);
1614 /* Put this transaction onto the tail of the pending queue */
1615 append_desc_queue(chan, desc);
1618 chan->cyclic = true;
1620 spin_unlock_irqrestore(&chan->lock, flags);
1626 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1627 * DMA_SLAVE transaction
1628 * @dchan: DMA channel
1629 * @xt: Interleaved template pointer
1630 * @flags: transfer ack flags
1632 * Return: Async transaction descriptor on success and NULL on failure
1634 static struct dma_async_tx_descriptor *
1635 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1636 struct dma_interleaved_template *xt,
1637 unsigned long flags)
1639 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1640 struct xilinx_dma_tx_descriptor *desc;
1641 struct xilinx_vdma_tx_segment *segment;
1642 struct xilinx_vdma_desc_hw *hw;
1644 if (!is_slave_direction(xt->dir))
1647 if (!xt->numf || !xt->sgl[0].size)
1650 if (xt->frame_size != 1)
1653 /* Allocate a transaction descriptor. */
1654 desc = xilinx_dma_alloc_tx_descriptor(chan);
1658 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1659 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1660 async_tx_ack(&desc->async_tx);
1662 /* Allocate the link descriptor from DMA pool */
1663 segment = xilinx_vdma_alloc_tx_segment(chan);
1667 /* Fill in the hardware descriptor */
1669 hw->vsize = xt->numf;
1670 hw->hsize = xt->sgl[0].size;
1671 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
1672 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
1673 hw->stride |= chan->config.frm_dly <<
1674 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
1676 if (xt->dir != DMA_MEM_TO_DEV) {
1677 if (chan->ext_addr) {
1678 hw->buf_addr = lower_32_bits(xt->dst_start);
1679 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1681 hw->buf_addr = xt->dst_start;
1684 if (chan->ext_addr) {
1685 hw->buf_addr = lower_32_bits(xt->src_start);
1686 hw->buf_addr_msb = upper_32_bits(xt->src_start);
1688 hw->buf_addr = xt->src_start;
1692 /* Insert the segment into the descriptor segments list. */
1693 list_add_tail(&segment->node, &desc->segments);
1695 /* Link the last hardware descriptor with the first. */
1696 segment = list_first_entry(&desc->segments,
1697 struct xilinx_vdma_tx_segment, node);
1698 desc->async_tx.phys = segment->phys;
1700 return &desc->async_tx;
1703 xilinx_dma_free_tx_descriptor(chan, desc);
1708 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
1709 * @dchan: DMA channel
1710 * @dma_dst: destination address
1711 * @dma_src: source address
1712 * @len: transfer length
1713 * @flags: transfer ack flags
1715 * Return: Async transaction descriptor on success and NULL on failure
1717 static struct dma_async_tx_descriptor *
1718 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1719 dma_addr_t dma_src, size_t len, unsigned long flags)
1721 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1722 struct xilinx_dma_tx_descriptor *desc;
1723 struct xilinx_cdma_tx_segment *segment;
1724 struct xilinx_cdma_desc_hw *hw;
1726 if (!len || len > chan->xdev->max_buffer_len)
1729 desc = xilinx_dma_alloc_tx_descriptor(chan);
1733 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1734 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1736 /* Allocate the link descriptor from DMA pool */
1737 segment = xilinx_cdma_alloc_tx_segment(chan);
1743 hw->src_addr = dma_src;
1744 hw->dest_addr = dma_dst;
1745 if (chan->ext_addr) {
1746 hw->src_addr_msb = upper_32_bits(dma_src);
1747 hw->dest_addr_msb = upper_32_bits(dma_dst);
1750 /* Insert the segment into the descriptor segments list. */
1751 list_add_tail(&segment->node, &desc->segments);
1753 desc->async_tx.phys = segment->phys;
1754 hw->next_desc = segment->phys;
1756 return &desc->async_tx;
1759 xilinx_dma_free_tx_descriptor(chan, desc);
1764 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1765 * @dchan: DMA channel
1766 * @sgl: scatterlist to transfer to/from
1767 * @sg_len: number of entries in @sgl
1768 * @direction: DMA direction
1769 * @flags: transfer ack flags
1770 * @context: APP words of the descriptor
1772 * Return: Async transaction descriptor on success and NULL on failure
1774 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1775 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1776 enum dma_transfer_direction direction, unsigned long flags,
1779 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1780 struct xilinx_dma_tx_descriptor *desc;
1781 struct xilinx_axidma_tx_segment *segment = NULL;
1782 u32 *app_w = (u32 *)context;
1783 struct scatterlist *sg;
1788 if (!is_slave_direction(direction))
1791 /* Allocate a transaction descriptor. */
1792 desc = xilinx_dma_alloc_tx_descriptor(chan);
1796 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1797 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1799 /* Build transactions using information in the scatter gather list */
1800 for_each_sg(sgl, sg, sg_len, i) {
1803 /* Loop until the entire scatterlist entry is used */
1804 while (sg_used < sg_dma_len(sg)) {
1805 struct xilinx_axidma_desc_hw *hw;
1807 /* Get a free segment */
1808 segment = xilinx_axidma_alloc_tx_segment(chan);
1813 * Calculate the maximum number of bytes to transfer,
1814 * making sure it is less than the hw limit
1816 copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
1820 /* Fill in the descriptor */
1821 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
1826 if (chan->direction == DMA_MEM_TO_DEV) {
1828 memcpy(hw->app, app_w, sizeof(u32) *
1829 XILINX_DMA_NUM_APP_WORDS);
1835 * Insert the segment into the descriptor segments
1838 list_add_tail(&segment->node, &desc->segments);
1842 segment = list_first_entry(&desc->segments,
1843 struct xilinx_axidma_tx_segment, node);
1844 desc->async_tx.phys = segment->phys;
1846 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
1847 if (chan->direction == DMA_MEM_TO_DEV) {
1848 segment->hw.control |= XILINX_DMA_BD_SOP;
1849 segment = list_last_entry(&desc->segments,
1850 struct xilinx_axidma_tx_segment,
1852 segment->hw.control |= XILINX_DMA_BD_EOP;
1855 return &desc->async_tx;
1858 xilinx_dma_free_tx_descriptor(chan, desc);
1863 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
1864 * @dchan: DMA channel
1865 * @buf_addr: Physical address of the buffer
1866 * @buf_len: Total length of the cyclic buffers
1867 * @period_len: length of individual cyclic buffer
1868 * @direction: DMA direction
1869 * @flags: transfer ack flags
1871 * Return: Async transaction descriptor on success and NULL on failure
1873 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1874 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
1875 size_t period_len, enum dma_transfer_direction direction,
1876 unsigned long flags)
1878 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1879 struct xilinx_dma_tx_descriptor *desc;
1880 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
1881 size_t copy, sg_used;
1882 unsigned int num_periods;
1889 num_periods = buf_len / period_len;
1894 if (!is_slave_direction(direction))
1897 /* Allocate a transaction descriptor. */
1898 desc = xilinx_dma_alloc_tx_descriptor(chan);
1902 chan->direction = direction;
1903 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1904 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1906 for (i = 0; i < num_periods; ++i) {
1909 while (sg_used < period_len) {
1910 struct xilinx_axidma_desc_hw *hw;
1912 /* Get a free segment */
1913 segment = xilinx_axidma_alloc_tx_segment(chan);
1918 * Calculate the maximum number of bytes to transfer,
1919 * making sure it is less than the hw limit
1921 copy = xilinx_dma_calc_copysize(chan, period_len,
1924 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
1929 prev->hw.next_desc = segment->phys;
1935 * Insert the segment into the descriptor segments
1938 list_add_tail(&segment->node, &desc->segments);
1942 head_segment = list_first_entry(&desc->segments,
1943 struct xilinx_axidma_tx_segment, node);
1944 desc->async_tx.phys = head_segment->phys;
1946 desc->cyclic = true;
1947 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1948 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
1949 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1951 segment = list_last_entry(&desc->segments,
1952 struct xilinx_axidma_tx_segment,
1954 segment->hw.next_desc = (u32) head_segment->phys;
1956 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
1957 if (direction == DMA_MEM_TO_DEV) {
1958 head_segment->hw.control |= XILINX_DMA_BD_SOP;
1959 segment->hw.control |= XILINX_DMA_BD_EOP;
1962 return &desc->async_tx;
1965 xilinx_dma_free_tx_descriptor(chan, desc);
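/*
 * Illustrative sketch (not part of the original driver): a capture- or
 * audio-style client would typically arm a cyclic S2MM transfer roughly like
 * this, assuming @buf/@buf_len describe a DMA-mapped ring made up of
 * @period_len-sized periods and my_period_elapsed is a hypothetical
 * per-period callback:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_period_elapsed;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */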
1970 * xilinx_dma_prep_interleaved - prepare a descriptor for a
1971 * DMA_SLAVE transaction
1972 * @dchan: DMA channel
1973 * @xt: Interleaved template pointer
1974 * @flags: transfer ack flags
1976 * Return: Async transaction descriptor on success and NULL on failure
1978 static struct dma_async_tx_descriptor *
1979 xilinx_dma_prep_interleaved(struct dma_chan *dchan,
1980 struct dma_interleaved_template *xt,
1981 unsigned long flags)
1983 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1984 struct xilinx_dma_tx_descriptor *desc;
1985 struct xilinx_axidma_tx_segment *segment;
1986 struct xilinx_axidma_desc_hw *hw;
1988 if (!is_slave_direction(xt->dir))
1991 if (!xt->numf || !xt->sgl[0].size)
1994 if (xt->frame_size != 1)
1997 /* Allocate a transaction descriptor. */
1998 desc = xilinx_dma_alloc_tx_descriptor(chan);
2002 chan->direction = xt->dir;
2003 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2004 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2006 /* Get a free segment */
2007 segment = xilinx_axidma_alloc_tx_segment(chan);
2013 /* Fill in the descriptor */
2014 if (xt->dir != DMA_MEM_TO_DEV)
2015 hw->buf_addr = xt->dst_start;
2017 hw->buf_addr = xt->src_start;
2019 hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
2020 hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
2021 XILINX_DMA_BD_VSIZE_MASK;
2022 hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
2023 XILINX_DMA_BD_STRIDE_MASK;
2024 hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
2027 * Insert the segment into the descriptor segments
2030 list_add_tail(&segment->node, &desc->segments);
2033 segment = list_first_entry(&desc->segments,
2034 struct xilinx_axidma_tx_segment, node);
2035 desc->async_tx.phys = segment->phys;
2037 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2038 if (xt->dir == DMA_MEM_TO_DEV) {
2039 segment->hw.control |= XILINX_DMA_BD_SOP;
2040 segment = list_last_entry(&desc->segments,
2041 struct xilinx_axidma_tx_segment,
2043 segment->hw.control |= XILINX_DMA_BD_EOP;
2046 return &desc->async_tx;
2049 xilinx_dma_free_tx_descriptor(chan, desc);
2054 * xilinx_dma_terminate_all - Halt the channel and free descriptors
2055 * @dchan: Driver specific DMA Channel pointer
2057 * Return: '0' always.
2059 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2061 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2066 xilinx_dma_chan_reset(chan);
2068 err = chan->stop_transfer(chan);
2070 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2071 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
2075 /* Remove and free all of the descriptors in the lists */
2076 xilinx_dma_free_descriptors(chan);
2080 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2081 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2082 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2083 chan->cyclic = false;
2086 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2087 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2088 XILINX_CDMA_CR_SGMODE);
2094 * xilinx_vdma_channel_set_config - Configure VDMA channel
2095 * Run-time configuration for Axi VDMA, supports:
2096 * . halt the channel
2097 * . configure interrupt coalescing and inter-packet delay threshold
2098 * . start/stop parking
2101 * @dchan: DMA channel
2102 * @cfg: VDMA device configuration pointer
2104 * Return: '0' on success and failure value on error
2106 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2107 struct xilinx_vdma_config *cfg)
2109 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2113 return xilinx_dma_chan_reset(chan);
2115 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2117 chan->config.frm_dly = cfg->frm_dly;
2118 chan->config.park = cfg->park;
2120 /* genlock settings */
2121 chan->config.gen_lock = cfg->gen_lock;
2122 chan->config.master = cfg->master;
2124 dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2125 if (cfg->gen_lock && chan->genlock) {
2126 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2127 dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
2128 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2131 chan->config.frm_cnt_en = cfg->frm_cnt_en;
2132 chan->config.vflip_en = cfg->vflip_en;
2135 chan->config.park_frm = cfg->park_frm;
2137 chan->config.park_frm = -1;
2139 chan->config.coalesc = cfg->coalesc;
2140 chan->config.delay = cfg->delay;
2142 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2143 dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2144 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2145 chan->config.coalesc = cfg->coalesc;
2148 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2149 dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2150 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2151 chan->config.delay = cfg->delay;
2154 /* FSync Source selection */
2155 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2156 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2158 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2162 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
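/*
 * Illustrative sketch (not part of the original driver): a video client
 * would typically tune the VDMA channel before issuing transfers. The field
 * names come from struct xilinx_vdma_config in include/linux/dma/xilinx_dma.h;
 * the chosen values are just an example:
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_cnt_en = 1,
 *		.coalesc = 1,
 *		.park = 0,
 *	};
 *	int ret;
 *
 *	ret = xilinx_vdma_channel_set_config(chan, &cfg);
 *	if (ret)
 *		return ret;
 */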
2164 /* -----------------------------------------------------------------------------
2165 * Probe and remove
2166 */
2168 /**
2169 * xilinx_dma_chan_remove - Per Channel remove function
2170 * @chan: Driver specific DMA channel
2171 */
2172 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2173 {
2174 /* Disable all interrupts */
2175 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2176 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2178 if (chan->irq > 0)
2179 free_irq(chan->irq, chan);
2181 tasklet_kill(&chan->tasklet);
2183 list_del(&chan->common.device_node);
2184 }
2186 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2187 struct clk **tx_clk, struct clk **rx_clk,
2188 struct clk **sg_clk, struct clk **tmp_clk)
2189 {
2190 int err;
2192 *tmp_clk = NULL;
2194 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2195 if (IS_ERR(*axi_clk)) {
2196 err = PTR_ERR(*axi_clk);
2197 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2198 return err;
2199 }
2201 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2202 if (IS_ERR(*tx_clk))
2203 *tx_clk = NULL;
2205 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2206 if (IS_ERR(*rx_clk))
2207 *rx_clk = NULL;
2209 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2210 if (IS_ERR(*sg_clk))
2211 *sg_clk = NULL;
2213 err = clk_prepare_enable(*axi_clk);
2214 if (err) {
2215 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2216 return err;
2217 }
2219 err = clk_prepare_enable(*tx_clk);
2220 if (err) {
2221 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2222 goto err_disable_axiclk;
2223 }
2225 err = clk_prepare_enable(*rx_clk);
2226 if (err) {
2227 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2228 goto err_disable_txclk;
2229 }
2231 err = clk_prepare_enable(*sg_clk);
2232 if (err) {
2233 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2234 goto err_disable_rxclk;
2235 }
2237 return 0;
2239 err_disable_rxclk:
2240 clk_disable_unprepare(*rx_clk);
2241 err_disable_txclk:
2242 clk_disable_unprepare(*tx_clk);
2243 err_disable_axiclk:
2244 clk_disable_unprepare(*axi_clk);
2246 return err;
2247 }
2249 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2250 struct clk **dev_clk, struct clk **tmp_clk,
2251 struct clk **tmp1_clk, struct clk **tmp2_clk)
2252 {
2253 int err;
2255 *tmp_clk = NULL;
2256 *tmp1_clk = NULL;
2257 *tmp2_clk = NULL;
2259 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2260 if (IS_ERR(*axi_clk)) {
2261 err = PTR_ERR(*axi_clk);
2262 dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
2263 return err;
2264 }
2266 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2267 if (IS_ERR(*dev_clk)) {
2268 err = PTR_ERR(*dev_clk);
2269 dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
2270 return err;
2271 }
2273 err = clk_prepare_enable(*axi_clk);
2274 if (err) {
2275 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2276 return err;
2277 }
2279 err = clk_prepare_enable(*dev_clk);
2280 if (err) {
2281 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2282 goto err_disable_axiclk;
2283 }
2285 return 0;
2287 err_disable_axiclk:
2288 clk_disable_unprepare(*axi_clk);
2290 return err;
2291 }
2293 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2294 struct clk **tx_clk, struct clk **txs_clk,
2295 struct clk **rx_clk, struct clk **rxs_clk)
2296 {
2297 int err;
2299 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2300 if (IS_ERR(*axi_clk)) {
2301 err = PTR_ERR(*axi_clk);
2302 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2303 return err;
2304 }
2306 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2307 if (IS_ERR(*tx_clk))
2308 *tx_clk = NULL;
2310 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2311 if (IS_ERR(*txs_clk))
2312 *txs_clk = NULL;
2314 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2315 if (IS_ERR(*rx_clk))
2316 *rx_clk = NULL;
2318 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2319 if (IS_ERR(*rxs_clk))
2320 *rxs_clk = NULL;
2322 err = clk_prepare_enable(*axi_clk);
2323 if (err) {
2324 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2325 return err;
2326 }
2328 err = clk_prepare_enable(*tx_clk);
2329 if (err) {
2330 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2331 goto err_disable_axiclk;
2332 }
2334 err = clk_prepare_enable(*txs_clk);
2335 if (err) {
2336 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2337 goto err_disable_txclk;
2338 }
2340 err = clk_prepare_enable(*rx_clk);
2341 if (err) {
2342 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2343 goto err_disable_txsclk;
2344 }
2346 err = clk_prepare_enable(*rxs_clk);
2347 if (err) {
2348 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2349 goto err_disable_rxclk;
2350 }
2352 return 0;
2354 err_disable_rxclk:
2355 clk_disable_unprepare(*rx_clk);
2356 err_disable_txsclk:
2357 clk_disable_unprepare(*txs_clk);
2358 err_disable_txclk:
2359 clk_disable_unprepare(*tx_clk);
2360 err_disable_axiclk:
2361 clk_disable_unprepare(*axi_clk);
2363 return err;
2364 }
2366 static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2367 {
2368 clk_disable_unprepare(xdev->rxs_clk);
2369 clk_disable_unprepare(xdev->rx_clk);
2370 clk_disable_unprepare(xdev->txs_clk);
2371 clk_disable_unprepare(xdev->tx_clk);
2372 clk_disable_unprepare(xdev->axi_clk);
2373 }
2375 /**
2376 * xilinx_dma_chan_probe - Per Channel Probing
2377 * It gets channel features from the device tree entry and
2378 * initializes special channel handling routines
2379 *
2380 * @xdev: Driver specific device structure
2381 * @node: Device node
2382 * @chan_id: DMA Channel id
2383 *
2384 * Return: '0' on success and failure value on error
2385 */
2386 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2387 struct device_node *node, int chan_id)
2388 {
2389 struct xilinx_dma_chan *chan;
2390 bool has_dre = false;
2391 u32 value, width;
2392 int err;
2394 /* Allocate and initialize the channel structure */
2395 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2396 if (!chan)
2397 return -ENOMEM;
2399 chan->dev = xdev->dev;
2400 chan->xdev = xdev;
2401 chan->desc_pendingcount = 0x0;
2402 chan->ext_addr = xdev->ext_addr;
2403 /* This variable ensures that descriptors are not
2404 * submitted while the DMA engine is busy. It is added to
2405 * avoid polling for a bit in the status register to know
2406 * the DMA state in the driver hot path.
2407 */
2408 chan->idle = true;
2410 spin_lock_init(&chan->lock);
2411 INIT_LIST_HEAD(&chan->pending_list);
2412 INIT_LIST_HEAD(&chan->done_list);
2413 INIT_LIST_HEAD(&chan->active_list);
2414 INIT_LIST_HEAD(&chan->free_seg_list);
2416 /* Retrieve the channel properties from the device tree */
2417 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2419 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2421 err = of_property_read_u32(node, "xlnx,datawidth", &value);
2422 if (err) {
2423 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2424 return err;
2425 }
2426 width = value >> 3; /* Convert bits to bytes */
2428 /* If data width is greater than 8 bytes, DRE is not in hw */
2429 if (width > 8)
2430 has_dre = false;
2432 if (!has_dre)
2433 xdev->common.copy_align = fls(width - 1);
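/*
 * Editor's note (worked example, assuming copy_align keeps its usual
 * dmaengine meaning of a power-of-two exponent): a 64-bit stream
 * (xlnx,datawidth = 64) gives width = 8; without DRE,
 * copy_align = fls(8 - 1) = 3, i.e. buffers must be 8-byte aligned.
 */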
2435 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2436 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2437 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2438 chan->direction = DMA_MEM_TO_DEV;
2439 chan->id = chan_id;
2440 chan->tdest = chan_id;
2442 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2443 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2444 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2445 chan->config.park = 1;
2447 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2448 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2449 chan->flush_on_fsync = true;
2450 }
2451 } else if (of_device_is_compatible(node,
2452 "xlnx,axi-vdma-s2mm-channel") ||
2453 of_device_is_compatible(node,
2454 "xlnx,axi-dma-s2mm-channel")) {
2455 chan->direction = DMA_DEV_TO_MEM;
2456 chan->id = chan_id;
2457 chan->tdest = chan_id - xdev->nr_channels;
2458 chan->has_vflip = of_property_read_bool(node,
2459 "xlnx,enable-vert-flip");
2460 if (chan->has_vflip) {
2461 chan->config.vflip_en = dma_read(chan,
2462 XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2463 XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2464 }
2466 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2467 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2468 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2469 chan->config.park = 1;
2471 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2472 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2473 chan->flush_on_fsync = true;
2474 }
2475 } else {
2476 dev_err(xdev->dev, "Invalid channel compatible node\n");
2477 return -EINVAL;
2478 }
2480 /* Request the interrupt */
2481 chan->irq = irq_of_parse_and_map(node, 0);
2482 err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
2483 "xilinx-dma-controller", chan);
2485 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2489 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2490 chan->start_transfer = xilinx_dma_start_transfer;
2491 chan->stop_transfer = xilinx_dma_stop_transfer;
2492 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2493 chan->start_transfer = xilinx_cdma_start_transfer;
2494 chan->stop_transfer = xilinx_cdma_stop_transfer;
2495 } else {
2496 chan->start_transfer = xilinx_vdma_start_transfer;
2497 chan->stop_transfer = xilinx_dma_stop_transfer;
2498 }
2500 /* check if SG is enabled (only for AXIDMA and CDMA) */
2501 if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
2502 if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
2503 XILINX_DMA_DMASR_SG_MASK)
2504 chan->has_sg = true;
2505 dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
2506 chan->has_sg ? "enabled" : "disabled");
2507 }
2509 /* Initialize the tasklet */
2510 tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
2511 (unsigned long)chan);
2513 /*
2514 * Initialize the DMA channel and add it to the DMA engine channels
2515 * list.
2516 */
2517 chan->common.device = &xdev->common;
2519 list_add_tail(&chan->common.device_node, &xdev->common.channels);
2520 xdev->chan[chan->id] = chan;
2522 /* Reset the channel */
2523 err = xilinx_dma_chan_reset(chan);
2525 dev_err(xdev->dev, "Reset channel failed\n");
2532 /**
2533 * xilinx_dma_child_probe - Per child node probe
2534 * It gets the number of dma-channels per child node from
2535 * the device tree and initializes all the channels.
2536 *
2537 * @xdev: Driver specific device structure
2538 * @node: Device node
2539 *
2540 * Return: '0' always.
2541 */
2542 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2543 struct device_node *node)
2544 {
2545 int ret, i, nr_channels = 1;
2547 ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2548 if ((ret < 0) && xdev->mcdma)
2549 dev_warn(xdev->dev, "missing dma-channels property\n");
2551 for (i = 0; i < nr_channels; i++)
2552 xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
2554 xdev->nr_channels += nr_channels;
2556 return 0;
2557 }
2559 /**
2560 * of_dma_xilinx_xlate - Translation function
2561 * @dma_spec: Pointer to DMA specifier as found in the device tree
2562 * @ofdma: Pointer to DMA controller data
2563 *
2564 * Return: DMA channel pointer on success and NULL on error
2565 */
2566 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2567 struct of_dma *ofdma)
2568 {
2569 struct xilinx_dma_device *xdev = ofdma->of_dma_data;
2570 int chan_id = dma_spec->args[0];
2572 if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
2573 return NULL;
2575 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2576 }
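/*
 * Illustrative sketch (editor's addition): per the xlate above, the single
 * cell in a consumer's "dmas" entry selects the channel index. A hypothetical
 * consumer binding could therefore look roughly like:
 *
 *	dmas = <&axi_dma_0 0>, <&axi_dma_0 1>;
 *	dma-names = "tx", "rx";
 *
 * and the matching request in the consumer driver:
 */
#include <linux/dmaengine.h>

static struct dma_chan *example_get_rx_chan(struct device *dev)
{
	/* Resolved through of_dma_xilinx_xlate(); returns ERR_PTR() on failure */
	return dma_request_chan(dev, "rx");
}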
2578 static const struct xilinx_dma_config axidma_config = {
2579 .dmatype = XDMA_TYPE_AXIDMA,
2580 .clk_init = axidma_clk_init,
2581 };
2583 static const struct xilinx_dma_config axicdma_config = {
2584 .dmatype = XDMA_TYPE_CDMA,
2585 .clk_init = axicdma_clk_init,
2586 };
2588 static const struct xilinx_dma_config axivdma_config = {
2589 .dmatype = XDMA_TYPE_VDMA,
2590 .clk_init = axivdma_clk_init,
2591 };
2593 static const struct of_device_id xilinx_dma_of_ids[] = {
2594 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2595 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2596 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
2599 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
2601 /**
2602 * xilinx_dma_probe - Driver probe function
2603 * @pdev: Pointer to the platform_device structure
2604 *
2605 * Return: '0' on success and failure value on error
2606 */
2607 static int xilinx_dma_probe(struct platform_device *pdev)
2608 {
2609 int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
2610 struct clk **, struct clk **, struct clk **)
2611 = axivdma_clk_init;
2612 struct device_node *node = pdev->dev.of_node;
2613 struct xilinx_dma_device *xdev;
2614 struct device_node *child, *np = pdev->dev.of_node;
2615 struct resource *io;
2616 u32 num_frames, addr_width, len_width;
2617 int i, err;
2619 /* Allocate and initialize the DMA engine structure */
2620 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2621 if (!xdev)
2622 return -ENOMEM;
2624 xdev->dev = &pdev->dev;
2625 if (np) {
2626 const struct of_device_id *match;
2628 match = of_match_node(xilinx_dma_of_ids, np);
2629 if (match && match->data) {
2630 xdev->dma_config = match->data;
2631 clk_init = xdev->dma_config->clk_init;
2632 }
2633 }
2635 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
2636 &xdev->rx_clk, &xdev->rxs_clk);
2637 if (err)
2638 return err;
2640 /* Request and map I/O memory */
2641 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2642 xdev->regs = devm_ioremap_resource(&pdev->dev, io);
2643 if (IS_ERR(xdev->regs))
2644 return PTR_ERR(xdev->regs);
2646 /* Retrieve the DMA engine properties from the device tree */
2647 xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
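/*
 * Editor's note (worked example, assuming the driver's default transfer
 * length field of 23 bits, as the warning below also suggests): the default
 * mask is GENMASK(22, 0) = 0x7fffff, i.e. a single descriptor can move at
 * most 8 MiB - 1 bytes unless xlnx,sg-length-width widens it.
 */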
2649 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2650 xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
2651 if (!of_property_read_u32(node, "xlnx,sg-length-width",
2652 &len_width)) {
2653 if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
2654 len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
2655 dev_warn(xdev->dev,
2656 "invalid xlnx,sg-length-width property value. Using default width\n");
2657 } else {
2658 if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
2659 dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
2660 xdev->max_buffer_len =
2661 GENMASK(len_width - 1, 0);
2662 }
2663 }
2664 }
2666 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2667 err = of_property_read_u32(node, "xlnx,num-fstores",
2668 &num_frames);
2669 if (err < 0) {
2670 dev_err(xdev->dev,
2671 "missing xlnx,num-fstores property\n");
2672 return err;
2673 }
2675 err = of_property_read_u32(node, "xlnx,flush-fsync",
2676 &xdev->flush_on_fsync);
2677 if (err < 0)
2678 dev_warn(xdev->dev,
2679 "missing xlnx,flush-fsync property\n");
2680 }
2682 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
2684 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
2686 if (addr_width > 32)
2687 xdev->ext_addr = true;
2689 xdev->ext_addr = false;
2691 /* Set the dma mask bits */
2692 dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
2694 /* Initialize the DMA engine */
2695 xdev->common.dev = &pdev->dev;
2697 INIT_LIST_HEAD(&xdev->common.channels);
2698 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
2699 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2700 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2701 }
2703 xdev->common.device_alloc_chan_resources =
2704 xilinx_dma_alloc_chan_resources;
2705 xdev->common.device_free_chan_resources =
2706 xilinx_dma_free_chan_resources;
2707 xdev->common.device_terminate_all = xilinx_dma_terminate_all;
2708 xdev->common.device_tx_status = xilinx_dma_tx_status;
2709 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
2710 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2711 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
2712 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
2713 xdev->common.device_prep_dma_cyclic =
2714 xilinx_dma_prep_dma_cyclic;
2715 xdev->common.device_prep_interleaved_dma =
2716 xilinx_dma_prep_interleaved;
2717 /* Residue calculation is supported only by AXI DMA */
2718 xdev->common.residue_granularity =
2719 DMA_RESIDUE_GRANULARITY_SEGMENT;
2720 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2721 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
2722 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
2723 } else {
2724 xdev->common.device_prep_interleaved_dma =
2725 xilinx_vdma_dma_prep_interleaved;
2726 }
2728 platform_set_drvdata(pdev, xdev);
2730 /* Initialize the channels */
2731 for_each_child_of_node(node, child) {
2732 err = xilinx_dma_child_probe(xdev, child);
2733 if (err < 0)
2734 goto disable_clks;
2735 }
2737 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2738 for (i = 0; i < xdev->nr_channels; i++)
2739 if (xdev->chan[i])
2740 xdev->chan[i]->num_frms = num_frames;
2741 }
2743 /* Register the DMA engine with the core */
2744 dma_async_device_register(&xdev->common);
2746 err = of_dma_controller_register(node, of_dma_xilinx_xlate,
2747 xdev);
2748 if (err < 0) {
2749 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
2750 dma_async_device_unregister(&xdev->common);
2751 goto error;
2752 }
2754 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2755 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
2756 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
2757 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
2758 else
2759 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
2761 return 0;
2763 disable_clks:
2764 xdma_disable_allclks(xdev);
2765 error:
2766 for (i = 0; i < xdev->nr_channels; i++)
2767 if (xdev->chan[i])
2768 xilinx_dma_chan_remove(xdev->chan[i]);
2770 return err;
2771 }
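/*
 * Illustrative sketch (editor's addition): the capabilities registered in
 * probe above are consumed through the generic dmaengine slave API. A
 * hypothetical AXI DMA client sending one pre-mapped buffer to the stream
 * side could look roughly like this; buffer mapping and error handling are
 * elided and the example_* name is made up.
 */
#include <linux/dmaengine.h>

static int example_send_buffer(struct dma_chan *chan, dma_addr_t dma_buf,
			       size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_single(chan, dma_buf, len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EIO;

	dmaengine_submit(tx);		/* queue on the channel's pending list */
	dma_async_issue_pending(chan);	/* kicks xilinx_dma_issue_pending() */
	return 0;
}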
2773 /**
2774 * xilinx_dma_remove - Driver remove function
2775 * @pdev: Pointer to the platform_device structure
2776 *
2777 * Return: Always '0'
2778 */
2779 static int xilinx_dma_remove(struct platform_device *pdev)
2780 {
2781 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
2782 int i;
2784 of_dma_controller_free(pdev->dev.of_node);
2786 dma_async_device_unregister(&xdev->common);
2788 for (i = 0; i < xdev->nr_channels; i++)
2789 if (xdev->chan[i])
2790 xilinx_dma_chan_remove(xdev->chan[i]);
2792 xdma_disable_allclks(xdev);
2794 return 0;
2795 }
2797 static struct platform_driver xilinx_vdma_driver = {
2798 .driver = {
2799 .name = "xilinx-vdma",
2800 .of_match_table = xilinx_dma_of_ids,
2801 },
2802 .probe = xilinx_dma_probe,
2803 .remove = xilinx_dma_remove,
2804 };
2806 module_platform_driver(xilinx_vdma_driver);
2808 MODULE_AUTHOR("Xilinx, Inc.");
2809 MODULE_DESCRIPTION("Xilinx VDMA driver");
2810 MODULE_LICENSE("GPL v2");