// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
 *
 * Derived from linux/drivers/dma/bcm63xx-iudma.c:
 *	Copyright (C) 2015 Simon Arlott <simon@fire.lp0.eu>
 *
 * Derived from linux/drivers/net/ethernet/broadcom/bcm63xx_enet.c:
 *	Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/include/bcm963xx/63268_map_part.h:
 *	Copyright (C) 2000-2010 Broadcom Corporation
 *
 * Derived from bcm963xx_4.12L.06B_consumer/bcmdrivers/opensource/net/enet/impl4/bcmenet.c:
 *	Copyright (C) 2010 Broadcom Corporation
 */

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <dma-uclass.h>
#include <malloc.h>
#include <memalign.h>
#include <net.h>
#include <reset.h>
#include <asm/io.h>

/* ring sizes: tx is sent synchronously, rx keeps a small buffer pool */
#define DMA_RX_DESC 6
#define DMA_TX_DESC 1

/* DMA Channels */
#define DMA_CHAN_FLOWC(x) ((x) >> 1)
#define DMA_CHAN_MAX 16
#define DMA_CHAN_SIZE 0x10
#define DMA_CHAN_TOUT 500

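/*
 * Channel numbering convention (see bcm6348_iudma_chan_is_rx() below):
 * even channel ids are rx, odd ids are tx, and each rx/tx pair shares
 * a single flow control context, which is why DMA_CHAN_FLOWC() drops
 * bit 0 of the channel id.
 */
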
/* DMA Global Configuration register */
#define DMA_CFG_REG 0x00
#define DMA_CFG_ENABLE_SHIFT 0
#define DMA_CFG_ENABLE_MASK (1 << DMA_CFG_ENABLE_SHIFT)
#define DMA_CFG_FLOWC_ENABLE(x) BIT(DMA_CHAN_FLOWC(x) + 1)
#define DMA_CFG_NCHANS_SHIFT 24
#define DMA_CFG_NCHANS_MASK (0xf << DMA_CFG_NCHANS_SHIFT)

/* DMA Global Flow Control registers */
#define DMA_FLOWC_THR_LO_REG(x) (0x04 + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_THR_HI_REG(x) (0x08 + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_ALLOC_REG(x) (0x0c + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_ALLOC_FORCE_SHIFT 31
#define DMA_FLOWC_ALLOC_FORCE_MASK (1 << DMA_FLOWC_ALLOC_FORCE_SHIFT)

/* DMA Global Reset register */
#define DMA_RST_REG 0x34
#define DMA_RST_CHAN_SHIFT 0
#define DMA_RST_CHAN_MASK(x) (1 << (x))

/* DMA Channel Configuration register */
#define DMAC_CFG_REG(x) (DMA_CHAN_SIZE * (x) + 0x00)
#define DMAC_CFG_ENABLE_SHIFT 0
#define DMAC_CFG_ENABLE_MASK (1 << DMAC_CFG_ENABLE_SHIFT)
#define DMAC_CFG_PKT_HALT_SHIFT 1
#define DMAC_CFG_PKT_HALT_MASK (1 << DMAC_CFG_PKT_HALT_SHIFT)
#define DMAC_CFG_BRST_HALT_SHIFT 2
#define DMAC_CFG_BRST_HALT_MASK (1 << DMAC_CFG_BRST_HALT_SHIFT)

/* DMA Channel Max Burst Length register */
#define DMAC_BURST_REG(x) (DMA_CHAN_SIZE * (x) + 0x0c)

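/*
 * Each channel owns four consecutive 32-bit words in the controller
 * SRAM, DMA_CHAN_SIZE bytes apart per channel: the ring start address
 * plus the internal state, length/status and buffer pointer words of
 * the descriptor currently being processed.
 */
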
/* DMA SRAM Descriptor Ring Start register */
#define DMAS_RSTART_REG(x) (DMA_CHAN_SIZE * (x) + 0x00)

/* DMA SRAM State/Bytes done/ring offset register */
#define DMAS_STATE_DATA_REG(x) (DMA_CHAN_SIZE * (x) + 0x04)

/* DMA SRAM Buffer Descriptor status and length register */
#define DMAS_DESC_LEN_STATUS_REG(x) (DMA_CHAN_SIZE * (x) + 0x08)

/* DMA SRAM Buffer Descriptor base buffer pointer register */
#define DMAS_DESC_BASE_BUFPTR_REG(x) (DMA_CHAN_SIZE * (x) + 0x0c)

/* DMA Descriptor Status */
#define DMAD_ST_CRC_SHIFT 8
#define DMAD_ST_CRC_MASK (1 << DMAD_ST_CRC_SHIFT)
#define DMAD_ST_WRAP_SHIFT 12
#define DMAD_ST_WRAP_MASK (1 << DMAD_ST_WRAP_SHIFT)
#define DMAD_ST_SOP_SHIFT 13
#define DMAD_ST_SOP_MASK (1 << DMAD_ST_SOP_SHIFT)
#define DMAD_ST_EOP_SHIFT 14
#define DMAD_ST_EOP_MASK (1 << DMAD_ST_EOP_SHIFT)
#define DMAD_ST_OWN_SHIFT 15
#define DMAD_ST_OWN_MASK (1 << DMAD_ST_OWN_SHIFT)

/* DMA Descriptor Status (BCM6348 error bits) */
#define DMAD6348_ST_OV_ERR_SHIFT 0
#define DMAD6348_ST_OV_ERR_MASK (1 << DMAD6348_ST_OV_ERR_SHIFT)
#define DMAD6348_ST_CRC_ERR_SHIFT 1
#define DMAD6348_ST_CRC_ERR_MASK (1 << DMAD6348_ST_CRC_ERR_SHIFT)
#define DMAD6348_ST_RX_ERR_SHIFT 2
#define DMAD6348_ST_RX_ERR_MASK (1 << DMAD6348_ST_RX_ERR_SHIFT)
#define DMAD6348_ST_OS_ERR_SHIFT 4
#define DMAD6348_ST_OS_ERR_MASK (1 << DMAD6348_ST_OS_ERR_SHIFT)
#define DMAD6348_ST_UN_ERR_SHIFT 9
#define DMAD6348_ST_UN_ERR_MASK (1 << DMAD6348_ST_UN_ERR_SHIFT)

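/*
 * Descriptor ownership protocol: the CPU hands a descriptor to the
 * controller by setting DMAD_ST_OWN_MASK and the controller clears it
 * once the transfer has completed. DMAD_ST_WRAP_MASK marks the last
 * descriptor of the ring, while SOP/EOP delimit a packet (this driver
 * always uses single-descriptor packets, so both are expected on rx
 * and always set on tx).
 */
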
/*
 * Hardware descriptor: length and status packed in the first 32-bit
 * word, then the physical buffer address (field order as in the
 * upstream driver this file derives from).
 */
struct bcm6348_dma_desc {
	uint16_t length;
	uint16_t status;
	uint32_t address;
};

struct bcm6348_chan_priv {
	void __iomem *dma_ring;
	uint8_t dma_ring_size;
	uint8_t desc_id;
	uint8_t desc_cnt;
	bool *busy_desc;
	bool running;
};

struct bcm6348_iudma_hw {
	uint16_t err_mask;
};

struct bcm6348_iudma_priv {
	const struct bcm6348_iudma_hw *hw;
	void __iomem *base;
	void __iomem *chan;
	void __iomem *sram;
	struct bcm6348_chan_priv **ch_priv;
	uint8_t n_channels;
};

static inline bool bcm6348_iudma_chan_is_rx(uint8_t ch)
{
	return !(ch & 1);
}

static inline void bcm6348_iudma_fdc(void *ptr, ulong size)
{
	ulong start = (ulong) ptr;

	flush_dcache_range(start, start + size);
}

static inline void bcm6348_iudma_idc(void *ptr, ulong size)
{
	ulong start = (ulong) ptr;

	invalidate_dcache_range(start, start + size);
}

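/*
 * The descriptor rings and packet buffers live in cached memory while
 * the controller accesses them directly over the bus: every CPU write
 * must be flushed (bcm6348_iudma_fdc) before the hardware may read it,
 * and hardware-written data must be invalidated (bcm6348_iudma_idc)
 * before the CPU reads it back.
 */
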
static void bcm6348_iudma_chan_stop(struct bcm6348_iudma_priv *priv,
				    uint8_t ch)
{
	unsigned int timeout = DMA_CHAN_TOUT;

	do {
		uint32_t cfg, halt;

		/* try a packet halt first, then fall back to a burst halt */
		if (timeout > DMA_CHAN_TOUT / 2)
			halt = DMAC_CFG_PKT_HALT_MASK;
		else
			halt = DMAC_CFG_BRST_HALT_MASK;

		/* try to stop dma channel */
		writel_be(halt, priv->chan + DMAC_CFG_REG(ch));
		mb();

		/* check if channel was stopped */
		cfg = readl_be(priv->chan + DMAC_CFG_REG(ch));
		if (!(cfg & DMAC_CFG_ENABLE_MASK))
			break;

		udelay(1);
	} while (--timeout);

	if (!timeout)
		pr_err("unable to stop channel %u\n", ch);

	/* reset dma channel */
	setbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch));
	mb();
	clrbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch));
}

static int bcm6348_iudma_disable(struct dma *dma)
{
	struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];

	/* stop dma channel */
	bcm6348_iudma_chan_stop(priv, dma->id);

	/* dma flow control */
	if (bcm6348_iudma_chan_is_rx(dma->id))
		writel_be(DMA_FLOWC_ALLOC_FORCE_MASK,
			  priv->base + DMA_FLOWC_ALLOC_REG(dma->id));

	/* init channel config */
	ch_priv->running = false;
	ch_priv->desc_id = 0;
	if (bcm6348_iudma_chan_is_rx(dma->id))
		ch_priv->desc_cnt = 0;
	else
		ch_priv->desc_cnt = ch_priv->dma_ring_size;

	return 0;
}

static int bcm6348_iudma_enable(struct dma *dma)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
	uint8_t i;

	/* dma ring init */
	for (i = 0; i < ch_priv->desc_cnt; i++) {
		if (bcm6348_iudma_chan_is_rx(dma->id)) {
			ch_priv->busy_desc[i] = false;
			dma_desc->status |= DMAD_ST_OWN_MASK;
		} else {
			dma_desc->status = 0;
			dma_desc->length = 0;
			dma_desc->address = 0;
		}

		if (i == ch_priv->desc_cnt - 1)
			dma_desc->status |= DMAD_ST_WRAP_MASK;

		dma_desc++;
	}

	/* init to first descriptor */
	ch_priv->desc_id = 0;

	/* force cache writeback */
	bcm6348_iudma_fdc(ch_priv->dma_ring,
			  sizeof(*dma_desc) * ch_priv->desc_cnt);

	/* clear sram */
	writel_be(0, priv->sram + DMAS_STATE_DATA_REG(dma->id));
	writel_be(0, priv->sram + DMAS_DESC_LEN_STATUS_REG(dma->id));
	writel_be(0, priv->sram + DMAS_DESC_BASE_BUFPTR_REG(dma->id));

	/* set dma ring start */
	writel_be(virt_to_phys(ch_priv->dma_ring),
		  priv->sram + DMAS_RSTART_REG(dma->id));

	/* set flow control: low/high thresholds at 1/3 and 2/3 of the ring */
	if (bcm6348_iudma_chan_is_rx(dma->id)) {
		u32 val;

		setbits_be32(priv->base + DMA_CFG_REG,
			     DMA_CFG_FLOWC_ENABLE(dma->id));

		val = ch_priv->desc_cnt / 3;
		writel_be(val, priv->base + DMA_FLOWC_THR_LO_REG(dma->id));

		val = (ch_priv->desc_cnt * 2) / 3;
		writel_be(val, priv->base + DMA_FLOWC_THR_HI_REG(dma->id));

		writel_be(0, priv->base + DMA_FLOWC_ALLOC_REG(dma->id));
	}

	/* set dma max burst */
	writel_be(ch_priv->desc_cnt,
		  priv->chan + DMAC_BURST_REG(dma->id));

	/* kick rx dma channel */
	if (bcm6348_iudma_chan_is_rx(dma->id))
		setbits_be32(priv->chan + DMAC_CFG_REG(dma->id),
			     DMAC_CFG_ENABLE_MASK);

	/* channel is now enabled */
	ch_priv->running = true;

	return 0;
}

static int bcm6348_iudma_request(struct dma *dma)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv;

	/* check if channel is valid */
	if (dma->id >= priv->n_channels)
		return -ENODEV;

	/* alloc channel private data */
	priv->ch_priv[dma->id] = calloc(1, sizeof(struct bcm6348_chan_priv));
	if (!priv->ch_priv[dma->id])
		return -ENOMEM;
	ch_priv = priv->ch_priv[dma->id];

	/* alloc dma ring */
	if (bcm6348_iudma_chan_is_rx(dma->id))
		ch_priv->dma_ring_size = DMA_RX_DESC;
	else
		ch_priv->dma_ring_size = DMA_TX_DESC;

	ch_priv->dma_ring =
		malloc_cache_aligned(sizeof(struct bcm6348_dma_desc) *
				     ch_priv->dma_ring_size);
	if (!ch_priv->dma_ring)
		return -ENOMEM;

	/* init channel config */
	ch_priv->running = false;
	ch_priv->desc_id = 0;
	if (bcm6348_iudma_chan_is_rx(dma->id)) {
		ch_priv->desc_cnt = 0;
		/* track one busy flag per ring slot */
		ch_priv->busy_desc = calloc(ch_priv->dma_ring_size,
					    sizeof(bool));
	} else {
		ch_priv->desc_cnt = ch_priv->dma_ring_size;
		ch_priv->busy_desc = NULL;
	}

	return 0;
}

static int bcm6348_iudma_receive(struct dma *dma, void **dst, void *metadata)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	const struct bcm6348_iudma_hw *hw = priv->hw;
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
	int ret;

	if (!ch_priv->running)
		return -EINVAL;

	/* get dma ring descriptor address */
	dma_desc += ch_priv->desc_id;

	/* invalidate cache data */
	bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc));

	/* check dma own */
	if (dma_desc->status & DMAD_ST_OWN_MASK)
		return -EAGAIN;

	/* check pkt */
	if (!(dma_desc->status & DMAD_ST_EOP_MASK) ||
	    !(dma_desc->status & DMAD_ST_SOP_MASK) ||
	    (dma_desc->status & hw->err_mask)) {
		pr_err("invalid pkt received (ch=%ld desc=%u) (st=%04x)\n",
		       dma->id, ch_priv->desc_id, dma_desc->status);
		ret = -EAGAIN;
	} else {
		/* set dma buffer address */
		*dst = phys_to_virt(dma_desc->address);

		/* invalidate cache data */
		bcm6348_iudma_idc(*dst, dma_desc->length);

		/* return packet length */
		ret = dma_desc->length;
	}

	/* busy dma descriptor */
	ch_priv->busy_desc[ch_priv->desc_id] = true;

	/* increment dma descriptor */
	ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt;

	return ret;
}

static int bcm6348_iudma_send(struct dma *dma, void *src, size_t len,
			      void *metadata)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc;
	uint16_t status;

	if (!ch_priv->running)
		return -EINVAL;

	/* flush cache */
	bcm6348_iudma_fdc(src, len);

	/* get dma ring descriptor address */
	dma_desc = ch_priv->dma_ring;
	dma_desc += ch_priv->desc_id;

	/* config dma descriptor */
	status = (DMAD_ST_OWN_MASK |
		  DMAD_ST_EOP_MASK |
		  DMAD_ST_CRC_MASK |
		  DMAD_ST_SOP_MASK);
	if (ch_priv->desc_id == ch_priv->desc_cnt - 1)
		status |= DMAD_ST_WRAP_MASK;

	/* set dma descriptor */
	dma_desc->address = virt_to_phys(src);
	dma_desc->length = len;
	dma_desc->status = status;

	/* flush cache */
	bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc));

	/* kick tx dma channel */
	setbits_be32(priv->chan + DMAC_CFG_REG(dma->id), DMAC_CFG_ENABLE_MASK);

	/* poll dma status until the hardware releases the descriptor */
	do {
		/* invalidate cache */
		bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc));

		if (!(dma_desc->status & DMAD_ST_OWN_MASK))
			break;
	} while (1);

	/* increment dma descriptor */
	ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt;

	return 0;
}

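/*
 * A minimal sketch of how a client driver might drive these ops through
 * the DMA uclass (error handling and buffer management omitted; the
 * "rx"/"tx" channel names are an assumed dma-names binding, not
 * something this driver mandates):
 *
 *	struct dma rx, tx;
 *	u8 rx_buf[PKTSIZE_ALIGN];
 *	void *pkt;
 *	int len;
 *
 *	dma_get_by_name(dev, "rx", &rx);
 *	dma_get_by_name(dev, "tx", &tx);
 *	dma_prepare_rcv_buf(&rx, rx_buf, PKTSIZE_ALIGN);
 *	dma_enable(&rx);
 *	dma_enable(&tx);
 *
 *	dma_send(&tx, tx_pkt, tx_len, NULL);  - polls until OWN clears
 *	len = dma_receive(&rx, &pkt, NULL);   - -EAGAIN if nothing pending
 */
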
static int bcm6348_iudma_free_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
	uint16_t status;
	uint8_t i;
	u32 cfg;

	/* get dirty dma descriptor */
	for (i = 0; i < ch_priv->desc_cnt; i++) {
		if (phys_to_virt(dma_desc->address) == dst)
			break;

		dma_desc++;
	}

	/* dma descriptor not found */
	if (i == ch_priv->desc_cnt) {
		pr_err("dirty dma descriptor not found\n");
		return -ENOENT;
	}

	/* invalidate cache */
	bcm6348_iudma_idc(ch_priv->dma_ring,
			  sizeof(*dma_desc) * ch_priv->desc_cnt);

	/* free dma descriptor */
	ch_priv->busy_desc[i] = false;

	status = DMAD_ST_OWN_MASK;
	if (i == ch_priv->desc_cnt - 1)
		status |= DMAD_ST_WRAP_MASK;

	dma_desc->status |= status;
	dma_desc->length = PKTSIZE_ALIGN;

	/* tell dma we allocated one buffer */
	writel_be(1, priv->base + DMA_FLOWC_ALLOC_REG(dma->id));

	/* flush cache */
	bcm6348_iudma_fdc(ch_priv->dma_ring,
			  sizeof(*dma_desc) * ch_priv->desc_cnt);

	/* kick rx dma channel if disabled */
	cfg = readl_be(priv->chan + DMAC_CFG_REG(dma->id));
	if (!(cfg & DMAC_CFG_ENABLE_MASK))
		setbits_be32(priv->chan + DMAC_CFG_REG(dma->id),
			     DMAC_CFG_ENABLE_MASK);

	return 0;
}

static int bcm6348_iudma_add_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;

	/* no more dma descriptors available */
	if (ch_priv->desc_cnt == ch_priv->dma_ring_size) {
		pr_err("max number of buffers reached\n");
		return -EINVAL;
	}

	/* get next dma descriptor */
	dma_desc += ch_priv->desc_cnt;

	/* init dma descriptor */
	dma_desc->address = virt_to_phys(dst);
	dma_desc->length = size;
	dma_desc->status = 0;

	/* flush cache */
	bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc));

	/* increment dma descriptors */
	ch_priv->desc_cnt++;

	return 0;
}

static int bcm6348_iudma_prepare_rcv_buf(struct dma *dma, void *dst,
					 size_t size)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];

	/* recycle a consumed buffer if running, add a new one otherwise */
	if (ch_priv->running)
		return bcm6348_iudma_free_rcv_buf(dma, dst, size);

	return bcm6348_iudma_add_rcv_buf(dma, dst, size);
}

static const struct dma_ops bcm6348_iudma_ops = {
	.disable = bcm6348_iudma_disable,
	.enable = bcm6348_iudma_enable,
	.prepare_rcv_buf = bcm6348_iudma_prepare_rcv_buf,
	.request = bcm6348_iudma_request,
	.receive = bcm6348_iudma_receive,
	.send = bcm6348_iudma_send,
};

static const struct bcm6348_iudma_hw bcm6348_hw = {
	.err_mask = (DMAD6348_ST_OV_ERR_MASK |
		     DMAD6348_ST_CRC_ERR_MASK |
		     DMAD6348_ST_RX_ERR_MASK |
		     DMAD6348_ST_OS_ERR_MASK |
		     DMAD6348_ST_UN_ERR_MASK),
};

/* BCM6368 reports no per-descriptor error bits, so nothing is masked */
static const struct bcm6348_iudma_hw bcm6368_hw = {
	.err_mask = 0,
};

static const struct udevice_id bcm6348_iudma_ids[] = {
	{
		.compatible = "brcm,bcm6348-iudma",
		.data = (ulong)&bcm6348_hw,
	}, {
		.compatible = "brcm,bcm6368-iudma",
		.data = (ulong)&bcm6368_hw,
	}, { /* sentinel */ }
};

static int bcm6348_iudma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct bcm6348_iudma_priv *priv = dev_get_priv(dev);
	const struct bcm6348_iudma_hw *hw =
		(const struct bcm6348_iudma_hw *)dev_get_driver_data(dev);
	uint8_t ch;
	int i;

	uc_priv->supported = (DMA_SUPPORTS_DEV_TO_MEM |
			      DMA_SUPPORTS_MEM_TO_DEV);
	priv->hw = hw;

	/* dma global, channel and sram base addresses */
	priv->base = dev_remap_addr_name(dev, "dma");
	priv->chan = dev_remap_addr_name(dev, "dma-channels");
	priv->sram = dev_remap_addr_name(dev, "dma-sram");
	if (!priv->base || !priv->chan || !priv->sram)
		return -EINVAL;

	/* get number of channels */
	priv->n_channels = dev_read_u32_default(dev, "dma-channels", 8);
	if (priv->n_channels > DMA_CHAN_MAX)
		return -EINVAL;

	/* try to enable all listed clocks */
	for (i = 0; ; i++) {
		struct clk clk;
		int ret;

		ret = clk_get_by_index(dev, i, &clk);
		if (ret < 0)
			break;
		ret = clk_enable(&clk);
		if (ret < 0) {
			pr_err("error enabling clock %d\n", i);
			return ret;
		}
		ret = clk_free(&clk);
		if (ret < 0) {
			pr_err("error freeing clock %d\n", i);
			return ret;
		}
	}

	/* try to deassert all listed resets */
	for (i = 0; ; i++) {
		struct reset_ctl reset;
		int ret;

		ret = reset_get_by_index(dev, i, &reset);
		if (ret < 0)
			break;
		ret = reset_deassert(&reset);
		if (ret < 0) {
			pr_err("error deasserting reset %d\n", i);
			return ret;
		}
		ret = reset_free(&reset);
		if (ret < 0) {
			pr_err("error freeing reset %d\n", i);
			return ret;
		}
	}

	/* disable dma controller */
	clrbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK);

	/* alloc channel private data pointers */
	priv->ch_priv = calloc(priv->n_channels,
			       sizeof(struct bcm6348_chan_priv *));
	if (!priv->ch_priv)
		return -ENOMEM;

	/* stop dma channels */
	for (ch = 0; ch < priv->n_channels; ch++)
		bcm6348_iudma_chan_stop(priv, ch);

	/* enable dma controller */
	setbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK);

	return 0;
}

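/*
 * Illustrative device tree node consumed by this probe; the unit
 * address, reg values and #dma-cells below are placeholders, not taken
 * from any real SoC dtsi:
 *
 *	iudma: dma-controller@fffe2800 {
 *		compatible = "brcm,bcm6348-iudma";
 *		reg = <0xfffe2800 0x38>,
 *		      <0xfffe2900 0x80>,
 *		      <0xfffe2a00 0x80>;
 *		reg-names = "dma", "dma-channels", "dma-sram";
 *		dma-channels = <8>;
 *		#dma-cells = <1>;
 *	};
 */
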
U_BOOT_DRIVER(bcm6348_iudma) = {
	.name = "bcm6348_iudma",
	.id = UCLASS_DMA,
	.of_match = bcm6348_iudma_ids,
	.ops = &bcm6348_iudma_ops,
	.priv_auto_alloc_size = sizeof(struct bcm6348_iudma_priv),
	.probe = bcm6348_iudma_probe,
};