// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
 *
 * Derived from linux/drivers/dma/bcm63xx-iudma.c:
 *	Copyright (C) 2015 Simon Arlott <simon@fire.lp0.eu>
 *
 * Derived from linux/drivers/net/ethernet/broadcom/bcm63xx_enet.c:
 *	Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/include/bcm963xx/63268_map_part.h:
 *	Copyright (C) 2000-2010 Broadcom Corporation
 *
 * Derived from bcm963xx_4.12L.06B_consumer/bcmdrivers/opensource/net/enet/impl4/bcmenet.c:
 *	Copyright (C) 2010 Broadcom Corporation
 */

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <dma-uclass.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <net.h>
#include <reset.h>
#include <asm/io.h>

#define DMA_RX_DESC	6
#define DMA_TX_DESC	1

/* DMA Channels */
#define DMA_CHAN_FLOWC(x)		((x) >> 1)
#define DMA_CHAN_MAX			16
#define DMA_CHAN_SIZE			0x10
#define DMA_CHAN_TOUT			500

/* DMA Global Configuration register */
#define DMA_CFG_REG			0x00
#define DMA_CFG_ENABLE_SHIFT		0
#define DMA_CFG_ENABLE_MASK		(1 << DMA_CFG_ENABLE_SHIFT)
#define DMA_CFG_FLOWC_ENABLE(x)		BIT(DMA_CHAN_FLOWC(x) + 1)
#define DMA_CFG_NCHANS_SHIFT		24
#define DMA_CFG_NCHANS_MASK		(0xf << DMA_CFG_NCHANS_SHIFT)

/* DMA Global Flow Control registers */
#define DMA_FLOWC_THR_LO_REG(x)		(0x04 + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_THR_HI_REG(x)		(0x08 + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_ALLOC_REG(x)		(0x0c + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_ALLOC_FORCE_SHIFT	31
#define DMA_FLOWC_ALLOC_FORCE_MASK	(1 << DMA_FLOWC_ALLOC_FORCE_SHIFT)

/* DMA Global Reset register */
#define DMA_RST_REG			0x34
#define DMA_RST_CHAN_SHIFT		0
#define DMA_RST_CHAN_MASK(x)		(1 << (x))

/* DMA Channel Configuration register */
#define DMAC_CFG_REG(x)			(DMA_CHAN_SIZE * (x) + 0x00)
#define DMAC_CFG_ENABLE_SHIFT		0
#define DMAC_CFG_ENABLE_MASK		(1 << DMAC_CFG_ENABLE_SHIFT)
#define DMAC_CFG_PKT_HALT_SHIFT		1
#define DMAC_CFG_PKT_HALT_MASK		(1 << DMAC_CFG_PKT_HALT_SHIFT)
#define DMAC_CFG_BRST_HALT_SHIFT	2
#define DMAC_CFG_BRST_HALT_MASK		(1 << DMAC_CFG_BRST_HALT_SHIFT)

/* DMA Channel Max Burst Length register */
#define DMAC_BURST_REG(x)		(DMA_CHAN_SIZE * (x) + 0x0c)

/* DMA SRAM Descriptor Ring Start register */
#define DMAS_RSTART_REG(x)		(DMA_CHAN_SIZE * (x) + 0x00)

/* DMA SRAM State/Bytes done/ring offset register */
#define DMAS_STATE_DATA_REG(x)		(DMA_CHAN_SIZE * (x) + 0x04)

/* DMA SRAM Buffer Descriptor status and length register */
#define DMAS_DESC_LEN_STATUS_REG(x)	(DMA_CHAN_SIZE * (x) + 0x08)

/* DMA SRAM Buffer Descriptor base bufptr register */
#define DMAS_DESC_BASE_BUFPTR_REG(x)	(DMA_CHAN_SIZE * (x) + 0x0c)

/* DMA Descriptor Status */
#define DMAD_ST_CRC_SHIFT		8
#define DMAD_ST_CRC_MASK		(1 << DMAD_ST_CRC_SHIFT)
#define DMAD_ST_WRAP_SHIFT		12
#define DMAD_ST_WRAP_MASK		(1 << DMAD_ST_WRAP_SHIFT)
#define DMAD_ST_SOP_SHIFT		13
#define DMAD_ST_SOP_MASK		(1 << DMAD_ST_SOP_SHIFT)
#define DMAD_ST_EOP_SHIFT		14
#define DMAD_ST_EOP_MASK		(1 << DMAD_ST_EOP_SHIFT)
#define DMAD_ST_OWN_SHIFT		15
#define DMAD_ST_OWN_MASK		(1 << DMAD_ST_OWN_SHIFT)

/* DMA Descriptor Status (BCM6348 error bits) */
#define DMAD6348_ST_OV_ERR_SHIFT	0
#define DMAD6348_ST_OV_ERR_MASK		(1 << DMAD6348_ST_OV_ERR_SHIFT)
#define DMAD6348_ST_CRC_ERR_SHIFT	1
#define DMAD6348_ST_CRC_ERR_MASK	(1 << DMAD6348_ST_CRC_ERR_SHIFT)
#define DMAD6348_ST_RX_ERR_SHIFT	2
#define DMAD6348_ST_RX_ERR_MASK		(1 << DMAD6348_ST_RX_ERR_SHIFT)
#define DMAD6348_ST_OS_ERR_SHIFT	4
#define DMAD6348_ST_OS_ERR_MASK		(1 << DMAD6348_ST_OS_ERR_SHIFT)
#define DMAD6348_ST_UN_ERR_SHIFT	9
#define DMAD6348_ST_UN_ERR_MASK		(1 << DMAD6348_ST_UN_ERR_SHIFT)
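
/*
 * Note (added): each channel works on a ring of buffer descriptors in
 * cacheable memory. A descriptor belongs to the hardware while
 * DMAD_ST_OWN is set and returns to the CPU once the controller clears
 * it; SOP/EOP delimit a packet and WRAP marks the ring's last entry.
 */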

struct bcm6348_dma_desc {
	uint16_t length;
	uint16_t status;
	uint32_t address;
};

struct bcm6348_chan_priv {
	void __iomem *dma_ring;
	uint8_t dma_ring_size;
	uint8_t desc_id;
	uint8_t desc_cnt;
	bool *busy_desc;
	bool running;
};

struct bcm6348_iudma_hw {
	uint16_t err_mask;
};

struct bcm6348_iudma_priv {
	const struct bcm6348_iudma_hw *hw;
	void __iomem *base;
	void __iomem *chan;
	void __iomem *sram;
	struct bcm6348_chan_priv **ch_priv;
	uint8_t n_channels;
};

static inline bool bcm6348_iudma_chan_is_rx(uint8_t ch)
{
	return !(ch & 1);
}

static inline void bcm6348_iudma_fdc(void *ptr, ulong size)
{
	ulong start = (ulong) ptr;

	flush_dcache_range(start, start + size);
}

static inline void bcm6348_iudma_idc(void *ptr, ulong size)
{
	ulong start = (ulong) ptr;

	invalidate_dcache_range(start, start + size);
}
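
/*
 * Note (added): rings and packet buffers live in cacheable memory, so
 * writes must be flushed (fdc) before ownership passes to the controller,
 * and stale lines invalidated (idc) before the CPU reads them back.
 */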

static void bcm6348_iudma_chan_stop(struct bcm6348_iudma_priv *priv,
				    uint8_t ch)
{
	unsigned int timeout = DMA_CHAN_TOUT;

	do {
		uint32_t cfg, halt;

		if (timeout > DMA_CHAN_TOUT / 2)
			halt = DMAC_CFG_PKT_HALT_MASK;
		else
			halt = DMAC_CFG_BRST_HALT_MASK;

		/* try to stop dma channel */
		writel_be(halt, priv->chan + DMAC_CFG_REG(ch));
		mb();

		/* check if channel was stopped */
		cfg = readl_be(priv->chan + DMAC_CFG_REG(ch));
		if (!(cfg & DMAC_CFG_ENABLE_MASK))
			break;

		udelay(1);
	} while (--timeout);

	if (!timeout)
		pr_err("unable to stop channel %u\n", ch);

	/* reset dma channel */
	setbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch));
	mb();
	clrbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch));
}

static int bcm6348_iudma_disable(struct dma *dma)
{
	struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];

	/* stop dma channel */
	bcm6348_iudma_chan_stop(priv, dma->id);

	/* dma flow control */
	if (bcm6348_iudma_chan_is_rx(dma->id))
		writel_be(DMA_FLOWC_ALLOC_FORCE_MASK,
			  priv->base + DMA_FLOWC_ALLOC_REG(dma->id));

	/* init channel config */
	ch_priv->running = false;
	ch_priv->desc_id = 0;
	if (bcm6348_iudma_chan_is_rx(dma->id))
		ch_priv->desc_cnt = 0;
	else
		ch_priv->desc_cnt = ch_priv->dma_ring_size;

	return 0;
}

static int bcm6348_iudma_enable(struct dma *dma)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
	uint8_t i;

	/* init dma rings */
	for (i = 0; i < ch_priv->desc_cnt; i++) {
		if (bcm6348_iudma_chan_is_rx(dma->id)) {
			ch_priv->busy_desc[i] = false;
			dma_desc->status |= DMAD_ST_OWN_MASK;
		} else {
			dma_desc->status = 0;
			dma_desc->length = 0;
			dma_desc->address = 0;
		}

		if (i == ch_priv->desc_cnt - 1)
			dma_desc->status |= DMAD_ST_WRAP_MASK;

		dma_desc++;
	}

	/* init to first descriptor */
	ch_priv->desc_id = 0;

	/* force cache writeback */
	bcm6348_iudma_fdc(ch_priv->dma_ring,
			  sizeof(*dma_desc) * ch_priv->desc_cnt);

	/* clear sram */
	writel_be(0, priv->sram + DMAS_STATE_DATA_REG(dma->id));
	writel_be(0, priv->sram + DMAS_DESC_LEN_STATUS_REG(dma->id));
	writel_be(0, priv->sram + DMAS_DESC_BASE_BUFPTR_REG(dma->id));

	/* set dma ring start */
	writel_be(virt_to_phys(ch_priv->dma_ring),
		  priv->sram + DMAS_RSTART_REG(dma->id));

	/* set flow control */
	if (bcm6348_iudma_chan_is_rx(dma->id)) {
		u32 val;

		setbits_be32(priv->base + DMA_CFG_REG,
			     DMA_CFG_FLOWC_ENABLE(dma->id));

		val = ch_priv->desc_cnt / 3;
		writel_be(val, priv->base + DMA_FLOWC_THR_LO_REG(dma->id));

		val = (ch_priv->desc_cnt * 2) / 3;
		writel_be(val, priv->base + DMA_FLOWC_THR_HI_REG(dma->id));

		writel_be(0, priv->base + DMA_FLOWC_ALLOC_REG(dma->id));
	}

	/* set dma max burst */
	writel_be(ch_priv->desc_cnt,
		  priv->chan + DMAC_BURST_REG(dma->id));

	/* kick rx dma channel */
	if (bcm6348_iudma_chan_is_rx(dma->id))
		setbits_be32(priv->chan + DMAC_CFG_REG(dma->id),
			     DMAC_CFG_ENABLE_MASK);

	/* channel is now enabled */
	ch_priv->running = true;

	return 0;
}
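
/*
 * Note (added): rx flow control is armed with a low threshold at 1/3 and
 * a high threshold at 2/3 of the ring, with the buffer allocation counter
 * cleared; the threshold semantics are inherited from the Broadcom
 * reference code and the derived Linux bcm63xx drivers.
 */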

static int bcm6348_iudma_request(struct dma *dma)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv;

	/* check if channel is valid */
	if (dma->id >= priv->n_channels)
		return -ENODEV;

	/* alloc channel private data */
	priv->ch_priv[dma->id] = calloc(1, sizeof(struct bcm6348_chan_priv));
	if (!priv->ch_priv[dma->id])
		return -ENOMEM;
	ch_priv = priv->ch_priv[dma->id];

	/* alloc dma ring */
	if (bcm6348_iudma_chan_is_rx(dma->id))
		ch_priv->dma_ring_size = DMA_RX_DESC;
	else
		ch_priv->dma_ring_size = DMA_TX_DESC;

	ch_priv->dma_ring =
		malloc_cache_aligned(sizeof(struct bcm6348_dma_desc) *
				     ch_priv->dma_ring_size);
	if (!ch_priv->dma_ring)
		return -ENOMEM;

	/* init channel config */
	ch_priv->running = false;
	ch_priv->desc_id = 0;
	if (bcm6348_iudma_chan_is_rx(dma->id)) {
		ch_priv->desc_cnt = 0;
		ch_priv->busy_desc = calloc(ch_priv->dma_ring_size,
					    sizeof(bool));
	} else {
		ch_priv->desc_cnt = ch_priv->dma_ring_size;
		ch_priv->busy_desc = NULL;
	}

	return 0;
}

static int bcm6348_iudma_receive(struct dma *dma, void **dst, void *metadata)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	const struct bcm6348_iudma_hw *hw = priv->hw;
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
	int ret;

	/* get dma ring descriptor address */
	dma_desc += ch_priv->desc_id;

	/* invalidate cache data */
	bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc));

	/* check dma own */
	if (dma_desc->status & DMAD_ST_OWN_MASK)
		return -EAGAIN;

	/* check pkt */
	if (!(dma_desc->status & DMAD_ST_EOP_MASK) ||
	    !(dma_desc->status & DMAD_ST_SOP_MASK) ||
	    (dma_desc->status & hw->err_mask)) {
		pr_err("invalid pkt received (ch=%ld desc=%u) (st=%04x)\n",
		       dma->id, ch_priv->desc_id, dma_desc->status);
		ret = -EAGAIN;
	} else {
		/* set dma buffer address */
		*dst = phys_to_virt(dma_desc->address);

		/* invalidate cache data */
		bcm6348_iudma_idc(*dst, dma_desc->length);

		/* return packet length */
		ret = dma_desc->length;
	}

	/* busy dma descriptor */
	ch_priv->busy_desc[ch_priv->desc_id] = true;

	/* increment dma descriptor */
	ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt;

	return ret;
}
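
/*
 * Note (added): receive is zero-copy: it returns a pointer into the
 * descriptor's own buffer via phys_to_virt() rather than copying, so the
 * caller must hand the buffer back through dma_prepare_rcv_buf().
 */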

static int bcm6348_iudma_send(struct dma *dma, void *src, size_t len,
			      void *metadata)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc;
	uint16_t status;

	/* flush cache */
	bcm6348_iudma_fdc(src, len);

	/* get dma ring descriptor address */
	dma_desc = ch_priv->dma_ring;
	dma_desc += ch_priv->desc_id;

	/* config dma descriptor */
	status = (DMAD_ST_OWN_MASK |
		  DMAD_ST_EOP_MASK |
		  DMAD_ST_CRC_MASK |
		  DMAD_ST_SOP_MASK);
	if (ch_priv->desc_id == ch_priv->desc_cnt - 1)
		status |= DMAD_ST_WRAP_MASK;

	/* set dma descriptor */
	dma_desc->address = virt_to_phys(src);
	dma_desc->length = len;
	dma_desc->status = status;

	/* flush cache */
	bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc));

	/* kick tx dma channel */
	setbits_be32(priv->chan + DMAC_CFG_REG(dma->id), DMAC_CFG_ENABLE_MASK);

	/* poll dma status */
	do {
		/* invalidate cache */
		bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc));

		if (!(dma_desc->status & DMAD_ST_OWN_MASK))
			break;

		udelay(1);
	} while (1);

	/* increment dma descriptor */
	ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt;

	return 0;
}
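
/*
 * Note (added): send is synchronous: it busy-polls the descriptor until
 * the controller clears DMAD_ST_OWN, so the buffer may be reused as soon
 * as the op returns.
 */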

static int bcm6348_iudma_free_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
	uint16_t status;
	uint8_t i;
	u32 cfg;

	/* get dirty dma descriptor */
	for (i = 0; i < ch_priv->desc_cnt; i++) {
		if (phys_to_virt(dma_desc->address) == dst)
			break;

		dma_desc++;
	}

	/* dma descriptor not found */
	if (i == ch_priv->desc_cnt) {
		pr_err("dirty dma descriptor not found\n");
		return -ENOENT;
	}

	/* invalidate cache */
	bcm6348_iudma_idc(ch_priv->dma_ring,
			  sizeof(*dma_desc) * ch_priv->desc_cnt);

	/* free dma descriptor */
	ch_priv->busy_desc[i] = false;

	status = DMAD_ST_OWN_MASK;
	if (i == ch_priv->desc_cnt - 1)
		status |= DMAD_ST_WRAP_MASK;

	dma_desc->status |= status;
	dma_desc->length = PKTSIZE_ALIGN;

	/* tell dma we allocated one buffer */
	writel_be(1, priv->base + DMA_FLOWC_ALLOC_REG(dma->id));

	/* flush cache */
	bcm6348_iudma_fdc(ch_priv->dma_ring,
			  sizeof(*dma_desc) * ch_priv->desc_cnt);

	/* kick rx dma channel if disabled */
	cfg = readl_be(priv->chan + DMAC_CFG_REG(dma->id));
	if (!(cfg & DMAC_CFG_ENABLE_MASK))
		setbits_be32(priv->chan + DMAC_CFG_REG(dma->id),
			     DMAC_CFG_ENABLE_MASK);

	return 0;
}

static int bcm6348_iudma_add_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
	struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;

	/* no more dma descriptors available */
	if (ch_priv->desc_cnt == ch_priv->dma_ring_size) {
		pr_err("max number of buffers reached\n");
		return -EINVAL;
	}

	/* get next dma descriptor */
	dma_desc += ch_priv->desc_cnt;

	/* init dma descriptor */
	dma_desc->address = virt_to_phys(dst);
	dma_desc->length = size;
	dma_desc->status = 0;

	/* flush cache */
	bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc));

	/* increment dma descriptors */
	ch_priv->desc_cnt++;

	return 0;
}

static int bcm6348_iudma_prepare_rcv_buf(struct dma *dma, void *dst,
					 size_t size)
{
	const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
	struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];

	/* only add new rx buffers if channel isn't running */
	if (ch_priv->running)
		return bcm6348_iudma_free_rcv_buf(dma, dst, size);

	return bcm6348_iudma_add_rcv_buf(dma, dst, size);
}

static const struct dma_ops bcm6348_iudma_ops = {
	.disable = bcm6348_iudma_disable,
	.enable = bcm6348_iudma_enable,
	.prepare_rcv_buf = bcm6348_iudma_prepare_rcv_buf,
	.request = bcm6348_iudma_request,
	.receive = bcm6348_iudma_receive,
	.send = bcm6348_iudma_send,
};
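
/*
 * Usage sketch (added, not part of the driver): a consumer such as a
 * bcm63xx Ethernet MAC driver would reach these ops through the generic
 * DMA uclass API, roughly as follows (the "rx"/"tx" channel names and
 * the buffer layout are illustrative assumptions):
 *
 *	struct dma rx_dma, tx_dma;
 *	void *pkt;
 *	int len, i;
 *
 *	dma_get_by_name(dev, "rx", &rx_dma);
 *	dma_get_by_name(dev, "tx", &tx_dma);
 *	for (i = 0; i < DMA_RX_DESC; i++)
 *		dma_prepare_rcv_buf(&rx_dma, buf + i * PKTSIZE_ALIGN,
 *				    PKTSIZE_ALIGN);
 *	dma_enable(&rx_dma);
 *	dma_enable(&tx_dma);
 *
 *	len = dma_receive(&rx_dma, &pkt, NULL);
 *	if (len > 0) {
 *		dma_send(&tx_dma, pkt, len, NULL);
 *		dma_prepare_rcv_buf(&rx_dma, pkt, PKTSIZE_ALIGN);
 *	}
 */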

static const struct bcm6348_iudma_hw bcm6348_hw = {
	.err_mask = (DMAD6348_ST_OV_ERR_MASK |
		     DMAD6348_ST_CRC_ERR_MASK |
		     DMAD6348_ST_RX_ERR_MASK |
		     DMAD6348_ST_OS_ERR_MASK |
		     DMAD6348_ST_UN_ERR_MASK),
};

static const struct bcm6348_iudma_hw bcm6368_hw = {
	.err_mask = 0,
};

static const struct udevice_id bcm6348_iudma_ids[] = {
	{
		.compatible = "brcm,bcm6348-iudma",
		.data = (ulong)&bcm6348_hw,
	}, {
		.compatible = "brcm,bcm6368-iudma",
		.data = (ulong)&bcm6368_hw,
	}, { /* sentinel */ }
};
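
/*
 * Example device tree node (added, illustrative only; register addresses
 * and sizes are placeholders, not taken from a real SoC dtsi). The
 * reg-names and the dma-channels property match what probe() below reads:
 *
 *	iudma: dma-controller@10001800 {
 *		compatible = "brcm,bcm6368-iudma";
 *		reg = <0x10001800 0x24>,
 *		      <0x10001900 0x80>,
 *		      <0x10001a00 0x80>;
 *		reg-names = "dma", "dma-channels", "dma-sram";
 *		dma-channels = <8>;
 *		#dma-cells = <1>;
 *	};
 */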

static int bcm6348_iudma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct bcm6348_iudma_priv *priv = dev_get_priv(dev);
	const struct bcm6348_iudma_hw *hw =
		(const struct bcm6348_iudma_hw *)dev_get_driver_data(dev);
	uint8_t ch;
	int i;

	uc_priv->supported = (DMA_SUPPORTS_DEV_TO_MEM |
			      DMA_SUPPORTS_MEM_TO_DEV);

	priv->hw = hw;

	/* dma global base address */
	priv->base = dev_remap_addr_name(dev, "dma");
	if (!priv->base)
		return -EINVAL;

	/* dma channels base address */
	priv->chan = dev_remap_addr_name(dev, "dma-channels");
	if (!priv->chan)
		return -EINVAL;

	/* dma sram base address */
	priv->sram = dev_remap_addr_name(dev, "dma-sram");
	if (!priv->sram)
		return -EINVAL;

	/* get number of channels */
	priv->n_channels = dev_read_u32_default(dev, "dma-channels", 8);
	if (priv->n_channels > DMA_CHAN_MAX)
		return -EINVAL;

	/* try to enable clocks */
	for (i = 0; ; i++) {
		struct clk clk;
		int ret;

		ret = clk_get_by_index(dev, i, &clk);
		if (ret < 0)
			break;

		ret = clk_enable(&clk);
		if (ret < 0) {
			pr_err("error enabling clock %d\n", i);
			return ret;
		}

		ret = clk_free(&clk);
		if (ret < 0) {
			pr_err("error freeing clock %d\n", i);
			return ret;
		}
	}

	/* try to perform resets */
	for (i = 0; ; i++) {
		struct reset_ctl reset;
		int ret;

		ret = reset_get_by_index(dev, i, &reset);
		if (ret < 0)
			break;

		ret = reset_deassert(&reset);
		if (ret < 0) {
			pr_err("error deasserting reset %d\n", i);
			return ret;
		}

		ret = reset_free(&reset);
		if (ret < 0) {
			pr_err("error freeing reset %d\n", i);
			return ret;
		}
	}

	/* disable dma controller */
	clrbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK);

	/* alloc channel private data pointers */
	priv->ch_priv = calloc(priv->n_channels,
			       sizeof(struct bcm6348_chan_priv *));
	if (!priv->ch_priv)
		return -ENOMEM;

	/* stop dma channels */
	for (ch = 0; ch < priv->n_channels; ch++)
		bcm6348_iudma_chan_stop(priv, ch);

	/* enable dma controller */
	setbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK);

	return 0;
}

U_BOOT_DRIVER(bcm6348_iudma) = {
	.name = "bcm6348_iudma",
	.id = UCLASS_DMA,
	.of_match = bcm6348_iudma_ids,
	.ops = &bcm6348_iudma_ops,
	.priv_auto_alloc_size = sizeof(struct bcm6348_iudma_priv),
	.probe = bcm6348_iudma_probe,
};