// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt

#include <asm/cache.h>
#include <asm/bitops.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/of_access.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <dt-bindings/dma/k3-udma.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"

#if BITS_PER_LONG == 64
#define RINGACC_RING_USE_PROXY	(0)
#else
#define RINGACC_RING_USE_PROXY	(1)
#endif

#define K3_UDMA_MAX_RFLOWS 1024
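/*
 * Upper bound for the on-stack rflow bitmap used in __udma_reserve_rflow();
 * the actual number of flows is read from the GCFG CAP3 register at probe.
 */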
enum udma_mmr {
	MMR_GCFG = 0,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_LAST,
};

static const char * const mmr_names[] = {
	"gcfg", "rchanrt", "tchanrt"
};

struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *t_ring; /* Transmit ring */
	struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
	struct k3_nav_ring *r_ring; /* Receive ring */
};

struct udma_rflow {
	int id;
};

enum udma_rm_range {
	RM_RANGE_TCHAN = 0,
	RM_RANGE_RCHAN,
	RM_RANGE_RFLOW,
	RM_RANGE_LAST,
};

struct udma_tisci_rm {
	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
	u32 tisci_dev_id;

	/* tisci information for PSI-L thread pairing/unpairing */
	const struct ti_sci_rm_psil_ops *tisci_psil_ops;
	u32 tisci_navss_dev_id;

	struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
};

struct udma_dev {
	struct udevice *dev;

	void __iomem *mmrs[MMR_LAST];

	struct udma_tisci_rm tisci_rm;
	struct k3_nav_ringacc *ringacc;

	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_map;
	unsigned long *rflow_map_reserved;

	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;

	u32 ch_count;
};

struct udma_chan {
	struct udma_dev *ud;
	char name[20];

	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	struct ti_udma_drv_chan_cfg_data cfg_data;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	int slave_thread_id;
	u32 src_thread;
	u32 dst_thread;
	u32 static_tr_type;

	u32 id;
	enum dma_direction dir;

	struct cppi5_host_desc_t *desc_tx;
	u32 hdesc_size;
	bool in_use;
	void *desc_rx;
	u32 desc_rx_cur;
	u32 num_rx_bufs;
};

#define UDMA_CH_1000(ch)	((ch) * 0x1000)
#define UDMA_CH_100(ch)		((ch) * 0x100)
#define UDMA_CH_40(ch)		((ch) * 0x40)

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM	PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM	4
#endif
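/*
 * Number of host descriptors preallocated per RX channel; when the network
 * stack's PKTBUFSRX is available it is matched so that every queued RX
 * packet buffer can have its own descriptor.
 */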
/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	u32 v;

	v = __raw_readl(base + reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
	return v;
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
	__raw_writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = udma_read(base, reg);
	tmp = (orig & ~mask) | (val & mask);
	if (tmp != orig)
		udma_write(base, reg, tmp);
}
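/*
 * Per-channel real-time (RT) register access. Each tchan/rchan has its own
 * RT register window (tchan->reg_rt / rchan->reg_rt), mapped at probe time
 * with a 0x1000 stride (UDMA_CH_1000) from the tchanrt/rchanrt MMR regions.
 */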
static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
	return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan,
				      int reg, u32 val)
{
	udma_write(tchan->reg_rt, reg, val);
}

static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
	return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan,
				      int reg, u32 val)
{
	udma_write(rchan->reg_rt, reg, val);
}

static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
				       u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
					 u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}
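/*
 * PSI-L thread pairing goes through the TISCI resource-management firmware:
 * the source (output) and destination (input) threads are linked by System
 * Firmware rather than by direct register writes. The destination thread
 * number carries UDMA_PSIL_DST_THREAD_ID_OFFSET, presumably the same bit as
 * the 0x8000 flag set on dst_thread in udma_alloc_chan_resources().
 */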
static inline char *udma_get_dir_text(enum dma_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	default:
		break;
	}

	return "invalid";
}

static inline bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, rrt_ctl,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, trt_ctl,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		break;
	default:
		break;
	}

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_nav_ring *ring = NULL;
	int ret = -ENOENT;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring && k3_nav_ringacc_ring_get_occ(ring))
		ret = k3_nav_ringacc_ring_pop(ring, addr);

	return ret;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_nav_ring *ring1 = NULL;
	struct k3_nav_ring *ring2 = NULL;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring1 = uc->rchan->fd_ring;
		ring2 = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring1)
		k3_nav_ringacc_ring_reset_dma(ring1, 0);
	if (ring2)
		k3_nav_ringacc_ring_reset(ring2);
}
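/*
 * The RT counter registers appear to decrement by the value written back:
 * reading BCNT/SBCNT/PCNT and writing the same value back zeroes the running
 * counts without disturbing transfers that are still in flight.
 */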
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}

static inline int udma_stop_hard(struct udma_chan *uc)
{
	pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int udma_start(struct udma_chan *uc)
{
	/* Channel is already running, no need to proceed further */
	if (udma_is_chan_running(uc))
		return 0;

	pr_debug("%s: chan:%d dir:%s (static_tr_type: %d)\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir),
		 uc->static_tr_type);

	/* Make sure that we clear the teardown bit, if it is set */
	udma_stop_hard(uc);

	/* Reset all counters */
	udma_reset_counters(uc);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_CTL_REG),
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		/* Enable remote */
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_CTL_REG),
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		break;
	default:
		return -EINVAL;
	}

	pr_debug("%s: DONE chan:%d\n", __func__, uc->id);

	return 0;
}

static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
			   UDMA_CHAN_RT_CTL_EN |
			   UDMA_CHAN_RT_CTL_TDOWN);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s TIMEOUT !\n", __func__);
			i = 0;
		}
		i++;
	}

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			   UDMA_PEER_RT_EN_ENABLE |
			   UDMA_PEER_RT_EN_TEARDOWN);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s TIMEOUT !\n", __func__);
			i = 0;
		}
		i++;
	}

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}
static inline int udma_stop(struct udma_chan *uc)
{
	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	udma_reset_counters(uc);
	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_stop_dev2mem(uc, true);
		break;
	case DMA_MEM_TO_DEV:
		udma_stop_mem2dev(uc, true);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
{
	while (udma_pop_from_ring(uc, paddr)) {
		/* busy-wait until the completion descriptor shows up */
		udelay(1);
	}
}

static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
{
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	if (id >= 0) {
		if (test_bit(id, ud->rflow_map)) {
			dev_err(ud->dev, "rflow%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
			  ud->rflow_cnt);

		id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
		if (id >= ud->rflow_cnt)
			return ERR_PTR(-ENOENT);
	}

	__set_bit(id, ud->rflow_map);
	return &ud->rflows[id];
}

#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, #res "%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	__set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}
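/*
 * Instantiate __udma_reserve_tchan() and __udma_reserve_rchan(). rflow
 * reservation is open coded above because an unspecified (id < 0) search
 * must skip the default flows that shadow the rchan ids, starting at
 * ud->rchan_cnt instead of bit 0.
 */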
UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, -1);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, -1);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

	return 0;
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	for (chan_id = 0; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	__set_bit(chan_id, ud->tchan_map);
	__set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

	return 0;
}
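/*
 * For DEV_TO_MEM channels the default rflow with the same id as the rchan
 * is requested (see udma_alloc_rx_resources()), so udma_get_rflow() is
 * normally called with flow_id == rchan->id.
 */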
static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	if (!uc->rchan)
		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

	uc->rflow = __udma_reserve_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);

	return 0;
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		__clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		__clear_bit(uc->tchan->id, ud->tchan_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__clear_bit(uc->rflow->id, ud->rflow_map);
		uc->rflow = NULL;
	}
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	uc->tchan->t_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, uc->tchan->id,
				RINGACC_RING_USE_PROXY);
	if (!uc->tchan->t_ring) {
		ret = -EBUSY;
		goto err_tx_ring;
	}

	uc->tchan->tc_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->tchan->tc_ring) {
		ret = -EBUSY;
		goto err_txc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
err_txc_ring:
	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_tx_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->fd_ring = NULL;
	uc->rchan->r_ring = NULL;

	udma_put_rflow(uc);
	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->dir == DMA_MEM_TO_MEM)
		return 0;

	ret = udma_get_rflow(uc, uc->rchan->id);
	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;

	uc->rchan->fd_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, fd_ring_id,
				RINGACC_RING_USE_PROXY);
	if (!uc->rchan->fd_ring) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	uc->rchan->r_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->rchan->r_ring) {
		ret = -EBUSY;
		goto err_rxc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->rchan->fd_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->rchan->r_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->r_ring = NULL;
err_rxc_ring:
	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	uc->rchan->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}
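/*
 * Ring numbering convention inferred from fd_ring_id above: rings
 * 0 .. tchan_cnt - 1 back the TX channels, followed by the external
 * channels, with the RX free-descriptor rings starting at
 * tchan_cnt + echan_cnt.
 */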
static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->tchan->id;
	req.tx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM)
		req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		req.tx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
	req.txcq_qnum = tc_ring;

	ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);

	return ret;
}

static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rchan->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rchan->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->rchan->id;
	req.rx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM) {
		req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req.rxcq_qnum = tc_ring;
	} else {
		req.rx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
		req.rxcq_qnum = rx_ring;
	}
	if (uc->rflow->id != uc->rchan->id && uc->dir != DMA_MEM_TO_MEM) {
		req.flowid_start = uc->rflow->id;
		req.flowid_cnt = 1;
	}

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
			uc->rchan->id, ret);
		return ret;
	}
	if (uc->dir == DMA_MEM_TO_MEM)
		return ret;

	flow_req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;

	if (uc->psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;

	flow_req.rx_error_handling = 0;
	flow_req.rx_desc_type = 0;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = 2;
	flow_req.rx_src_tag_lo_sel = 4;
	flow_req.rx_dest_tag_hi_sel = 5;
	flow_req.rx_dest_tag_lo_sel = 4;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;
	flow_req.rx_ps_location = 0;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
						     &flow_req);
	if (ret)
		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
			uc->rchan->id, uc->rflow->id, ret);

	return ret;
}
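/*
 * Channel setup sequence: reserve tchan/rchan (and rflow) together with
 * their rings, configure the channels through TISCI, then pair the PSI-L
 * threads. Destination thread numbers carry bit 15 (0x8000), the PSI-L
 * destination thread offset.
 */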
static int udma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	pr_debug("%s: chan:%d as %s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	switch (uc->dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		ret = udma_get_chan_pair(uc);
		if (ret)
			return ret;

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = uc->slave_thread_id;
		if (!(uc->dst_thread & 0x8000))
			uc->dst_thread |= 0x8000;

		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = uc->slave_thread_id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;

		break;
	default:
		/* Cannot happen */
		pr_debug("%s: chan:%d invalid direction (%u)\n",
			 __func__, uc->id, uc->dir);
		return -EINVAL;
	}

	/* We have channel indexes and rings */
	if (uc->dir == DMA_MEM_TO_MEM) {
		ret = udma_alloc_tchan_sci_req(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rchan_sci_req(uc);
		if (ret)
			goto err_free_res;
	} else {
		/* Slave transfer */
		if (uc->dir == DMA_MEM_TO_DEV) {
			ret = udma_alloc_tchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		} else {
			ret = udma_alloc_rchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		}
	}

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			goto err_free_res;
		}
	}

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->src_thread, uc->dst_thread);
	if (ret) {
		dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
		goto err_free_res;
	}

	return 0;

err_free_res:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	uc->slave_thread_id = -1;

	return ret;
}

static void udma_free_chan_resources(struct udma_chan *uc)
{
	/* Some configuration to UDMA-P channel: disable, reset, whatever */

	/* Release PSI-L pairing */
	udma_navss_psil_unpair(uc->ud, uc->src_thread, uc->dst_thread);

	/* Reset the rings for a new start */
	udma_reset_rings(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	uc->slave_thread_id = -1;
	uc->dir = DMA_MEM_TO_MEM;
}
static int udma_get_mmrs(struct udevice *dev)
{
	struct udma_dev *ud = dev_get_priv(dev);
	int i;

	for (i = 0; i < MMR_LAST; i++) {
		ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
							       mmr_names[i]);
		if (!ud->mmrs[i])
			return -EINVAL;
	}

	return 0;
}

static int udma_setup_resources(struct udma_dev *ud)
{
	struct udevice *dev = ud->dev;
	int ch_count, i;
	u32 cap2, cap3;
	struct ti_sci_resource_desc *rm_desc;
	struct ti_sci_resource *rm_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	static const char * const range_names[] = { "ti,sci-rm-range-tchan",
						    "ti,sci-rm-range-rchan",
						    "ti,sci-rm-range-rflow" };

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	ud->rflow_cnt = cap3 & 0x3fff;
	ud->tchan_cnt = cap2 & 0x1ff;
	ud->echan_cnt = (cap2 >> 9) & 0x1ff;
	ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
	ch_count = ud->tchan_cnt + ud->rchan_cnt;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
	    !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/*
	 * RX flows with the same Ids as RX channels are reserved to be used
	 * as default flows if remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by id.
	 */
	bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++)
		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		bitmap_zero(ud->rflow_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		bitmap_fill(ud->rflow_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		bitmap_clear(ud->rflow_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		bitmap_set(ud->rflow_map, ud->rchan_cnt,
			   ud->rflow_cnt - ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	dev_info(dev,
		 "Channels: %d (tchan: %u, echan: %u, rchan: %u, rflow: %u)\n",
		 ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 ud->rflow_cnt);

	return ch_count;
}
static int udma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct udma_dev *ud = dev_get_priv(dev);
	int i, ret;
	struct udevice *tmp;
	struct udevice *tisci_dev = NULL;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));

	ret = udma_get_mmrs(dev);
	if (ret)
		return ret;

	ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
					   "ti,ringacc", &tmp);
	ud->ringacc = dev_get_priv(tmp);
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	ud->psil_base = dev_read_u32_default(dev, "ti,psil-base", 0);
	if (!ud->psil_base) {
		dev_info(dev,
			 "Missing ti,psil-base property, using %d.\n", ret);
		return -EINVAL;
	}

	ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
					   "ti,sci", &tisci_dev);
	if (ret) {
		debug("Failed to get TISCI phandle (%d)\n", ret);
		tisci_rm->tisci = NULL;
		return ret;
	}
	tisci_rm->tisci = (struct ti_sci_handle *)
			  (ti_sci_get_handle_from_sysfw(tisci_dev));

	tisci_rm->tisci_dev_id = -1;
	ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_navss_dev_id = -1;
	ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
			      &tisci_rm->tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
	tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;

	ud->dev = dev;
	ud->ch_count = udma_setup_resources(ud);
	if (ud->ch_count <= 0)
		return ud->ch_count;

	dev_info(dev,
		 "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
		 ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 tisci_rm->tisci_dev_id);
	dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ud->ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->id = i;
		uc->slave_thread_id = -1;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->dir = DMA_MEM_TO_MEM;
		sprintf(uc->name, "UDMA chan%d", i);
		if (!i)
			uc->in_use = true;
	}

	pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
		 udma_read(ud->mmrs[MMR_GCFG], 0),
		 udma_read(ud->mmrs[MMR_GCFG], 0x20),
		 udma_read(ud->mmrs[MMR_GCFG], 0x24),
		 udma_read(ud->mmrs[MMR_GCFG], 0x28),
		 udma_read(ud->mmrs[MMR_GCFG], 0x2c));

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;

	return 0;
}

static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
{
	dma_addr_t addr = 0;

	memcpy(&addr, &elem, sizeof(elem));
	return k3_nav_ringacc_ring_push(ring, &addr);
}
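/*
 * Memcpy uses a single TR15 descriptor with up to two TRs: tr0 moves
 * tr0_cnt1 blocks of tr0_cnt0 bytes (icnt0 must stay below 64KiB) and tr1
 * mops up the remainder. Worked example, assuming src|dest leaves
 * align_to = 6: tr0_cnt0 = SZ_64K - BIT(6) = 65472, so for len = 200000
 * we get tr0_cnt1 = 3 blocks and tr1_cnt0 = 3584 remaining bytes.
 */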
static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
				 dma_addr_t src, size_t len)
{
	u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned long dummy;
	void *tr_desc;
	size_t desc_size;

	if (len < SZ_64K) {
		num_tr = 1;
		tr0_cnt0 = len;
		tr0_cnt1 = 1;
	} else {
		unsigned long align_to = __ffs(src | dest);

		if (align_to > 3)
			align_to = 3;
		/*
		 * Keep simple: tr0: SZ_64K-alignment blocks,
		 * tr1: the remaining
		 */
		num_tr = 2;
		tr0_cnt0 = (SZ_64K - BIT(align_to));
		if (len / tr0_cnt0 >= SZ_64K) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				len);
			return NULL;
		}

		tr0_cnt1 = len / tr0_cnt0;
		tr1_cnt0 = len % tr0_cnt0;
	}

	desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
	tr_desc = dma_alloc_coherent(desc_size, &dummy);
	if (!tr_desc)
		return NULL;
	memset(tr_desc, 0, desc_size);

	cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);

	tr_req = tr_desc + tr_size;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);

	flush_dcache_range((unsigned long)tr_desc,
			   ALIGN((unsigned long)tr_desc + desc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->tchan->t_ring, tr_desc);

	return 0;
}
static int udma_transfer(struct udevice *dev, int direction,
			 void *dst, void *src, size_t len)
{
	struct udma_dev *ud = dev_get_priv(dev);
	/* Channel0 is reserved for memcpy */
	struct udma_chan *uc = &ud->channels[0];
	dma_addr_t paddr = 0;
	int ret;

	ret = udma_alloc_chan_resources(uc);
	if (ret)
		return ret;

	udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
	udma_start(uc);
	udma_poll_completion(uc, &paddr);
	udma_stop(uc);

	udma_free_chan_resources(uc);

	return 0;
}

static int udma_request(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	unsigned long dummy;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	uc = &ud->channels[dma->id];
	ret = udma_alloc_chan_resources(uc);
	if (ret) {
		dev_err(dma->dev, "alloc dma res failed %d\n", ret);
		return -EINVAL;
	}

	uc->hdesc_size = cppi5_hdesc_calc_size(uc->needs_epib,
					       uc->psd_size, 0);
	uc->hdesc_size = ALIGN(uc->hdesc_size, ARCH_DMA_MINALIGN);

	if (uc->dir == DMA_MEM_TO_DEV) {
		uc->desc_tx = dma_alloc_coherent(uc->hdesc_size, &dummy);
		memset(uc->desc_tx, 0, uc->hdesc_size);
	} else {
		uc->desc_rx = dma_alloc_coherent(
				uc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
		memset(uc->desc_rx, 0, uc->hdesc_size * UDMA_RX_DESC_NUM);
	}

	uc->in_use = true;
	uc->desc_rx_cur = 0;
	uc->num_rx_bufs = 0;

	if (uc->dir == DMA_DEV_TO_MEM) {
		uc->cfg_data.flow_id_base = uc->rflow->id;
		uc->cfg_data.flow_id_cnt = 1;
	}

	return 0;
}

static int udma_rfree(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		udma_stop(uc);
	udma_free_chan_resources(uc);

	uc->in_use = false;

	return 0;
}

static int udma_enable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	ret = udma_start(uc);

	return ret;
}

static int udma_disable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret = 0;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		ret = udma_stop(uc);
	else
		dev_err(dma->dev, "%s not running\n", __func__);

	return ret;
}

static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_tx;
	dma_addr_t dma_src = (dma_addr_t)src;
	struct ti_udma_drv_packet_data packet_data = { 0 };
	dma_addr_t paddr;
	struct udma_chan *uc;
	u32 tc_ring_id;
	int ret;

	if (metadata)
		packet_data = *((struct ti_udma_drv_packet_data *)metadata);

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_MEM_TO_DEV)
		return -EINVAL;

	tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

	desc_tx = uc->desc_tx;

	cppi5_hdesc_reset_hbdesc(desc_tx);

	cppi5_hdesc_init(desc_tx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_tx, len);
	cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
	cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
	/* pass below information from caller */
	cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
	cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);

	flush_dcache_range((unsigned long)dma_src,
			   ALIGN((unsigned long)dma_src + len,
				 ARCH_DMA_MINALIGN));
	flush_dcache_range((unsigned long)desc_tx,
			   ALIGN((unsigned long)desc_tx + uc->hdesc_size,
				 ARCH_DMA_MINALIGN));

	ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
	if (ret) {
		dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
			dma->id, ret);
		return ret;
	}

	udma_poll_completion(uc, &paddr);

	return 0;
}
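/*
 * Cache maintenance pattern used throughout: descriptors and buffers are
 * flushed before they are pushed to a ring and invalidated after they are
 * popped, since rings and descriptors live in normal (cacheable) memory.
 */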
static int udma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t buf_dma;
	struct udma_chan *uc;
	u32 buf_dma_len, pkt_len;
	u32 port_id = 0;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;
	if (!uc->num_rx_bufs)
		return -EINVAL;

	ret = k3_nav_ringacc_ring_pop(uc->rchan->r_ring, &desc_rx);
	if (ret && ret != -ENODATA) {
		dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
		return ret;
	} else if (ret == -ENODATA) {
		return 0;
	}

	/* invalidate cache data */
	invalidate_dcache_range((ulong)desc_rx,
				(ulong)desc_rx + uc->hdesc_size);

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

	/* invalidate cache data */
	invalidate_dcache_range((ulong)buf_dma,
				(ulong)(buf_dma + buf_dma_len));

	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	*dst = (void *)buf_dma;
	uc->num_rx_bufs--;

	return pkt_len;
}

static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc = &ud->channels[0];
	ofnode chconf_node, slave_node;
	char prop[50];
	u32 val;

	for (val = 0; val < ud->ch_count; val++) {
		uc = &ud->channels[val];
		if (!uc->in_use)
			break;
	}

	if (val == ud->ch_count)
		return -EBUSY;

	uc->dir = DMA_DEV_TO_MEM;
	if (args->args[2] == UDMA_DIR_TX)
		uc->dir = DMA_MEM_TO_DEV;

	slave_node = ofnode_get_by_phandle(args->args[0]);
	if (!ofnode_valid(slave_node)) {
		dev_err(ud->dev, "slave node is missing\n");
		return -EINVAL;
	}

	snprintf(prop, sizeof(prop), "ti,psil-config%u", args->args[1]);
	chconf_node = ofnode_find_subnode(slave_node, prop);
	if (!ofnode_valid(chconf_node)) {
		dev_err(ud->dev, "Channel configuration node is missing\n");
		return -EINVAL;
	}

	if (!ofnode_read_u32(chconf_node, "linux,udma-mode", &val)) {
		if (val == UDMA_PKT_MODE)
			uc->pkt_mode = true;
	}

	if (!ofnode_read_u32(chconf_node, "statictr-type", &val))
		uc->static_tr_type = val;

	uc->needs_epib = ofnode_read_bool(chconf_node, "ti,needs-epib");
	if (!ofnode_read_u32(chconf_node, "ti,psd-size", &val))
		uc->psd_size = val;
	uc->metadata_size = (uc->needs_epib ? 16 : 0) + uc->psd_size;

	if (ofnode_read_u32(slave_node, "ti,psil-base", &val)) {
		dev_err(ud->dev, "ti,psil-base is missing\n");
		return -EINVAL;
	}

	uc->slave_thread_id = val + args->args[1];

	dma->id = uc->id;
	pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
		 dma->id, uc->needs_epib,
		 uc->psd_size, uc->metadata_size,
		 uc->slave_thread_id);

	return 0;
}
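/*
 * dma cell layout handled above: args[0] is a phandle to the slave node,
 * args[1] indexes its ti,psil-configN subnode and offsets ti,psil-base to
 * form the slave thread id, and args[2] selects UDMA_DIR_TX / UDMA_DIR_RX.
 */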
int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t dma_dst;
	struct udma_chan *uc;
	u32 desc_num;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;

	if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
		return -EINVAL;

	desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
	desc_rx = uc->desc_rx + (desc_num * uc->hdesc_size);
	dma_dst = (dma_addr_t)dst;

	cppi5_hdesc_reset_hbdesc(desc_rx);

	cppi5_hdesc_init(desc_rx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_rx, size);
	cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

	flush_dcache_range((unsigned long)desc_rx,
			   ALIGN((unsigned long)desc_rx + uc->hdesc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->rchan->fd_ring, desc_rx);

	uc->num_rx_bufs++;
	uc->desc_rx_cur++;

	return 0;
}
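/*
 * The RX descriptors form a small cyclic pool: desc_rx_cur indexes into the
 * UDMA_RX_DESC_NUM slots allocated in udma_request(), while num_rx_bufs
 * tracks how many descriptors are currently queued on the free ring.
 */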
static int udma_get_cfg(struct dma *dma, u32 id, void **data)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	switch (id) {
	case TI_UDMA_CHAN_PRIV_INFO:
		uc = &ud->channels[dma->id];
		*data = &uc->cfg_data;
		return 0;
	}

	return -EINVAL;
}

static const struct dma_ops udma_ops = {
	.transfer	= udma_transfer,
	.of_xlate	= udma_of_xlate,
	.request	= udma_request,
	.rfree		= udma_rfree,
	.enable		= udma_enable,
	.disable	= udma_disable,
	.send		= udma_send,
	.receive	= udma_receive,
	.prepare_rcv_buf = udma_prepare_rcv_buf,
	.get_cfg	= udma_get_cfg,
};

static const struct udevice_id udma_ids[] = {
	{ .compatible = "ti,k3-navss-udmap" },
	{ .compatible = "ti,j721e-navss-mcu-udmap" },
	{ }
};

U_BOOT_DRIVER(ti_edma3) = {
	.name	= "ti-udma",
	.id	= UCLASS_DMA,
	.of_match = udma_ids,
	.ops	= &udma_ops,
	.probe	= udma_probe,
	.priv_auto_alloc_size = sizeof(struct udma_dev),
};