// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt

#include <common.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <malloc.h>
#include <asm/dma-mapping.h>
#include <dm.h>
#include <dm/device.h>
#include <dm/read.h>
#include <dm/of_access.h>
#include <dma.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <dt-bindings/dma/k3-udma.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"

#if BITS_PER_LONG == 64
#define RINGACC_RING_USE_PROXY	(0)
#else
#define RINGACC_RING_USE_PROXY	(1)
#endif

struct udma_chan;

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_LAST,
};

static const char * const mmr_names[] = {
	"gcfg", "rchanrt", "tchanrt"
};

struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *t_ring; /* Transmit ring */
	struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
	struct k3_nav_ring *r_ring; /* Receive ring */
};

struct udma_rflow {
	int id;
};

struct udma_dev {
	struct udevice *dev;

	void __iomem *mmrs[MMR_LAST];

	struct k3_nav_ringacc *ringacc;

	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_map;

	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;

	u32 ch_count;
	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
	const struct ti_sci_rm_psil_ops *tisci_psil_ops;
	u32 tisci_dev_id;
	u32 tisci_navss_dev_id;
	bool is_coherent;
};

struct udma_chan {
	struct udma_dev *ud;
	char name[20];

	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	int slave_thread_id;
	u32 src_thread;
	u32 dst_thread;
	u32 static_tr_type;

	u32 id;
	enum dma_direction dir;

	struct cppi5_host_desc_t *desc_tx;
	u32 hdesc_size;
	bool in_use;
	void *desc_rx;
	u32 num_rx_bufs;
	u32 desc_rx_cur;
};

#define UDMA_CH_1000(ch)		((ch) * 0x1000)
#define UDMA_CH_100(ch)			((ch) * 0x100)
#define UDMA_CH_40(ch)			((ch) * 0x40)

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM		PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM		4
#endif
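
/*
 * Illustrative note (editorial, not functional code): each channel's
 * realtime register block is 0x1000 bytes wide, so e.g. UDMA_CH_1000(2)
 * == 0x2000 is the byte offset of channel 2's RT registers from the
 * rchanrt/tchanrt MMR base mapped in udma_probe() below.
 */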

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	u32 v;

	v = __raw_readl(base + reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
	return v;
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
	__raw_writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = udma_read(base, reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		udma_write(base, reg, tmp);
}

static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
	if (!tchan)
		return 0;
	return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan,
				      int reg, u32 val)
{
	if (!tchan)
		return;
	udma_write(tchan->reg_rt, reg, val);
}

static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
	if (!rchan)
		return 0;
	return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan,
				      int reg, u32 val)
{
	if (!rchan)
		return;
	udma_write(rchan->reg_rt, reg, val);
}

static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
				       u32 dst_thread)
{
	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
	return ud->tisci_psil_ops->pair(ud->tisci,
					ud->tisci_navss_dev_id,
					src_thread, dst_thread);
}

static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
					 u32 dst_thread)
{
	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
	return ud->tisci_psil_ops->unpair(ud->tisci,
					  ud->tisci_navss_dev_id,
					  src_thread, dst_thread);
}
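
/*
 * Illustrative note: PSI-L destination thread IDs carry a flag in bit 15
 * (UDMA_PSIL_DST_THREAD_ID_OFFSET comes from k3-udma-hwdef.h and is
 * assumed here to be 0x8000, matching the literal used further down), so
 * pairing e.g. source thread 0x1003 with destination thread 0x08 really
 * asks sysfw to link 0x1003 -> 0x8008. The numbers are examples only.
 */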

static inline char *udma_get_dir_text(enum dma_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	default:
		return "invalid";
	}
}

static inline bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, rrt_ctl,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, trt_ctl,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		break;
	default:
		break;
	}

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static int udma_is_coherent(struct udma_chan *uc)
{
	return uc->ud->is_coherent;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_nav_ring *ring = NULL;
	int ret = -ENOENT;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring && k3_nav_ringacc_ring_get_occ(ring))
		ret = k3_nav_ringacc_ring_pop(ring, addr);

	return ret;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_nav_ring *ring1 = NULL;
	struct k3_nav_ring *ring2 = NULL;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring1 = uc->rchan->fd_ring;
		ring2 = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring1)
		k3_nav_ringacc_ring_reset_dma(ring1, 0);
	if (ring2)
		k3_nav_ringacc_ring_reset(ring2);
}

static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}
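
/*
 * Editorial note on the read-then-write-back pattern above: the UDMA
 * realtime counter registers are understood to decrement by the value
 * written to them, so writing back the value just read zeroes each
 * counter without a read-modify-write race.
 */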

static inline int udma_stop_hard(struct udma_chan *uc)
{
	pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int udma_start(struct udma_chan *uc)
{
	/* Channel is already running, no need to proceed further */
	if (udma_is_chan_running(uc))
		return 0;

	pr_debug("%s: chan:%d dir:%s (static_tr_type: %d)\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir),
		 uc->static_tr_type);

	/* Make sure that we clear the teardown bit, if it is set */
	udma_stop_hard(uc);

	/* Reset all counters */
	udma_reset_counters(uc);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_CTL_REG),
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		/* Enable remote */
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_CTL_REG),
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		break;
	default:
		return -EINVAL;
	}

	pr_debug("%s: DONE chan:%d\n", __func__, uc->id);

	return 0;
}

static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
			   UDMA_CHAN_RT_CTL_EN |
			   UDMA_CHAN_RT_CTL_TDOWN);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf(" %s TIMEOUT !\n", __func__);
			udma_stop_hard(uc);
			break;
		}
		i++;
	}

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			   UDMA_PEER_RT_EN_ENABLE |
			   UDMA_PEER_RT_EN_TEARDOWN);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s TIMEOUT !\n", __func__);
			udma_stop_hard(uc);
			break;
		}
		i++;
	}

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline int udma_stop(struct udma_chan *uc)
{
	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	udma_reset_counters(uc);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_stop_dev2mem(uc, true);
		break;
	case DMA_MEM_TO_DEV:
		udma_stop_mem2dev(uc, true);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
{
	int i = 1;

	/* udma_pop_from_ring() returns 0 once a descriptor is available */
	while (udma_pop_from_ring(uc, paddr)) {
		udelay(1);
		if (!(i % 1000000))
			printf(".");
		i++;
	}
}

#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, #res "%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		id = find_first_zero_bit(ud->res##_map, ud->res##_cnt);	\
		if (id == ud->res##_cnt)				\
			return ERR_PTR(-ENOENT);			\
	}								\
									\
	__set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);
UDMA_RESERVE_RESOURCE(rflow);
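
/*
 * Illustrative sketch (comment only, not compiled):
 * UDMA_RESERVE_RESOURCE(tchan) expands to roughly the following
 * allocator, and likewise for rchan and rflow:
 *
 *	static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
 *						       int id)
 *	{
 *		if (id >= 0) {		// caller asked for a fixed index
 *			if (test_bit(id, ud->tchan_map))
 *				return ERR_PTR(-ENOENT);
 *		} else {		// id < 0: pick the first free one
 *			id = find_first_zero_bit(ud->tchan_map, ud->tchan_cnt);
 *			if (id == ud->tchan_cnt)
 *				return ERR_PTR(-ENOENT);
 *		}
 *		__set_bit(id, ud->tchan_map);
 *		return &ud->tchans[id];
 *	}
 */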

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, -1);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: tchan%d is running!\n", uc->id,
			 uc->tchan->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc))
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
	}

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, -1);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: rchan%d is running!\n", uc->id,
			 uc->rchan->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc))
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
	}

	return 0;
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	for (chan_id = 0; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	__set_bit(chan_id, ud->tchan_map);
	__set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: t/rchan%d pair is running!\n",
			 uc->id, chan_id);
		udma_stop(uc);
		if (udma_is_chan_running(uc))
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
	}

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	if (!uc->rchan)
		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

	uc->rflow = __udma_reserve_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);

	return 0;
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		__clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		__clear_bit(uc->tchan->id, ud->tchan_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__clear_bit(uc->rflow->id, ud->rflow_map);
		uc->rflow = NULL;
	}
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	uc->tchan->t_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, uc->tchan->id,
				RINGACC_RING_USE_PROXY);
	if (!uc->tchan->t_ring) {
		ret = -EBUSY;
		goto err_tx_ring;
	}

	uc->tchan->tc_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->tchan->tc_ring) {
		ret = -EBUSY;
		goto err_txc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_MESSAGE;

	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
err_txc_ring:
	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_tx_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->fd_ring = NULL;
	uc->rchan->r_ring = NULL;

	udma_put_rflow(uc);
	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->dir == DMA_MEM_TO_MEM)
		return 0;

	ret = udma_get_rflow(uc, uc->rchan->id);
	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;

	uc->rchan->fd_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, fd_ring_id,
				RINGACC_RING_USE_PROXY);
	if (!uc->rchan->fd_ring) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	uc->rchan->r_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->rchan->r_ring) {
		ret = -EBUSY;
		goto err_rxc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_MESSAGE;

	ret = k3_nav_ringacc_ring_cfg(uc->rchan->fd_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->rchan->r_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->r_ring = NULL;
err_rxc_ring:
	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	uc->rchan->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}

static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
	req.nav_id = ud->tisci_dev_id;
	req.index = uc->tchan->id;
	req.tx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM)
		req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		req.tx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
	req.txcq_qnum = tc_ring;

	ret = ud->tisci_udmap_ops->tx_ch_cfg(ud->tisci, &req);
	if (ret)
		dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);

	return ret;
}

static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rchan->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rchan->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
	req.nav_id = ud->tisci_dev_id;
	req.index = uc->rchan->id;
	req.rx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM) {
		req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req.rxcq_qnum = tc_ring;
	} else {
		req.rx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
		req.rxcq_qnum = rx_ring;
	}
	if (uc->rflow->id != uc->rchan->id && uc->dir != DMA_MEM_TO_MEM) {
		req.flowid_start = uc->rflow->id;
		req.flowid_cnt = 1;
		req.valid_params |=
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
	}

	ret = ud->tisci_udmap_ops->rx_ch_cfg(ud->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
			uc->rchan->id, ret);
		return ret;
	}
	if (uc->dir == DMA_MEM_TO_MEM)
		return ret;

	flow_req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

	flow_req.nav_id = ud->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;

	if (uc->psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;

	flow_req.rx_error_handling = 0;
	flow_req.rx_desc_type = 0;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = 2;
	flow_req.rx_src_tag_lo_sel = 4;
	flow_req.rx_dest_tag_hi_sel = 5;
	flow_req.rx_dest_tag_lo_sel = 4;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;
	flow_req.rx_ps_location = 0;

	ret = ud->tisci_udmap_ops->rx_flow_cfg(ud->tisci, &flow_req);
	if (ret)
		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
			uc->rchan->id, uc->rflow->id, ret);

	return ret;
}

static int udma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	pr_debug("%s: chan:%d as %s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	switch (uc->dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		ret = udma_get_chan_pair(uc);
		if (ret)
			return ret;

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = uc->slave_thread_id;
		if (!(uc->dst_thread & 0x8000))
			uc->dst_thread |= 0x8000;
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = uc->slave_thread_id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	default:
		/* Can not happen */
		pr_debug("%s: chan:%d invalid direction (%u)\n",
			 __func__, uc->id, uc->dir);
		return -EINVAL;
	}

	/* We have channel indexes and rings */
	if (uc->dir == DMA_MEM_TO_MEM) {
		ret = udma_alloc_tchan_sci_req(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rchan_sci_req(uc);
		if (ret)
			goto err_free_res;
	} else {
		/* Slave transfer */
		if (uc->dir == DMA_MEM_TO_DEV) {
			ret = udma_alloc_tchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		} else {
			ret = udma_alloc_rchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		}
	}

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->src_thread, uc->dst_thread);
	if (ret) {
		dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
		goto err_free_res;
	}

	return 0;

err_free_res:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	uc->slave_thread_id = -1;
	return ret;
}
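
/*
 * Illustrative note on the thread composition above (numbers are
 * examples only): for a MEM_TO_DEV channel with ti,psil-base 0x1000 and
 * tchan 3, src_thread becomes 0x1003 and the peripheral's thread ID gets
 * bit 15 forced on, since PSI-L destination threads live in the 0x8000+
 * range (see udma_navss_psil_pair() earlier in this file).
 */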

static void udma_free_chan_resources(struct udma_chan *uc)
{
	/* Nothing channel-specific to disable or reset at this point */

	/* Release PSI-L pairing */
	udma_navss_psil_unpair(uc->ud, uc->src_thread, uc->dst_thread);

	/* Reset the rings for a new start */
	udma_reset_rings(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	uc->slave_thread_id = -1;
	uc->dir = DMA_MEM_TO_MEM;
}

static int udma_get_mmrs(struct udevice *dev)
{
	struct udma_dev *ud = dev_get_priv(dev);
	int i;

	for (i = 0; i < MMR_LAST; i++) {
		ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
							       mmr_names[i]);
		if (!ud->mmrs[i])
			return -EINVAL;
	}

	return 0;
}

#define UDMA_MAX_CHANNELS	192

static int udma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct udma_dev *ud = dev_get_priv(dev);
	int i, ret;
	u32 cap2, cap3;
	struct udevice *tmp;
	struct udevice *tisci_dev = NULL;

	ret = udma_get_mmrs(dev);
	if (ret)
		return ret;

	ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
					   "ti,ringacc", &tmp);
	if (ret)
		return ret;
	ud->ringacc = dev_get_priv(tmp);
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	ud->psil_base = dev_read_u32_default(dev, "ti,psil-base", 0);
	if (!ud->psil_base) {
		dev_err(dev, "Missing ti,psil-base property\n");
		return -EINVAL;
	}

	ret = uclass_get_device_by_name(UCLASS_FIRMWARE, "dmsc", &tisci_dev);
	if (ret) {
		debug("TISCI RA RM get failed (%d)\n", ret);
		ud->tisci = NULL;
	} else {
		ud->tisci = (struct ti_sci_handle *)
				(ti_sci_get_handle_from_sysfw(tisci_dev));

		ret = dev_read_u32_default(dev, "ti,sci", 0);
		if (!ret) {
			dev_err(dev, "TISCI RA RM disabled\n");
			ud->tisci = NULL;
		}
	}

	if (ud->tisci) {
		ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));

		ud->tisci_dev_id = -1;
		ret = dev_read_u32(dev, "ti,sci-dev-id", &ud->tisci_dev_id);
		if (ret) {
			dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
			return ret;
		}

		ud->tisci_navss_dev_id = -1;
		ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
				      &ud->tisci_navss_dev_id);
		if (ret) {
			dev_err(dev, "navss sci-dev-id read failure %d\n",
				ret);
			return ret;
		}

		ud->tisci_udmap_ops = &ud->tisci->ops.rm_udmap_ops;
		ud->tisci_psil_ops = &ud->tisci->ops.rm_psil_ops;
	}

	ud->is_coherent = dev_read_bool(dev, "dma-coherent");

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	ud->rflow_cnt = cap3 & 0x3fff;
	ud->tchan_cnt = cap2 & 0x1ff;
	ud->echan_cnt = (cap2 >> 9) & 0x1ff;
	ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
	ud->ch_count = ud->tchan_cnt + ud->rchan_cnt;

	dev_info(dev,
		 "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
		 ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 ud->tisci_dev_id);
	dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);

	ud->channels = devm_kcalloc(dev, ud->ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	ud->tchan_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->tchan_cnt),
				     sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt,
				  sizeof(*ud->tchans), GFP_KERNEL);
	ud->rchan_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
				     sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt,
				  sizeof(*ud->rchans), GFP_KERNEL);
	ud->rflow_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
				     sizeof(unsigned long), GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt,
				  sizeof(*ud->rflows), GFP_KERNEL);

	if (!ud->channels || !ud->tchan_map || !ud->rchan_map ||
	    !ud->rflow_map || !ud->tchans || !ud->rchans || !ud->rflows)
		return -ENOMEM;

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ud->ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->id = i;
		uc->slave_thread_id = -1;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->dir = DMA_MEM_TO_MEM;
		snprintf(uc->name, sizeof(uc->name), "UDMA chan%d", i);
		/* Channel 0 is reserved for the memcpy path */
		if (!i)
			uc->in_use = true;
	}

	pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
		 udma_read(ud->mmrs[MMR_GCFG], 0),
		 udma_read(ud->mmrs[MMR_GCFG], 0x20),
		 udma_read(ud->mmrs[MMR_GCFG], 0x24),
		 udma_read(ud->mmrs[MMR_GCFG], 0x28),
		 udma_read(ud->mmrs[MMR_GCFG], 0x2c));

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;

	return 0;
}

static int udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
				dma_addr_t src, size_t len)
{
	u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned long dummy;
	void *tr_desc;
	size_t desc_size;

	if (len < SZ_64K) {
		num_tr = 1;
		tr0_cnt0 = len;
		tr0_cnt1 = 1;
	} else {
		unsigned long align_to = __ffs(src | dest);

		if (align_to > 3)
			align_to = 3;
		/*
		 * Keep simple: tr0: SZ_64K-alignment blocks,
		 * tr1: the remaining
		 */
		num_tr = 2;
		tr0_cnt0 = (SZ_64K - BIT(align_to));
		if (len / tr0_cnt0 >= SZ_64K) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				len);
			return -EINVAL;
		}

		tr0_cnt1 = len / tr0_cnt0;
		tr1_cnt0 = len % tr0_cnt0;
	}
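
	/*
	 * Worked example for the split above (numbers illustrative):
	 * copying len = 200000 bytes with src | dest aligned to 8 gives
	 * align_to = 3 and tr0_cnt0 = SZ_64K - 8 = 65528. tr0 then moves
	 * tr0_cnt1 = 3 blocks of 65528 bytes, and tr1 moves the remaining
	 * tr1_cnt0 = 200000 % 65528 = 3416 bytes.
	 */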

	desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
	tr_desc = dma_alloc_coherent(desc_size, &dummy);
	if (!tr_desc)
		return -ENOMEM;
	memset(tr_desc, 0, desc_size);

	cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);

	tr_req = tr_desc + tr_size;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);

	if (!udma_is_coherent(uc)) {
		flush_dcache_range((u64)tr_desc,
				   ALIGN((u64)tr_desc + desc_size,
					 ARCH_DMA_MINALIGN));
	}

	k3_nav_ringacc_ring_push(uc->tchan->t_ring, &tr_desc);

	return 0;
}

static int udma_transfer(struct udevice *dev, int direction,
			 void *dst, void *src, size_t len)
{
	struct udma_dev *ud = dev_get_priv(dev);
	/* Channel0 is reserved for memcpy */
	struct udma_chan *uc = &ud->channels[0];
	dma_addr_t paddr = 0;
	int ret;

	ret = udma_alloc_chan_resources(uc);
	if (ret)
		return ret;

	udma_start(uc);
	udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
	udma_poll_completion(uc, &paddr);
	udma_stop(uc);

	udma_free_chan_resources(uc);
	return 0;
}

static int udma_request(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	unsigned long dummy;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	uc = &ud->channels[dma->id];
	ret = udma_alloc_chan_resources(uc);
	if (ret) {
		dev_err(dma->dev, "alloc dma res failed %d\n", ret);
		return -EINVAL;
	}

	uc->hdesc_size = cppi5_hdesc_calc_size(uc->needs_epib,
					       uc->psd_size, 0);
	uc->hdesc_size = ALIGN(uc->hdesc_size, ARCH_DMA_MINALIGN);

	if (uc->dir == DMA_MEM_TO_DEV) {
		uc->desc_tx = dma_alloc_coherent(uc->hdesc_size, &dummy);
		memset(uc->desc_tx, 0, uc->hdesc_size);
	} else {
		uc->desc_rx = dma_alloc_coherent(
				uc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
		memset(uc->desc_rx, 0, uc->hdesc_size * UDMA_RX_DESC_NUM);
	}

	uc->in_use = true;
	uc->desc_rx_cur = 0;
	uc->num_rx_bufs = 0;

	return 0;
}

static int udma_free(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		udma_stop(uc);
	udma_free_chan_resources(uc);

	uc->in_use = false;

	return 0;
}

static int udma_enable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	ret = udma_start(uc);

	return ret;
}

static int udma_disable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret = 0;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		ret = udma_stop(uc);
	else
		dev_err(dma->dev, "%s not running\n", __func__);

	return ret;
}

static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_tx;
	dma_addr_t dma_src = (dma_addr_t)src;
	struct ti_udma_drv_packet_data packet_data = { 0 };
	dma_addr_t paddr;
	struct udma_chan *uc;
	u32 tc_ring_id;
	int ret;

	if (metadata)
		packet_data = *((struct ti_udma_drv_packet_data *)metadata);

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_MEM_TO_DEV)
		return -EINVAL;

	tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

	desc_tx = uc->desc_tx;

	cppi5_hdesc_reset_hbdesc(desc_tx);

	cppi5_hdesc_init(desc_tx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_tx, len);
	cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
	cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
	/* pass below information from caller */
	cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
	cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);

	if (!udma_is_coherent(uc)) {
		flush_dcache_range((u64)dma_src,
				   ALIGN((u64)dma_src + len,
					 ARCH_DMA_MINALIGN));
		flush_dcache_range((u64)desc_tx,
				   ALIGN((u64)desc_tx + uc->hdesc_size,
					 ARCH_DMA_MINALIGN));
	}

	ret = k3_nav_ringacc_ring_push(uc->tchan->t_ring, &uc->desc_tx);
	if (ret) {
		dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
			dma->id, ret);
		return ret;
	}

	udma_poll_completion(uc, &paddr);

	return 0;
}

static int udma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t buf_dma;
	struct udma_chan *uc;
	u32 buf_dma_len, pkt_len;
	u32 port_id = 0;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;
	if (!uc->num_rx_bufs)
		return -EINVAL;

	ret = k3_nav_ringacc_ring_pop(uc->rchan->r_ring, &desc_rx);
	if (ret && ret != -ENODATA) {
		dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
		return ret;
	} else if (ret == -ENODATA) {
		return 0;
	}

	/* invalidate cache data */
	if (!udma_is_coherent(uc)) {
		invalidate_dcache_range((ulong)desc_rx,
					(ulong)desc_rx + uc->hdesc_size);
	}

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

	/* invalidate cache data */
	if (!udma_is_coherent(uc)) {
		invalidate_dcache_range((ulong)buf_dma,
					(ulong)(buf_dma + buf_dma_len));
	}

	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	*dst = (void *)buf_dma;
	uc->num_rx_bufs--;

	return pkt_len;
}

static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc = &ud->channels[0];
	ofnode chconf_node, slave_node;
	char prop[50];
	u32 val;

	for (val = 0; val < ud->ch_count; val++) {
		uc = &ud->channels[val];
		if (!uc->in_use)
			break;
	}

	if (val == ud->ch_count)
		return -EBUSY;

	uc->dir = DMA_DEV_TO_MEM;
	if (args->args[2] == UDMA_DIR_TX)
		uc->dir = DMA_MEM_TO_DEV;

	slave_node = ofnode_get_by_phandle(args->args[0]);
	if (!ofnode_valid(slave_node)) {
		dev_err(ud->dev, "slave node is missing\n");
		return -EINVAL;
	}

	snprintf(prop, sizeof(prop), "ti,psil-config%u", args->args[1]);
	chconf_node = ofnode_find_subnode(slave_node, prop);
	if (!ofnode_valid(chconf_node)) {
		dev_err(ud->dev, "Channel configuration node is missing\n");
		return -EINVAL;
	}

	if (!ofnode_read_u32(chconf_node, "linux,udma-mode", &val)) {
		if (val == UDMA_PKT_MODE)
			uc->pkt_mode = true;
	}

	if (!ofnode_read_u32(chconf_node, "statictr-type", &val))
		uc->static_tr_type = val;

	uc->needs_epib = ofnode_read_bool(chconf_node, "ti,needs-epib");
	if (!ofnode_read_u32(chconf_node, "ti,psd-size", &val))
		uc->psd_size = val;
	uc->metadata_size = (uc->needs_epib ? 16 : 0) + uc->psd_size;

	if (ofnode_read_u32(slave_node, "ti,psil-base", &val)) {
		dev_err(ud->dev, "ti,psil-base is missing\n");
		return -EINVAL;
	}

	uc->slave_thread_id = val + args->args[1];

	uc->in_use = true;
	dma->id = uc->id;

	pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
		 dma->id, uc->needs_epib,
		 uc->psd_size, uc->metadata_size,
		 uc->slave_thread_id);

	return 0;
}
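
/*
 * Illustrative (hypothetical) consumer device tree fragment for the
 * xlate logic above; the node names and the cpsw label are made up:
 *
 *	ethernet: cpsw-slave {
 *		ti,psil-base = <0x4000>;
 *		ti,psil-config0 { ... };
 *		dmas = <&udmap &ethernet 0 UDMA_DIR_TX>;
 *		dma-names = "tx";
 *	};
 *
 * args[0] carries the slave node phandle, args[1] selects that node's
 * "ti,psil-config<n>" subnode, and args[2] gives the direction
 * (UDMA_DIR_TX or UDMA_DIR_RX from dt-bindings/dma/k3-udma.h).
 */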

int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t dma_dst;
	struct udma_chan *uc;
	u32 desc_num;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;

	if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
		return -EINVAL;

	desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
	desc_rx = uc->desc_rx + (desc_num * uc->hdesc_size);
	dma_dst = (dma_addr_t)dst;

	cppi5_hdesc_reset_hbdesc(desc_rx);

	cppi5_hdesc_init(desc_rx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_rx, size);
	cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

	if (!udma_is_coherent(uc)) {
		flush_dcache_range((u64)desc_rx,
				   ALIGN((u64)desc_rx + uc->hdesc_size,
					 ARCH_DMA_MINALIGN));
	}

	k3_nav_ringacc_ring_push(uc->rchan->fd_ring, &desc_rx);

	uc->num_rx_bufs++;
	uc->desc_rx_cur++;

	return 0;
}
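
/*
 * Usage sketch (assumptions, not driver code): a network driver sitting
 * on top of the DMA uclass would typically drive the RX side as
 *
 *	dma_get_by_name(dev, "rx", &dma_rx);
 *	for (i = 0; i < UDMA_RX_DESC_NUM; i++)
 *		dma_prepare_rcv_buf(&dma_rx, net_rx_packets[i],
 *				    PKTSIZE_ALIGNED);
 *	dma_enable(&dma_rx);
 *	...
 *	len = dma_receive(&dma_rx, (void **)&pkt, &meta);
 *
 * i.e. free descriptors are queued before the channel is enabled, and
 * each consumed buffer is re-queued with dma_prepare_rcv_buf().
 */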

static const struct dma_ops udma_ops = {
	.transfer	= udma_transfer,
	.of_xlate	= udma_of_xlate,
	.request	= udma_request,
	.free		= udma_free,
	.enable		= udma_enable,
	.disable	= udma_disable,
	.send		= udma_send,
	.receive	= udma_receive,
	.prepare_rcv_buf = udma_prepare_rcv_buf,
};

static const struct udevice_id udma_ids[] = {
	{ .compatible = "ti,k3-navss-udmap" },
	{ }
};

U_BOOT_DRIVER(ti_edma3) = {
	.name	= "ti-udma",
	.id	= UCLASS_DMA,
	.of_match = udma_ids,
	.ops	= &udma_ops,
	.probe	= udma_probe,
	.priv_auto_alloc_size = sizeof(struct udma_dev),
};