// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt

#include <common.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <malloc.h>
#include <asm/dma-mapping.h>
#include <dm.h>
#include <dm/device.h>
#include <dm/of_access.h>
#include <dm/read.h>
#include <dma.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <dt-bindings/dma/k3-udma.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"

#if BITS_PER_LONG == 64
#define RINGACC_RING_USE_PROXY	(0)
#else
#define RINGACC_RING_USE_PROXY	(1)
#endif

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_LAST,
};

static const char * const mmr_names[] = {
	"gcfg", "rchanrt", "tchanrt"
};

struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *t_ring; /* Transmit ring */
	struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
	struct k3_nav_ring *r_ring; /* Receive ring */
};

struct udma_rflow {
	int id;
};

struct udma_dev {
	struct udevice *dev;
	void __iomem *mmrs[MMR_LAST];

	struct k3_nav_ringacc *ringacc;

	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_map;

	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;
	u32 ch_count;

	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
	const struct ti_sci_rm_psil_ops *tisci_psil_ops;
	u32 tisci_dev_id;
	u32 tisci_navss_dev_id;
	bool is_coherent;
};

struct udma_chan {
	struct udma_dev *ud;
	char name[20];

	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	int slave_thread_id;
	u32 src_thread;
	u32 dst_thread;
	u32 static_tr_type;

	bool in_use;
	enum dma_direction dir;

	struct cppi5_host_desc_t *desc_tx;
	u32 hdesc_size;
	void *desc_rx;
	int id;
	u32 desc_rx_cur;
	u32 num_rx_bufs;
};

#define UDMA_CH_1000(ch)	((ch) * 0x1000)
#define UDMA_CH_100(ch)		((ch) * 0x100)
#define UDMA_CH_40(ch)		((ch) * 0x40)

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM	PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM	4
#endif

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	u32 v;

	v = __raw_readl(base + reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
	return v;
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
	__raw_writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = udma_read(base, reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		udma_write(base, reg, tmp);
}

static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
	if (!tchan)
		return 0;
	return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan,
				      int reg, u32 val)
{
	if (!tchan)
		return;
	udma_write(tchan->reg_rt, reg, val);
}

static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
	if (!rchan)
		return 0;
	return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan,
				      int reg, u32 val)
{
	if (!rchan)
		return;
	udma_write(rchan->reg_rt, reg, val);
}

static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
				       u32 dst_thread)
{
	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
	return ud->tisci_psil_ops->pair(ud->tisci,
					ud->tisci_navss_dev_id,
					src_thread, dst_thread);
}

static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
					 u32 dst_thread)
{
	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
	return ud->tisci_psil_ops->unpair(ud->tisci,
					  ud->tisci_navss_dev_id,
					  src_thread, dst_thread);
}

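/*
 * Illustrative only: PSI-L destination threads are distinguished from source
 * threads by a high bit in the thread ID (UDMA_PSIL_DST_THREAD_ID_OFFSET,
 * the same "| 0x8000" applied to dst_thread in udma_alloc_chan_resources()
 * below). Pairing tchan0 of a UDMAP with a hypothetical psil_base of 0x1000
 * to peer thread 0x4100 would look like:
 *
 *	udma_navss_psil_pair(ud, 0x1000 + 0, 0x4100);
 *	// pair() then receives src 0x1000, dst 0x4100 with the
 *	// destination-thread offset OR-ed in
 */
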
static inline char *udma_get_dir_text(enum dma_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	case DMA_DEV_TO_DEV:
		return "DEV_TO_DEV";
	default:
		break;
	}

	return "invalid";
}

static inline bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, rrt_ctl,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, trt_ctl,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		break;
	default:
		break;
	}

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static int udma_is_coherent(struct udma_chan *uc)
{
	return uc->ud->is_coherent;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_nav_ring *ring = NULL;
	int ret = -ENOENT;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring && k3_nav_ringacc_ring_get_occ(ring))
		ret = k3_nav_ringacc_ring_pop(ring, addr);

	return ret;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_nav_ring *ring1 = NULL;
	struct k3_nav_ring *ring2 = NULL;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring1 = uc->rchan->fd_ring;
		ring2 = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring1)
		k3_nav_ringacc_ring_reset_dma(ring1, 0);
	if (ring2)
		k3_nav_ringacc_ring_reset(ring2);
}

static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}

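/*
 * Assumption worth stating for the read-then-write-back pattern above: the
 * UDMAP realtime counter registers decrement by the value written to them,
 * so writing back the value just read zeroes each counter without racing
 * against concurrent hardware updates.
 */
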
static inline int udma_stop_hard(struct udma_chan *uc)
{
	pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int udma_start(struct udma_chan *uc)
{
	/* Channel is already running, no need to proceed further */
	if (udma_is_chan_running(uc))
		return 0;

	pr_debug("%s: chan:%d dir:%s (static_tr_type: %d)\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir),
		 uc->static_tr_type);

	/* Make sure that we clear the teardown bit, if it is set */
	udma_stop_hard(uc);

	/* Reset all counters */
	udma_reset_counters(uc);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_CTL_REG),
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		/* Enable remote */
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_CTL_REG),
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		break;
	default:
		return -EINVAL;
	}

	pr_debug("%s: DONE chan:%d\n", __func__, uc->id);

	return 0;
}

static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
			   UDMA_CHAN_RT_CTL_EN |
			   UDMA_CHAN_RT_CTL_TDOWN);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf(" %s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			   UDMA_PEER_RT_EN_ENABLE |
			   UDMA_PEER_RT_EN_TEARDOWN);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline int udma_stop(struct udma_chan *uc)
{
	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	udma_reset_counters(uc);
	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_stop_dev2mem(uc, true);
		break;
	case DMA_MEM_TO_DEV:
		udma_stop_mem2dev(uc, true);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
{
	/* Busy-wait until a completed descriptor can be popped */
	while (udma_pop_from_ring(uc, paddr)) {
		udelay(1);
	}
}

#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, #res "%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		id = find_first_zero_bit(ud->res##_map, ud->res##_cnt);	\
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	__set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);
UDMA_RESERVE_RESOURCE(rflow);

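/*
 * Illustrative only: for 'tchan' the macro above expands (roughly) to
 *
 *	static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
 *						       int id)
 *	{
 *		...
 *		id = find_first_zero_bit(ud->tchan_map, ud->tchan_cnt);
 *		...
 *		__set_bit(id, ud->tchan_map);
 *		return &ud->tchans[id];
 *	}
 *
 * i.e. res##_map becomes the tchan_map bitmap and res##s the tchans array
 * declared in struct udma_dev.
 */
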
static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, -1);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, -1);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

	return 0;
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	for (chan_id = 0; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	__set_bit(chan_id, ud->tchan_map);
	__set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	if (!uc->rchan)
		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

	uc->rflow = __udma_reserve_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);

	return 0;
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		__clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		__clear_bit(uc->tchan->id, ud->tchan_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__clear_bit(uc->rflow->id, ud->rflow_map);
		uc->rflow = NULL;
	}
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	uc->tchan->t_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, uc->tchan->id,
				RINGACC_RING_USE_PROXY);
	if (!uc->tchan->t_ring) {
		ret = -EBUSY;
		goto err_tx_ring;
	}

	uc->tchan->tc_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->tchan->tc_ring) {
		ret = -EBUSY;
		goto err_txc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_MESSAGE;

	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
err_txc_ring:
	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_tx_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->fd_ring = NULL;
	uc->rchan->r_ring = NULL;

	udma_put_rflow(uc);
	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->dir == DMA_MEM_TO_MEM)
		return 0;

	ret = udma_get_rflow(uc, uc->rchan->id);
	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;

	uc->rchan->fd_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, fd_ring_id,
				RINGACC_RING_USE_PROXY);
	if (!uc->rchan->fd_ring) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	uc->rchan->r_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->rchan->r_ring) {
		ret = -EBUSY;
		goto err_rxc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_MESSAGE;

	ret = k3_nav_ringacc_ring_cfg(uc->rchan->fd_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->rchan->r_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->r_ring = NULL;
err_rxc_ring:
	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	uc->rchan->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}

static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req = { 0 };
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
	req.nav_id = ud->tisci_dev_id;
	req.index = uc->tchan->id;
	req.tx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM)
		req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		req.tx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
	req.txcq_qnum = tc_ring;

	ret = ud->tisci_udmap_ops->tx_ch_cfg(ud->tisci, &req);
	if (ret)
		dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);

	return ret;
}

static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rchan->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rchan->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
	req.nav_id = ud->tisci_dev_id;
	req.index = uc->rchan->id;
	req.rx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM) {
		req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req.rxcq_qnum = tc_ring;
	} else {
		req.rx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
		req.rxcq_qnum = rx_ring;
	}
	if (uc->rflow->id != uc->rchan->id && uc->dir != DMA_MEM_TO_MEM) {
		req.flowid_start = uc->rflow->id;
		req.flowid_cnt = 1;
		req.valid_params |=
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
	}

	ret = ud->tisci_udmap_ops->rx_ch_cfg(ud->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
			uc->rchan->id, ret);
		return ret;
	}
	if (uc->dir == DMA_MEM_TO_MEM)
		return ret;

	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

	flow_req.nav_id = ud->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;

	if (uc->psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;

	flow_req.rx_error_handling = 0;
	flow_req.rx_desc_type = 0;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = 2;
	flow_req.rx_src_tag_lo_sel = 4;
	flow_req.rx_dest_tag_hi_sel = 5;
	flow_req.rx_dest_tag_lo_sel = 4;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;
	flow_req.rx_ps_location = 0;

	ret = ud->tisci_udmap_ops->rx_flow_cfg(ud->tisci, &flow_req);
	if (ret)
		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
			uc->rchan->id, uc->rflow->id, ret);

	return ret;
}

static int udma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	pr_debug("%s: chan:%d as %s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	switch (uc->dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		ret = udma_get_chan_pair(uc);
		if (ret)
			return ret;

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = uc->slave_thread_id;
		if (!(uc->dst_thread & 0x8000))
			uc->dst_thread |= 0x8000;
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = uc->slave_thread_id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	default:
		/* Can not happen */
		pr_debug("%s: chan:%d invalid direction (%u)\n",
			 __func__, uc->id, uc->dir);
		return -EINVAL;
	}

	/* We have channel indexes and rings */
	if (uc->dir == DMA_MEM_TO_MEM) {
		ret = udma_alloc_tchan_sci_req(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rchan_sci_req(uc);
		if (ret)
			goto err_free_res;
	} else {
		/* Slave transfer */
		if (uc->dir == DMA_MEM_TO_DEV) {
			ret = udma_alloc_tchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		} else {
			ret = udma_alloc_rchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		}
	}

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_free_res;
		}
	}

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->src_thread, uc->dst_thread);
	if (ret) {
		dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
		goto err_free_res;
	}

	return 0;

err_free_res:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	uc->slave_thread_id = -1;
	return ret;
}

static void udma_free_chan_resources(struct udma_chan *uc)
{
	/* Some configuration to UDMA-P channel: disable, reset, whatever */

	/* Release PSI-L pairing */
	udma_navss_psil_unpair(uc->ud, uc->src_thread, uc->dst_thread);

	/* Reset the rings for a new start */
	udma_reset_rings(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	uc->slave_thread_id = -1;
	uc->dir = DMA_MEM_TO_MEM;
}

static int udma_get_mmrs(struct udevice *dev)
{
	struct udma_dev *ud = dev_get_priv(dev);
	int i;

	for (i = 0; i < MMR_LAST; i++) {
		ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
							       mmr_names[i]);
		if (!ud->mmrs[i])
			return -EINVAL;
	}

	return 0;
}

#define UDMA_MAX_CHANNELS	192

static int udma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct udma_dev *ud = dev_get_priv(dev);
	int i, ret;
	u32 cap2, cap3;
	struct udevice *tmp;
	struct udevice *tisci_dev = NULL;

	ud->dev = dev;

	ret = udma_get_mmrs(dev);
	if (ret)
		return ret;

	ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
					   "ti,ringacc", &tmp);
	ud->ringacc = dev_get_priv(tmp);
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	ud->psil_base = dev_read_u32_default(dev, "ti,psil-base", 0);
	if (!ud->psil_base) {
		dev_info(dev, "Missing ti,psil-base property\n");
		return -EINVAL;
	}

	ret = uclass_get_device_by_name(UCLASS_FIRMWARE, "dmsc", &tisci_dev);
	if (ret) {
		debug("TISCI RA RM get failed (%d)\n", ret);
		ud->tisci = NULL;
		return 0;
	}
	ud->tisci = (struct ti_sci_handle *)
		    (ti_sci_get_handle_from_sysfw(tisci_dev));

	ret = dev_read_u32_default(dev, "ti,sci", 0);
	if (!ret) {
		dev_err(dev, "TISCI RA RM disabled\n");
		ud->tisci = NULL;
	}

	if (ud->tisci) {
		ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));

		ud->tisci_dev_id = -1;
		ret = dev_read_u32(dev, "ti,sci-dev-id", &ud->tisci_dev_id);
		if (ret) {
			dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
			return ret;
		}

		ud->tisci_navss_dev_id = -1;
		ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
				      &ud->tisci_navss_dev_id);
		if (ret) {
			dev_err(dev, "navss sci-dev-id read failure %d\n",
				ret);
			return ret;
		}

		ud->tisci_udmap_ops = &ud->tisci->ops.rm_udmap_ops;
		ud->tisci_psil_ops = &ud->tisci->ops.rm_psil_ops;
	}

	ud->is_coherent = dev_read_bool(dev, "dma-coherent");

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	ud->rflow_cnt = cap3 & 0x3fff;
	ud->tchan_cnt = cap2 & 0x1ff;
	ud->echan_cnt = (cap2 >> 9) & 0x1ff;
	ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
	ud->ch_count = ud->tchan_cnt + ud->rchan_cnt;

	dev_info(dev,
		 "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
		 ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 ud->tisci_dev_id);
	dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);

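	/*
	 * Illustrative decode of the capability registers read above
	 * (GCFG + 0x28 / + 0x2c; field layout inferred from the masks and
	 * shifts used in this function):
	 *
	 *	CAP2 bits [8:0]   - tchan_cnt
	 *	CAP2 bits [17:9]  - echan_cnt
	 *	CAP2 bits [26:18] - rchan_cnt
	 *	CAP3 bits [13:0]  - rflow_cnt
	 *
	 * A hypothetical cap2 = 0x02580078 would thus decode to
	 * tchan_cnt = 120, echan_cnt = 0, rchan_cnt = 150.
	 */
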
	ud->channels = devm_kcalloc(dev, ud->ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	ud->tchan_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->tchan_cnt),
				     sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt,
				  sizeof(*ud->tchans), GFP_KERNEL);
	ud->rchan_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
				     sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt,
				  sizeof(*ud->rchans), GFP_KERNEL);
	ud->rflow_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
				     sizeof(unsigned long), GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt,
				  sizeof(*ud->rflows), GFP_KERNEL);

	if (!ud->channels || !ud->tchan_map || !ud->rchan_map ||
	    !ud->rflow_map || !ud->tchans || !ud->rchans || !ud->rflows)
		return -ENOMEM;

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ud->ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->id = i;
		uc->slave_thread_id = -1;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->dir = DMA_MEM_TO_MEM;
		sprintf(uc->name, "UDMA chan%d", i);
		if (!i)
			uc->in_use = true; /* channel 0 is reserved for memcpy */
	}

	pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
		 udma_read(ud->mmrs[MMR_GCFG], 0),
		 udma_read(ud->mmrs[MMR_GCFG], 0x20),
		 udma_read(ud->mmrs[MMR_GCFG], 0x24),
		 udma_read(ud->mmrs[MMR_GCFG], 0x28),
		 udma_read(ud->mmrs[MMR_GCFG], 0x2c));

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;

	return 0;
}

static int udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
				dma_addr_t src, size_t len)
{
	u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct cppi5_tr_type15_t *tr_req;
	void *tr_desc;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	size_t desc_size;
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned long dummy;
	int num_tr;

	if (len < SZ_64K) {
		num_tr = 1;
		tr0_cnt0 = len;
		tr0_cnt1 = 1;
	} else {
		unsigned long align_to = __ffs(src | dest);

		if (align_to > 3)
			align_to = 3;
		/*
		 * Keep simple: tr0: SZ_64K-alignment blocks,
		 * tr1: the remaining
		 */
		num_tr = 2;
		tr0_cnt0 = (SZ_64K - BIT(align_to));
		if (len / tr0_cnt0 >= SZ_64K) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				len);
			return -EINVAL;
		}

		tr0_cnt1 = len / tr0_cnt0;
		tr1_cnt0 = len % tr0_cnt0;
	}

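	/*
	 * Worked example (hypothetical values): len = 200000 with src and
	 * dest 8-byte aligned gives align_to = 3, so
	 * tr0_cnt0 = SZ_64K - BIT(3) = 65528, tr0_cnt1 = 200000 / 65528 = 3
	 * full blocks, and tr1_cnt0 = 200000 % 65528 = 3416 bytes left for
	 * the second TR.
	 */
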
	desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
	tr_desc = dma_alloc_coherent(desc_size, &dummy);
	if (!tr_desc)
		return -ENOMEM;
	memset(tr_desc, 0, desc_size);

	cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);

	tr_req = tr_desc + tr_size;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);

	if (!udma_is_coherent(uc)) {
		flush_dcache_range((u64)tr_desc,
				   ALIGN((u64)tr_desc + desc_size,
					 ARCH_DMA_MINALIGN));
	}

	k3_nav_ringacc_ring_push(uc->tchan->t_ring, &tr_desc);

	return 0;
}

static int udma_transfer(struct udevice *dev, int direction,
			 void *dst, void *src, size_t len)
{
	struct udma_dev *ud = dev_get_priv(dev);
	/* Channel0 is reserved for memcpy */
	struct udma_chan *uc = &ud->channels[0];
	dma_addr_t paddr = 0;
	int ret;

	ret = udma_alloc_chan_resources(uc);
	if (ret)
		return ret;

	udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
	udma_start(uc);
	udma_poll_completion(uc, &paddr);
	udma_stop(uc);

	udma_free_chan_resources(uc);

	return 0;
}

static int udma_request(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	unsigned long dummy;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	uc = &ud->channels[dma->id];
	ret = udma_alloc_chan_resources(uc);
	if (ret) {
		dev_err(dma->dev, "alloc dma res failed %d\n", ret);
		return -EINVAL;
	}

	uc->hdesc_size = cppi5_hdesc_calc_size(uc->needs_epib,
					       uc->psd_size, 0);
	uc->hdesc_size = ALIGN(uc->hdesc_size, ARCH_DMA_MINALIGN);

	if (uc->dir == DMA_MEM_TO_DEV) {
		uc->desc_tx = dma_alloc_coherent(uc->hdesc_size, &dummy);
		memset(uc->desc_tx, 0, uc->hdesc_size);
	} else {
		uc->desc_rx = dma_alloc_coherent(
				uc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
		memset(uc->desc_rx, 0, uc->hdesc_size * UDMA_RX_DESC_NUM);
	}

	uc->in_use = true;
	uc->desc_rx_cur = 0;
	uc->num_rx_bufs = 0;

	return 0;
}

static int udma_free(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		udma_stop(uc);
	udma_free_chan_resources(uc);

	uc->in_use = false;

	return 0;
}

static int udma_enable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	ret = udma_start(uc);

	return ret;
}

static int udma_disable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret = 0;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		ret = udma_stop(uc);
	else
		dev_err(dma->dev, "%s not running\n", __func__);

	return ret;
}

static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_tx;
	dma_addr_t dma_src = (dma_addr_t)src;
	struct ti_udma_drv_packet_data packet_data = { 0 };
	dma_addr_t paddr;
	struct udma_chan *uc;
	u32 tc_ring_id;
	int ret;

	if (metadata)
		packet_data = *((struct ti_udma_drv_packet_data *)metadata);

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_MEM_TO_DEV)
		return -EINVAL;

	tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

	desc_tx = uc->desc_tx;

	cppi5_hdesc_reset_hbdesc(desc_tx);

	cppi5_hdesc_init(desc_tx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_tx, len);
	cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
	cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
	/* pass below information from caller */
	cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
	cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);

	if (!udma_is_coherent(uc)) {
		flush_dcache_range((u64)dma_src,
				   ALIGN((u64)dma_src + len,
					 ARCH_DMA_MINALIGN));
		flush_dcache_range((u64)desc_tx,
				   ALIGN((u64)desc_tx + uc->hdesc_size,
					 ARCH_DMA_MINALIGN));
	}

	ret = k3_nav_ringacc_ring_push(uc->tchan->t_ring, &uc->desc_tx);
	if (ret) {
		dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
			dma->id, ret);
		return ret;
	}

	udma_poll_completion(uc, &paddr);

	return 0;
}

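/*
 * Note: udma_send() above is synchronous by design - it polls the completion
 * ring before returning, so the caller's buffer can be reused immediately.
 * This fits U-Boot's single-threaded execution model.
 */
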
static int udma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t buf_dma;
	struct udma_chan *uc;
	u32 buf_dma_len, pkt_len;
	u32 port_id = 0;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;
	if (!uc->num_rx_bufs)
		return -EINVAL;

	ret = k3_nav_ringacc_ring_pop(uc->rchan->r_ring, &desc_rx);
	if (ret && ret != -ENODATA) {
		dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
		return ret;
	} else if (ret == -ENODATA) {
		return 0;
	}

	/* invalidate cache data */
	if (!udma_is_coherent(uc)) {
		invalidate_dcache_range((ulong)desc_rx,
					(ulong)desc_rx + uc->hdesc_size);
	}

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

	/* invalidate cache data */
	if (!udma_is_coherent(uc)) {
		invalidate_dcache_range((ulong)buf_dma,
					(ulong)(buf_dma + buf_dma_len));
	}

	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	*dst = (void *)buf_dma;
	uc->num_rx_bufs--;

	return pkt_len;
}

static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc = &ud->channels[0];
	ofnode chconf_node, slave_node;
	char prop[50];
	u32 val;

	for (val = 0; val < ud->ch_count; val++) {
		uc = &ud->channels[val];
		if (!uc->in_use)
			break;
	}

	if (val == ud->ch_count)
		return -EBUSY;

	uc->dir = DMA_DEV_TO_MEM;
	if (args->args[2] == UDMA_DIR_TX)
		uc->dir = DMA_MEM_TO_DEV;

	slave_node = ofnode_get_by_phandle(args->args[0]);
	if (!ofnode_valid(slave_node)) {
		dev_err(ud->dev, "slave node is missing\n");
		return -EINVAL;
	}

	snprintf(prop, sizeof(prop), "ti,psil-config%u", args->args[1]);
	chconf_node = ofnode_find_subnode(slave_node, prop);
	if (!ofnode_valid(chconf_node)) {
		dev_err(ud->dev, "Channel configuration node is missing\n");
		return -EINVAL;
	}

	if (!ofnode_read_u32(chconf_node, "linux,udma-mode", &val)) {
		if (val == UDMA_PKT_MODE)
			uc->pkt_mode = true;
	}

	if (!ofnode_read_u32(chconf_node, "statictr-type", &val))
		uc->static_tr_type = val;

	uc->needs_epib = ofnode_read_bool(chconf_node, "ti,needs-epib");
	if (!ofnode_read_u32(chconf_node, "ti,psd-size", &val))
		uc->psd_size = val;
	uc->metadata_size = (uc->needs_epib ? 16 : 0) + uc->psd_size;

	if (ofnode_read_u32(slave_node, "ti,psil-base", &val)) {
		dev_err(ud->dev, "ti,psil-base is missing\n");
		return -EINVAL;
	}

	uc->slave_thread_id = val + args->args[1];

	dma->id = uc->id;
	pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
		 dma->id, uc->needs_epib,
		 uc->psd_size, uc->metadata_size,
		 uc->slave_thread_id);

	return 0;
}

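/*
 * Illustrative only: the of_xlate arguments consumed above are expected as
 * <slave-phandle psil-offset direction>:
 *	args->args[0] - phandle of the peripheral (slave) node
 *	args->args[1] - PSI-L thread offset, which also selects the slave's
 *			"ti,psil-config<n>" subnode
 *	args->args[2] - UDMA_DIR_TX or UDMA_DIR_RX
 */
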
int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t dma_dst;
	struct udma_chan *uc;
	u32 desc_num;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;

	if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
		return -EINVAL;

	desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
	desc_rx = uc->desc_rx + (desc_num * uc->hdesc_size);
	dma_dst = (dma_addr_t)dst;

	cppi5_hdesc_reset_hbdesc(desc_rx);

	cppi5_hdesc_init(desc_rx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_rx, size);
	cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

	if (!udma_is_coherent(uc)) {
		flush_dcache_range((u64)desc_rx,
				   ALIGN((u64)desc_rx + uc->hdesc_size,
					 ARCH_DMA_MINALIGN));
	}

	k3_nav_ringacc_ring_push(uc->rchan->fd_ring, &desc_rx);

	uc->num_rx_bufs++;
	uc->desc_rx_cur++;

	return 0;
}

static const struct dma_ops udma_ops = {
	.transfer = udma_transfer,
	.of_xlate = udma_of_xlate,
	.request = udma_request,
	.free = udma_free,
	.enable = udma_enable,
	.disable = udma_disable,
	.send = udma_send,
	.receive = udma_receive,
	.prepare_rcv_buf = udma_prepare_rcv_buf,
};

static const struct udevice_id udma_ids[] = {
	{ .compatible = "ti,k3-navss-udmap" },
	{ }
};

U_BOOT_DRIVER(ti_edma3) = {
	.name = "ti-udma",
	.id = UCLASS_DMA,
	.of_match = udma_ids,
	.ops = &udma_ops,
	.probe = udma_probe,
	.priv_auto_alloc_size = sizeof(struct udma_dev),
};