// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt

#include <asm/cache.h>
#include <asm/bitops.h>
#include <linux/dma-mapping.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/of_access.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <dt-bindings/dma/k3-udma.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"
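
/*
 * On 64-bit hosts the ring realtime registers can be used directly, so no
 * proxy is needed. On 32-bit hosts the rings are accessed through the
 * ringacc proxy datapath instead, presumably because the ring push/pop
 * would otherwise require wider atomic MMIO accesses than the CPU can do.
 */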
#if BITS_PER_LONG == 64
#define RINGACC_RING_USE_PROXY (0)
#else
#define RINGACC_RING_USE_PROXY (1)
#endif

#define K3_UDMA_MAX_RFLOWS 1024
static const char * const mmr_names[] = {
	"gcfg", "rchanrt", "tchanrt"
};
struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *t_ring; /* Transmit ring */
	struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
	struct k3_nav_ring *r_ring; /* Receive ring */
};
struct udma_tisci_rm {
	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
	u32 tisci_dev_id;

	/* tisci information for PSI-L thread pairing/unpairing */
	const struct ti_sci_rm_psil_ops *tisci_psil_ops;
	u32 tisci_navss_dev_id;

	struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
};
struct udma_dev {
	struct udevice *dev;
	void __iomem *mmrs[MMR_LAST];

	struct udma_tisci_rm tisci_rm;
	struct k3_nav_ringacc *ringacc;

	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_map;
	unsigned long *rflow_map_reserved;

	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;

	u32 ch_count;
};

struct udma_chan {
	struct udma_dev *ud;
	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	struct ti_udma_drv_chan_cfg_data cfg_data;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */

	enum dma_direction dir;

	struct cppi5_host_desc_t *desc_tx;
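
/*
 * Per-channel register window strides: the chanrt realtime register
 * regions appear to be laid out at a fixed 0x1000-byte stride per
 * channel (used below when mapping tchan/rchan reg_rt in probe); the
 * 0x100 and 0x40 strides cover the smaller per-channel blocks.
 */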
#define UDMA_CH_1000(ch)	((ch) * 0x1000)
#define UDMA_CH_100(ch)		((ch) * 0x100)
#define UDMA_CH_40(ch)		((ch) * 0x40)
#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM	PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM	4
#endif
/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	u32 v;

	v = __raw_readl(base + reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
	return v;
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
	__raw_writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 orig, tmp;

	orig = udma_read(base, reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		udma_write(base, reg, tmp);
}
static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
	if (!tchan)
		return 0;
	return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan,
				      int reg, u32 val)
{
	if (!tchan)
		return;
	udma_write(tchan->reg_rt, reg, val);
}

static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
	if (!rchan)
		return 0;
	return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan,
				      int reg, u32 val)
{
	if (!rchan)
		return;
	udma_write(rchan->reg_rt, reg, val);
}
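
/*
 * PSI-L threads are paired through the TISCI resource manager rather than
 * by direct register writes. Source threads are numbered from the NavSS
 * PSI-L base, while destination (receiving) threads carry bit 15, which
 * appears to be what UDMA_PSIL_DST_THREAD_ID_OFFSET ORs in below.
 */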
static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
				       u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
					 u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}
static inline char *udma_get_dir_text(enum dma_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	default:
		break;
	}

	return "invalid";
}

static inline bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, rrt_ctl,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, trt_ctl,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		break;
	default:
		break;
	}

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}
static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_nav_ring *ring = NULL;
	int ret = -ENOENT;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring && k3_nav_ringacc_ring_get_occ(ring))
		ret = k3_nav_ringacc_ring_pop(ring, addr);

	return ret;
}
static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_nav_ring *ring1 = NULL;
	struct k3_nav_ring *ring2 = NULL;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring1 = uc->rchan->fd_ring;
		ring2 = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring1)
		k3_nav_ringacc_ring_reset_dma(ring1, 0);
	if (ring2)
		k3_nav_ringacc_ring_reset(ring2);
}
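
/*
 * The channel realtime counters appear to be decrement-on-write: writing
 * back the value that was just read zeroes each counter, without needing
 * a separate reset register.
 */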
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}
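
/*
 * Hard stop: clear the enable bits on both the local and the peer side
 * immediately, without a graceful teardown handshake first. Presumably
 * only safe when the channel state is going to be thrown away anyway,
 * e.g. before reconfiguring a channel that may still be running.
 */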
static inline int udma_stop_hard(struct udma_chan *uc)
{
	pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int udma_start(struct udma_chan *uc)
{
	/* Channel is already running, no need to proceed further */
	if (udma_is_chan_running(uc))
		goto out;

	pr_debug("%s: chan:%d dir:%s (static_tr_type: %d)\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir),
		 uc->static_tr_type);

	/* Make sure that we clear the teardown bit, if it is set */
	udma_stop_hard(uc);

	/* Reset all counters */
	udma_reset_counters(uc);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_CTL_REG),
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_CTL_REG),
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		break;
	default:
		return -EINVAL;
	}

out:
	pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
	return 0;
}
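
/*
 * Graceful TX stop: keep EN set while raising TDOWN so in-flight
 * descriptors can drain; the hardware is expected to clear EN once the
 * teardown completes, which is what the sync loop below polls for.
 */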
static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
			   UDMA_CHAN_RT_CTL_EN |
			   UDMA_CHAN_RT_CTL_TDOWN);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
			printf(" %s TIMEOUT !\n", __func__);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			   UDMA_PEER_RT_EN_ENABLE |
			   UDMA_PEER_RT_EN_TEARDOWN);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
			printf("%s TIMEOUT !\n", __func__);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
static inline int udma_stop(struct udma_chan *uc)
{
	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	udma_reset_counters(uc);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_stop_dev2mem(uc, true);
		break;
	case DMA_MEM_TO_DEV:
		udma_stop_mem2dev(uc, true);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
	while (udma_pop_from_ring(uc, paddr)) {
static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
{
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	if (id >= 0) {
		if (test_bit(id, ud->rflow_map)) {
			dev_err(ud->dev, "rflow%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
			  ud->rflow_cnt);

		id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
		if (id >= ud->rflow_cnt)
			return ERR_PTR(-ENOENT);
	}

	__set_bit(id, ud->rflow_map);
	return &ud->rflows[id];
}
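
/*
 * Generates __udma_reserve_tchan() and __udma_reserve_rchan(): reserve a
 * specific channel when id >= 0, otherwise the first free one.
 */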
#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, #res "%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		id = find_first_zero_bit(ud->res##_map, ud->res##_cnt);	\
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	__set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);
static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, -1);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, -1);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

	return 0;
}
static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	for (chan_id = 0; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	__set_bit(chan_id, ud->tchan_map);
	__set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

	return 0;
}
static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	if (!uc->rchan)
		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

	uc->rflow = __udma_reserve_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);

	return 0;
}
static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		__clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		__clear_bit(uc->tchan->id, ud->tchan_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__clear_bit(uc->rflow->id, ud->rflow_map);
		uc->rflow = NULL;
	}
}
static void udma_free_tx_resources(struct udma_chan *uc)
	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;
static int udma_alloc_tx_resources(struct udma_chan *uc)
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	uc->tchan->t_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, uc->tchan->id,
				RINGACC_RING_USE_PROXY);
	if (!uc->tchan->t_ring) {

	uc->tchan->tc_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->tchan->tc_ring) {

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);

	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;

	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
static void udma_free_rx_resources(struct udma_chan *uc)
	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->fd_ring = NULL;
	uc->rchan->r_ring = NULL;
static int udma_alloc_rx_resources(struct udma_chan *uc)
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->dir == DMA_MEM_TO_MEM)
		return 0;

	ret = udma_get_rflow(uc, uc->rchan->id);

	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;

	uc->rchan->fd_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, fd_ring_id,
				RINGACC_RING_USE_PROXY);
	if (!uc->rchan->fd_ring) {

	uc->rchan->r_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->rchan->r_ring) {

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->rchan->fd_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->rchan->r_ring, &ring_cfg);

	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->r_ring = NULL;

	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	uc->rchan->fd_ring = NULL;
static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
	struct udma_dev *ud = uc->ud;
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->tchan->id;
	req.tx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM)
		req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		req.tx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
	req.txcq_qnum = tc_ring;

	ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);

	return ret;
static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rchan->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rchan->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->rchan->id;
	req.rx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM) {
		req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req.rxcq_qnum = tc_ring;
	} else {
		req.rx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
		req.rxcq_qnum = rx_ring;
	}
	if (uc->rflow->id != uc->rchan->id && uc->dir != DMA_MEM_TO_MEM) {
		req.flowid_start = uc->rflow->id;
		req.flowid_cnt = 1;
	}

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
			uc->rchan->id, ret);
		return ret;
	}
	if (uc->dir == DMA_MEM_TO_MEM)
		return 0;
	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;
	if (uc->psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;

	flow_req.rx_error_handling = 0;
	flow_req.rx_desc_type = 0;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = 2;
	flow_req.rx_src_tag_lo_sel = 4;
	flow_req.rx_dest_tag_hi_sel = 5;
	flow_req.rx_dest_tag_lo_sel = 4;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;
	flow_req.rx_ps_location = 0;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
						     &flow_req);
	if (ret)
		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
			uc->rchan->id, uc->rflow->id, ret);

	return ret;
static int udma_alloc_chan_resources(struct udma_chan *uc)
	struct udma_dev *ud = uc->ud;
	int ret;

	pr_debug("%s: chan:%d as %s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));
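
	/*
	 * Pick the PSI-L thread pair used for the pairing call below: the
	 * UDMA end of a thread is derived from the channel index plus the
	 * psil_base read from the DT, while the remote end comes from the
	 * slave thread ID resolved in udma_of_xlate(); destination (RX
	 * side) threads get bit 15 (0x8000) set.
	 */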
	switch (uc->dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		ret = udma_get_chan_pair(uc);

		ret = udma_alloc_tx_resources(uc);

		ret = udma_alloc_rx_resources(uc);

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		ret = udma_alloc_tx_resources(uc);

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = uc->slave_thread_id;
		if (!(uc->dst_thread & 0x8000))
			uc->dst_thread |= 0x8000;
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		ret = udma_alloc_rx_resources(uc);

		uc->src_thread = uc->slave_thread_id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	default:
		/* Cannot happen */
		pr_debug("%s: chan:%d invalid direction (%u)\n",
			 __func__, uc->id, uc->dir);
		return -EINVAL;
	}

	/* We have channel indexes and rings */
	if (uc->dir == DMA_MEM_TO_MEM) {
		ret = udma_alloc_tchan_sci_req(uc);

		ret = udma_alloc_rchan_sci_req(uc);

	/* Slave transfer */
	if (uc->dir == DMA_MEM_TO_DEV) {
		ret = udma_alloc_tchan_sci_req(uc);

		ret = udma_alloc_rchan_sci_req(uc);

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->src_thread, uc->dst_thread);
	if (ret) {
		dev_err(ud->dev, "k3_nav_psil_request_link fail\n");

	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	uc->slave_thread_id = -1;
static void udma_free_chan_resources(struct udma_chan *uc)
	/* Some configuration to UDMA-P channel: disable, reset, whatever */
	udma_stop_hard(uc);

	/* Release PSI-L pairing */
	udma_navss_psil_unpair(uc->ud, uc->src_thread, uc->dst_thread);

	/* Reset the rings for a new start */
	udma_reset_rings(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	uc->slave_thread_id = -1;
	uc->dir = DMA_MEM_TO_MEM;
static int udma_get_mmrs(struct udevice *dev)
	struct udma_dev *ud = dev_get_priv(dev);

	for (i = 0; i < MMR_LAST; i++) {
		ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
							       mmr_names[i]);
		if (!ud->mmrs[i])
			return -EINVAL;
	}

	return 0;
static int udma_setup_resources(struct udma_dev *ud)
	struct udevice *dev = ud->dev;
	int ch_count, i;
	u32 cap2, cap3;
	struct ti_sci_resource_desc *rm_desc;
	struct ti_sci_resource *rm_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	static const char * const range_names[] = { "ti,sci-rm-range-tchan",
						    "ti,sci-rm-range-rchan",
						    "ti,sci-rm-range-rflow" };
	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	ud->rflow_cnt = cap3 & 0x3fff;
	ud->tchan_cnt = cap2 & 0x1ff;
	ud->echan_cnt = (cap2 >> 9) & 0x1ff;
	ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
	ch_count = ud->tchan_cnt + ud->rchan_cnt;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
	    !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/*
	 * RX flows with the same Ids as RX channels are reserved to be used
	 * as default flows if remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by id.
	 */
	bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);
	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++)
		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);

	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		bitmap_zero(ud->rflow_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		bitmap_fill(ud->rflow_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		bitmap_clear(ud->rflow_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		bitmap_set(ud->rflow_map, ud->rchan_cnt,
			   ud->rflow_cnt - ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);

	dev_info(dev,
		 "Channels: %d (tchan: %u, echan: %u, rchan: %u, rflow: %u)\n",
		 ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 ud->rflow_cnt);

	return ch_count;
static int udma_probe(struct udevice *dev)
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct udma_dev *ud = dev_get_priv(dev);
	struct udevice *tmp;
	struct udevice *tisci_dev = NULL;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));

	ret = udma_get_mmrs(dev);
	if (ret)
		return ret;

	ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
					   "ti,ringacc", &tmp);
	ud->ringacc = dev_get_priv(tmp);
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	ud->psil_base = dev_read_u32_default(dev, "ti,psil-base", 0);
	if (!ud->psil_base) {
		"Missing ti,psil-base property, using %d.\n", ret);
		return -EINVAL;
	}

	ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
					   "ti,sci", &tisci_dev);
	if (ret) {
		debug("Failed to get TISCI phandle (%d)\n", ret);
		tisci_rm->tisci = NULL;
		return -EINVAL;
	}

	tisci_rm->tisci = (struct ti_sci_handle *)
			  (ti_sci_get_handle_from_sysfw(tisci_dev));

	tisci_rm->tisci_dev_id = -1;
	ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_navss_dev_id = -1;
	ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
			      &tisci_rm->tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
	tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;

	ud->dev = dev;
	ud->ch_count = udma_setup_resources(ud);
	if (ud->ch_count <= 0)
		return ud->ch_count;

	dev_info(dev,
		 "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
		 ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 tisci_rm->tisci_dev_id);
	dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);
	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ud->ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->id = i;
		uc->slave_thread_id = -1;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->dir = DMA_MEM_TO_MEM;
		sprintf(uc->name, "UDMA chan%d", i);
	}
	pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
		 udma_read(ud->mmrs[MMR_GCFG], 0),
		 udma_read(ud->mmrs[MMR_GCFG], 0x20),
		 udma_read(ud->mmrs[MMR_GCFG], 0x24),
		 udma_read(ud->mmrs[MMR_GCFG], 0x28),
		 udma_read(ud->mmrs[MMR_GCFG], 0x2c));

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;
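
/*
 * The ring element holds a dma_addr_t; going through memcpy() rather
 * than a cast keeps this correct even where pointers and dma_addr_t
 * differ in size (e.g. a 32-bit CPU with wider DMA addresses).
 */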
static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
	memcpy(&addr, &elem, sizeof(elem));
	return k3_nav_ringacc_ring_push(ring, &addr);
static int udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
				dma_addr_t src, size_t len)
	u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned long dummy;
	void *tr_desc;
	size_t desc_size;

	if (len < SZ_64K) {
		num_tr = 1;
		tr0_cnt0 = len;
		tr0_cnt1 = 1;
	} else {
		unsigned long align_to = __ffs(src | dest);

		if (align_to > 3)
			align_to = 3;
		/*
		 * Keep simple: tr0: SZ_64K-alignment blocks,
		 * tr1: the remaining
		 */
		num_tr = 2;
		tr0_cnt0 = (SZ_64K - BIT(align_to));
		if (len / tr0_cnt0 >= SZ_64K) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				len);
			return -EINVAL;
		}

		tr0_cnt1 = len / tr0_cnt0;
		tr1_cnt0 = len % tr0_cnt0;
	}
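
	/*
	 * Worked example (illustrative numbers, not from the original
	 * code): if align_to ends up as 3 (8-byte aligned src/dest), then
	 * tr0_cnt0 = SZ_64K - 8 = 65528. For len = 200000, tr0 moves
	 * tr0_cnt1 = 3 blocks of 65528 bytes and tr1 moves the remaining
	 * tr1_cnt0 = 200000 % 65528 = 3416 bytes.
	 */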
	desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
	tr_desc = dma_alloc_coherent(desc_size, &dummy);

	memset(tr_desc, 0, desc_size);

	cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);

	tr_req = tr_desc + tr_size;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);

	flush_dcache_range((unsigned long)tr_desc,
			   ALIGN((unsigned long)tr_desc + desc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->tchan->t_ring, tr_desc);

	return 0;
static int udma_transfer(struct udevice *dev, int direction,
			 void *dst, void *src, size_t len)
	struct udma_dev *ud = dev_get_priv(dev);
	/* Channel0 is reserved for memcpy */
	struct udma_chan *uc = &ud->channels[0];
	dma_addr_t paddr = 0;

	ret = udma_alloc_chan_resources(uc);
	if (ret)
		return ret;

	udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);

	udma_poll_completion(uc, &paddr);

	udma_free_chan_resources(uc);
static int udma_request(struct dma *dma)
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	unsigned long dummy;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	uc = &ud->channels[dma->id];
	ret = udma_alloc_chan_resources(uc);
	if (ret) {
		dev_err(dma->dev, "alloc dma res failed %d\n", ret);
		return -EINVAL;
	}

	uc->hdesc_size = cppi5_hdesc_calc_size(uc->needs_epib,
					       uc->psd_size, 0);
	uc->hdesc_size = ALIGN(uc->hdesc_size, ARCH_DMA_MINALIGN);

	if (uc->dir == DMA_MEM_TO_DEV) {
		uc->desc_tx = dma_alloc_coherent(uc->hdesc_size, &dummy);
		memset(uc->desc_tx, 0, uc->hdesc_size);
	} else {
		uc->desc_rx = dma_alloc_coherent(
				uc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
		memset(uc->desc_rx, 0, uc->hdesc_size * UDMA_RX_DESC_NUM);
	}

	uc->desc_rx_cur = 0;
	uc->num_rx_bufs = 0;

	if (uc->dir == DMA_DEV_TO_MEM) {
		uc->cfg_data.flow_id_base = uc->rflow->id;
		uc->cfg_data.flow_id_cnt = 1;
	}

	return 0;
static int udma_rfree(struct dma *dma)
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		udma_stop(uc);
	udma_free_chan_resources(uc);
static int udma_enable(struct dma *dma)
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	ret = udma_start(uc);

	return ret;

static int udma_disable(struct dma *dma)
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		ret = udma_stop(uc);
	else
		dev_err(dma->dev, "%s not running\n", __func__);

	return ret;
static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_tx;
	dma_addr_t dma_src = (dma_addr_t)src;
	struct ti_udma_drv_packet_data packet_data = { 0 };
	struct udma_chan *uc;

	if (metadata)
		packet_data = *((struct ti_udma_drv_packet_data *)metadata);

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_MEM_TO_DEV)
		return -EINVAL;

	tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

	desc_tx = uc->desc_tx;

	cppi5_hdesc_reset_hbdesc(desc_tx);

	cppi5_hdesc_init(desc_tx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_tx, len);
	cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
	cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
	/* pass below information from caller */
	cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
	cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);
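
	/*
	 * Both the payload and the descriptor live in cacheable memory;
	 * flush them so the DMA engine, which bypasses the CPU caches,
	 * sees the data just written.
	 */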
	flush_dcache_range((unsigned long)dma_src,
			   ALIGN((unsigned long)dma_src + len,
				 ARCH_DMA_MINALIGN));
	flush_dcache_range((unsigned long)desc_tx,
			   ALIGN((unsigned long)desc_tx + uc->hdesc_size,
				 ARCH_DMA_MINALIGN));

	ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
	if (ret) {
		dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
			dma->id, ret);
		return ret;
	}

	udma_poll_completion(uc, &paddr);
static int udma_receive(struct dma *dma, void **dst, void *metadata)
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	struct udma_chan *uc;
	u32 buf_dma_len, pkt_len;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;
	if (!uc->num_rx_bufs)
		return -EINVAL;

	ret = k3_nav_ringacc_ring_pop(uc->rchan->r_ring, &desc_rx);
	if (ret && ret != -ENODATA) {
		dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
		return ret;
	} else if (ret == -ENODATA) {
		return 0;
	}

	/* invalidate cache data */
	invalidate_dcache_range((ulong)desc_rx,
				(ulong)(desc_rx + uc->hdesc_size));

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

	/* invalidate cache data */
	invalidate_dcache_range((ulong)buf_dma,
				(ulong)(buf_dma + buf_dma_len));

	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	*dst = (void *)buf_dma;
static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc = &ud->channels[0];
	ofnode chconf_node, slave_node;

	for (val = 0; val < ud->ch_count; val++) {
		uc = &ud->channels[val];

	if (val == ud->ch_count)
		return -EBUSY;

	uc->dir = DMA_DEV_TO_MEM;
	if (args->args[2] == UDMA_DIR_TX)
		uc->dir = DMA_MEM_TO_DEV;

	slave_node = ofnode_get_by_phandle(args->args[0]);
	if (!ofnode_valid(slave_node)) {
		dev_err(ud->dev, "slave node is missing\n");
		return -EINVAL;
	}

	snprintf(prop, sizeof(prop), "ti,psil-config%u", args->args[1]);
	chconf_node = ofnode_find_subnode(slave_node, prop);
	if (!ofnode_valid(chconf_node)) {
		dev_err(ud->dev, "Channel configuration node is missing\n");
		return -EINVAL;
	}

	if (!ofnode_read_u32(chconf_node, "linux,udma-mode", &val)) {
		if (val == UDMA_PKT_MODE)
			uc->pkt_mode = true;
	}

	if (!ofnode_read_u32(chconf_node, "statictr-type", &val))
		uc->static_tr_type = val;

	uc->needs_epib = ofnode_read_bool(chconf_node, "ti,needs-epib");
	if (!ofnode_read_u32(chconf_node, "ti,psd-size", &val))
		uc->psd_size = val;
	uc->metadata_size = (uc->needs_epib ? 16 : 0) + uc->psd_size;

	if (ofnode_read_u32(slave_node, "ti,psil-base", &val)) {
		dev_err(ud->dev, "ti,psil-base is missing\n");
		return -EINVAL;
	}

	uc->slave_thread_id = val + args->args[1];

	dma->id = uc->id;
	pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
		 dma->id, uc->needs_epib,
		 uc->psd_size, uc->metadata_size,
		 uc->slave_thread_id);

	return 0;
int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;

	if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
		return -EINVAL;

	desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
	desc_rx = uc->desc_rx + (desc_num * uc->hdesc_size);
	dma_dst = (dma_addr_t)dst;

	cppi5_hdesc_reset_hbdesc(desc_rx);

	cppi5_hdesc_init(desc_rx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_rx, size);
	cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

	flush_dcache_range((unsigned long)desc_rx,
			   ALIGN((unsigned long)desc_rx + uc->hdesc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->rchan->fd_ring, desc_rx);

	uc->num_rx_bufs++;
	uc->desc_rx_cur++;

	return 0;
static int udma_get_cfg(struct dma *dma, u32 id, void **data)
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	switch (id) {
	case TI_UDMA_CHAN_PRIV_INFO:
		uc = &ud->channels[dma->id];
		*data = &uc->cfg_data;
		return 0;
	}

	return -EINVAL;
static const struct dma_ops udma_ops = {
	.transfer = udma_transfer,
	.of_xlate = udma_of_xlate,
	.request = udma_request,
	.rfree = udma_rfree,
	.enable = udma_enable,
	.disable = udma_disable,
	.send = udma_send,
	.receive = udma_receive,
	.prepare_rcv_buf = udma_prepare_rcv_buf,
	.get_cfg = udma_get_cfg,
};
static const struct udevice_id udma_ids[] = {
	{ .compatible = "ti,k3-navss-udmap" },
	{ .compatible = "ti,j721e-navss-mcu-udmap" },
	{ }
};

U_BOOT_DRIVER(ti_edma3) = {
	.name = "ti-udma",
	.id = UCLASS_DMA,
	.of_match = udma_ids,
	.ops = &udma_ops,
	.probe = udma_probe,
	.priv_auto_alloc_size = sizeof(struct udma_dev),
};