// SPDX-License-Identifier: GPL-2.0+
/*
 * Multicore Navigator driver for TI Keystone 2 devices.
 *
 * (C) Copyright 2012-2014
 *     Texas Instruments Incorporated, <www.ti.com>
 */
#include <common.h>
#include <asm/io.h>
#include <asm/ti-common/keystone_nav.h>
struct qm_config qm_memmap = {
	.stat_cfg = CONFIG_KSNAV_QM_QUEUE_STATUS_BASE,
	.queue = (void *)CONFIG_KSNAV_QM_MANAGER_QUEUES_BASE,
	.mngr_vbusm = CONFIG_KSNAV_QM_BASE_ADDRESS,
	.i_lram = CONFIG_KSNAV_QM_LINK_RAM_BASE,
	.proxy = (void *)CONFIG_KSNAV_QM_MANAGER_Q_PROXY_BASE,
	.status_ram = CONFIG_KSNAV_QM_STATUS_RAM_BASE,
	.mngr_cfg = (void *)CONFIG_KSNAV_QM_CONF_BASE,
	.intd_cfg = CONFIG_KSNAV_QM_INTD_CONF_BASE,
	.desc_mem = (void *)CONFIG_KSNAV_QM_DESC_SETUP_BASE,
	.region_num = CONFIG_KSNAV_QM_REGION_NUM,
	.pdsp_cmd = CONFIG_KSNAV_QM_PDSP1_CMD_BASE,
	.pdsp_ctl = CONFIG_KSNAV_QM_PDSP1_CTRL_BASE,
	.pdsp_iram = CONFIG_KSNAV_QM_PDSP1_IRAM_BASE,
	.qpool_num = CONFIG_KSNAV_QM_QPOOL_NUM,
};
/*
 * We are going to use only one type of descriptors - host packet
 * descriptors. We statically allocate memory for them here.
 */
struct qm_host_desc desc_pool[HDESC_NUM] __aligned(sizeof(struct qm_host_desc));

static struct qm_config *qm_cfg;
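/*
 * Map a descriptor count onto the region-size code used in
 * qm_cfg->desc_mem[].desc_reg_size: code 0 covers 32 descriptors and each
 * increment doubles the count, capped at code 15.
 */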
inline int num_of_desc_to_reg(int num_descr)
{
	int j, num;

	for (j = 0, num = 32; j < 15; j++, num *= 2) {
		if (num_descr <= num)
			return j;
	}

	return 15;
}
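/*
 * _qm_init() - bring up the hardware queue manager: program the internal
 * link RAM, describe descriptor memory region 0 (the statically allocated
 * desc_pool above) and push every descriptor onto the free-descriptor
 * queue qpool_num.
 */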
int _qm_init(struct qm_config *cfg)
{
	u32 j;

	qm_cfg = cfg;

	qm_cfg->mngr_cfg->link_ram_base0 = qm_cfg->i_lram;
	qm_cfg->mngr_cfg->link_ram_size0 = HDESC_NUM * 8 - 1;
	qm_cfg->mngr_cfg->link_ram_base1 = 0;
	qm_cfg->mngr_cfg->link_ram_size1 = 0;
	qm_cfg->mngr_cfg->link_ram_base2 = 0;

	qm_cfg->desc_mem[0].base_addr = (u32)desc_pool;
	qm_cfg->desc_mem[0].start_idx = 0;
	qm_cfg->desc_mem[0].desc_reg_size =
		(((sizeof(struct qm_host_desc) >> 4) - 1) << 16) |
		num_of_desc_to_reg(HDESC_NUM);

	memset(desc_pool, 0, sizeof(desc_pool));
	for (j = 0; j < HDESC_NUM; j++)
		qm_push(&desc_pool[j], qm_cfg->qpool_num);

	return QM_OK;
}

int qm_init(void)
{
	return _qm_init(&qm_memmap);
}
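/*
 * Typical bring-up order (illustrative sketch only, not part of this file):
 * a network driver initializes the queue manager first, then the packet
 * DMA.  Field names follow struct rx_buff_desc as used below; the buffer
 * area, counts, sizes, flow number and the netcp_pktdma symbol are
 * placeholders assumed to be provided by board/SoC code.
 *
 *	static u8 rx_bufs_area[32 * 1520] __aligned(16);
 *
 *	struct rx_buff_desc net_rx_buffs = {
 *		.buff_ptr  = rx_bufs_area,
 *		.num_buffs = 32,
 *		.buff_len  = 1520,
 *		.rx_flow   = 0,
 *	};
 *
 *	qm_init();
 *	ksnav_init(&netcp_pktdma, &net_rx_buffs);
 */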
void qm_close(void)
{
	u32 j;

	queue_close(qm_cfg->qpool_num);

	qm_cfg->mngr_cfg->link_ram_base0 = 0;
	qm_cfg->mngr_cfg->link_ram_size0 = 0;
	qm_cfg->mngr_cfg->link_ram_base1 = 0;
	qm_cfg->mngr_cfg->link_ram_size1 = 0;
	qm_cfg->mngr_cfg->link_ram_base2 = 0;

	for (j = 0; j < qm_cfg->region_num; j++) {
		qm_cfg->desc_mem[j].base_addr = 0;
		qm_cfg->desc_mem[j].start_idx = 0;
		qm_cfg->desc_mem[j].desc_reg_size = 0;
	}

	qm_cfg = NULL;
}
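/*
 * qm_push() - push a host descriptor onto queue qnum.  The descriptor
 * address is ORed with its size encoded as (size / 16) - 1 and written to
 * the queue's ptr_size_thresh register; cpu_to_bus() first converts the
 * descriptor contents to the byte order expected by the hardware.
 */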
void qm_push(struct qm_host_desc *hd, u32 qnum)
{
	u32 regd;

	cpu_to_bus((u32 *)hd, sizeof(struct qm_host_desc)/4);
	regd = (u32)hd | ((sizeof(struct qm_host_desc) >> 4) - 1);
	writel(regd, &qm_cfg->queue[qnum].ptr_size_thresh);
}
void qm_buff_push(struct qm_host_desc *hd, u32 qnum,
		  void *buff_ptr, u32 buff_len)
{
	hd->orig_buff_len = buff_len;
	hd->buff_len = buff_len;
	hd->orig_buff_ptr = (u32)buff_ptr;
	hd->buff_ptr = (u32)buff_ptr;
	qm_push(hd, qnum);
}
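/*
 * qm_pop() - pop a descriptor from queue qnum by reading its
 * ptr_size_thresh register.  The low four bits carry the size code and are
 * masked off; a read of zero means the queue is empty and NULL is returned.
 */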
struct qm_host_desc *qm_pop(u32 qnum)
{
	u32 uhd;

	uhd = readl(&qm_cfg->queue[qnum].ptr_size_thresh) & ~0xf;
	if (uhd)
		cpu_to_bus((u32 *)uhd, sizeof(struct qm_host_desc)/4);

	return (struct qm_host_desc *)uhd;
}
struct qm_host_desc *qm_pop_from_free_pool(void)
{
	return qm_pop(qm_cfg->qpool_num);
}
void queue_close(u32 qnum)
{
	struct qm_host_desc *hd;

	while ((hd = qm_pop(qnum)))
		;
}

/*
 * DMA API
 */
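/*
 * Channel teardown: request teardown by setting CPDMA_CHAN_A_TDOWN in the
 * channel's cfg_a register, then poll up to TDOWN_TIMEOUT_COUNT times for
 * the enable bit to clear.  A timeout is currently not treated as an error
 * (see the TODO comments below).
 */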
static int ksnav_rx_disable(struct pktdma_cfg *pktdma)
{
	u32 j, v, k;

	for (j = 0; j < pktdma->rx_ch_num; j++) {
		v = readl(&pktdma->rx_ch[j].cfg_a);
		if (!(v & CPDMA_CHAN_A_ENABLE))
			continue;

		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->rx_ch[j].cfg_a);
		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
			udelay(100);
			v = readl(&pktdma->rx_ch[j].cfg_a);
			if (!(v & CPDMA_CHAN_A_ENABLE))
				break;
		}
		/* TODO: teardown error if TDOWN_TIMEOUT_COUNT is reached */
	}

	/* Clear all of the flow registers */
	for (j = 0; j < pktdma->rx_flow_num; j++) {
		writel(0, &pktdma->rx_flows[j].control);
		writel(0, &pktdma->rx_flows[j].tags);
		writel(0, &pktdma->rx_flows[j].tag_sel);
		writel(0, &pktdma->rx_flows[j].fdq_sel[0]);
		writel(0, &pktdma->rx_flows[j].fdq_sel[1]);
		writel(0, &pktdma->rx_flows[j].thresh[0]);
		writel(0, &pktdma->rx_flows[j].thresh[1]);
		writel(0, &pktdma->rx_flows[j].thresh[2]);
	}

	return QM_OK;
}
static int ksnav_tx_disable(struct pktdma_cfg *pktdma)
{
	u32 j, v, k;

	for (j = 0; j < pktdma->tx_ch_num; j++) {
		v = readl(&pktdma->tx_ch[j].cfg_a);
		if (!(v & CPDMA_CHAN_A_ENABLE))
			continue;

		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->tx_ch[j].cfg_a);
		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
			udelay(100);
			v = readl(&pktdma->tx_ch[j].cfg_a);
			if (!(v & CPDMA_CHAN_A_ENABLE))
				break;
		}
		/* TODO: teardown error if TDOWN_TIMEOUT_COUNT is reached */
	}

	return QM_OK;
}
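/*
 * ksnav_init() - prepare the packet DMA for use: populate the rx free
 * descriptor queue with descriptors pointing into the caller-supplied
 * buffer area, program the rx flow so received packets land on rx_rcv_q
 * with free descriptors taken from rx_free_q, and enable the rx and tx
 * channels.
 */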
int ksnav_init(struct pktdma_cfg *pktdma, struct rx_buff_desc *rx_buffers)
{
	u32 j, v;
	struct qm_host_desc *hd;
	u8 *rx_ptr;

	if (pktdma == NULL || rx_buffers == NULL ||
	    rx_buffers->buff_ptr == NULL || qm_cfg == NULL)
		return QM_ERR;

	pktdma->rx_flow = rx_buffers->rx_flow;

	/* init rx queue */
	rx_ptr = rx_buffers->buff_ptr;

	for (j = 0; j < rx_buffers->num_buffs; j++) {
		hd = qm_pop(qm_cfg->qpool_num);
		if (hd == NULL)
			return QM_ERR;

		qm_buff_push(hd, pktdma->rx_free_q,
			     rx_ptr, rx_buffers->buff_len);

		rx_ptr += rx_buffers->buff_len;
	}
	ksnav_rx_disable(pktdma);

	/* configure rx channels */
	v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, pktdma->rx_rcv_q);
	writel(v, &pktdma->rx_flows[pktdma->rx_flow].control);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tags);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tag_sel);

	v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, pktdma->rx_free_q, 0,
					 pktdma->rx_free_q);

	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[0]);
	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[1]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[0]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[1]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[2]);

	for (j = 0; j < pktdma->rx_ch_num; j++)
		writel(CPDMA_CHAN_A_ENABLE, &pktdma->rx_ch[j].cfg_a);
	/* configure tx channels */
	/* Disable loopback in the tx direction */
	writel(0, &pktdma->global->emulation_control);

	/* Set QM base address, only for K2x devices */
	writel(CONFIG_KSNAV_QM_BASE_ADDRESS, &pktdma->global->qm_base_addr[0]);

	/* Enable all channels. The current state isn't important */
	for (j = 0; j < pktdma->tx_ch_num; j++) {
		writel(0, &pktdma->tx_ch[j].cfg_b);
		writel(CPDMA_CHAN_A_ENABLE, &pktdma->tx_ch[j].cfg_a);
	}

	return QM_OK;
}
int ksnav_close(struct pktdma_cfg *pktdma)
{
	if (!pktdma)
		return QM_ERR;

	ksnav_tx_disable(pktdma);
	ksnav_rx_disable(pktdma);

	queue_close(pktdma->rx_free_q);
	queue_close(pktdma->rx_rcv_q);
	queue_close(pktdma->tx_snd_q);

	return QM_OK;
}
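/*
 * ksnav_send() - transmit one packet: take a descriptor from the free
 * pool, record the payload length in desc_info, pass swinfo2 through in
 * swinfo[2] (software data the peripheral may interpret), store the
 * free-pool queue number in packet_info so the descriptor is recycled
 * after transmission, and push the descriptor onto tx_snd_q.
 */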
int ksnav_send(struct pktdma_cfg *pktdma, u32 *pkt, int num_bytes, u32 swinfo2)
{
	struct qm_host_desc *hd;

	hd = qm_pop(qm_cfg->qpool_num);
	if (hd == NULL)
		return QM_ERR;

	hd->desc_info = num_bytes;
	hd->swinfo[2] = swinfo2;
	hd->packet_info = qm_cfg->qpool_num;

	qm_buff_push(hd, pktdma->tx_snd_q, pkt, num_bytes);

	return QM_OK;
}
void *ksnav_recv(struct pktdma_cfg *pktdma, u32 **pkt, int *num_bytes)
{
	struct qm_host_desc *hd;

	hd = qm_pop(pktdma->rx_rcv_q);
	if (!hd)
		return NULL;

	*pkt = (u32 *)hd->buff_ptr;
	*num_bytes = hd->desc_info & 0x3fffff;

	return hd;
}
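/*
 * Descriptors handed out by ksnav_recv() must be given back with
 * ksnav_release_rxhd() once the payload has been consumed, e.g. (sketch
 * only; process_packet and netcp_pktdma are illustrative names):
 *
 *	u32 *pkt;
 *	int len;
 *	void *rxhd = ksnav_recv(&netcp_pktdma, &pkt, &len);
 *
 *	if (rxhd) {
 *		process_packet(pkt, len);
 *		ksnav_release_rxhd(&netcp_pktdma, rxhd);
 *	}
 */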
void ksnav_release_rxhd(struct pktdma_cfg *pktdma, void *hd)
{
	struct qm_host_desc *_hd = (struct qm_host_desc *)hd;

	_hd->buff_len = _hd->orig_buff_len;
	_hd->buff_ptr = _hd->orig_buff_ptr;

	qm_push(_hd, pktdma->rx_free_q);
}