Linux-libre 5.3.12-gnu
[librecmc/linux-libre.git] / drivers / net / ethernet / stmicro / stmmac / dwxgmac2_dma.c
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
4  * stmmac XGMAC support.
5  */
6
7 #include <linux/iopoll.h>
8 #include "stmmac.h"
9 #include "dwxgmac2.h"
10
11 static int dwxgmac2_dma_reset(void __iomem *ioaddr)
12 {
13         u32 value = readl(ioaddr + XGMAC_DMA_MODE);
14
15         /* DMA SW reset */
16         writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE);
17
18         return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value,
19                                   !(value & XGMAC_SWR), 0, 100000);
20 }
21
22 static void dwxgmac2_dma_init(void __iomem *ioaddr,
23                               struct stmmac_dma_cfg *dma_cfg, int atds)
24 {
25         u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
26
27         if (dma_cfg->aal)
28                 value |= XGMAC_AAL;
29
30         writel(value | XGMAC_EAME, ioaddr + XGMAC_DMA_SYSBUS_MODE);
31 }
32
33 static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
34                                    struct stmmac_dma_cfg *dma_cfg, u32 chan)
35 {
36         u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
37
38         if (dma_cfg->pblx8)
39                 value |= XGMAC_PBLx8;
40
41         writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
42         writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
43 }
44
45 static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr,
46                                       struct stmmac_dma_cfg *dma_cfg,
47                                       dma_addr_t phy, u32 chan)
48 {
49         u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
50         u32 value;
51
52         value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
53         value &= ~XGMAC_RxPBL;
54         value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
55         writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
56
57         writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_HADDR(chan));
58         writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
59 }
60
61 static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr,
62                                       struct stmmac_dma_cfg *dma_cfg,
63                                       dma_addr_t phy, u32 chan)
64 {
65         u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
66         u32 value;
67
68         value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
69         value &= ~XGMAC_TxPBL;
70         value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL;
71         value |= XGMAC_OSP;
72         writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
73
74         writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_HADDR(chan));
75         writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
76 }
77
/* Program the AXI bus mode register from the supplied stmmac_axi settings:
 * LPI behaviour, outstanding-request limits and allowed burst lengths.
 */
static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
	u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
	int i;

	if (axi->axi_lpi_en)
		value |= XGMAC_EN_LPI;
	if (axi->axi_xit_frm)
		value |= XGMAC_LPI_XIT_PKT;

	/* Maximum outstanding AXI write requests */
	value &= ~XGMAC_WR_OSR_LMT;
	value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) &
		XGMAC_WR_OSR_LMT;

	/* Maximum outstanding AXI read requests */
	value &= ~XGMAC_RD_OSR_LMT;
	value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
		XGMAC_RD_OSR_LMT;

	if (!axi->axi_fb)
		value |= XGMAC_UNDEF;

	/* Rebuild the burst-length enable mask from scratch; entries in
	 * axi_blen[] that match no case are silently ignored.
	 */
	value &= ~XGMAC_BLEN;
	for (i = 0; i < AXI_BLEN; i++) {
		switch (axi->axi_blen[i]) {
		case 256:
			value |= XGMAC_BLEN256;
			break;
		case 128:
			value |= XGMAC_BLEN128;
			break;
		case 64:
			value |= XGMAC_BLEN64;
			break;
		case 32:
			value |= XGMAC_BLEN32;
			break;
		case 16:
			value |= XGMAC_BLEN16;
			break;
		case 8:
			value |= XGMAC_BLEN8;
			break;
		case 4:
			value |= XGMAC_BLEN4;
			break;
		}
	}

	writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
	/* Fixed TX/RX extended-DMA control values from dwxgmac2.h */
	writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL);
	writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
}
130
/* Configure the MTL RX queue operation mode for @channel: store-and-forward
 * vs threshold mode, queue size (RQS) and -- for large enough FIFOs on
 * non-AVB queues -- hardware flow control thresholds. Finally enables the
 * RX overflow interrupt for the queue.
 */
static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
				 u32 channel, int fifosz, u8 qmode)
{
	u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
	unsigned int rqs = fifosz / 256 - 1; /* RQS field is in 256B units, 0-based */

	if (mode == SF_DMA_MODE) {
		value |= XGMAC_RSF; /* store-and-forward */
	} else {
		value &= ~XGMAC_RSF;
		value &= ~XGMAC_RTC;

		/* Threshold mode: map the requested byte threshold onto the
		 * RTC register encoding.
		 */
		if (mode <= 64)
			value |= 0x0 << XGMAC_RTC_SHIFT;
		else if (mode <= 96)
			value |= 0x2 << XGMAC_RTC_SHIFT;
		else
			value |= 0x3 << XGMAC_RTC_SHIFT;
	}

	value &= ~XGMAC_RQS;
	value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;

	/* Hardware flow control only makes sense with a big enough FIFO and
	 * is not used on AVB queues.
	 */
	if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
		u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
		unsigned int rfd, rfa;

		value |= XGMAC_EHFC;

		/* Set Threshold for Activating Flow Control to min 2 frames,
		 * i.e. 1500 * 2 = 3000 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to min 1 frame,
		 * i.e. 1500 bytes.
		 */
		switch (fifosz) {
		case 4096:
			/* This violates the above formula because of FIFO size
			 * limit therefore overflow may occur in spite of this.
			 */
			rfd = 0x03; /* Full-2.5K */
			rfa = 0x01; /* Full-1.5K */
			break;

		case 8192:
			rfd = 0x06; /* Full-4K */
			rfa = 0x0a; /* Full-6K */
			break;

		case 16384:
			rfd = 0x06; /* Full-4K */
			rfa = 0x12; /* Full-10K */
			break;

		default:
			rfd = 0x06; /* Full-4K */
			rfa = 0x1e; /* Full-16K */
			break;
		}

		flow &= ~XGMAC_RFD;
		flow |= rfd << XGMAC_RFD_SHIFT;

		flow &= ~XGMAC_RFA;
		flow |= rfa << XGMAC_RFA_SHIFT;

		writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
	}

	writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));

	/* Enable MTL RX overflow */
	value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
	writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
}
206
/* Configure the MTL TX queue operation mode for @channel: store-and-forward
 * or threshold mode, static TC-to-queue mapping, queue-enable encoding and
 * queue size (TQS).
 */
static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
				 u32 channel, int fifosz, u8 qmode)
{
	u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
	unsigned int tqs = fifosz / 256 - 1; /* TQS field is in 256B units, 0-based */

	if (mode == SF_DMA_MODE) {
		value |= XGMAC_TSF; /* store-and-forward */
	} else {
		value &= ~XGMAC_TSF;
		value &= ~XGMAC_TTC;

		/* Threshold mode: map the requested byte threshold onto the
		 * TTC register encoding.
		 */
		if (mode <= 64)
			value |= 0x0 << XGMAC_TTC_SHIFT;
		else if (mode <= 96)
			value |= 0x2 << XGMAC_TTC_SHIFT;
		else if (mode <= 128)
			value |= 0x3 << XGMAC_TTC_SHIFT;
		else if (mode <= 192)
			value |= 0x4 << XGMAC_TTC_SHIFT;
		else if (mode <= 256)
			value |= 0x5 << XGMAC_TTC_SHIFT;
		else if (mode <= 384)
			value |= 0x6 << XGMAC_TTC_SHIFT;
		else
			value |= 0x7 << XGMAC_TTC_SHIFT;
	}

	/* Use static TC to Queue mapping */
	value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP;

	/* TXQEN: 0x2 = queue enabled, 0x1 = queue enabled for AVB only */
	value &= ~XGMAC_TXQEN;
	if (qmode != MTL_QUEUE_AVB)
		value |= 0x2 << XGMAC_TXQEN_SHIFT;
	else
		value |= 0x1 << XGMAC_TXQEN_SHIFT;

	value &= ~XGMAC_TQS;
	value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS;

	writel(value, ioaddr +  XGMAC_MTL_TXQ_OPMODE(channel));
}
249
250 static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan)
251 {
252         writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
253 }
254
255 static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan)
256 {
257         writel(0, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
258 }
259
260 static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
261 {
262         u32 value;
263
264         value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
265         value |= XGMAC_TXST;
266         writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
267
268         value = readl(ioaddr + XGMAC_TX_CONFIG);
269         value |= XGMAC_CONFIG_TE;
270         writel(value, ioaddr + XGMAC_TX_CONFIG);
271 }
272
273 static void dwxgmac2_dma_stop_tx(void __iomem *ioaddr, u32 chan)
274 {
275         u32 value;
276
277         value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
278         value &= ~XGMAC_TXST;
279         writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
280
281         value = readl(ioaddr + XGMAC_TX_CONFIG);
282         value &= ~XGMAC_CONFIG_TE;
283         writel(value, ioaddr + XGMAC_TX_CONFIG);
284 }
285
286 static void dwxgmac2_dma_start_rx(void __iomem *ioaddr, u32 chan)
287 {
288         u32 value;
289
290         value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
291         value |= XGMAC_RXST;
292         writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
293
294         value = readl(ioaddr + XGMAC_RX_CONFIG);
295         value |= XGMAC_CONFIG_RE;
296         writel(value, ioaddr + XGMAC_RX_CONFIG);
297 }
298
299 static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan)
300 {
301         u32 value;
302
303         value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
304         value &= ~XGMAC_RXST;
305         writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
306 }
307
308 static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
309                                   struct stmmac_extra_stats *x, u32 chan)
310 {
311         u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
312         u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
313         int ret = 0;
314
315         /* ABNORMAL interrupts */
316         if (unlikely(intr_status & XGMAC_AIS)) {
317                 if (unlikely(intr_status & XGMAC_TPS)) {
318                         x->tx_process_stopped_irq++;
319                         ret |= tx_hard_error;
320                 }
321                 if (unlikely(intr_status & XGMAC_FBE)) {
322                         x->fatal_bus_error_irq++;
323                         ret |= tx_hard_error;
324                 }
325         }
326
327         /* TX/RX NORMAL interrupts */
328         if (likely(intr_status & XGMAC_NIS)) {
329                 x->normal_irq_n++;
330
331                 if (likely(intr_status & XGMAC_RI)) {
332                         x->rx_normal_irq_n++;
333                         ret |= handle_rx;
334                 }
335                 if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
336                         x->tx_normal_irq_n++;
337                         ret |= handle_tx;
338                 }
339         }
340
341         /* Clear interrupts */
342         writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
343
344         return ret;
345 }
346
/* Decode the XGMAC hardware feature registers into @dma_cap.
 * Each field is a shifted extract of the corresponding HW_FEATURE bit(s);
 * the shift amounts match the XGMAC_HWFEAT_* definitions in dwxgmac2.h.
 */
static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
				    struct dma_features *dma_cap)
{
	u32 hw_cap;

	/*  MAC HW feature 0 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
	dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
	dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
	dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
	/* AV support requires both the AV and RX-side AV capability bits */
	dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
	dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
	dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
	dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
	dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;

	/* MAC HW feature 1 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
	dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;

	/* Translate the ADDR64 field encoding into an address width in
	 * bits; unknown encodings fall back to 32-bit addressing.
	 */
	dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
	switch (dma_cap->addr64) {
	case 0:
		dma_cap->addr64 = 32;
		break;
	case 1:
		dma_cap->addr64 = 40;
		break;
	case 2:
		dma_cap->addr64 = 48;
		break;
	default:
		dma_cap->addr64 = 32;
		break;
	}

	/* FIFO sizes are encoded as 128 << field */
	dma_cap->tx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
	dma_cap->rx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0);

	/* MAC HW feature 2 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2);
	dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24;
	/* Channel/queue counts are stored 0-based in hardware */
	dma_cap->number_tx_channel =
		((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1;
	dma_cap->number_rx_channel =
		((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1;
	dma_cap->number_tx_queues =
		((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
	dma_cap->number_rx_queues =
		((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;
}
400
401 static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan)
402 {
403         u32 i;
404
405         for (i = 0; i < nchan; i++)
406                 writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(i));
407 }
408
409 static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
410 {
411         writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan));
412 }
413
414 static void dwxgmac2_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
415 {
416         writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan));
417 }
418
419 static void dwxgmac2_set_rx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
420 {
421         writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan));
422 }
423
424 static void dwxgmac2_set_tx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
425 {
426         writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan));
427 }
428
429 static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
430 {
431         u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
432
433         if (en)
434                 value |= XGMAC_TSE;
435         else
436                 value &= ~XGMAC_TSE;
437
438         writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
439 }
440
441 static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
442 {
443         u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
444
445         value &= ~XGMAC_TXQEN;
446         if (qmode != MTL_QUEUE_AVB) {
447                 value |= 0x2 << XGMAC_TXQEN_SHIFT;
448                 writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
449         } else {
450                 value |= 0x1 << XGMAC_TXQEN_SHIFT;
451         }
452
453         writel(value, ioaddr +  XGMAC_MTL_TXQ_OPMODE(channel));
454 }
455
456 static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
457 {
458         u32 value;
459
460         value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
461         value |= bfsize << 1;
462         writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
463 }
464
/* DMA callback table for the DesignWare XGMAC 2.10 core, plugged into the
 * common stmmac framework via struct stmmac_dma_ops. dump_regs is not
 * implemented for this core.
 */
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
	.reset = dwxgmac2_dma_reset,
	.init = dwxgmac2_dma_init,
	.init_chan = dwxgmac2_dma_init_chan,
	.init_rx_chan = dwxgmac2_dma_init_rx_chan,
	.init_tx_chan = dwxgmac2_dma_init_tx_chan,
	.axi = dwxgmac2_dma_axi,
	.dump_regs = NULL,
	.dma_rx_mode = dwxgmac2_dma_rx_mode,
	.dma_tx_mode = dwxgmac2_dma_tx_mode,
	.enable_dma_irq = dwxgmac2_enable_dma_irq,
	.disable_dma_irq = dwxgmac2_disable_dma_irq,
	.start_tx = dwxgmac2_dma_start_tx,
	.stop_tx = dwxgmac2_dma_stop_tx,
	.start_rx = dwxgmac2_dma_start_rx,
	.stop_rx = dwxgmac2_dma_stop_rx,
	.dma_interrupt = dwxgmac2_dma_interrupt,
	.get_hw_feature = dwxgmac2_get_hw_feature,
	.rx_watchdog = dwxgmac2_rx_watchdog,
	.set_rx_ring_len = dwxgmac2_set_rx_ring_len,
	.set_tx_ring_len = dwxgmac2_set_tx_ring_len,
	.set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr,
	.enable_tso = dwxgmac2_enable_tso,
	.qmode = dwxgmac2_qmode,
	.set_bfsize = dwxgmac2_set_bfsize,
};