Linux-libre 5.3.12-gnu: drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
// SPDX-License-Identifier: GPL-2.0-only
/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 */
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

#include "sxgbe_common.h"
#include "sxgbe_dma.h"
#include "sxgbe_reg.h"
#include "sxgbe_desc.h"

/* DMA core initialization */
static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
{
        u32 reg_val;

        reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

        /* If fix_burst = 0, set UNDEF = 1 in the DMA_Sys_Mode register.
         * If fix_burst = 1, keep UNDEF = 0 in the DMA_Sys_Mode register.
         * burst_map is a bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256].
         * Set burst_map irrespective of the fix_burst value.
         */
        if (!fix_burst)
                reg_val |= SXGBE_DMA_AXI_UNDEF_BURST;

        /* write burst len map */
        reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);

        writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

        return 0;
}

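/* DMA channel initialization: program the per-channel burst length (PBL),
 * the TX/RX descriptor list base and tail pointer registers, the ring
 * sizes, and enable the channel interrupts.
 */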
static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
                                   int fix_burst, int pbl, dma_addr_t dma_tx,
                                   dma_addr_t dma_rx, int t_rsize, int r_rsize)
{
        u32 reg_val;
        dma_addr_t dma_addr;

        reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
        /* set the pbl */
        if (fix_burst) {
                reg_val |= SXGBE_DMA_PBL_X8MODE;
                writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
                /* program the TX pbl */
                reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
                reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
                writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
                /* program the RX pbl */
                reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
                reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
                writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
        }

        /* program desc registers */
        writel(upper_32_bits(dma_tx),
               ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
        writel(lower_32_bits(dma_tx),
               ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));

        writel(upper_32_bits(dma_rx),
               ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
        writel(lower_32_bits(dma_rx),
               ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));

        /* program tail pointers.
         * Assumption: the upper 32 bits are constant and the same as the
         * TX/RX descriptor list base addresses.
         */
        dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
        writel(lower_32_bits(dma_addr),
               ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));

        dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
        writel(lower_32_bits(dma_addr),
               ioaddr + SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num));
        /* program the ring sizes */
        writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
        writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));

        /* Enable TX/RX interrupts */
        writel(SXGBE_DMA_ENA_INT,
               ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
}

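/* Tell the TX DMA engine of a channel to start/resume fetching and
 * processing transmit descriptors.
 */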
static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
{
        u32 tx_config;

        tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
        tx_config |= SXGBE_TX_START_DMA;
        writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
}

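/* Per-channel interrupt masking: writing SXGBE_DMA_ENA_INT unmasks all
 * TX/RX interrupt sources of a channel, writing 0 masks them all.
 */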
static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
        /* Enable TX/RX interrupts */
        writel(SXGBE_DMA_ENA_INT,
               ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}

static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
        /* Disable TX/RX interrupts */
        writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}

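/* Set the TX DMA enable bit on every transmit channel. */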
static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
{
        int cnum;
        u32 tx_ctl_reg;

        for (cnum = 0; cnum < tchannels; cnum++) {
                tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
                tx_ctl_reg |= SXGBE_TX_ENABLE;
                writel(tx_ctl_reg,
                       ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
        }
}

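/* Set the TX DMA enable bit on a single transmit channel. */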
static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
        u32 tx_ctl_reg;

        tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
        tx_ctl_reg |= SXGBE_TX_ENABLE;
        writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}

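/* Clear the TX DMA enable bit on a single transmit channel. */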
static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
        u32 tx_ctl_reg;

        tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
        tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
        writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}

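/* Clear the TX DMA enable bit on every transmit channel. */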
static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels)
{
        int cnum;
        u32 tx_ctl_reg;

        for (cnum = 0; cnum < tchannels; cnum++) {
                tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
                tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
                writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
        }
}

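/* Set the RX DMA enable bit on every receive channel. */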
static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels)
{
        int cnum;
        u32 rx_ctl_reg;

        for (cnum = 0; cnum < rchannels; cnum++) {
                rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
                rx_ctl_reg |= SXGBE_RX_ENABLE;
                writel(rx_ctl_reg,
                       ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
        }
}

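/* Clear the RX DMA enable bit on every receive channel. */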
static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels)
{
        int cnum;
        u32 rx_ctl_reg;

        for (cnum = 0; cnum < rchannels; cnum++) {
                rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
                rx_ctl_reg &= ~(SXGBE_RX_ENABLE);
                writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
        }
}

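/* Decode the TX-related bits of a channel's DMA status register: update
 * the extra statistics, clear the serviced status bits and return a
 * bitmask (handle_tx, tx_bump_tc, tx_hard_error) telling the caller what
 * to do next.
 */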
static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no,
                                   struct sxgbe_extra_stats *x)
{
        u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
        u32 clear_val = 0;
        u32 ret_val = 0;

        /* TX Normal Interrupt Summary */
        if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
                x->normal_irq_n++;
                if (int_status & SXGBE_DMA_INT_STATUS_TI) {
                        ret_val |= handle_tx;
                        x->tx_normal_irq_n++;
                        clear_val |= SXGBE_DMA_INT_STATUS_TI;
                }

                if (int_status & SXGBE_DMA_INT_STATUS_TBU) {
                        x->tx_underflow_irq++;
                        ret_val |= tx_bump_tc;
                        clear_val |= SXGBE_DMA_INT_STATUS_TBU;
                }
        } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
                /* TX Abnormal Interrupt Summary */
                if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
                        ret_val |= tx_hard_error;
                        clear_val |= SXGBE_DMA_INT_STATUS_TPS;
                        x->tx_process_stopped_irq++;
                }

                if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
                        ret_val |= tx_hard_error;
                        x->fatal_bus_error_irq++;

                        /* Assumption: the FBE bit is the combination of
                         * all the bus access errors and is cleared when
                         * the respective error bits are cleared.
                         */

                        /* check for actual cause */
                        if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
                                x->tx_read_transfer_err++;
                                clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
                        } else {
                                x->tx_write_transfer_err++;
                        }

                        if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
                                x->tx_desc_access_err++;
                                clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
                        } else {
                                x->tx_buffer_access_err++;
                        }

                        if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
                                x->tx_data_transfer_err++;
                                clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
                        }
                }

                /* context descriptor error */
                if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
                        x->tx_ctxt_desc_err++;
                        clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
                }
        }

        /* clear the served bits */
        writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

        return ret_val;
}

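/* Decode the RX-related bits of a channel's DMA status register: update
 * the extra statistics, clear the serviced status bits and return a
 * bitmask (handle_rx, rx_bump_tc, rx_hard_error) telling the caller what
 * to do next.
 */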
static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no,
                                   struct sxgbe_extra_stats *x)
{
        u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
        u32 clear_val = 0;
        u32 ret_val = 0;

        /* RX Normal Interrupt Summary */
        if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
                x->normal_irq_n++;
                if (int_status & SXGBE_DMA_INT_STATUS_RI) {
                        ret_val |= handle_rx;
                        x->rx_normal_irq_n++;
                        clear_val |= SXGBE_DMA_INT_STATUS_RI;
                }
        } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
                /* RX Abnormal Interrupt Summary */
                if (int_status & SXGBE_DMA_INT_STATUS_RBU) {
                        ret_val |= rx_bump_tc;
                        clear_val |= SXGBE_DMA_INT_STATUS_RBU;
                        x->rx_underflow_irq++;
                }

                if (int_status & SXGBE_DMA_INT_STATUS_RPS) {
                        ret_val |= rx_hard_error;
                        clear_val |= SXGBE_DMA_INT_STATUS_RPS;
                        x->rx_process_stopped_irq++;
                }

                if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
                        ret_val |= rx_hard_error;
                        x->fatal_bus_error_irq++;

                        /* Assumption: the FBE bit is the combination of
                         * all the bus access errors and is cleared when
                         * the respective error bits are cleared.
                         */

                        /* check for actual cause */
                        if (int_status & SXGBE_DMA_INT_STATUS_REB0) {
                                x->rx_read_transfer_err++;
                                clear_val |= SXGBE_DMA_INT_STATUS_REB0;
                        } else {
                                x->rx_write_transfer_err++;
                        }

                        if (int_status & SXGBE_DMA_INT_STATUS_REB1) {
                                x->rx_desc_access_err++;
                                clear_val |= SXGBE_DMA_INT_STATUS_REB1;
                        } else {
                                x->rx_buffer_access_err++;
                        }

                        if (int_status & SXGBE_DMA_INT_STATUS_REB2) {
                                x->rx_data_transfer_err++;
                                clear_val |= SXGBE_DMA_INT_STATUS_REB2;
                        }
                }
        }

        /* clear the served bits */
        writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

        return ret_val;
}

/* Program the HW RX Watchdog: riwt is the receive interrupt watchdog
 * timer count, used to coalesce RX interrupts on every receive queue.
 */
static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt)
{
        u32 que_num;

        SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) {
                writel(riwt,
                       ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num));
        }
}

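/* Enable TCP segmentation offload for a channel by setting the TSE bit
 * in its TX control register.
 */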
static void sxgbe_enable_tso(void __iomem *ioaddr, u8 chan_num)
{
        u32 ctrl;

        ctrl = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
        ctrl |= SXGBE_DMA_CHA_TXCTL_TSE_ENABLE;
        writel(ctrl, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
}

static const struct sxgbe_dma_ops sxgbe_dma_ops = {
        .init                           = sxgbe_dma_init,
        .cha_init                       = sxgbe_dma_channel_init,
        .enable_dma_transmission        = sxgbe_enable_dma_transmission,
        .enable_dma_irq                 = sxgbe_enable_dma_irq,
        .disable_dma_irq                = sxgbe_disable_dma_irq,
        .start_tx                       = sxgbe_dma_start_tx,
        .start_tx_queue                 = sxgbe_dma_start_tx_queue,
        .stop_tx                        = sxgbe_dma_stop_tx,
        .stop_tx_queue                  = sxgbe_dma_stop_tx_queue,
        .start_rx                       = sxgbe_dma_start_rx,
        .stop_rx                        = sxgbe_dma_stop_rx,
        .tx_dma_int_status              = sxgbe_tx_dma_int_status,
        .rx_dma_int_status              = sxgbe_rx_dma_int_status,
        .rx_watchdog                    = sxgbe_dma_rx_watchdog,
        .enable_tso                     = sxgbe_enable_tso,
};

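/* Export the DMA callbacks to the rest of the driver. An illustrative
 * (not verbatim) use by the core code:
 *
 *      const struct sxgbe_dma_ops *dma = sxgbe_get_dma_ops();
 *
 *      dma->init(ioaddr, fix_burst, burst_map);
 *      dma->cha_init(ioaddr, chan, fix_burst, pbl, dma_tx, dma_rx,
 *                    tx_rsize, rx_rsize);
 */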
const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void)
{
        return &sxgbe_dma_ops;
}