// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014-2017 Broadcom.
 */

#ifdef BCM_GMAC_DEBUG
#ifndef DEBUG
#define DEBUG
#endif
#endif

#include <config.h>
#include <common.h>
#include <cpu_func.h>
#include <malloc.h>
#include <net.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <phy.h>

#include "bcm-sf2-eth.h"
#include "bcm-sf2-eth-gmac.h"

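/*
 * Busy-wait helper: poll 'exp' in 10 us steps until it evaluates to
 * false or roughly 'us' microseconds have elapsed.
 */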
#define SPINWAIT(exp, us) { \
	uint countdown = (us) + 9; \
	while ((exp) && (countdown >= 10)) { \
		udelay(10); \
		countdown -= 10; \
	} \
}

#define RX_BUF_SIZE_ALIGNED	ALIGN(RX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define TX_BUF_SIZE_ALIGNED	ALIGN(TX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define DESCP_SIZE_ALIGNED	ALIGN(sizeof(dma64dd_t), ARCH_DMA_MINALIGN)

static int gmac_disable_dma(struct eth_dma *dma, int dir);
static int gmac_enable_dma(struct eth_dma *dma, int dir);

/* DMA Descriptor */
typedef struct {
	/* misc control bits */
	uint32_t	ctrl1;
	/* buffer count and address extension */
	uint32_t	ctrl2;
	/* memory address of the data buffer, bits 31:0 */
	uint32_t	addrlow;
	/* memory address of the data buffer, bits 63:32 */
	uint32_t	addrhigh;
} dma64dd_t;

uint32_t g_dmactrlflags;

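/*
 * Update the cached DMA control flags: clear the bits in 'mask', then set
 * 'flags'.  If parity (DMA_CTRL_PEN) is requested, probe the TX control
 * register to see whether the parity-disable bit sticks; if it does not,
 * parity is unsupported and the flag is dropped again.
 */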
static uint32_t dma_ctrlflags(uint32_t mask, uint32_t flags)
{
	debug("%s enter\n", __func__);

	g_dmactrlflags &= ~mask;
	g_dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (g_dmactrlflags & DMA_CTRL_PEN) {
		uint32_t control;

		control = readl(GMAC0_DMA_TX_CTRL_ADDR);
		writel(control | D64_XC_PD, GMAC0_DMA_TX_CTRL_ADDR);
		if (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_PD) {
			/*
			 * We *can* disable it, therefore it is supported;
			 * restore control register
			 */
			writel(control, GMAC0_DMA_TX_CTRL_ADDR);
		} else {
			/* Not supported, don't allow it to be enabled */
			g_dmactrlflags &= ~DMA_CTRL_PEN;
		}
	}

	return g_dmactrlflags;
}

static inline void reg32_clear_bits(uint32_t reg, uint32_t value)
{
	uint32_t v = readl(reg);
	v &= ~(value);
	writel(v, reg);
}

static inline void reg32_set_bits(uint32_t reg, uint32_t value)
{
	uint32_t v = readl(reg);
	v |= value;
	writel(v, reg);
}

#ifdef BCM_GMAC_DEBUG
static void dma_tx_dump(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;

	printf("TX DMA Register:\n");
	printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
	       readl(GMAC0_DMA_TX_CTRL_ADDR),
	       readl(GMAC0_DMA_TX_PTR_ADDR),
	       readl(GMAC0_DMA_TX_ADDR_LOW_ADDR),
	       readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR),
	       readl(GMAC0_DMA_TX_STATUS0_ADDR),
	       readl(GMAC0_DMA_TX_STATUS1_ADDR));

	printf("TX Descriptors:\n");
	for (i = 0; i < TX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
		printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
		       descp->ctrl1, descp->ctrl2,
		       descp->addrhigh, descp->addrlow);
	}

	printf("TX Buffers:\n");
	for (i = 0; i < TX_BUF_NUM; i++) {
		bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE_ALIGNED);
		printf("buf%d:0x%x; ", i, (uint32_t)bufp);
	}
	printf("\n");
}

static void dma_rx_dump(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;

	printf("RX DMA Register:\n");
	printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
	       readl(GMAC0_DMA_RX_CTRL_ADDR),
	       readl(GMAC0_DMA_RX_PTR_ADDR),
	       readl(GMAC0_DMA_RX_ADDR_LOW_ADDR),
	       readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR),
	       readl(GMAC0_DMA_RX_STATUS0_ADDR),
	       readl(GMAC0_DMA_RX_STATUS1_ADDR));

	printf("RX Descriptors:\n");
	for (i = 0; i < RX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
		printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
		       descp->ctrl1, descp->ctrl2,
		       descp->addrhigh, descp->addrlow);
	}

	printf("RX Buffers:\n");
	for (i = 0; i < RX_BUF_NUM; i++) {
		bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
		printf("buf%d:0x%x; ", i, (uint32_t)bufp);
	}
	printf("\n");
}
#endif

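/*
 * Build the TX descriptor ring: clear the descriptors and bounce buffers,
 * point each descriptor at its buffer, mark the last entry with EOT, flush
 * everything to memory and program the ring base address into the engine.
 */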
static int dma_tx_init(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;
	uint32_t ctrl;

	debug("%s enter\n", __func__);

	/* clear descriptor memory */
	memset((void *)(dma->tx_desc_aligned), 0,
	       TX_BUF_NUM * DESCP_SIZE_ALIGNED);
	memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE_ALIGNED);

	/* Initialize TX DMA descriptor table */
	for (i = 0; i < TX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
		bufp = dma->tx_buf + i * TX_BUF_SIZE_ALIGNED;
		/* clear buffer memory */
		memset((void *)bufp, 0, TX_BUF_SIZE_ALIGNED);

		ctrl = 0;
		/* if last descriptor, set EndOfTable */
		if (i == (TX_BUF_NUM - 1))
			ctrl = D64_CTRL1_EOT;
		descp->ctrl1 = ctrl;
		descp->ctrl2 = 0;
		descp->addrlow = (uint32_t)bufp;
		descp->addrhigh = 0;
	}

	/* flush descriptor and buffer */
	descp = dma->tx_desc_aligned;
	bufp = dma->tx_buf;
	flush_dcache_range((unsigned long)descp,
			   (unsigned long)descp +
			   DESCP_SIZE_ALIGNED * TX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp +
			   TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);

	/* initialize the DMA channel */
	writel((uint32_t)(dma->tx_desc_aligned), GMAC0_DMA_TX_ADDR_LOW_ADDR);
	writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);

	/* now update the dma last descriptor */
	writel(((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK,
	       GMAC0_DMA_TX_PTR_ADDR);

	return 0;
}

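/*
 * Build the RX descriptor ring: each descriptor owns one RX_BUF_SIZE_ALIGNED
 * buffer, the last entry carries EOT, and the last-descriptor pointer is
 * advanced past the final entry so the whole ring is available to the engine.
 */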
static int dma_rx_init(struct eth_dma *dma)
{
	uint32_t last_desc;
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	uint32_t ctrl;
	int i;

	debug("%s enter\n", __func__);

	/* clear descriptor memory */
	memset((void *)(dma->rx_desc_aligned), 0,
	       RX_BUF_NUM * DESCP_SIZE_ALIGNED);
	/* clear buffer memory */
	memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE_ALIGNED);

	/* Initialize RX DMA descriptor table */
	for (i = 0; i < RX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
		bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
		ctrl = 0;
		/* if last descriptor, set EndOfTable */
		if (i == (RX_BUF_NUM - 1))
			ctrl = D64_CTRL1_EOT;
		descp->ctrl1 = ctrl;
		descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
		descp->addrlow = (uint32_t)bufp;
		descp->addrhigh = 0;

		last_desc = ((uint32_t)(descp) & D64_XP_LD_MASK)
				+ sizeof(dma64dd_t);
	}

	descp = dma->rx_desc_aligned;
	bufp = dma->rx_buf;
	/* flush descriptor and buffer */
	flush_dcache_range((unsigned long)descp,
			   (unsigned long)descp +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	flush_dcache_range((unsigned long)(bufp),
			   (unsigned long)bufp +
			   RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);

	/* initialize the DMA channel */
	writel((uint32_t)descp, GMAC0_DMA_RX_ADDR_LOW_ADDR);
	writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR);

	/* now update the dma last descriptor */
	writel(last_desc, GMAC0_DMA_RX_PTR_ADDR);

	return 0;
}

static int dma_init(struct eth_dma *dma)
{
	debug(" %s enter\n", __func__);

	/*
	 * Default flags: For backwards compatibility both
	 * Rx Overflow Continue and Parity are DISABLED.
	 */
	dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	debug("rx burst len 0x%x\n",
	      (readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK)
	      >> D64_RC_BL_SHIFT);
	debug("tx burst len 0x%x\n",
	      (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_BL_MASK)
	      >> D64_XC_BL_SHIFT);

	dma_tx_init(dma);
	dma_rx_init(dma);

	/* From end of chip_init() */
	/* enable the overflow continue feature and disable parity */
	dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN /* mask */,
		      DMA_CTRL_ROC /* value */);

	return 0;
}

static int dma_deinit(struct eth_dma *dma)
{
	debug(" %s enter\n", __func__);

	gmac_disable_dma(dma, MAC_DMA_RX);
	gmac_disable_dma(dma, MAC_DMA_TX);

	free(dma->tx_buf);
	dma->tx_buf = NULL;
	free(dma->tx_desc_aligned);
	dma->tx_desc_aligned = NULL;

	free(dma->rx_buf);
	dma->rx_buf = NULL;
	free(dma->rx_desc_aligned);
	dma->rx_desc_aligned = NULL;

	return 0;
}

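/*
 * Queue one frame for transmission: copy 'packet' into the bounce buffer of
 * the current TX descriptor, fill in SOF/EOF/IOC (plus EOT on the last ring
 * entry), flush caches and bump the last-descriptor pointer so the DMA
 * engine picks it up.
 */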
int gmac_tx_packet(struct eth_dma *dma, void *packet, int length)
{
	uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE_ALIGNED;

	/* kick off the dma */
	size_t len = length;
	int txout = dma->cur_tx_index;
	uint32_t flags;
	dma64dd_t *descp = NULL;
	uint32_t ctrl;
	uint32_t last_desc = (((uint32_t)dma->tx_desc_aligned) +
			      sizeof(dma64dd_t)) & D64_XP_LD_MASK;
	size_t buflen;

	debug("%s enter\n", __func__);

	/* load the buffer */
	memcpy(bufp, packet, len);

	/* Add 4 bytes for Ethernet FCS/CRC */
	buflen = len + 4;

	ctrl = (buflen & D64_CTRL2_BC_MASK);

	/*
	 * the transmit is a single frame, so set SOF and EOF;
	 * also set interrupt on completion
	 */
	flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;

	/* txout points to the descriptor to use */
	/* if last descriptor then set EOT */
	if (txout == (TX_BUF_NUM - 1)) {
		flags |= D64_CTRL1_EOT;
		last_desc = ((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK;
	}

	/* write the descriptor */
	descp = ((dma64dd_t *)(dma->tx_desc_aligned)) + txout;
	descp->addrlow = (uint32_t)bufp;
	descp->addrhigh = 0;
	descp->ctrl1 = flags;
	descp->ctrl2 = ctrl;

	/* flush descriptor and buffer */
	flush_dcache_range((unsigned long)dma->tx_desc_aligned,
			   (unsigned long)dma->tx_desc_aligned +
			   DESCP_SIZE_ALIGNED * TX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp + TX_BUF_SIZE_ALIGNED);

	/* now update the dma last descriptor */
	writel(last_desc, GMAC0_DMA_TX_PTR_ADDR);

	/* tx dma should be enabled so packet should go out */

	/* update txout */
	dma->cur_tx_index = (txout + 1) & (TX_BUF_NUM - 1);

	return 0;
}

bool gmac_check_tx_done(struct eth_dma *dma)
{
	/* wait for tx to complete */
	uint32_t intstatus;
	bool xfrdone = false;

	debug("%s enter\n", __func__);

	intstatus = readl(GMAC0_INT_STATUS_ADDR);

	debug("int(0x%x)\n", intstatus);
	if (intstatus & (I_XI0 | I_XI1 | I_XI2 | I_XI3)) {
		xfrdone = true;
		/* clear the int bits */
		intstatus &= ~(I_XI0 | I_XI1 | I_XI2 | I_XI3);
		writel(intstatus, GMAC0_INT_STATUS_ADDR);
	} else {
		debug("Tx int(0x%x)\n", intstatus);
	}

	return xfrdone;
}

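/*
 * Poll for a received frame.  The hardware prepends an HWRXOFF-byte status
 * header to each buffer; its first 16-bit word holds the frame length.  On
 * success the payload is copied into 'buf', the descriptor is recycled and
 * the length is returned; -1 means no frame is pending.
 */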
int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf)
{
	void *bufp, *datap;
	size_t rcvlen = 0, buflen = 0;
	uint32_t stat0 = 0, stat1 = 0;
	uint32_t control, offset;
	uint8_t statbuf[HWRXOFF * 2];

	int index, curr, active;
	dma64dd_t *descp = NULL;

	/* udelay(50); */

	/*
	 * This routine checks whether a packet has been received.  If so,
	 * it returns the address of the buffer, and the current descriptor
	 * index is incremented to the next descriptor.  Once done with the
	 * frame, the buffer is added back onto the ring and the last
	 * descriptor pointer is updated to this descriptor.
	 */
	index = dma->cur_rx_index;
	offset = (uint32_t)(dma->rx_desc_aligned);
	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR) & D64_RS0_CD_MASK;
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR) & D64_RS0_CD_MASK;
	curr = ((stat0 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
	active = ((stat1 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);

	/* check if any frame */
	if (index == curr)
		return -1;

	debug("received packet\n");
	debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index, curr, active);
	/* suppress unused-variable warning for 'active' */
	if (index == active)
		;

	/* get the packet pointer that corresponds to the rx descriptor */
	bufp = dma->rx_buf + index * RX_BUF_SIZE_ALIGNED;

	descp = (dma64dd_t *)(dma->rx_desc_aligned) + index;
	/* flush descriptor and buffer */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp + RX_BUF_SIZE_ALIGNED);

	buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK);

	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR);
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR);

	debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
	      (uint32_t)bufp, index, buflen, stat0, stat1);

	dma->cur_rx_index = (index + 1) & (RX_BUF_NUM - 1);

	/* get buffer offset */
	control = readl(GMAC0_DMA_RX_CTRL_ADDR);
	offset = (control & D64_RC_RO_MASK) >> D64_RC_RO_SHIFT;
	rcvlen = *(uint16_t *)bufp;

	debug("Received %d bytes\n", rcvlen);
	/* copy status into temp buf then copy data from rx buffer */
	memcpy(statbuf, bufp, offset);
	datap = (void *)((uint32_t)bufp + offset);
	memcpy(buf, datap, rcvlen);

	/* update descriptor that is being added back on ring */
	descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
	descp->addrlow = (uint32_t)bufp;
	descp->addrhigh = 0;
	/* flush descriptor */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);

	/* set the lastdscr for the rx ring */
	writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);

	return (int)rcvlen;
}

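/*
 * Stop one DMA engine.  For TX the channel is first suspended, then fully
 * disabled once the state machine reports disabled/idle/stopped; for RX the
 * enable bit is simply cleared.  Returns non-zero when the engine reached
 * the disabled state.
 */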
static int gmac_disable_dma(struct eth_dma *dma, int dir)
{
	int status;

	debug("%s enter\n", __func__);

	if (dir == MAC_DMA_TX) {
		/* address PR8249/PR7577 issue */
		/* suspend tx DMA first */
		writel(D64_XC_SE, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED) &&
			 (status != D64_XS0_XS_IDLE) &&
			 (status != D64_XS0_XS_STOPPED), 10000);

		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED), 10000);

		/* wait for the last transaction to complete */
		udelay(2);

		status = (status == D64_XS0_XS_DISABLED);
	} else {
		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_RX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_RX_STATUS0_ADDR) &
				     D64_RS0_RS_MASK)) !=
			  D64_RS0_RS_DISABLED), 10000);

		status = (status == D64_RS0_RS_DISABLED);
	}

	return status;
}

static int gmac_enable_dma(struct eth_dma *dma, int dir)
{
	uint32_t control;

	debug("%s enter\n", __func__);

	if (dir == MAC_DMA_TX) {
		dma->cur_tx_index = 0;

		/*
		 * These bits 20:18 (burstLen) of control register can be
		 * written but will take effect only if these bits are
		 * valid. So this will not affect previous versions
		 * of the DMA. They will continue to have those bits set to 0.
		 */
		control = readl(GMAC0_DMA_TX_CTRL_ADDR);

		control |= D64_XC_XE;
		if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= D64_XC_PD;

		writel(control, GMAC0_DMA_TX_CTRL_ADDR);

		/* initialize the DMA channel */
		writel((uint32_t)(dma->tx_desc_aligned),
		       GMAC0_DMA_TX_ADDR_LOW_ADDR);
		writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
	} else {
		dma->cur_rx_index = 0;

		control = (readl(GMAC0_DMA_RX_CTRL_ADDR) &
			   D64_RC_AE) | D64_RC_RE;

		if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= D64_RC_PD;

		if (g_dmactrlflags & DMA_CTRL_ROC)
			control |= D64_RC_OC;

		/*
		 * These bits 20:18 (burstLen) of control register can be
		 * written but will take effect only if these bits are
		 * valid. So this will not affect previous versions
		 * of the DMA. They will continue to have those bits set to 0.
		 */
		control &= ~D64_RC_BL_MASK;
		/* Keep default Rx burstlen */
		control |= readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK;
		control |= HWRXOFF << D64_RC_RO_SHIFT;

		writel(control, GMAC0_DMA_RX_CTRL_ADDR);

		/*
		 * the rx descriptor ring should have
		 * the addresses set properly;
		 * set the lastdscr for the rx ring
		 */
		writel(((uint32_t)(dma->rx_desc_aligned) +
			(RX_BUF_NUM - 1) * RX_BUF_SIZE_ALIGNED) &
		       D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
	}

	return 0;
}

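/*
 * Poll the MII control register until the BUSY bit clears or roughly
 * 'timeout' microseconds have elapsed; returns true if the bus is still busy.
 */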
bool gmac_mii_busywait(unsigned int timeout)
{
	uint32_t tmp = 0;

	while (timeout > 10) {
		tmp = readl(GMAC_MII_CTRL_ADDR);
		if (tmp & (1 << GMAC_MII_BUSY_SHIFT)) {
			udelay(10);
			timeout -= 10;
		} else {
			break;
		}
	}
	return tmp & (1 << GMAC_MII_BUSY_SHIFT);
}

int gmac_miiphy_read(struct mii_dev *bus, int phyaddr, int devad, int reg)
{
	uint32_t tmp = 0;
	u16 value = 0;

	/* Busy wait timeout is 1ms */
	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Prepare MII read: MII/MDIO busy\n", __func__);
		return -1;
	}

	/* Read operation */
	tmp = GMAC_MII_DATA_READ_CMD;
	tmp |= (phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
		(reg << GMAC_MII_PHY_REG_SHIFT);
	debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp, phyaddr, reg);
	writel(tmp, GMAC_MII_DATA_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: MII read failure: MII/MDIO busy\n", __func__);
		return -1;
	}

	value = readl(GMAC_MII_DATA_ADDR) & 0xffff;
	debug("MII read data 0x%x\n", value);
	return value;
}

int gmac_miiphy_write(struct mii_dev *bus, int phyaddr, int devad, int reg,
		      u16 value)
{
	uint32_t tmp = 0;

	/* Busy wait timeout is 1ms */
	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Prepare MII write: MII/MDIO busy\n", __func__);
		return -1;
	}

	/* Write operation */
	tmp = GMAC_MII_DATA_WRITE_CMD | (value & 0xffff);
	tmp |= ((phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
		(reg << GMAC_MII_PHY_REG_SHIFT));
	debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n",
	      tmp, phyaddr, reg, value);
	writel(tmp, GMAC_MII_DATA_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: MII write failure: MII/MDIO busy\n", __func__);
		return -1;
	}

	return 0;
}

void gmac_init_reset(void)
{
	debug("%s enter\n", __func__);

	/* set command config reg CC_SR */
	reg32_set_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
	udelay(GMAC_RESET_DELAY);
}

void gmac_clear_reset(void)
{
	debug("%s enter\n", __func__);

	/* clear command config reg CC_SR */
	reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
	udelay(GMAC_RESET_DELAY);
}

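/*
 * Enable or disable the UniMAC datapath: the MAC is put into software reset,
 * RX/TX enables are cleared while in reset, and when 'en' is true the
 * enables are asserted again after the reset is released.
 */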
static void gmac_enable_local(bool en)
{
	uint32_t cmdcfg;

	debug("%s enter\n", __func__);

	/* read command config reg */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);

	/* put mac in reset */
	gmac_init_reset();

	cmdcfg |= CC_SR;

	/* first deassert rx_ena and tx_ena while in reset */
	cmdcfg &= ~(CC_RE | CC_TE);
	/* write command config reg */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

	/* bring mac out of reset */
	gmac_clear_reset();

	/* if not enabling, exit now */
	if (!en)
		return;

	/* enable the mac transmit and receive paths now */
	udelay(2);
	cmdcfg &= ~CC_SR;
	cmdcfg |= (CC_RE | CC_TE);

	/* assert rx_ena and tx_ena when out of reset to enable the mac */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
}

int gmac_enable(void)
{
	gmac_enable_local(1);

	/* clear interrupts */
	writel(I_INTMASK, GMAC0_INT_STATUS_ADDR);
	return 0;
}

int gmac_disable(void)
{
	gmac_enable_local(0);
	return 0;
}

int gmac_set_speed(int speed, int duplex)
{
	uint32_t cmdcfg;
	uint32_t hd_ena;
	uint32_t speed_cfg;

	hd_ena = duplex ? 0 : CC_HD;
	if (speed == 1000) {
		speed_cfg = 2;
	} else if (speed == 100) {
		speed_cfg = 1;
	} else if (speed == 10) {
		speed_cfg = 0;
	} else {
		pr_err("%s: Invalid GMAC speed(%d)!\n", __func__, speed);
		return -1;
	}

	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	cmdcfg &= ~(CC_ES_MASK | CC_HD);
	cmdcfg |= ((speed_cfg << CC_ES_SHIFT) | hd_ena);

	printf("Change GMAC speed to %d Mbps\n", speed);
	debug("GMAC speed cfg 0x%x\n", cmdcfg);
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

	return 0;
}

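/*
 * Program the station address: the first four bytes go to the MSB register
 * and the last two to the LSB register, both in network byte order.
 */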
int gmac_set_mac_addr(unsigned char *mac)
{
	/* set our local address */
	debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	      mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	writel(htonl(*(uint32_t *)mac), UNIMAC0_MAC_MSB_ADDR);
	writew(htons(*(uint16_t *)&mac[4]), UNIMAC0_MAC_LSB_ADDR);

	return 0;
}

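/*
 * One-time MAC bring-up: reset the AMAC core, program the UniMAC command
 * config, set up the DMA rings, put the switch into bypass mode, route MDIO
 * to the internal GPHYs and set the MDC divisor, then configure interrupt
 * coalescing, promiscuous mode, 1 Gbps full duplex and the max frame length.
 */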
int gmac_mac_init(struct eth_device *dev)
{
	struct eth_info *eth = (struct eth_info *)(dev->priv);
	struct eth_dma *dma = &(eth->dma);

	uint32_t tmp;
	uint32_t cmdcfg;
	int chipid;

	debug("%s enter\n", __func__);

	/* Always use GMAC0 */
	printf("Using GMAC%d\n", 0);

	/* Reset AMAC0 core */
	writel(0, AMAC0_IDM_RESET_ADDR);
	tmp = readl(AMAC0_IO_CTRL_DIRECT_ADDR);
	/* Set clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT);
	tmp |= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT);
	/* Set Tx clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT);
	writel(tmp, AMAC0_IO_CTRL_DIRECT_ADDR);

	/* reset gmac */
	/*
	 * As AMAC is just reset, NO need?
	 * set eth_data into loopback mode to ensure no rx traffic
	 * gmac_loopback(eth_data, TRUE);
	 * ET_TRACE(("%s gmac loopback\n", __func__));
	 * udelay(1);
	 */

	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	cmdcfg &= ~(CC_TE | CC_RE | CC_RPI | CC_TAI | CC_HD | CC_ML |
		    CC_CFE | CC_RL | CC_RED | CC_PE | CC_TPI |
		    CC_PAD_EN | CC_PF);
	cmdcfg |= (CC_PROM | CC_NLC | CC_CFE);
	/* put mac in reset */
	gmac_init_reset();
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	gmac_clear_reset();

	/* enable clear MIB on read */
	reg32_set_bits(GMAC0_DEV_CTRL_ADDR, DC_MROR);
	/* PHY: set smi_master to drive mdc_clk */
	reg32_set_bits(GMAC0_PHY_CTRL_ADDR, PC_MTE);

	/* clear persistent sw intstatus */
	writel(0, GMAC0_INT_STATUS_ADDR);

	if (dma_init(dma) < 0) {
		pr_err("%s: GMAC dma_init failed\n", __func__);
		goto err_exit;
	}

	chipid = CHIPID;
	printf("%s: Chip ID: 0x%x\n", __func__, chipid);

	/* set switch bypass mode */
	tmp = readl(SWITCH_GLOBAL_CONFIG_ADDR);
	tmp |= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT);

	/* Switch mode */
	/* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */

	writel(tmp, SWITCH_GLOBAL_CONFIG_ADDR);

	tmp = readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR);
	tmp &= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT);
	writel(tmp, CRMU_CHIP_IO_PAD_CONTROL_ADDR);

	/* Set MDIO to internal GPHY */
	tmp = readl(GMAC_MII_CTRL_ADDR);
	/* Select internal MDC/MDIO bus */
	tmp &= ~(1 << GMAC_MII_CTRL_BYP_SHIFT);
	/* select MDC/MDIO connecting to on-chip internal PHYs */
	tmp &= ~(1 << GMAC_MII_CTRL_EXT_SHIFT);
	/*
	 * give bit[6:0](MDCDIV) with required divisor to set
	 * the MDC clock frequency, 66 MHz / 0x1A = ~2.5 MHz
	 */
	tmp |= 0x1A;

	writel(tmp, GMAC_MII_CTRL_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Configure MDIO: MII/MDIO busy\n", __func__);
		goto err_exit;
	}

	/* Configure GMAC0 */
	/* enable one rx interrupt per received frame */
	writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT, GMAC0_INTR_RECV_LAZY_ADDR);

	/* read command config reg */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	/* enable 802.3x tx flow control (honor received PAUSE frames) */
	cmdcfg &= ~CC_RPI;
	/* enable promiscuous mode */
	cmdcfg |= CC_PROM;
	/* Disable loopback mode */
	cmdcfg &= ~CC_ML;
	/* set the speed */
	cmdcfg &= ~(CC_ES_MASK | CC_HD);
	/* Set to 1Gbps and full duplex by default */
	cmdcfg |= (2 << CC_ES_SHIFT);

	/* put mac in reset */
	gmac_init_reset();
	/* write register */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	/* bring mac out of reset */
	gmac_clear_reset();

	/* set max frame lengths; account for possible vlan tag */
	writel(PKTSIZE + 32, UNIMAC0_FRM_LENGTH_ADDR);

	return 0;

err_exit:
	dma_deinit(dma);
	return -1;
}

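/*
 * Allocate the DMA descriptor rings and bounce buffers (ARCH_DMA_MINALIGN
 * aligned so they can be cache-flushed safely) and hook up the driver's
 * MAC/DMA/MII callbacks.
 */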
int gmac_add(struct eth_device *dev)
{
	struct eth_info *eth = (struct eth_info *)(dev->priv);
	struct eth_dma *dma = &(eth->dma);
	void *tmp;

	/*
	 * Desc has to be 16-byte aligned. But for dcache flush it must be
	 * aligned to ARCH_DMA_MINALIGN.
	 */
	tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate TX desc Buffer\n", __func__);
		return -1;
	}

	dma->tx_desc_aligned = (void *)tmp;
	debug("TX Descriptor Buffer: %p; length: 0x%x\n",
	      dma->tx_desc_aligned, DESCP_SIZE_ALIGNED * TX_BUF_NUM);

	tmp = memalign(ARCH_DMA_MINALIGN, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate TX Data Buffer\n", __func__);
		free(dma->tx_desc_aligned);
		return -1;
	}
	dma->tx_buf = (uint8_t *)tmp;
	debug("TX Data Buffer: %p; length: 0x%x\n",
	      dma->tx_buf, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);

	/* Desc has to be 16-byte aligned */
	tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate RX Descriptor\n", __func__);
		free(dma->tx_desc_aligned);
		free(dma->tx_buf);
		return -1;
	}
	dma->rx_desc_aligned = (void *)tmp;
	debug("RX Descriptor Buffer: %p, length: 0x%x\n",
	      dma->rx_desc_aligned, DESCP_SIZE_ALIGNED * RX_BUF_NUM);

	tmp = memalign(ARCH_DMA_MINALIGN, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate RX Data Buffer\n", __func__);
		free(dma->tx_desc_aligned);
		free(dma->tx_buf);
		free(dma->rx_desc_aligned);
		return -1;
	}
	dma->rx_buf = (uint8_t *)tmp;
	debug("RX Data Buffer: %p; length: 0x%x\n",
	      dma->rx_buf, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);

	g_dmactrlflags = 0;

	eth->phy_interface = PHY_INTERFACE_MODE_GMII;

	dma->tx_packet = gmac_tx_packet;
	dma->check_tx_done = gmac_check_tx_done;

	dma->check_rx_done = gmac_check_rx_done;

	dma->enable_dma = gmac_enable_dma;
	dma->disable_dma = gmac_disable_dma;

	eth->miiphy_read = gmac_miiphy_read;
	eth->miiphy_write = gmac_miiphy_write;

	eth->mac_init = gmac_mac_init;
	eth->disable_mac = gmac_disable;
	eth->enable_mac = gmac_enable;
	eth->set_mac_addr = gmac_set_mac_addr;
	eth->set_mac_speed = gmac_set_speed;

	return 0;
}