eaadb2662a850ef0df10afd0f7604edbcf6e754b
[oweals/u-boot.git] / drivers / net / bcm-sf2-eth-gmac.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2014-2017 Broadcom.
4  */
5
6 #ifdef BCM_GMAC_DEBUG
7 #ifndef DEBUG
8 #define DEBUG
9 #endif
10 #endif
11
12 #include <config.h>
13 #include <common.h>
14 #include <cpu_func.h>
15 #include <log.h>
16 #include <malloc.h>
17 #include <net.h>
18 #include <asm/cache.h>
19 #include <asm/io.h>
20 #include <phy.h>
21
22 #include "bcm-sf2-eth.h"
23 #include "bcm-sf2-eth-gmac.h"
24
/*
 * Poll until @exp becomes false or roughly @us microseconds have elapsed,
 * sleeping 10us per iteration.  Wrapped in do { } while (0) so the macro
 * expands to a single statement and is safe inside un-braced if/else.
 */
#define SPINWAIT(exp, us) do { \
	uint countdown = (us) + 9; \
	while ((exp) && (countdown >= 10)) {\
		udelay(10); \
		countdown -= 10; \
	} \
} while (0)
32
33 #define RX_BUF_SIZE_ALIGNED     ALIGN(RX_BUF_SIZE, ARCH_DMA_MINALIGN)
34 #define TX_BUF_SIZE_ALIGNED     ALIGN(TX_BUF_SIZE, ARCH_DMA_MINALIGN)
35 #define DESCP_SIZE_ALIGNED      ALIGN(sizeof(dma64dd_t), ARCH_DMA_MINALIGN)
36
37 static int gmac_disable_dma(struct eth_dma *dma, int dir);
38 static int gmac_enable_dma(struct eth_dma *dma, int dir);
39
/* DMA Descriptor — hardware layout of one 64-bit-capable ring entry */
typedef struct {
	/* misc control bits */
	uint32_t	ctrl1;
	/* buffer count and address extension */
	uint32_t	ctrl2;
	/* memory address of the data buffer, bits 31:0 */
	uint32_t	addrlow;
	/* memory address of the data buffer, bits 63:32 */
	uint32_t	addrhigh;
} dma64dd_t;
51
52 uint32_t g_dmactrlflags;
53
54 static uint32_t dma_ctrlflags(uint32_t mask, uint32_t flags)
55 {
56         debug("%s enter\n", __func__);
57
58         g_dmactrlflags &= ~mask;
59         g_dmactrlflags |= flags;
60
61         /* If trying to enable parity, check if parity is actually supported */
62         if (g_dmactrlflags & DMA_CTRL_PEN) {
63                 uint32_t control;
64
65                 control = readl(GMAC0_DMA_TX_CTRL_ADDR);
66                 writel(control | D64_XC_PD, GMAC0_DMA_TX_CTRL_ADDR);
67                 if (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_PD) {
68                         /*
69                          * We *can* disable it, therefore it is supported;
70                          * restore control register
71                          */
72                         writel(control, GMAC0_DMA_TX_CTRL_ADDR);
73                 } else {
74                         /* Not supported, don't allow it to be enabled */
75                         g_dmactrlflags &= ~DMA_CTRL_PEN;
76                 }
77         }
78
79         return g_dmactrlflags;
80 }
81
/* Read-modify-write helper: clear @value's bits in register @reg */
static inline void reg32_clear_bits(uint32_t reg, uint32_t value)
{
	writel(readl(reg) & ~value, reg);
}
88
/* Read-modify-write helper: set @value's bits in register @reg */
static inline void reg32_set_bits(uint32_t reg, uint32_t value)
{
	writel(readl(reg) | value, reg);
}
95
96 #ifdef BCM_GMAC_DEBUG
97 static void dma_tx_dump(struct eth_dma *dma)
98 {
99         dma64dd_t *descp = NULL;
100         uint8_t *bufp;
101         int i;
102
103         printf("TX DMA Register:\n");
104         printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
105                readl(GMAC0_DMA_TX_CTRL_ADDR),
106                readl(GMAC0_DMA_TX_PTR_ADDR),
107                readl(GMAC0_DMA_TX_ADDR_LOW_ADDR),
108                readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR),
109                readl(GMAC0_DMA_TX_STATUS0_ADDR),
110                readl(GMAC0_DMA_TX_STATUS1_ADDR));
111
112         printf("TX Descriptors:\n");
113         for (i = 0; i < TX_BUF_NUM; i++) {
114                 descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
115                 printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
116                        descp->ctrl1, descp->ctrl2,
117                        descp->addrhigh, descp->addrlow);
118         }
119
120         printf("TX Buffers:\n");
121         /* Initialize TX DMA descriptor table */
122         for (i = 0; i < TX_BUF_NUM; i++) {
123                 bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE_ALIGNED);
124                 printf("buf%d:0x%x; ", i, (uint32_t)bufp);
125         }
126         printf("\n");
127 }
128
129 static void dma_rx_dump(struct eth_dma *dma)
130 {
131         dma64dd_t *descp = NULL;
132         uint8_t *bufp;
133         int i;
134
135         printf("RX DMA Register:\n");
136         printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
137                readl(GMAC0_DMA_RX_CTRL_ADDR),
138                readl(GMAC0_DMA_RX_PTR_ADDR),
139                readl(GMAC0_DMA_RX_ADDR_LOW_ADDR),
140                readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR),
141                readl(GMAC0_DMA_RX_STATUS0_ADDR),
142                readl(GMAC0_DMA_RX_STATUS1_ADDR));
143
144         printf("RX Descriptors:\n");
145         for (i = 0; i < RX_BUF_NUM; i++) {
146                 descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
147                 printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
148                        descp->ctrl1, descp->ctrl2,
149                        descp->addrhigh, descp->addrlow);
150         }
151
152         printf("RX Buffers:\n");
153         for (i = 0; i < RX_BUF_NUM; i++) {
154                 bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
155                 printf("buf%d:0x%x; ", i, (uint32_t)bufp);
156         }
157         printf("\n");
158 }
159 #endif
160
/*
 * Initialise the TX descriptor ring and zero the TX packet buffers.
 *
 * Each of the TX_BUF_NUM descriptors is pointed at its own
 * TX_BUF_SIZE_ALIGNED buffer; the last descriptor carries D64_CTRL1_EOT
 * so the hardware wraps back to the start of the table.  Descriptors and
 * buffers are flushed from the dcache so the DMA engine sees them, then
 * the ring base address is programmed into the TX DMA channel.
 *
 * Returns 0 (cannot fail).
 */
static int dma_tx_init(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;
	uint32_t ctrl;

	debug("%s enter\n", __func__);

	/* clear descriptor memory */
	memset((void *)(dma->tx_desc_aligned), 0,
	       TX_BUF_NUM * DESCP_SIZE_ALIGNED);
	memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE_ALIGNED);

	/* Initialize TX DMA descriptor table */
	for (i = 0; i < TX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
		bufp = dma->tx_buf + i * TX_BUF_SIZE_ALIGNED;
		/* clear buffer memory */
		memset((void *)bufp, 0, TX_BUF_SIZE_ALIGNED);

		ctrl = 0;
		/* if last descr set endOfTable */
		if (i == (TX_BUF_NUM-1))
			ctrl = D64_CTRL1_EOT;
		descp->ctrl1 = ctrl;
		descp->ctrl2 = 0;
		/*
		 * NOTE(review): the pointer-to-uint32_t casts here assume a
		 * 32-bit address space with physical == virtual — true for
		 * this SoC family, not portable.
		 */
		descp->addrlow = (uint32_t)bufp;
		descp->addrhigh = 0;
	}

	/* flush descriptor and buffer */
	descp = dma->tx_desc_aligned;
	bufp = dma->tx_buf;
	flush_dcache_range((unsigned long)descp,
			   (unsigned long)descp +
			   DESCP_SIZE_ALIGNED * TX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp +
			   TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);

	/* initialize the DMA channel with the ring base address */
	writel((uint32_t)(dma->tx_desc_aligned), GMAC0_DMA_TX_ADDR_LOW_ADDR);
	writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);

	/* now update the dma last descriptor (ring is empty: base address) */
	writel(((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK,
	       GMAC0_DMA_TX_PTR_ADDR);

	return 0;
}
212
/*
 * Initialise the RX descriptor ring and its packet buffers.
 *
 * Every descriptor is armed with an RX_BUF_SIZE_ALIGNED buffer (usable
 * length stored in ctrl2) and the final descriptor carries D64_CTRL1_EOT
 * so the hardware wraps.  After flushing everything from the dcache the
 * ring base is programmed and the last-descriptor pointer is set one
 * slot past the final descriptor, handing all buffers to the hardware.
 *
 * Returns 0 (cannot fail).
 */
static int dma_rx_init(struct eth_dma *dma)
{
	uint32_t last_desc;
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	uint32_t ctrl;
	int i;

	debug("%s enter\n", __func__);

	/* clear descriptor memory */
	memset((void *)(dma->rx_desc_aligned), 0,
	       RX_BUF_NUM * DESCP_SIZE_ALIGNED);
	/* clear buffer memory */
	memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE_ALIGNED);

	/* Initialize RX DMA descriptor table */
	for (i = 0; i < RX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
		bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
		ctrl = 0;
		/* if last descr set endOfTable */
		if (i == (RX_BUF_NUM - 1))
			ctrl = D64_CTRL1_EOT;
		descp->ctrl1 = ctrl;
		/* ctrl2 carries the buffer length the hardware may fill */
		descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
		/* assumes 32-bit physical addresses (addrhigh always 0) */
		descp->addrlow = (uint32_t)bufp;
		descp->addrhigh = 0;

		/* after the loop: one slot past the final descriptor */
		last_desc = ((uint32_t)(descp) & D64_XP_LD_MASK)
				+ sizeof(dma64dd_t);
	}

	descp = dma->rx_desc_aligned;
	bufp = dma->rx_buf;
	/* flush descriptor and buffer */
	flush_dcache_range((unsigned long)descp,
			   (unsigned long)descp +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	flush_dcache_range((unsigned long)(bufp),
			   (unsigned long)bufp +
			   RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);

	/* initialize the DMA channel with the ring base address */
	writel((uint32_t)descp, GMAC0_DMA_RX_ADDR_LOW_ADDR);
	writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR);

	/* now update the dma last descriptor */
	writel(last_desc, GMAC0_DMA_RX_PTR_ADDR);

	return 0;
}
265
266 static int dma_init(struct eth_dma *dma)
267 {
268         debug(" %s enter\n", __func__);
269
270         /*
271          * Default flags: For backwards compatibility both
272          * Rx Overflow Continue and Parity are DISABLED.
273          */
274         dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
275
276         debug("rx burst len 0x%x\n",
277               (readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK)
278               >> D64_RC_BL_SHIFT);
279         debug("tx burst len 0x%x\n",
280               (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_BL_MASK)
281               >> D64_XC_BL_SHIFT);
282
283         dma_tx_init(dma);
284         dma_rx_init(dma);
285
286         /* From end of chip_init() */
287         /* enable the overflow continue feature and disable parity */
288         dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN /* mask */,
289                       DMA_CTRL_ROC /* value */);
290
291         return 0;
292 }
293
294 static int dma_deinit(struct eth_dma *dma)
295 {
296         debug(" %s enter\n", __func__);
297
298         gmac_disable_dma(dma, MAC_DMA_RX);
299         gmac_disable_dma(dma, MAC_DMA_TX);
300
301         free(dma->tx_buf);
302         dma->tx_buf = NULL;
303         free(dma->tx_desc_aligned);
304         dma->tx_desc_aligned = NULL;
305
306         free(dma->rx_buf);
307         dma->rx_buf = NULL;
308         free(dma->rx_desc_aligned);
309         dma->rx_desc_aligned = NULL;
310
311         return 0;
312 }
313
314 int gmac_tx_packet(struct eth_dma *dma, void *packet, int length)
315 {
316         uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE_ALIGNED;
317
318         /* kick off the dma */
319         size_t len = length;
320         int txout = dma->cur_tx_index;
321         uint32_t flags;
322         dma64dd_t *descp = NULL;
323         uint32_t ctrl;
324         uint32_t last_desc = (((uint32_t)dma->tx_desc_aligned) +
325                               sizeof(dma64dd_t)) & D64_XP_LD_MASK;
326         size_t buflen;
327
328         debug("%s enter\n", __func__);
329
330         /* load the buffer */
331         memcpy(bufp, packet, len);
332
333         /* Add 4 bytes for Ethernet FCS/CRC */
334         buflen = len + 4;
335
336         ctrl = (buflen & D64_CTRL2_BC_MASK);
337
338         /* the transmit will only be one frame or set SOF, EOF */
339         /* also set int on completion */
340         flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
341
342         /* txout points to the descriptor to uset */
343         /* if last descriptor then set EOT */
344         if (txout == (TX_BUF_NUM - 1)) {
345                 flags |= D64_CTRL1_EOT;
346                 last_desc = ((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK;
347         }
348
349         /* write the descriptor */
350         descp = ((dma64dd_t *)(dma->tx_desc_aligned)) + txout;
351         descp->addrlow = (uint32_t)bufp;
352         descp->addrhigh = 0;
353         descp->ctrl1 = flags;
354         descp->ctrl2 = ctrl;
355
356         /* flush descriptor and buffer */
357         flush_dcache_range((unsigned long)dma->tx_desc_aligned,
358                            (unsigned long)dma->tx_desc_aligned +
359                            DESCP_SIZE_ALIGNED * TX_BUF_NUM);
360         flush_dcache_range((unsigned long)bufp,
361                            (unsigned long)bufp + TX_BUF_SIZE_ALIGNED);
362
363         /* now update the dma last descriptor */
364         writel(last_desc, GMAC0_DMA_TX_PTR_ADDR);
365
366         /* tx dma should be enabled so packet should go out */
367
368         /* update txout */
369         dma->cur_tx_index = (txout + 1) & (TX_BUF_NUM - 1);
370
371         return 0;
372 }
373
374 bool gmac_check_tx_done(struct eth_dma *dma)
375 {
376         /* wait for tx to complete */
377         uint32_t intstatus;
378         bool xfrdone = false;
379
380         debug("%s enter\n", __func__);
381
382         intstatus = readl(GMAC0_INT_STATUS_ADDR);
383
384         debug("int(0x%x)\n", intstatus);
385         if (intstatus & (I_XI0 | I_XI1 | I_XI2 | I_XI3)) {
386                 xfrdone = true;
387                 /* clear the int bits */
388                 intstatus &= ~(I_XI0 | I_XI1 | I_XI2 | I_XI3);
389                 writel(intstatus, GMAC0_INT_STATUS_ADDR);
390         } else {
391                 debug("Tx int(0x%x)\n", intstatus);
392         }
393
394         return xfrdone;
395 }
396
/*
 * Poll for a received frame on the RX ring.
 *
 * Compares the software index (cur_rx_index) against the hardware's
 * current-descriptor offset; if they differ, copies the frame payload
 * (skipping the HWRXOFF status header the hardware prepends) into @buf,
 * re-arms the descriptor, advances cur_rx_index and moves the ring's
 * last-descriptor pointer.
 *
 * Returns the received payload length in bytes, or -1 if no frame is
 * pending.
 */
int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf)
{
	void *bufp, *datap;
	size_t rcvlen = 0, buflen = 0;
	uint32_t stat0 = 0, stat1 = 0;
	uint32_t control, offset;
	uint8_t statbuf[HWRXOFF*2];

	int index, curr, active;
	dma64dd_t *descp = NULL;

	/* udelay(50); */

	/*
	 * this api will check if a packet has been received.
	 * If so it will return the address of the buffer and current
	 * descriptor index will be incremented to the
	 * next descriptor. Once done with the frame the buffer should be
	 * added back onto the descriptor and the lastdscr should be updated
	 * to this descriptor.
	 */
	index = dma->cur_rx_index;
	offset = (uint32_t)(dma->rx_desc_aligned);
	/* convert the hardware byte offsets into descriptor indices */
	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR) & D64_RS0_CD_MASK;
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR) & D64_RS0_CD_MASK;
	curr = ((stat0 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
	active = ((stat1 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);

	/* check if any frame */
	if (index == curr)
		return -1;

	debug("received packet\n");
	debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index, curr, active);
	/* remove warning: 'active' is only used in the debug build */
	if (index == active)
		;

	/* get the packet pointer that corresponds to the rx descriptor */
	bufp = dma->rx_buf + index * RX_BUF_SIZE_ALIGNED;

	descp = (dma64dd_t *)(dma->rx_desc_aligned) + index;
	/*
	 * flush descriptor and buffer
	 * NOTE(review): flush_dcache_range before *reading* DMA-written
	 * memory — an invalidate would normally be expected here; confirm
	 * against this platform's cache API semantics.
	 */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp + RX_BUF_SIZE_ALIGNED);

	buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK);

	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR);
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR);

	debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
	      (uint32_t)bufp, index, buflen, stat0, stat1);

	dma->cur_rx_index = (index + 1) & (RX_BUF_NUM - 1);

	/* get buffer offset (hardware receive-header size, HWRXOFF) */
	control = readl(GMAC0_DMA_RX_CTRL_ADDR);
	offset = (control & D64_RC_RO_MASK) >> D64_RC_RO_SHIFT;
	/*
	 * NOTE(review): the first 16-bit word of the status header is
	 * presumably the frame length written by hardware — confirm; it
	 * is used unbounded in the memcpy into @buf below.
	 */
	rcvlen = *(uint16_t *)bufp;

	debug("Received %d bytes\n", rcvlen);
	/* copy status into temp buf then copy data from rx buffer */
	memcpy(statbuf, bufp, offset);
	datap = (void *)((uint32_t)bufp + offset);
	memcpy(buf, datap, rcvlen);

	/* update descriptor that is being added back on ring */
	descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
	descp->addrlow = (uint32_t)bufp;
	descp->addrhigh = 0;
	/* flush descriptor */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);

	/* set the lastdscr for the rx ring */
	writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);

	return (int)rcvlen;
}
481
/*
 * Disable one DMA engine; @dir selects MAC_DMA_TX or MAC_DMA_RX.
 *
 * TX is first suspended (D64_XC_SE) and polled until it reports
 * disabled/idle/stopped, then fully disabled and polled again; RX is
 * simply disabled and polled.  Each poll allows up to 10ms.
 *
 * Returns non-zero (true) if the engine reached the disabled state.
 */
static int gmac_disable_dma(struct eth_dma *dma, int dir)
{
	int status;

	debug("%s enter\n", __func__);

	if (dir == MAC_DMA_TX) {
		/* address PR8249/PR7577 issue */
		/* suspend tx DMA first */
		writel(D64_XC_SE, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED) &&
			 (status != D64_XS0_XS_IDLE) &&
			 (status != D64_XS0_XS_STOPPED), 10000);

		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED), 10000);

		/* wait for the last transaction to complete */
		udelay(2);

		/* collapse the state into a boolean result */
		status = (status == D64_XS0_XS_DISABLED);
	} else {
		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_RX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_RX_STATUS0_ADDR) &
				     D64_RS0_RS_MASK)) !=
			  D64_RS0_RS_DISABLED), 10000);

		/* collapse the state into a boolean result */
		status = (status == D64_RS0_RS_DISABLED);
	}

	return status;
}
526
527 static int gmac_enable_dma(struct eth_dma *dma, int dir)
528 {
529         uint32_t control;
530
531         debug("%s enter\n", __func__);
532
533         if (dir == MAC_DMA_TX) {
534                 dma->cur_tx_index = 0;
535
536                 /*
537                  * These bits 20:18 (burstLen) of control register can be
538                  * written but will take effect only if these bits are
539                  * valid. So this will not affect previous versions
540                  * of the DMA. They will continue to have those bits set to 0.
541                  */
542                 control = readl(GMAC0_DMA_TX_CTRL_ADDR);
543
544                 control |= D64_XC_XE;
545                 if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
546                         control |= D64_XC_PD;
547
548                 writel(control, GMAC0_DMA_TX_CTRL_ADDR);
549
550                 /* initailize the DMA channel */
551                 writel((uint32_t)(dma->tx_desc_aligned),
552                        GMAC0_DMA_TX_ADDR_LOW_ADDR);
553                 writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
554         } else {
555                 dma->cur_rx_index = 0;
556
557                 control = (readl(GMAC0_DMA_RX_CTRL_ADDR) &
558                            D64_RC_AE) | D64_RC_RE;
559
560                 if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
561                         control |= D64_RC_PD;
562
563                 if (g_dmactrlflags & DMA_CTRL_ROC)
564                         control |= D64_RC_OC;
565
566                 /*
567                  * These bits 20:18 (burstLen) of control register can be
568                  * written but will take effect only if these bits are
569                  * valid. So this will not affect previous versions
570                  * of the DMA. They will continue to have those bits set to 0.
571                  */
572                 control &= ~D64_RC_BL_MASK;
573                 /* Keep default Rx burstlen */
574                 control |= readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK;
575                 control |= HWRXOFF << D64_RC_RO_SHIFT;
576
577                 writel(control, GMAC0_DMA_RX_CTRL_ADDR);
578
579                 /*
580                  * the rx descriptor ring should have
581                  * the addresses set properly;
582                  * set the lastdscr for the rx ring
583                  */
584                 writel(((uint32_t)(dma->rx_desc_aligned) +
585                         (RX_BUF_NUM - 1) * RX_BUF_SIZE_ALIGNED) &
586                        D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
587         }
588
589         return 0;
590 }
591
592 bool gmac_mii_busywait(unsigned int timeout)
593 {
594         uint32_t tmp = 0;
595
596         while (timeout > 10) {
597                 tmp = readl(GMAC_MII_CTRL_ADDR);
598                 if (tmp & (1 << GMAC_MII_BUSY_SHIFT)) {
599                         udelay(10);
600                         timeout -= 10;
601                 } else {
602                         break;
603                 }
604         }
605         return tmp & (1 << GMAC_MII_BUSY_SHIFT);
606 }
607
608 int gmac_miiphy_read(struct mii_dev *bus, int phyaddr, int devad, int reg)
609 {
610         uint32_t tmp = 0;
611         u16 value = 0;
612
613         /* Busy wait timeout is 1ms */
614         if (gmac_mii_busywait(1000)) {
615                 pr_err("%s: Prepare MII read: MII/MDIO busy\n", __func__);
616                 return -1;
617         }
618
619         /* Read operation */
620         tmp = GMAC_MII_DATA_READ_CMD;
621         tmp |= (phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
622                 (reg << GMAC_MII_PHY_REG_SHIFT);
623         debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp, phyaddr, reg);
624         writel(tmp, GMAC_MII_DATA_ADDR);
625
626         if (gmac_mii_busywait(1000)) {
627                 pr_err("%s: MII read failure: MII/MDIO busy\n", __func__);
628                 return -1;
629         }
630
631         value = readl(GMAC_MII_DATA_ADDR) & 0xffff;
632         debug("MII read data 0x%x\n", value);
633         return value;
634 }
635
636 int gmac_miiphy_write(struct mii_dev *bus, int phyaddr, int devad, int reg,
637                       u16 value)
638 {
639         uint32_t tmp = 0;
640
641         /* Busy wait timeout is 1ms */
642         if (gmac_mii_busywait(1000)) {
643                 pr_err("%s: Prepare MII write: MII/MDIO busy\n", __func__);
644                 return -1;
645         }
646
647         /* Write operation */
648         tmp = GMAC_MII_DATA_WRITE_CMD | (value & 0xffff);
649         tmp |= ((phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
650                 (reg << GMAC_MII_PHY_REG_SHIFT));
651         debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n",
652               tmp, phyaddr, reg, value);
653         writel(tmp, GMAC_MII_DATA_ADDR);
654
655         if (gmac_mii_busywait(1000)) {
656                 pr_err("%s: MII write failure: MII/MDIO busy\n", __func__);
657                 return -1;
658         }
659
660         return 0;
661 }
662
663 void gmac_init_reset(void)
664 {
665         debug("%s enter\n", __func__);
666
667         /* set command config reg CC_SR */
668         reg32_set_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
669         udelay(GMAC_RESET_DELAY);
670 }
671
672 void gmac_clear_reset(void)
673 {
674         debug("%s enter\n", __func__);
675
676         /* clear command config reg CC_SR */
677         reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
678         udelay(GMAC_RESET_DELAY);
679 }
680
681 static void gmac_enable_local(bool en)
682 {
683         uint32_t cmdcfg;
684
685         debug("%s enter\n", __func__);
686
687         /* read command config reg */
688         cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
689
690         /* put mac in reset */
691         gmac_init_reset();
692
693         cmdcfg |= CC_SR;
694
695         /* first deassert rx_ena and tx_ena while in reset */
696         cmdcfg &= ~(CC_RE | CC_TE);
697         /* write command config reg */
698         writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
699
700         /* bring mac out of reset */
701         gmac_clear_reset();
702
703         /* if not enable exit now */
704         if (!en)
705                 return;
706
707         /* enable the mac transmit and receive paths now */
708         udelay(2);
709         cmdcfg &= ~CC_SR;
710         cmdcfg |= (CC_RE | CC_TE);
711
712         /* assert rx_ena and tx_ena when out of reset to enable the mac */
713         writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
714
715         return;
716 }
717
718 int gmac_enable(void)
719 {
720         gmac_enable_local(1);
721
722         /* clear interrupts */
723         writel(I_INTMASK, GMAC0_INT_STATUS_ADDR);
724         return 0;
725 }
726
/* Disable the MAC datapaths. Returns 0. */
int gmac_disable(void)
{
	gmac_enable_local(false);

	return 0;
}
732
733 int gmac_set_speed(int speed, int duplex)
734 {
735         uint32_t cmdcfg;
736         uint32_t hd_ena;
737         uint32_t speed_cfg;
738
739         hd_ena = duplex ? 0 : CC_HD;
740         if (speed == 1000) {
741                 speed_cfg = 2;
742         } else if (speed == 100) {
743                 speed_cfg = 1;
744         } else if (speed == 10) {
745                 speed_cfg = 0;
746         } else {
747                 pr_err("%s: Invalid GMAC speed(%d)!\n", __func__, speed);
748                 return -1;
749         }
750
751         cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
752         cmdcfg &= ~(CC_ES_MASK | CC_HD);
753         cmdcfg |= ((speed_cfg << CC_ES_SHIFT) | hd_ena);
754
755         printf("Change GMAC speed to %dMB\n", speed);
756         debug("GMAC speed cfg 0x%x\n", cmdcfg);
757         writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
758
759         return 0;
760 }
761
762 int gmac_set_mac_addr(unsigned char *mac)
763 {
764         /* set our local address */
765         debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
766               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
767         writel(htonl(*(uint32_t *)mac), UNIMAC0_MAC_MSB_ADDR);
768         writew(htons(*(uint32_t *)&mac[4]), UNIMAC0_MAC_LSB_ADDR);
769
770         return 0;
771 }
772
/*
 * One-time bring-up of the GMAC0/AMAC0 core.
 *
 * Resets the AMAC core, selects clocking/GMII mode, programs a baseline
 * UniMAC command configuration (under software reset), enables MIB
 * clear-on-read, initialises both DMA rings, puts the switch block into
 * bypass mode, routes MDIO to the internal GPHY, and finally configures
 * interrupt coalescing, promiscuous 1G full-duplex operation and the
 * maximum frame length.
 *
 * Returns 0 on success; on failure tears the DMA down and returns -1.
 */
int gmac_mac_init(struct eth_device *dev)
{
	struct eth_info *eth = (struct eth_info *)(dev->priv);
	struct eth_dma *dma = &(eth->dma);

	uint32_t tmp;
	uint32_t cmdcfg;
	int chipid;

	debug("%s enter\n", __func__);

	/* Always use GMAC0 */
	printf("Using GMAC%d\n", 0);

	/* Reset AMAC0 core */
	writel(0, AMAC0_IDM_RESET_ADDR);
	tmp = readl(AMAC0_IO_CTRL_DIRECT_ADDR);
	/* Set clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT);
	tmp |= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT);
	/* Set Tx clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT);
	writel(tmp, AMAC0_IO_CTRL_DIRECT_ADDR);

	/* reset gmac */
	/*
	 * As AMAC is just reset, NO need?
	 * set eth_data into loopback mode to ensure no rx traffic
	 * gmac_loopback(eth_data, TRUE);
	 * ET_TRACE(("%s gmac loopback\n", __func__));
	 * udelay(1);
	 */

	/* baseline command config: everything off except PROM/NLC/CFE */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	cmdcfg &= ~(CC_TE | CC_RE | CC_RPI | CC_TAI | CC_HD | CC_ML |
		    CC_CFE | CC_RL | CC_RED | CC_PE | CC_TPI |
		    CC_PAD_EN | CC_PF);
	cmdcfg |= (CC_PROM | CC_NLC | CC_CFE);
	/* put mac in reset */
	gmac_init_reset();
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	gmac_clear_reset();

	/* enable clear MIB on read */
	reg32_set_bits(GMAC0_DEV_CTRL_ADDR, DC_MROR);
	/* PHY: set smi_master to drive mdc_clk */
	reg32_set_bits(GMAC0_PHY_CTRL_ADDR, PC_MTE);

	/* clear persistent sw intstatus */
	writel(0, GMAC0_INT_STATUS_ADDR);

	if (dma_init(dma) < 0) {
		pr_err("%s: GMAC dma_init failed\n", __func__);
		goto err_exit;
	}

	chipid = CHIPID;
	printf("%s: Chip ID: 0x%x\n", __func__, chipid);

	/* set switch bypass mode */
	tmp = readl(SWITCH_GLOBAL_CONFIG_ADDR);
	tmp |= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT);

	/* Switch mode */
	/* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */

	writel(tmp, SWITCH_GLOBAL_CONFIG_ADDR);

	tmp = readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR);
	tmp &= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT);
	writel(tmp, CRMU_CHIP_IO_PAD_CONTROL_ADDR);

	/* Set MDIO to internal GPHY */
	tmp = readl(GMAC_MII_CTRL_ADDR);
	/* Select internal MDC/MDIO bus*/
	tmp &= ~(1 << GMAC_MII_CTRL_BYP_SHIFT);
	/* select MDC/MDIO connecting to on-chip internal PHYs */
	tmp &= ~(1 << GMAC_MII_CTRL_EXT_SHIFT);
	/*
	 * give bit[6:0](MDCDIV) with required divisor to set
	 * the MDC clock frequency, 66MHZ/0x1A=2.5MHZ
	 */
	tmp |= 0x1A;

	writel(tmp, GMAC_MII_CTRL_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Configure MDIO: MII/MDIO busy\n", __func__);
		goto err_exit;
	}

	/* Configure GMAC0 */
	/* enable one rx interrupt per received frame */
	writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT, GMAC0_INTR_RECV_LAZY_ADDR);

	/* read command config reg */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	/* enable 802.3x tx flow control (honor received PAUSE frames) */
	cmdcfg &= ~CC_RPI;
	/* enable promiscuous mode */
	cmdcfg |= CC_PROM;
	/* Disable loopback mode */
	cmdcfg &= ~CC_ML;
	/* set the speed */
	cmdcfg &= ~(CC_ES_MASK | CC_HD);
	/* Set to 1Gbps and full duplex by default */
	cmdcfg |= (2 << CC_ES_SHIFT);

	/* put mac in reset */
	gmac_init_reset();
	/* write register */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	/* bring mac out of reset */
	gmac_clear_reset();

	/* set max frame lengths; account for possible vlan tag */
	writel(PKTSIZE + 32, UNIMAC0_FRM_LENGTH_ADDR);

	return 0;

err_exit:
	dma_deinit(dma);
	return -1;
}
897
898 int gmac_add(struct eth_device *dev)
899 {
900         struct eth_info *eth = (struct eth_info *)(dev->priv);
901         struct eth_dma *dma = &(eth->dma);
902         void *tmp;
903
904         /*
905          * Desc has to be 16-byte aligned. But for dcache flush it must be
906          * aligned to ARCH_DMA_MINALIGN.
907          */
908         tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
909         if (tmp == NULL) {
910                 printf("%s: Failed to allocate TX desc Buffer\n", __func__);
911                 return -1;
912         }
913
914         dma->tx_desc_aligned = (void *)tmp;
915         debug("TX Descriptor Buffer: %p; length: 0x%x\n",
916               dma->tx_desc_aligned, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
917
918         tmp = memalign(ARCH_DMA_MINALIGN, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
919         if (tmp == NULL) {
920                 printf("%s: Failed to allocate TX Data Buffer\n", __func__);
921                 free(dma->tx_desc_aligned);
922                 return -1;
923         }
924         dma->tx_buf = (uint8_t *)tmp;
925         debug("TX Data Buffer: %p; length: 0x%x\n",
926               dma->tx_buf, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
927
928         /* Desc has to be 16-byte aligned */
929         tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
930         if (tmp == NULL) {
931                 printf("%s: Failed to allocate RX Descriptor\n", __func__);
932                 free(dma->tx_desc_aligned);
933                 free(dma->tx_buf);
934                 return -1;
935         }
936         dma->rx_desc_aligned = (void *)tmp;
937         debug("RX Descriptor Buffer: %p, length: 0x%x\n",
938               dma->rx_desc_aligned, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
939
940         tmp = memalign(ARCH_DMA_MINALIGN, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
941         if (tmp == NULL) {
942                 printf("%s: Failed to allocate RX Data Buffer\n", __func__);
943                 free(dma->tx_desc_aligned);
944                 free(dma->tx_buf);
945                 free(dma->rx_desc_aligned);
946                 return -1;
947         }
948         dma->rx_buf = (uint8_t *)tmp;
949         debug("RX Data Buffer: %p; length: 0x%x\n",
950               dma->rx_buf, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
951
952         g_dmactrlflags = 0;
953
954         eth->phy_interface = PHY_INTERFACE_MODE_GMII;
955
956         dma->tx_packet = gmac_tx_packet;
957         dma->check_tx_done = gmac_check_tx_done;
958
959         dma->check_rx_done = gmac_check_rx_done;
960
961         dma->enable_dma = gmac_enable_dma;
962         dma->disable_dma = gmac_disable_dma;
963
964         eth->miiphy_read = gmac_miiphy_read;
965         eth->miiphy_write = gmac_miiphy_write;
966
967         eth->mac_init = gmac_mac_init;
968         eth->disable_mac = gmac_disable;
969         eth->enable_mac = gmac_enable;
970         eth->set_mac_addr = gmac_set_mac_addr;
971         eth->set_mac_speed = gmac_set_speed;
972
973         return 0;
974 }