oweals/u-boot.git: drivers/net/bcm-sf2-eth-gmac.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2014-2017 Broadcom.
4  */
5
6 #ifdef BCM_GMAC_DEBUG
7 #ifndef DEBUG
8 #define DEBUG
9 #endif
10 #endif
11
12 #include <config.h>
13 #include <common.h>
14 #include <cpu_func.h>
15 #include <log.h>
16 #include <malloc.h>
17 #include <net.h>
18 #include <asm/cache.h>
19 #include <asm/io.h>
20 #include <phy.h>
21 #include <linux/delay.h>
22
23 #include "bcm-sf2-eth.h"
24 #include "bcm-sf2-eth-gmac.h"
25
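/*
 * Busy-wait helper: poll until 'exp' evaluates to false or roughly 'us'
 * microseconds have elapsed, checking every 10 us.
 */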
26 #define SPINWAIT(exp, us) { \
27         uint countdown = (us) + 9; \
28         while ((exp) && (countdown >= 10)) {\
29                 udelay(10); \
30                 countdown -= 10; \
31         } \
32 }
33
34 #define RX_BUF_SIZE_ALIGNED     ALIGN(RX_BUF_SIZE, ARCH_DMA_MINALIGN)
35 #define TX_BUF_SIZE_ALIGNED     ALIGN(TX_BUF_SIZE, ARCH_DMA_MINALIGN)
36 #define DESCP_SIZE_ALIGNED      ALIGN(sizeof(dma64dd_t), ARCH_DMA_MINALIGN)
37
38 static int gmac_disable_dma(struct eth_dma *dma, int dir);
39 static int gmac_enable_dma(struct eth_dma *dma, int dir);
40
41 /* DMA Descriptor */
42 typedef struct {
43         /* misc control bits */
44         uint32_t        ctrl1;
45         /* buffer count and address extension */
46         uint32_t        ctrl2;
47         /* memory address of the data buffer, bits 31:0 */
48         uint32_t        addrlow;
49         /* memory address of the data buffer, bits 63:32 */
50         uint32_t        addrhigh;
51 } dma64dd_t;
52
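/* DMA control flags (DMA_CTRL_ROC / DMA_CTRL_PEN) currently in effect */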
53 uint32_t g_dmactrlflags;
54
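/*
 * Update the cached DMA control flags: clear the bits in 'mask', then set
 * 'flags'.  If parity is being enabled, probe the TX control register to
 * see whether the engine actually supports it and drop the flag if not.
 * Returns the resulting flags.
 */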
55 static uint32_t dma_ctrlflags(uint32_t mask, uint32_t flags)
56 {
57         debug("%s enter\n", __func__);
58
59         g_dmactrlflags &= ~mask;
60         g_dmactrlflags |= flags;
61
62         /* If trying to enable parity, check if parity is actually supported */
63         if (g_dmactrlflags & DMA_CTRL_PEN) {
64                 uint32_t control;
65
66                 control = readl(GMAC0_DMA_TX_CTRL_ADDR);
67                 writel(control | D64_XC_PD, GMAC0_DMA_TX_CTRL_ADDR);
68                 if (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_PD) {
69                         /*
70                          * We *can* disable it, therefore it is supported;
71                          * restore control register
72                          */
73                         writel(control, GMAC0_DMA_TX_CTRL_ADDR);
74                 } else {
75                         /* Not supported, don't allow it to be enabled */
76                         g_dmactrlflags &= ~DMA_CTRL_PEN;
77                 }
78         }
79
80         return g_dmactrlflags;
81 }
82
83 static inline void reg32_clear_bits(uint32_t reg, uint32_t value)
84 {
85         uint32_t v = readl(reg);
86         v &= ~(value);
87         writel(v, reg);
88 }
89
90 static inline void reg32_set_bits(uint32_t reg, uint32_t value)
91 {
92         uint32_t v = readl(reg);
93         v |= value;
94         writel(v, reg);
95 }
96
97 #ifdef BCM_GMAC_DEBUG
98 static void dma_tx_dump(struct eth_dma *dma)
99 {
100         dma64dd_t *descp = NULL;
101         uint8_t *bufp;
102         int i;
103
104         printf("TX DMA Register:\n");
105         printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
106                readl(GMAC0_DMA_TX_CTRL_ADDR),
107                readl(GMAC0_DMA_TX_PTR_ADDR),
108                readl(GMAC0_DMA_TX_ADDR_LOW_ADDR),
109                readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR),
110                readl(GMAC0_DMA_TX_STATUS0_ADDR),
111                readl(GMAC0_DMA_TX_STATUS1_ADDR));
112
113         printf("TX Descriptors:\n");
114         for (i = 0; i < TX_BUF_NUM; i++) {
115                 descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
116                 printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
117                        descp->ctrl1, descp->ctrl2,
118                        descp->addrhigh, descp->addrlow);
119         }
120
121         printf("TX Buffers:\n");
122         /* dump TX buffer addresses */
123         for (i = 0; i < TX_BUF_NUM; i++) {
124                 bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE_ALIGNED);
125                 printf("buf%d:0x%x; ", i, (uint32_t)bufp);
126         }
127         printf("\n");
128 }
129
130 static void dma_rx_dump(struct eth_dma *dma)
131 {
132         dma64dd_t *descp = NULL;
133         uint8_t *bufp;
134         int i;
135
136         printf("RX DMA Register:\n");
137         printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
138                readl(GMAC0_DMA_RX_CTRL_ADDR),
139                readl(GMAC0_DMA_RX_PTR_ADDR),
140                readl(GMAC0_DMA_RX_ADDR_LOW_ADDR),
141                readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR),
142                readl(GMAC0_DMA_RX_STATUS0_ADDR),
143                readl(GMAC0_DMA_RX_STATUS1_ADDR));
144
145         printf("RX Descriptors:\n");
146         for (i = 0; i < RX_BUF_NUM; i++) {
147                 descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
148                 printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
149                        descp->ctrl1, descp->ctrl2,
150                        descp->addrhigh, descp->addrlow);
151         }
152
153         printf("RX Buffers:\n");
154         for (i = 0; i < RX_BUF_NUM; i++) {
155                 bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
156                 printf("buf%d:0x%x; ", i, (uint32_t)bufp);
157         }
158         printf("\n");
159 }
160 #endif
161
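/*
 * Set up the TX descriptor ring: clear descriptors and buffers, point each
 * descriptor at its buffer, mark the last entry with EOT, flush everything
 * to memory and program the ring base address into the DMA engine.
 */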
162 static int dma_tx_init(struct eth_dma *dma)
163 {
164         dma64dd_t *descp = NULL;
165         uint8_t *bufp;
166         int i;
167         uint32_t ctrl;
168
169         debug("%s enter\n", __func__);
170
171         /* clear descriptor memory */
172         memset((void *)(dma->tx_desc_aligned), 0,
173                TX_BUF_NUM * DESCP_SIZE_ALIGNED);
174         memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE_ALIGNED);
175
176         /* Initialize TX DMA descriptor table */
177         for (i = 0; i < TX_BUF_NUM; i++) {
178                 descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
179                 bufp = dma->tx_buf + i * TX_BUF_SIZE_ALIGNED;
180                 /* clear buffer memory */
181                 memset((void *)bufp, 0, TX_BUF_SIZE_ALIGNED);
182
183                 ctrl = 0;
184                 /* if last descr set endOfTable */
185                 if (i == (TX_BUF_NUM-1))
186                         ctrl = D64_CTRL1_EOT;
187                 descp->ctrl1 = ctrl;
188                 descp->ctrl2 = 0;
189                 descp->addrlow = (uint32_t)bufp;
190                 descp->addrhigh = 0;
191         }
192
193         /* flush descriptor and buffer */
194         descp = dma->tx_desc_aligned;
195         bufp = dma->tx_buf;
196         flush_dcache_range((unsigned long)descp,
197                            (unsigned long)descp +
198                            DESCP_SIZE_ALIGNED * TX_BUF_NUM);
199         flush_dcache_range((unsigned long)bufp,
200                            (unsigned long)bufp +
201                            TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
202
203         /* initialize the DMA channel */
204         writel((uint32_t)(dma->tx_desc_aligned), GMAC0_DMA_TX_ADDR_LOW_ADDR);
205         writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
206
207         /* now update the dma last descriptor */
208         writel(((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK,
209                GMAC0_DMA_TX_PTR_ADDR);
210
211         return 0;
212 }
213
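/*
 * Set up the RX descriptor ring: each descriptor owns one aligned receive
 * buffer, the last entry is marked with EOT, and the last-descriptor
 * pointer is set just past the final entry so the whole ring is usable.
 */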
214 static int dma_rx_init(struct eth_dma *dma)
215 {
216         uint32_t last_desc;
217         dma64dd_t *descp = NULL;
218         uint8_t *bufp;
219         uint32_t ctrl;
220         int i;
221
222         debug("%s enter\n", __func__);
223
224         /* clear descriptor memory */
225         memset((void *)(dma->rx_desc_aligned), 0,
226                RX_BUF_NUM * DESCP_SIZE_ALIGNED);
227         /* clear buffer memory */
228         memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE_ALIGNED);
229
230         /* Initialize RX DMA descriptor table */
231         for (i = 0; i < RX_BUF_NUM; i++) {
232                 descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
233                 bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
234                 ctrl = 0;
235                 /* if last descr set endOfTable */
236                 if (i == (RX_BUF_NUM - 1))
237                         ctrl = D64_CTRL1_EOT;
238                 descp->ctrl1 = ctrl;
239                 descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
240                 descp->addrlow = (uint32_t)bufp;
241                 descp->addrhigh = 0;
242
243                 last_desc = ((uint32_t)(descp) & D64_XP_LD_MASK)
244                                 + sizeof(dma64dd_t);
245         }
246
247         descp = dma->rx_desc_aligned;
248         bufp = dma->rx_buf;
249         /* flush descriptor and buffer */
250         flush_dcache_range((unsigned long)descp,
251                            (unsigned long)descp +
252                            DESCP_SIZE_ALIGNED * RX_BUF_NUM);
253         flush_dcache_range((unsigned long)(bufp),
254                            (unsigned long)bufp +
255                            RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
256
257         /* initialize the DMA channel */
258         writel((uint32_t)descp, GMAC0_DMA_RX_ADDR_LOW_ADDR);
259         writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR);
260
261         /* now update the dma last descriptor */
262         writel(last_desc, GMAC0_DMA_RX_PTR_ADDR);
263
264         return 0;
265 }
266
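/*
 * Initialize both DMA channels, then enable Rx Overflow Continue while
 * keeping parity disabled (mirroring the settings applied by chip_init()).
 */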
267 static int dma_init(struct eth_dma *dma)
268 {
269         debug(" %s enter\n", __func__);
270
271         /*
272          * Default flags: For backwards compatibility both
273          * Rx Overflow Continue and Parity are DISABLED.
274          */
275         dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
276
277         debug("rx burst len 0x%x\n",
278               (readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK)
279               >> D64_RC_BL_SHIFT);
280         debug("tx burst len 0x%x\n",
281               (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_BL_MASK)
282               >> D64_XC_BL_SHIFT);
283
284         dma_tx_init(dma);
285         dma_rx_init(dma);
286
287         /* From end of chip_init() */
288         /* enable the overflow continue feature and disable parity */
289         dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN /* mask */,
290                       DMA_CTRL_ROC /* value */);
291
292         return 0;
293 }
294
295 static int dma_deinit(struct eth_dma *dma)
296 {
297         debug(" %s enter\n", __func__);
298
299         gmac_disable_dma(dma, MAC_DMA_RX);
300         gmac_disable_dma(dma, MAC_DMA_TX);
301
302         free(dma->tx_buf);
303         dma->tx_buf = NULL;
304         free(dma->tx_desc_aligned);
305         dma->tx_desc_aligned = NULL;
306
307         free(dma->rx_buf);
308         dma->rx_buf = NULL;
309         free(dma->rx_desc_aligned);
310         dma->rx_desc_aligned = NULL;
311
312         return 0;
313 }
314
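/*
 * Queue one frame for transmission: copy 'packet' into the next TX buffer,
 * fill in the matching descriptor (SOF/EOF, interrupt on completion),
 * flush the caches and advance the DMA last-descriptor pointer so the
 * hardware starts sending.  Completion is reported by gmac_check_tx_done().
 */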
315 int gmac_tx_packet(struct eth_dma *dma, void *packet, int length)
316 {
317         uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE_ALIGNED;
318
319         /* kick off the dma */
320         size_t len = length;
321         int txout = dma->cur_tx_index;
322         uint32_t flags;
323         dma64dd_t *descp = NULL;
324         uint32_t ctrl;
325         uint32_t last_desc = (((uint32_t)dma->tx_desc_aligned) +
326                               sizeof(dma64dd_t)) & D64_XP_LD_MASK;
327         size_t buflen;
328
329         debug("%s enter\n", __func__);
330
331         /* load the buffer */
332         memcpy(bufp, packet, len);
333
334         /* Add 4 bytes for Ethernet FCS/CRC */
335         buflen = len + 4;
336
337         ctrl = (buflen & D64_CTRL2_BC_MASK);
338
339         /* the transmit is only one frame, so set SOF and EOF */
340         /* also set interrupt on completion */
341         flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
342
343         /* txout points to the descriptor to use */
344         /* if last descriptor then set EOT */
345         if (txout == (TX_BUF_NUM - 1)) {
346                 flags |= D64_CTRL1_EOT;
347                 last_desc = ((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK;
348         }
349
350         /* write the descriptor */
351         descp = ((dma64dd_t *)(dma->tx_desc_aligned)) + txout;
352         descp->addrlow = (uint32_t)bufp;
353         descp->addrhigh = 0;
354         descp->ctrl1 = flags;
355         descp->ctrl2 = ctrl;
356
357         /* flush descriptor and buffer */
358         flush_dcache_range((unsigned long)dma->tx_desc_aligned,
359                            (unsigned long)dma->tx_desc_aligned +
360                            DESCP_SIZE_ALIGNED * TX_BUF_NUM);
361         flush_dcache_range((unsigned long)bufp,
362                            (unsigned long)bufp + TX_BUF_SIZE_ALIGNED);
363
364         /* now update the dma last descriptor */
365         writel(last_desc, GMAC0_DMA_TX_PTR_ADDR);
366
367         /* tx dma should be enabled so packet should go out */
368
369         /* update txout */
370         dma->cur_tx_index = (txout + 1) & (TX_BUF_NUM - 1);
371
372         return 0;
373 }
374
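/*
 * Return true if any TX-complete interrupt bit is set, clearing those bits
 * in the process; intended to be polled after gmac_tx_packet().
 */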
375 bool gmac_check_tx_done(struct eth_dma *dma)
376 {
377         /* wait for tx to complete */
378         uint32_t intstatus;
379         bool xfrdone = false;
380
381         debug("%s enter\n", __func__);
382
383         intstatus = readl(GMAC0_INT_STATUS_ADDR);
384
385         debug("int(0x%x)\n", intstatus);
386         if (intstatus & (I_XI0 | I_XI1 | I_XI2 | I_XI3)) {
387                 xfrdone = true;
388                 /* clear the int bits */
389                 intstatus &= ~(I_XI0 | I_XI1 | I_XI2 | I_XI3);
390                 writel(intstatus, GMAC0_INT_STATUS_ADDR);
391         } else {
392                 debug("Tx int(0x%x)\n", intstatus);
393         }
394
395         return xfrdone;
396 }
397
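/*
 * Poll the RX ring for a completed frame.  Returns the number of received
 * bytes copied into 'buf', or -1 if no new frame is available.
 */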
398 int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf)
399 {
400         void *bufp, *datap;
401         size_t rcvlen = 0, buflen = 0;
402         uint32_t stat0 = 0, stat1 = 0;
403         uint32_t control, offset;
404         uint8_t statbuf[HWRXOFF*2];
405
406         int index, curr, active;
407         dma64dd_t *descp = NULL;
408
409         /* udelay(50); */
410
411         /*
412          * This function checks whether a packet has been received.  If so,
413          * the frame is copied into 'buf', its length is returned and the
414          * current descriptor index is advanced to the next descriptor.
415          * Once the frame has been consumed, the buffer is added back onto
416          * the descriptor ring and the last descriptor pointer is updated
417          * to point at this descriptor.
418          */
419         index = dma->cur_rx_index;
420         offset = (uint32_t)(dma->rx_desc_aligned);
421         stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR) & D64_RS0_CD_MASK;
422         stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR) & D64_RS0_CD_MASK;
423         curr = ((stat0 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
424         active = ((stat1 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
425
426         /* check if any frame */
427         if (index == curr)
428                 return -1;
429
430         debug("received packet\n");
431         debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index, curr, active);
432         /* reference 'active' to avoid a set-but-unused warning */
433         if (index == active)
434                 ;
435
436         /* get the packet pointer that corresponds to the rx descriptor */
437         bufp = dma->rx_buf + index * RX_BUF_SIZE_ALIGNED;
438
439         descp = (dma64dd_t *)(dma->rx_desc_aligned) + index;
440         /* flush descriptor and buffer */
441         flush_dcache_range((unsigned long)dma->rx_desc_aligned,
442                            (unsigned long)dma->rx_desc_aligned +
443                            DESCP_SIZE_ALIGNED * RX_BUF_NUM);
444         flush_dcache_range((unsigned long)bufp,
445                            (unsigned long)bufp + RX_BUF_SIZE_ALIGNED);
446
447         buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK);
448
449         stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR);
450         stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR);
451
452         debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
453               (uint32_t)bufp, index, buflen, stat0, stat1);
454
455         dma->cur_rx_index = (index + 1) & (RX_BUF_NUM - 1);
456
457         /* get buffer offset */
458         control = readl(GMAC0_DMA_RX_CTRL_ADDR);
459         offset = (control & D64_RC_RO_MASK) >> D64_RC_RO_SHIFT;
460         rcvlen = *(uint16_t *)bufp;
461
462         debug("Received %d bytes\n", rcvlen);
463         /* copy status into temp buf then copy data from rx buffer */
464         memcpy(statbuf, bufp, offset);
465         datap = (void *)((uint32_t)bufp + offset);
466         memcpy(buf, datap, rcvlen);
467
468         /* update descriptor that is being added back on ring */
469         descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
470         descp->addrlow = (uint32_t)bufp;
471         descp->addrhigh = 0;
472         /* flush descriptor */
473         flush_dcache_range((unsigned long)dma->rx_desc_aligned,
474                            (unsigned long)dma->rx_desc_aligned +
475                            DESCP_SIZE_ALIGNED * RX_BUF_NUM);
476
477         /* set the lastdscr for the rx ring */
478         writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
479
480         return (int)rcvlen;
481 }
482
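/*
 * Disable one DMA channel.  The TX side is suspended first; in either case
 * the engine is polled with SPINWAIT until it reports a stopped/disabled
 * state.  Returns non-zero if the channel ended up disabled.
 */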
483 static int gmac_disable_dma(struct eth_dma *dma, int dir)
484 {
485         int status;
486
487         debug("%s enter\n", __func__);
488
489         if (dir == MAC_DMA_TX) {
490                 /* address PR8249/PR7577 issue */
491                 /* suspend tx DMA first */
492                 writel(D64_XC_SE, GMAC0_DMA_TX_CTRL_ADDR);
493                 SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
494                                      D64_XS0_XS_MASK)) !=
495                           D64_XS0_XS_DISABLED) &&
496                          (status != D64_XS0_XS_IDLE) &&
497                          (status != D64_XS0_XS_STOPPED), 10000);
498
499                 /*
500                  * PR2414 WAR: DMA engines are not disabled until
501                  * transfer finishes
502                  */
503                 writel(0, GMAC0_DMA_TX_CTRL_ADDR);
504                 SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
505                                      D64_XS0_XS_MASK)) !=
506                           D64_XS0_XS_DISABLED), 10000);
507
508                 /* wait for the last transaction to complete */
509                 udelay(2);
510
511                 status = (status == D64_XS0_XS_DISABLED);
512         } else {
513                 /*
514                  * PR2414 WAR: DMA engines are not disabled until
515                  * transfer finishes
516                  */
517                 writel(0, GMAC0_DMA_RX_CTRL_ADDR);
518                 SPINWAIT(((status = (readl(GMAC0_DMA_RX_STATUS0_ADDR) &
519                                      D64_RS0_RS_MASK)) !=
520                           D64_RS0_RS_DISABLED), 10000);
521
522                 status = (status == D64_RS0_RS_DISABLED);
523         }
524
525         return status;
526 }
527
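/*
 * Enable one DMA channel and reset its ring index.  The RX path also gets
 * the receive header offset (HWRXOFF) and the overflow-continue/parity
 * flags programmed, and its last-descriptor pointer is rewritten.
 */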
528 static int gmac_enable_dma(struct eth_dma *dma, int dir)
529 {
530         uint32_t control;
531
532         debug("%s enter\n", __func__);
533
534         if (dir == MAC_DMA_TX) {
535                 dma->cur_tx_index = 0;
536
537                 /*
538                  * These bits 20:18 (burstLen) of control register can be
539                  * written but will take effect only if these bits are
540                  * valid. So this will not affect previous versions
541                  * of the DMA. They will continue to have those bits set to 0.
542                  */
543                 control = readl(GMAC0_DMA_TX_CTRL_ADDR);
544
545                 control |= D64_XC_XE;
546                 if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
547                         control |= D64_XC_PD;
548
549                 writel(control, GMAC0_DMA_TX_CTRL_ADDR);
550
551                 /* initialize the DMA channel */
552                 writel((uint32_t)(dma->tx_desc_aligned),
553                        GMAC0_DMA_TX_ADDR_LOW_ADDR);
554                 writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
555         } else {
556                 dma->cur_rx_index = 0;
557
558                 control = (readl(GMAC0_DMA_RX_CTRL_ADDR) &
559                            D64_RC_AE) | D64_RC_RE;
560
561                 if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
562                         control |= D64_RC_PD;
563
564                 if (g_dmactrlflags & DMA_CTRL_ROC)
565                         control |= D64_RC_OC;
566
567                 /*
568                  * These bits 20:18 (burstLen) of control register can be
569                  * written but will take effect only if these bits are
570                  * valid. So this will not affect previous versions
571                  * of the DMA. They will continue to have those bits set to 0.
572                  */
573                 control &= ~D64_RC_BL_MASK;
574                 /* Keep default Rx burstlen */
575                 control |= readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK;
576                 control |= HWRXOFF << D64_RC_RO_SHIFT;
577
578                 writel(control, GMAC0_DMA_RX_CTRL_ADDR);
579
580                 /*
581                  * the rx descriptor ring should have
582                  * the addresses set properly;
583                  * set the lastdscr for the rx ring
584                  */
585                 writel(((uint32_t)(dma->rx_desc_aligned) +
586                         (RX_BUF_NUM - 1) * RX_BUF_SIZE_ALIGNED) &
587                        D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
588         }
589
590         return 0;
591 }
592
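/*
 * Wait up to 'timeout' microseconds (polled in 10 us steps) for the MII
 * busy bit to clear.  Returns non-zero if the bus is still busy afterwards.
 */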
593 bool gmac_mii_busywait(unsigned int timeout)
594 {
595         uint32_t tmp = 0;
596
597         while (timeout > 10) {
598                 tmp = readl(GMAC_MII_CTRL_ADDR);
599                 if (tmp & (1 << GMAC_MII_BUSY_SHIFT)) {
600                         udelay(10);
601                         timeout -= 10;
602                 } else {
603                         break;
604                 }
605         }
606         return tmp & (1 << GMAC_MII_BUSY_SHIFT);
607 }
608
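/*
 * MDIO accessors registered with the MII bus: gmac_miiphy_read() returns
 * the 16-bit value read from (phyaddr, reg) or -1 if the bus stays busy;
 * gmac_miiphy_write() below is the matching write path.
 */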
609 int gmac_miiphy_read(struct mii_dev *bus, int phyaddr, int devad, int reg)
610 {
611         uint32_t tmp = 0;
612         u16 value = 0;
613
614         /* Busy wait timeout is 1ms */
615         if (gmac_mii_busywait(1000)) {
616                 pr_err("%s: Prepare MII read: MII/MDIO busy\n", __func__);
617                 return -1;
618         }
619
620         /* Read operation */
621         tmp = GMAC_MII_DATA_READ_CMD;
622         tmp |= (phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
623                 (reg << GMAC_MII_PHY_REG_SHIFT);
624         debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp, phyaddr, reg);
625         writel(tmp, GMAC_MII_DATA_ADDR);
626
627         if (gmac_mii_busywait(1000)) {
628                 pr_err("%s: MII read failure: MII/MDIO busy\n", __func__);
629                 return -1;
630         }
631
632         value = readl(GMAC_MII_DATA_ADDR) & 0xffff;
633         debug("MII read data 0x%x\n", value);
634         return value;
635 }
636
637 int gmac_miiphy_write(struct mii_dev *bus, int phyaddr, int devad, int reg,
638                       u16 value)
639 {
640         uint32_t tmp = 0;
641
642         /* Busy wait timeout is 1ms */
643         if (gmac_mii_busywait(1000)) {
644                 pr_err("%s: Prepare MII write: MII/MDIO busy\n", __func__);
645                 return -1;
646         }
647
648         /* Write operation */
649         tmp = GMAC_MII_DATA_WRITE_CMD | (value & 0xffff);
650         tmp |= ((phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
651                 (reg << GMAC_MII_PHY_REG_SHIFT));
652         debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n",
653               tmp, phyaddr, reg, value);
654         writel(tmp, GMAC_MII_DATA_ADDR);
655
656         if (gmac_mii_busywait(1000)) {
657                 pr_err("%s: MII write failure: MII/MDIO busy\n", __func__);
658                 return -1;
659         }
660
661         return 0;
662 }
663
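/*
 * Assert the UniMAC software reset bit (CC_SR) and wait GMAC_RESET_DELAY
 * for it to take effect; gmac_clear_reset() below deasserts it again.
 */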
664 void gmac_init_reset(void)
665 {
666         debug("%s enter\n", __func__);
667
668         /* set command config reg CC_SR */
669         reg32_set_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
670         udelay(GMAC_RESET_DELAY);
671 }
672
673 void gmac_clear_reset(void)
674 {
675         debug("%s enter\n", __func__);
676
677         /* clear command config reg CC_SR */
678         reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
679         udelay(GMAC_RESET_DELAY);
680 }
681
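/*
 * Toggle the MAC RX/TX enables under software reset: both paths are first
 * disabled while the MAC is held in reset, and re-enabled afterwards only
 * if 'en' is set.
 */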
682 static void gmac_enable_local(bool en)
683 {
684         uint32_t cmdcfg;
685
686         debug("%s enter\n", __func__);
687
688         /* read command config reg */
689         cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
690
691         /* put mac in reset */
692         gmac_init_reset();
693
694         cmdcfg |= CC_SR;
695
696         /* first deassert rx_ena and tx_ena while in reset */
697         cmdcfg &= ~(CC_RE | CC_TE);
698         /* write command config reg */
699         writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
700
701         /* bring mac out of reset */
702         gmac_clear_reset();
703
704         /* if not enabling, we are done */
705         if (!en)
706                 return;
707
708         /* enable the mac transmit and receive paths now */
709         udelay(2);
710         cmdcfg &= ~CC_SR;
711         cmdcfg |= (CC_RE | CC_TE);
712
713         /* assert rx_ena and tx_ena when out of reset to enable the mac */
714         writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
715
717 }
718
719 int gmac_enable(void)
720 {
721         gmac_enable_local(1);
722
723         /* clear interrupts */
724         writel(I_INTMASK, GMAC0_INT_STATUS_ADDR);
725         return 0;
726 }
727
728 int gmac_disable(void)
729 {
730         gmac_enable_local(0);
731         return 0;
732 }
733
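/*
 * Translate 'speed' (10/100/1000 Mbps) and 'duplex' into the UniMAC
 * CC_ES/CC_HD fields of the command config register.
 */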
734 int gmac_set_speed(int speed, int duplex)
735 {
736         uint32_t cmdcfg;
737         uint32_t hd_ena;
738         uint32_t speed_cfg;
739
740         hd_ena = duplex ? 0 : CC_HD;
741         if (speed == 1000) {
742                 speed_cfg = 2;
743         } else if (speed == 100) {
744                 speed_cfg = 1;
745         } else if (speed == 10) {
746                 speed_cfg = 0;
747         } else {
748                 pr_err("%s: Invalid GMAC speed(%d)!\n", __func__, speed);
749                 return -1;
750         }
751
752         cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
753         cmdcfg &= ~(CC_ES_MASK | CC_HD);
754         cmdcfg |= ((speed_cfg << CC_ES_SHIFT) | hd_ena);
755
756         printf("Change GMAC speed to %d Mbps\n", speed);
757         debug("GMAC speed cfg 0x%x\n", cmdcfg);
758         writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
759
760         return 0;
761 }
762
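/*
 * Program the station address: the first four MAC bytes go into the MSB
 * register and the remaining two into the LSB register, in network order.
 */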
763 int gmac_set_mac_addr(unsigned char *mac)
764 {
765         /* set our local address */
766         debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
767               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
768         writel(htonl(*(uint32_t *)mac), UNIMAC0_MAC_MSB_ADDR);
769         writew(htons(*(uint16_t *)&mac[4]), UNIMAC0_MAC_LSB_ADDR);
770
771         return 0;
772 }
773
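/*
 * One-time MAC bring-up: take the AMAC core out of reset, configure its
 * I/O control and the UniMAC command config, initialize the DMA rings,
 * put the switch into bypass mode, route MDIO to the internal GPHY and
 * default the link to 1 Gbps full duplex.
 */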
774 int gmac_mac_init(struct eth_device *dev)
775 {
776         struct eth_info *eth = (struct eth_info *)(dev->priv);
777         struct eth_dma *dma = &(eth->dma);
778
779         uint32_t tmp;
780         uint32_t cmdcfg;
781         int chipid;
782
783         debug("%s enter\n", __func__);
784
785         /* Always use GMAC0 */
786         printf("Using GMAC%d\n", 0);
787
788         /* Reset AMAC0 core */
789         writel(0, AMAC0_IDM_RESET_ADDR);
790         tmp = readl(AMAC0_IO_CTRL_DIRECT_ADDR);
791         /* Set clock */
792         tmp &= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT);
793         tmp |= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT);
794         /* Set Tx clock */
795         tmp &= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT);
796         writel(tmp, AMAC0_IO_CTRL_DIRECT_ADDR);
797
798         /* reset gmac */
799         /*
800          * As the AMAC core was just reset, this should not be needed:
801          * set eth_data into loopback mode to ensure no rx traffic
802          * gmac_loopback(eth_data, TRUE);
803          * ET_TRACE(("%s gmac loopback\n", __func__));
804          * udelay(1);
805          */
806
807         cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
808         cmdcfg &= ~(CC_TE | CC_RE | CC_RPI | CC_TAI | CC_HD | CC_ML |
809                     CC_CFE | CC_RL | CC_RED | CC_PE | CC_TPI |
810                     CC_PAD_EN | CC_PF);
811         cmdcfg |= (CC_PROM | CC_NLC | CC_CFE);
812         /* put mac in reset */
813         gmac_init_reset();
814         writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
815         gmac_clear_reset();
816
817         /* enable clear MIB on read */
818         reg32_set_bits(GMAC0_DEV_CTRL_ADDR, DC_MROR);
819         /* PHY: set smi_master to drive mdc_clk */
820         reg32_set_bits(GMAC0_PHY_CTRL_ADDR, PC_MTE);
821
822         /* clear persistent sw intstatus */
823         writel(0, GMAC0_INT_STATUS_ADDR);
824
825         if (dma_init(dma) < 0) {
826                 pr_err("%s: GMAC dma_init failed\n", __func__);
827                 goto err_exit;
828         }
829
830         chipid = CHIPID;
831         printf("%s: Chip ID: 0x%x\n", __func__, chipid);
832
833         /* set switch bypass mode */
834         tmp = readl(SWITCH_GLOBAL_CONFIG_ADDR);
835         tmp |= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT);
836
837         /* Switch mode */
838         /* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */
839
840         writel(tmp, SWITCH_GLOBAL_CONFIG_ADDR);
841
842         tmp = readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR);
843         tmp &= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT);
844         writel(tmp, CRMU_CHIP_IO_PAD_CONTROL_ADDR);
845
846         /* Set MDIO to internal GPHY */
847         tmp = readl(GMAC_MII_CTRL_ADDR);
848         /* Select internal MDC/MDIO bus */
849         tmp &= ~(1 << GMAC_MII_CTRL_BYP_SHIFT);
850         /* select MDC/MDIO connecting to on-chip internal PHYs */
851         tmp &= ~(1 << GMAC_MII_CTRL_EXT_SHIFT);
852         /*
853          * Set bits [6:0] (MDCDIV) to the divisor required for the
854          * MDC clock frequency: 66 MHz / 0x1A ~= 2.5 MHz
855          */
856         tmp |= 0x1A;
857
858         writel(tmp, GMAC_MII_CTRL_ADDR);
859
860         if (gmac_mii_busywait(1000)) {
861                 pr_err("%s: Configure MDIO: MII/MDIO busy\n", __func__);
862                 goto err_exit;
863         }
864
865         /* Configure GMAC0 */
866         /* enable one rx interrupt per received frame */
867         writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT, GMAC0_INTR_RECV_LAZY_ADDR);
868
869         /* read command config reg */
870         cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
871         /* enable 802.3x tx flow control (honor received PAUSE frames) */
872         cmdcfg &= ~CC_RPI;
873         /* enable promiscuous mode */
874         cmdcfg |= CC_PROM;
875         /* Disable loopback mode */
876         cmdcfg &= ~CC_ML;
877         /* set the speed */
878         cmdcfg &= ~(CC_ES_MASK | CC_HD);
879         /* Set to 1Gbps and full duplex by default */
880         cmdcfg |= (2 << CC_ES_SHIFT);
881
882         /* put mac in reset */
883         gmac_init_reset();
884         /* write register */
885         writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
886         /* bring mac out of reset */
887         gmac_clear_reset();
888
889         /* set max frame lengths; account for possible vlan tag */
890         writel(PKTSIZE + 32, UNIMAC0_FRM_LENGTH_ADDR);
891
892         return 0;
893
894 err_exit:
895         dma_deinit(dma);
896         return -1;
897 }
898
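/*
 * Allocate cache-line-aligned descriptor rings and packet buffers for both
 * directions and hook the GMAC callbacks into the eth_info/eth_dma
 * structures used by the bcm-sf2 core driver.
 */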
899 int gmac_add(struct eth_device *dev)
900 {
901         struct eth_info *eth = (struct eth_info *)(dev->priv);
902         struct eth_dma *dma = &(eth->dma);
903         void *tmp;
904
905         /*
906          * Desc has to be 16-byte aligned. But for dcache flush it must be
907          * aligned to ARCH_DMA_MINALIGN.
908          */
909         tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
910         if (tmp == NULL) {
911                 printf("%s: Failed to allocate TX desc Buffer\n", __func__);
912                 return -1;
913         }
914
915         dma->tx_desc_aligned = (void *)tmp;
916         debug("TX Descriptor Buffer: %p; length: 0x%x\n",
917               dma->tx_desc_aligned, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
918
919         tmp = memalign(ARCH_DMA_MINALIGN, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
920         if (tmp == NULL) {
921                 printf("%s: Failed to allocate TX Data Buffer\n", __func__);
922                 free(dma->tx_desc_aligned);
923                 return -1;
924         }
925         dma->tx_buf = (uint8_t *)tmp;
926         debug("TX Data Buffer: %p; length: 0x%x\n",
927               dma->tx_buf, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
928
929         /* Desc has to be 16-byte aligned */
930         tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
931         if (tmp == NULL) {
932                 printf("%s: Failed to allocate RX Descriptor\n", __func__);
933                 free(dma->tx_desc_aligned);
934                 free(dma->tx_buf);
935                 return -1;
936         }
937         dma->rx_desc_aligned = (void *)tmp;
938         debug("RX Descriptor Buffer: %p, length: 0x%x\n",
939               dma->rx_desc_aligned, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
940
941         tmp = memalign(ARCH_DMA_MINALIGN, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
942         if (tmp == NULL) {
943                 printf("%s: Failed to allocate RX Data Buffer\n", __func__);
944                 free(dma->tx_desc_aligned);
945                 free(dma->tx_buf);
946                 free(dma->rx_desc_aligned);
947                 return -1;
948         }
949         dma->rx_buf = (uint8_t *)tmp;
950         debug("RX Data Buffer: %p; length: 0x%x\n",
951               dma->rx_buf, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
952
953         g_dmactrlflags = 0;
954
955         eth->phy_interface = PHY_INTERFACE_MODE_GMII;
956
957         dma->tx_packet = gmac_tx_packet;
958         dma->check_tx_done = gmac_check_tx_done;
959
960         dma->check_rx_done = gmac_check_rx_done;
961
962         dma->enable_dma = gmac_enable_dma;
963         dma->disable_dma = gmac_disable_dma;
964
965         eth->miiphy_read = gmac_miiphy_read;
966         eth->miiphy_write = gmac_miiphy_write;
967
968         eth->mac_init = gmac_mac_init;
969         eth->disable_mac = gmac_disable;
970         eth->enable_mac = gmac_enable;
971         eth->set_mac_addr = gmac_set_mac_addr;
972         eth->set_mac_speed = gmac_set_speed;
973
974         return 0;
975 }
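/*
 * Rough usage sketch (the exact call sequence lives in the bcm-sf2 core
 * driver, not in this file): gmac_add() is called once to allocate the
 * rings and register these callbacks; the core is then expected to use
 * eth->mac_init(), dma->enable_dma() for MAC_DMA_TX/MAC_DMA_RX and
 * eth->enable_mac() before the first packet, polling dma->check_tx_done()
 * and dma->check_rx_done() for completions.
 */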