// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014-2017 Broadcom.
 */

#include <common.h>
#include <malloc.h>
#include <net.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <phy.h>

#include "bcm-sf2-eth.h"
#include "bcm-sf2-eth-gmac.h"

#define SPINWAIT(exp, us) { \
        uint countdown = (us) + 9; \
        while ((exp) && (countdown >= 10)) { \
                udelay(10); \
                countdown -= 10; \
        } \
}

#define RX_BUF_SIZE_ALIGNED     ALIGN(RX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define TX_BUF_SIZE_ALIGNED     ALIGN(TX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define DESCP_SIZE_ALIGNED      ALIGN(sizeof(dma64dd_t), ARCH_DMA_MINALIGN)

static int gmac_disable_dma(struct eth_dma *dma, int dir);
static int gmac_enable_dma(struct eth_dma *dma, int dir);

/* DMA descriptor */
typedef struct {
        /* misc control bits */
        uint32_t ctrl1;
        /* buffer count and address extension */
        uint32_t ctrl2;
        /* memory address of the data buffer, bits 31:0 */
        uint32_t addrlow;
        /* memory address of the data buffer, bits 63:32 */
        uint32_t addrhigh;
} dma64dd_t;

uint32_t g_dmactrlflags;

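/*
 * Update the cached DMA control flags: clear the bits in mask, then set the
 * bits in flags. If parity is being enabled, probe the TX control register
 * first; when the parity-disable bit cannot be set the feature is treated
 * as unsupported and DMA_CTRL_PEN is dropped. Returns the resulting flags.
 */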
static uint32_t dma_ctrlflags(uint32_t mask, uint32_t flags)
{
        debug("%s enter\n", __func__);

        g_dmactrlflags &= ~mask;
        g_dmactrlflags |= flags;

        /* If trying to enable parity, check if parity is actually supported */
        if (g_dmactrlflags & DMA_CTRL_PEN) {
                uint32_t control;

                control = readl(GMAC0_DMA_TX_CTRL_ADDR);
                writel(control | D64_XC_PD, GMAC0_DMA_TX_CTRL_ADDR);
                if (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_PD) {
                        /*
                         * We *can* disable it, therefore it is supported;
                         * restore control register
                         */
                        writel(control, GMAC0_DMA_TX_CTRL_ADDR);
                } else {
                        /* Not supported, don't allow it to be enabled */
                        g_dmactrlflags &= ~DMA_CTRL_PEN;
                }
        }

        return g_dmactrlflags;
}

static inline void reg32_clear_bits(uint32_t reg, uint32_t value)
{
        uint32_t v = readl(reg);

        v &= ~(value);
        writel(v, reg);
}

static inline void reg32_set_bits(uint32_t reg, uint32_t value)
{
        uint32_t v = readl(reg);

        v |= value;
        writel(v, reg);
}

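/*
 * Debug helpers: dump the TX/RX DMA channel registers, the descriptor
 * rings and the data buffer addresses to the console.
 */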
static void dma_tx_dump(struct eth_dma *dma)
{
        dma64dd_t *descp = NULL;
        uint8_t *bufp;
        int i;

        printf("TX DMA Register:\n");
        printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
               readl(GMAC0_DMA_TX_CTRL_ADDR),
               readl(GMAC0_DMA_TX_PTR_ADDR),
               readl(GMAC0_DMA_TX_ADDR_LOW_ADDR),
               readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR),
               readl(GMAC0_DMA_TX_STATUS0_ADDR),
               readl(GMAC0_DMA_TX_STATUS1_ADDR));

        printf("TX Descriptors:\n");
        for (i = 0; i < TX_BUF_NUM; i++) {
                descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
                printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
                       descp->ctrl1, descp->ctrl2,
                       descp->addrhigh, descp->addrlow);
        }

        printf("TX Buffers:\n");
        for (i = 0; i < TX_BUF_NUM; i++) {
                bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE_ALIGNED);
                printf("buf%d:0x%x; ", i, (uint32_t)bufp);
        }
        printf("\n");
}

static void dma_rx_dump(struct eth_dma *dma)
{
        dma64dd_t *descp = NULL;
        uint8_t *bufp;
        int i;

        printf("RX DMA Register:\n");
        printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
               readl(GMAC0_DMA_RX_CTRL_ADDR),
               readl(GMAC0_DMA_RX_PTR_ADDR),
               readl(GMAC0_DMA_RX_ADDR_LOW_ADDR),
               readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR),
               readl(GMAC0_DMA_RX_STATUS0_ADDR),
               readl(GMAC0_DMA_RX_STATUS1_ADDR));

        printf("RX Descriptors:\n");
        for (i = 0; i < RX_BUF_NUM; i++) {
                descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
                printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
                       descp->ctrl1, descp->ctrl2,
                       descp->addrhigh, descp->addrlow);
        }

        printf("RX Buffers:\n");
        for (i = 0; i < RX_BUF_NUM; i++) {
                bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
                printf("buf%d:0x%x; ", i, (uint32_t)bufp);
        }
        printf("\n");
}

static int dma_tx_init(struct eth_dma *dma)
{
        dma64dd_t *descp = NULL;
        uint8_t *bufp;
        uint32_t ctrl;
        int i;

        debug("%s enter\n", __func__);

        /* clear descriptor memory */
        memset((void *)(dma->tx_desc_aligned), 0,
               TX_BUF_NUM * DESCP_SIZE_ALIGNED);
        memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE_ALIGNED);

        /* Initialize TX DMA descriptor table */
        for (i = 0; i < TX_BUF_NUM; i++) {
                descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
                bufp = dma->tx_buf + i * TX_BUF_SIZE_ALIGNED;
                /* clear buffer memory */
                memset((void *)bufp, 0, TX_BUF_SIZE_ALIGNED);

                ctrl = 0;
                /* if last descriptor then set endOfTable */
                if (i == (TX_BUF_NUM - 1))
                        ctrl = D64_CTRL1_EOT;
                descp->ctrl1 = ctrl;
                descp->ctrl2 = 0;
                descp->addrlow = (uint32_t)bufp;
                descp->addrhigh = 0;
        }

        /* flush descriptor and buffer */
        descp = dma->tx_desc_aligned;
        bufp = dma->tx_buf;
        flush_dcache_range((unsigned long)descp,
                           (unsigned long)descp +
                           DESCP_SIZE_ALIGNED * TX_BUF_NUM);
        flush_dcache_range((unsigned long)bufp,
                           (unsigned long)bufp +
                           TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);

        /* initialize the DMA channel */
        writel((uint32_t)(dma->tx_desc_aligned), GMAC0_DMA_TX_ADDR_LOW_ADDR);
        writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);

        /* now update the dma last descriptor */
        writel(((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK,
               GMAC0_DMA_TX_PTR_ADDR);

        return 0;
}

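/*
 * Build the RX descriptor ring: every descriptor gets a receive buffer of
 * RX_BUF_SIZE_ALIGNED bytes, the last descriptor is marked with EOT, the
 * ring is flushed to memory and the ring base plus the last-descriptor
 * pointer are programmed into the RX DMA channel.
 */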
static int dma_rx_init(struct eth_dma *dma)
{
        uint32_t last_desc;
        dma64dd_t *descp = NULL;
        uint8_t *bufp;
        uint32_t ctrl;
        int i;

        debug("%s enter\n", __func__);

        /* clear descriptor memory */
        memset((void *)(dma->rx_desc_aligned), 0,
               RX_BUF_NUM * DESCP_SIZE_ALIGNED);
        /* clear buffer memory */
        memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE_ALIGNED);

        /* Initialize RX DMA descriptor table */
        for (i = 0; i < RX_BUF_NUM; i++) {
                descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
                bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;

                ctrl = 0;
                /* if last descriptor then set endOfTable */
                if (i == (RX_BUF_NUM - 1))
                        ctrl = D64_CTRL1_EOT;
                descp->ctrl1 = ctrl;
                descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
                descp->addrlow = (uint32_t)bufp;
                descp->addrhigh = 0;

                last_desc = ((uint32_t)(descp) & D64_XP_LD_MASK)
                            + sizeof(dma64dd_t);
        }

        descp = dma->rx_desc_aligned;
        bufp = dma->rx_buf;

        /* flush descriptor and buffer */
        flush_dcache_range((unsigned long)descp,
                           (unsigned long)descp +
                           DESCP_SIZE_ALIGNED * RX_BUF_NUM);
        flush_dcache_range((unsigned long)(bufp),
                           (unsigned long)bufp +
                           RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);

        /* initialize the DMA channel */
        writel((uint32_t)descp, GMAC0_DMA_RX_ADDR_LOW_ADDR);
        writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR);

        /* now update the dma last descriptor */
        writel(last_desc, GMAC0_DMA_RX_PTR_ADDR);

        return 0;
}

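/*
 * One-time DMA setup: start with Rx Overflow Continue and Parity disabled,
 * build both descriptor rings, then enable Rx Overflow Continue (parity
 * stays off), mirroring what the end of chip_init() used to do.
 */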
static int dma_init(struct eth_dma *dma)
{
        debug(" %s enter\n", __func__);

        /*
         * Default flags: For backwards compatibility both
         * Rx Overflow Continue and Parity are DISABLED.
         */
        dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

        debug("rx burst len 0x%x\n",
              (readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK)
              >> D64_RC_BL_SHIFT);
        debug("tx burst len 0x%x\n",
              (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_BL_MASK)
              >> D64_XC_BL_SHIFT);

        dma_tx_init(dma);
        dma_rx_init(dma);

        /* From end of chip_init() */
        /* enable the overflow continue feature and disable parity */
        dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN /* mask */,
                      DMA_CTRL_ROC /* value */);

        return 0;
}

static int dma_deinit(struct eth_dma *dma)
{
        debug(" %s enter\n", __func__);

        gmac_disable_dma(dma, MAC_DMA_RX);
        gmac_disable_dma(dma, MAC_DMA_TX);

        free(dma->tx_buf);
        dma->tx_buf = NULL;
        free(dma->tx_desc_aligned);
        dma->tx_desc_aligned = NULL;

        free(dma->rx_buf);
        dma->rx_buf = NULL;
        free(dma->rx_desc_aligned);
        dma->rx_desc_aligned = NULL;

        return 0;
}

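/*
 * Queue one frame for transmission: copy it into the ring buffer that
 * belongs to the current TX descriptor, fill in SOF/EOF/IOC (and EOT when
 * wrapping), flush caches and advance the DMA last-descriptor pointer so
 * the hardware starts sending.
 */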
int gmac_tx_packet(struct eth_dma *dma, void *packet, int length)
{
        uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE_ALIGNED;

        /* kick off the dma */
        size_t len = length;
        int txout = dma->cur_tx_index;
        uint32_t flags;
        dma64dd_t *descp = NULL;
        uint32_t ctrl;
        uint32_t last_desc = (((uint32_t)dma->tx_desc_aligned) +
                              sizeof(dma64dd_t)) & D64_XP_LD_MASK;
        size_t buflen;

        debug("%s enter\n", __func__);

        /* load the buffer */
        memcpy(bufp, packet, len);

        /* Add 4 bytes for Ethernet FCS/CRC */
        buflen = len + 4;

        ctrl = (buflen & D64_CTRL2_BC_MASK);

        /* the transmit is a single frame, so set SOF and EOF */
        /* also set int on completion */
        flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;

        /* txout points to the descriptor to use */
        /* if last descriptor then set EOT */
        if (txout == (TX_BUF_NUM - 1)) {
                flags |= D64_CTRL1_EOT;
                last_desc = ((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK;
        }

        /* write the descriptor */
        descp = ((dma64dd_t *)(dma->tx_desc_aligned)) + txout;
        descp->addrlow = (uint32_t)bufp;
        descp->addrhigh = 0;
        descp->ctrl1 = flags;
        descp->ctrl2 = ctrl;

        /* flush descriptor and buffer */
        flush_dcache_range((unsigned long)dma->tx_desc_aligned,
                           (unsigned long)dma->tx_desc_aligned +
                           DESCP_SIZE_ALIGNED * TX_BUF_NUM);
        flush_dcache_range((unsigned long)bufp,
                           (unsigned long)bufp + TX_BUF_SIZE_ALIGNED);

        /* now update the dma last descriptor */
        writel(last_desc, GMAC0_DMA_TX_PTR_ADDR);

        /* tx dma should be enabled so packet should go out */

        /* update txout */
        dma->cur_tx_index = (txout + 1) & (TX_BUF_NUM - 1);

        return 0;
}

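/*
 * Poll the GMAC interrupt status for any of the TX-complete bits; clear
 * them when seen. Returns true once the last transmit has finished.
 */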
bool gmac_check_tx_done(struct eth_dma *dma)
{
        /* wait for tx to complete */
        uint32_t intstatus;
        bool xfrdone = false;

        debug("%s enter\n", __func__);

        intstatus = readl(GMAC0_INT_STATUS_ADDR);

        debug("int(0x%x)\n", intstatus);
        if (intstatus & (I_XI0 | I_XI1 | I_XI2 | I_XI3)) {
                xfrdone = true;
                /* clear the int bits */
                intstatus &= ~(I_XI0 | I_XI1 | I_XI2 | I_XI3);
                writel(intstatus, GMAC0_INT_STATUS_ADDR);
        } else {
                debug("Tx int(0x%x)\n", intstatus);
        }

        return xfrdone;
}

int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf)
{
        void *bufp, *datap;
        size_t rcvlen = 0, buflen = 0;
        uint32_t stat0 = 0, stat1 = 0;
        uint32_t control, offset;
        uint8_t statbuf[HWRXOFF*2];
        int index, curr, active;
        dma64dd_t *descp = NULL;

        /*
         * This API checks whether a packet has been received. If so, the
         * data is returned in the caller's buffer and the current
         * descriptor index is incremented to the next descriptor. Once
         * done with the frame, the buffer is added back onto the ring and
         * the last descriptor pointer is updated to this descriptor.
         */
        index = dma->cur_rx_index;
        offset = (uint32_t)(dma->rx_desc_aligned);
        stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR) & D64_RS0_CD_MASK;
        stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR) & D64_RS0_CD_MASK;
        curr = ((stat0 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
        active = ((stat1 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);

        /* check if any frame */
        if (index == curr)
                return -1;

        debug("received packet\n");
        debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index, curr, active);

        /* get the packet pointer that corresponds to the rx descriptor */
        bufp = dma->rx_buf + index * RX_BUF_SIZE_ALIGNED;

        descp = (dma64dd_t *)(dma->rx_desc_aligned) + index;
        /* flush descriptor and buffer */
        flush_dcache_range((unsigned long)dma->rx_desc_aligned,
                           (unsigned long)dma->rx_desc_aligned +
                           DESCP_SIZE_ALIGNED * RX_BUF_NUM);
        flush_dcache_range((unsigned long)bufp,
                           (unsigned long)bufp + RX_BUF_SIZE_ALIGNED);

        buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK);

        stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR);
        stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR);

        debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
              (uint32_t)bufp, index, buflen, stat0, stat1);

        dma->cur_rx_index = (index + 1) & (RX_BUF_NUM - 1);

        /* get buffer offset */
        control = readl(GMAC0_DMA_RX_CTRL_ADDR);
        offset = (control & D64_RC_RO_MASK) >> D64_RC_RO_SHIFT;
        rcvlen = *(uint16_t *)bufp;

        debug("Received %d bytes\n", rcvlen);
        /* copy status into temp buf then copy data from rx buffer */
        memcpy(statbuf, bufp, offset);
        datap = (void *)((uint32_t)bufp + offset);
        memcpy(buf, datap, rcvlen);

        /* update descriptor that is being added back on ring */
        descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
        descp->addrlow = (uint32_t)bufp;
        descp->addrhigh = 0;

        /* flush descriptor */
        flush_dcache_range((unsigned long)dma->rx_desc_aligned,
                           (unsigned long)dma->rx_desc_aligned +
                           DESCP_SIZE_ALIGNED * RX_BUF_NUM);

        /* set the lastdscr for the rx ring */
        writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);

        return (int)rcvlen;
}

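/*
 * Stop a DMA channel. For TX the engine is first suspended and then
 * disabled (working around the PR8249/PR7577 and PR2414 issues noted in
 * the code); for RX it is disabled directly. Returns non-zero once the
 * engine reached the DISABLED state.
 */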
static int gmac_disable_dma(struct eth_dma *dma, int dir)
{
        int status;

        debug("%s enter\n", __func__);

        if (dir == MAC_DMA_TX) {
                /* address PR8249/PR7577 issue */
                /* suspend tx DMA first */
                writel(D64_XC_SE, GMAC0_DMA_TX_CTRL_ADDR);
                SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
                                     D64_XS0_XS_MASK)) !=
                          D64_XS0_XS_DISABLED) &&
                         (status != D64_XS0_XS_IDLE) &&
                         (status != D64_XS0_XS_STOPPED), 10000);

                /*
                 * PR2414 WAR: DMA engines are not disabled until
                 * transfer finishes
                 */
                writel(0, GMAC0_DMA_TX_CTRL_ADDR);
                SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
                                     D64_XS0_XS_MASK)) !=
                          D64_XS0_XS_DISABLED), 10000);

                /* wait for the last transaction to complete */
                udelay(2);

                status = (status == D64_XS0_XS_DISABLED);
        } else {
                /*
                 * PR2414 WAR: DMA engines are not disabled until
                 * transfer finishes
                 */
                writel(0, GMAC0_DMA_RX_CTRL_ADDR);
                SPINWAIT(((status = (readl(GMAC0_DMA_RX_STATUS0_ADDR) &
                                     D64_RS0_RS_MASK)) !=
                          D64_RS0_RS_DISABLED), 10000);

                status = (status == D64_RS0_RS_DISABLED);
        }

        return status;
}

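/*
 * (Re)enable a DMA channel and reset its ring index. TX gets its ring base
 * reprogrammed; RX also gets the receive offset (HWRXOFF), parity/overflow
 * options and the last-descriptor pointer set up.
 */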
static int gmac_enable_dma(struct eth_dma *dma, int dir)
{
        uint32_t control;

        debug("%s enter\n", __func__);

        if (dir == MAC_DMA_TX) {
                dma->cur_tx_index = 0;

                /*
                 * These bits 20:18 (burstLen) of control register can be
                 * written but will take effect only if these bits are
                 * valid. So this will not affect previous versions
                 * of the DMA. They will continue to have those bits set to 0.
                 */
                control = readl(GMAC0_DMA_TX_CTRL_ADDR);

                control |= D64_XC_XE;
                if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
                        control |= D64_XC_PD;

                writel(control, GMAC0_DMA_TX_CTRL_ADDR);

                /* initialize the DMA channel */
                writel((uint32_t)(dma->tx_desc_aligned),
                       GMAC0_DMA_TX_ADDR_LOW_ADDR);
                writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
        } else {
                dma->cur_rx_index = 0;

                control = (readl(GMAC0_DMA_RX_CTRL_ADDR) &
                           D64_RC_AE) | D64_RC_RE;

                if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
                        control |= D64_RC_PD;

                if (g_dmactrlflags & DMA_CTRL_ROC)
                        control |= D64_RC_OC;

                /*
                 * These bits 20:18 (burstLen) of control register can be
                 * written but will take effect only if these bits are
                 * valid. So this will not affect previous versions
                 * of the DMA. They will continue to have those bits set to 0.
                 */
                control &= ~D64_RC_BL_MASK;
                /* Keep default Rx burstlen */
                control |= readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK;
                control |= HWRXOFF << D64_RC_RO_SHIFT;

                writel(control, GMAC0_DMA_RX_CTRL_ADDR);

                /*
                 * the rx descriptor ring should have
                 * the addresses set properly;
                 * set the lastdscr for the rx ring
                 */
                writel(((uint32_t)(dma->rx_desc_aligned) +
                        (RX_BUF_NUM - 1) * RX_BUF_SIZE_ALIGNED) &
                       D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
        }

        return 0;
}

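/*
 * Wait, in 10us steps up to roughly the given number of microseconds, for
 * the MII management interface to go idle. Returns non-zero if it is still
 * busy when the timeout expires.
 */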
bool gmac_mii_busywait(unsigned int timeout)
{
        uint32_t tmp = 0;

        while (timeout > 10) {
                tmp = readl(GMAC_MII_CTRL_ADDR);
                if (tmp & (1 << GMAC_MII_BUSY_SHIFT)) {
                        udelay(10);
                        timeout -= 10;
                } else {
                        break;
                }
        }
        return tmp & (1 << GMAC_MII_BUSY_SHIFT);
}

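/*
 * MDIO accessors used by the PHY layer: issue a read or write command via
 * the MII data register, busy-waiting (1ms timeout) before and after the
 * access.
 */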
int gmac_miiphy_read(struct mii_dev *bus, int phyaddr, int devad, int reg)
{
        uint32_t tmp = 0;
        u16 value = 0;

        /* Busy wait timeout is 1ms */
        if (gmac_mii_busywait(1000)) {
                pr_err("%s: Prepare MII read: MII/MDIO busy\n", __func__);
                return -1;
        }

        /* Read operation */
        tmp = GMAC_MII_DATA_READ_CMD;
        tmp |= (phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
                (reg << GMAC_MII_PHY_REG_SHIFT);
        debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp, phyaddr, reg);
        writel(tmp, GMAC_MII_DATA_ADDR);

        if (gmac_mii_busywait(1000)) {
                pr_err("%s: MII read failure: MII/MDIO busy\n", __func__);
                return -1;
        }

        value = readl(GMAC_MII_DATA_ADDR) & 0xffff;
        debug("MII read data 0x%x\n", value);

        return value;
}

int gmac_miiphy_write(struct mii_dev *bus, int phyaddr, int devad, int reg,
                      u16 value)
{
        uint32_t tmp = 0;

        /* Busy wait timeout is 1ms */
        if (gmac_mii_busywait(1000)) {
                pr_err("%s: Prepare MII write: MII/MDIO busy\n", __func__);
                return -1;
        }

        /* Write operation */
        tmp = GMAC_MII_DATA_WRITE_CMD | (value & 0xffff);
        tmp |= ((phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
                (reg << GMAC_MII_PHY_REG_SHIFT));
        debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n",
              tmp, phyaddr, reg, value);
        writel(tmp, GMAC_MII_DATA_ADDR);

        if (gmac_mii_busywait(1000)) {
                pr_err("%s: MII write failure: MII/MDIO busy\n", __func__);
                return -1;
        }

        return 0;
}

void gmac_init_reset(void)
{
        debug("%s enter\n", __func__);

        /* set command config reg CC_SR */
        reg32_set_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
        udelay(GMAC_RESET_DELAY);
}

void gmac_clear_reset(void)
{
        debug("%s enter\n", __func__);

        /* clear command config reg CC_SR */
        reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
        udelay(GMAC_RESET_DELAY);
}

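/*
 * Enable or disable the UniMAC: the MAC is put into software reset with
 * RX/TX disabled, taken back out of reset, and (when enabling) the
 * transmit and receive paths are turned on again.
 */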
static void gmac_enable_local(bool en)
{
        uint32_t cmdcfg;

        debug("%s enter\n", __func__);

        /* read command config reg */
        cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);

        /* put mac in reset */
        gmac_init_reset();

        cmdcfg |= CC_SR;

        /* first deassert rx_ena and tx_ena while in reset */
        cmdcfg &= ~(CC_RE | CC_TE);
        /* write command config reg */
        writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

        /* bring mac out of reset */
        gmac_clear_reset();

        /* if not enable exit now */
        if (!en)
                return;

        /* enable the mac transmit and receive paths now */
        udelay(2);
        cmdcfg &= ~CC_SR;
        cmdcfg |= (CC_RE | CC_TE);

        /* assert rx_ena and tx_ena when out of reset to enable the mac */
        writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
}

int gmac_enable(void)
{
        gmac_enable_local(1);

        /* clear interrupts */
        writel(I_INTMASK, GMAC0_INT_STATUS_ADDR);

        return 0;
}

int gmac_disable(void)
{
        gmac_enable_local(0);

        return 0;
}

int gmac_set_speed(int speed, int duplex)
{
        uint32_t cmdcfg;
        uint32_t hd_ena;
        uint32_t speed_cfg;

        hd_ena = duplex ? 0 : CC_HD;
        if (speed == 1000) {
                speed_cfg = 2;
        } else if (speed == 100) {
                speed_cfg = 1;
        } else if (speed == 10) {
                speed_cfg = 0;
        } else {
                pr_err("%s: Invalid GMAC speed(%d)!\n", __func__, speed);
                return -1;
        }

        cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
        cmdcfg &= ~(CC_ES_MASK | CC_HD);
        cmdcfg |= ((speed_cfg << CC_ES_SHIFT) | hd_ena);

        printf("Change GMAC speed to %dMbps\n", speed);
        debug("GMAC speed cfg 0x%x\n", cmdcfg);
        writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

        return 0;
}

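/*
 * Program the station MAC address: the first four bytes go into the MSB
 * register and the remaining two into the LSB register, both big-endian.
 */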
int gmac_set_mac_addr(unsigned char *mac)
{
        /* set our local address */
        debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
              mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        writel(htonl(*(uint32_t *)mac), UNIMAC0_MAC_MSB_ADDR);
        writew(htons(*(uint16_t *)&mac[4]), UNIMAC0_MAC_LSB_ADDR);

        return 0;
}

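/*
 * Bring up GMAC0: reset the AMAC core, program the UniMAC command config,
 * set up MIB/PHY control and the DMA rings, put the internal switch into
 * bypass mode, route MDIO to the internal GPHY and set the MDC divisor,
 * then leave the MAC configured for 1Gbps full duplex with a frame length
 * large enough for a VLAN tag.
 */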
int gmac_mac_init(struct eth_device *dev)
{
        struct eth_info *eth = (struct eth_info *)(dev->priv);
        struct eth_dma *dma = &(eth->dma);

        uint32_t tmp;
        uint32_t cmdcfg;
        int chipid;

        debug("%s enter\n", __func__);

        /* Always use GMAC0 */
        printf("Using GMAC%d\n", 0);

        /* Reset AMAC0 core */
        writel(0, AMAC0_IDM_RESET_ADDR);
        tmp = readl(AMAC0_IO_CTRL_DIRECT_ADDR);
        tmp &= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT);
        tmp |= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT);
        tmp &= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT);
        writel(tmp, AMAC0_IO_CTRL_DIRECT_ADDR);

        /*
         * As AMAC is just reset, NO need?
         * set eth_data into loopback mode to ensure no rx traffic
         * gmac_loopback(eth_data, TRUE);
         * ET_TRACE(("%s gmac loopback\n", __func__));
         */

        cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
        cmdcfg &= ~(CC_TE | CC_RE | CC_RPI | CC_TAI | CC_HD | CC_ML |
                    CC_CFE | CC_RL | CC_RED | CC_PE | CC_TPI |
                    CC_PAD_EN | CC_PF);
        cmdcfg |= (CC_PROM | CC_NLC | CC_CFE);
        /* put mac in reset */
        gmac_init_reset();
        writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
        gmac_clear_reset();

        /* enable clear MIB on read */
        reg32_set_bits(GMAC0_DEV_CTRL_ADDR, DC_MROR);
        /* PHY: set smi_master to drive mdc_clk */
        reg32_set_bits(GMAC0_PHY_CTRL_ADDR, PC_MTE);

        /* clear persistent sw intstatus */
        writel(0, GMAC0_INT_STATUS_ADDR);

        if (dma_init(dma) < 0) {
                pr_err("%s: GMAC dma_init failed\n", __func__);
                goto err_exit;
        }

        chipid = CHIPID;
        printf("%s: Chip ID: 0x%x\n", __func__, chipid);

        /* set switch bypass mode */
        tmp = readl(SWITCH_GLOBAL_CONFIG_ADDR);
        tmp |= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT);

        /* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */

        writel(tmp, SWITCH_GLOBAL_CONFIG_ADDR);

        tmp = readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR);
        tmp &= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT);
        writel(tmp, CRMU_CHIP_IO_PAD_CONTROL_ADDR);

        /* Set MDIO to internal GPHY */
        tmp = readl(GMAC_MII_CTRL_ADDR);
        /* Select internal MDC/MDIO bus */
        tmp &= ~(1 << GMAC_MII_CTRL_BYP_SHIFT);
        /* select MDC/MDIO connecting to on-chip internal PHYs */
        tmp &= ~(1 << GMAC_MII_CTRL_EXT_SHIFT);
        /*
         * give bit[6:0](MDCDIV) with required divisor to set
         * the MDC clock frequency, 66MHZ/0x1A=2.5MHZ
         */
        tmp |= 0x1A;

        writel(tmp, GMAC_MII_CTRL_ADDR);

        if (gmac_mii_busywait(1000)) {
                pr_err("%s: Configure MDIO: MII/MDIO busy\n", __func__);
                goto err_exit;
        }

        /* Configure GMAC0 */
        /* enable one rx interrupt per received frame */
        writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT, GMAC0_INTR_RECV_LAZY_ADDR);

        /* read command config reg */
        cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
        /* enable 802.3x tx flow control (honor received PAUSE frames) */
        cmdcfg &= ~CC_RPI;
        /* enable promiscuous mode */
        cmdcfg |= CC_PROM;
        /* Disable loopback mode */
        cmdcfg &= ~CC_ML;

        cmdcfg &= ~(CC_ES_MASK | CC_HD);
        /* Set to 1Gbps and full duplex by default */
        cmdcfg |= (2 << CC_ES_SHIFT);

        /* put mac in reset */
        gmac_init_reset();
        writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
        /* bring mac out of reset */
        gmac_clear_reset();

        /* set max frame lengths; account for possible vlan tag */
        writel(PKTSIZE + 32, UNIMAC0_FRM_LENGTH_ADDR);

        return 0;

err_exit:
        dma_deinit(dma);
        return -1;
}

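/*
 * One-time driver setup: allocate the cache-line aligned TX/RX descriptor
 * rings and data buffers, then hook the GMAC routines into the eth_info /
 * eth_dma ops used by the bcm-sf2 ethernet layer.
 */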
int gmac_add(struct eth_device *dev)
{
        struct eth_info *eth = (struct eth_info *)(dev->priv);
        struct eth_dma *dma = &(eth->dma);
        void *tmp;

        /*
         * Desc has to be 16-byte aligned. But for dcache flush it must be
         * aligned to ARCH_DMA_MINALIGN.
         */
        tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
        if (tmp == NULL) {
                printf("%s: Failed to allocate TX desc Buffer\n", __func__);
                return -1;
        }

        dma->tx_desc_aligned = (void *)tmp;
        debug("TX Descriptor Buffer: %p; length: 0x%x\n",
              dma->tx_desc_aligned, DESCP_SIZE_ALIGNED * TX_BUF_NUM);

        tmp = memalign(ARCH_DMA_MINALIGN, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
        if (tmp == NULL) {
                printf("%s: Failed to allocate TX Data Buffer\n", __func__);
                free(dma->tx_desc_aligned);
                return -1;
        }

        dma->tx_buf = (uint8_t *)tmp;
        debug("TX Data Buffer: %p; length: 0x%x\n",
              dma->tx_buf, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);

        /* Desc has to be 16-byte aligned */
        tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
        if (tmp == NULL) {
                printf("%s: Failed to allocate RX Descriptor\n", __func__);
                free(dma->tx_desc_aligned);
                free(dma->tx_buf);
                return -1;
        }

        dma->rx_desc_aligned = (void *)tmp;
        debug("RX Descriptor Buffer: %p, length: 0x%x\n",
              dma->rx_desc_aligned, DESCP_SIZE_ALIGNED * RX_BUF_NUM);

        tmp = memalign(ARCH_DMA_MINALIGN, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
        if (tmp == NULL) {
                printf("%s: Failed to allocate RX Data Buffer\n", __func__);
                free(dma->tx_desc_aligned);
                free(dma->tx_buf);
                free(dma->rx_desc_aligned);
                return -1;
        }

        dma->rx_buf = (uint8_t *)tmp;
        debug("RX Data Buffer: %p; length: 0x%x\n",
              dma->rx_buf, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);

        eth->phy_interface = PHY_INTERFACE_MODE_GMII;

        dma->tx_packet = gmac_tx_packet;
        dma->check_tx_done = gmac_check_tx_done;

        dma->check_rx_done = gmac_check_rx_done;

        dma->enable_dma = gmac_enable_dma;
        dma->disable_dma = gmac_disable_dma;

        eth->miiphy_read = gmac_miiphy_read;
        eth->miiphy_write = gmac_miiphy_write;

        eth->mac_init = gmac_mac_init;
        eth->disable_mac = gmac_disable;
        eth->enable_mac = gmac_enable;
        eth->set_mac_addr = gmac_set_mac_addr;
        eth->set_mac_speed = gmac_set_speed;

        return 0;
}