// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019 Amit Singh Tomar <amittomer25@gmail.com>
 *
 * Driver for Broadcom GENETv5 Ethernet controller (as found on the RPi4)
 * This driver is based on the Linux driver:
 *	drivers/net/ethernet/broadcom/genet/bcmgenet.c
 * which is: Copyright (c) 2014-2017 Broadcom
 *
 * The hardware supports multiple queues (16 priority queues and one
 * default queue), both for RX and TX. There are 256 DMA descriptors (both
 * for TX and RX), and they live in MMIO registers. The hardware allows
 * assigning descriptor ranges to queues, but we choose the most simple setup:
 * All 256 descriptors are assigned to the default queue (#16).
 * Also the Linux driver supports multiple generations of the MAC, whereas
 * we only support v5, as used in the Raspberry Pi 4.
 */
19 #include <asm/cache.h>
24 #include <fdt_support.h>
25 #include <linux/err.h>
29 #include <dm/of_access.h>
30 #include <dm/ofnode.h>
31 #include <linux/iopoll.h>
32 #include <linux/sizes.h>
33 #include <asm/dma-mapping.h>
/* Register definitions derived from Linux source */
#define SYS_REV_CTRL			0x00

#define SYS_PORT_CTRL			0x04
#define PORT_MODE_EXT_GPHY		3

#define GENET_SYS_OFF			0x0000
#define SYS_RBUF_FLUSH_CTRL		(GENET_SYS_OFF + 0x08)
#define SYS_TBUF_FLUSH_CTRL		(GENET_SYS_OFF + 0x0c)

#define GENET_EXT_OFF			0x0080
#define EXT_RGMII_OOB_CTRL		(GENET_EXT_OFF + 0x0c)
#define RGMII_LINK			BIT(4)
#define OOB_DISABLE			BIT(5)
#define RGMII_MODE_EN			BIT(6)
#define ID_MODE_DIS			BIT(16)

#define GENET_RBUF_OFF			0x0300
#define RBUF_TBUF_SIZE_CTRL		(GENET_RBUF_OFF + 0xb4)
#define RBUF_CTRL			(GENET_RBUF_OFF + 0x00)
#define RBUF_ALIGN_2B			BIT(1)

#define GENET_UMAC_OFF			0x0800
#define UMAC_MIB_CTRL			(GENET_UMAC_OFF + 0x580)
#define UMAC_MAX_FRAME_LEN		(GENET_UMAC_OFF + 0x014)
#define UMAC_MAC0			(GENET_UMAC_OFF + 0x00c)
#define UMAC_MAC1			(GENET_UMAC_OFF + 0x010)
#define UMAC_CMD			(GENET_UMAC_OFF + 0x008)
#define MDIO_CMD			(GENET_UMAC_OFF + 0x614)
#define UMAC_TX_FLUSH			(GENET_UMAC_OFF + 0x334)
#define MDIO_START_BUSY			BIT(29)
#define MDIO_READ_FAIL			BIT(28)
#define MDIO_RD				(2 << 26)
#define MDIO_WR				BIT(26)
#define MDIO_PMD_SHIFT			21
#define MDIO_PMD_MASK			0x1f
#define MDIO_REG_SHIFT			16
#define MDIO_REG_MASK			0x1f

/* UMAC_CMD bits */
#define CMD_TX_EN			BIT(0)
#define CMD_RX_EN			BIT(1)
#define UMAC_SPEED_10			0
#define UMAC_SPEED_100			1
#define UMAC_SPEED_1000			2
#define UMAC_SPEED_2500			3
#define CMD_SPEED_SHIFT			2
#define CMD_SPEED_MASK			3
#define CMD_SW_RESET			BIT(13)
#define CMD_LCL_LOOP_EN			BIT(15)

#define MIB_RESET_RX			BIT(0)
#define MIB_RESET_RUNT			BIT(1)
#define MIB_RESET_TX			BIT(2)

/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESCS			256
#define RX_DESCS			TOTAL_DESCS
#define TX_DESCS			TOTAL_DESCS

/* All descriptors are assigned to the default queue (#16) */
#define DEFAULT_Q			0x10

/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528.
 * 1536 is multiple of 256 bytes
 */
#define ENET_BRCM_TAG_LEN		6
#define ENET_PAD			8
#define ENET_MAX_MTU_SIZE		(ETH_DATA_LEN + ETH_HLEN +	 \
					 VLAN_HLEN + ENET_BRCM_TAG_LEN + \
					 ETH_FCS_LEN + ENET_PAD)

/* Tx/Rx Dma Descriptor common bits */
#define DMA_EN				BIT(0)
#define DMA_RING_BUF_EN_SHIFT		0x01
#define DMA_RING_BUF_EN_MASK		0xffff
#define DMA_BUFLENGTH_MASK		0x0fff
#define DMA_BUFLENGTH_SHIFT		16
#define DMA_RING_SIZE_SHIFT		16
#define DMA_OWN				0x8000
#define DMA_EOP				0x4000
#define DMA_SOP				0x2000
#define DMA_WRAP			0x1000
#define DMA_MAX_BURST_LENGTH		0x8
/* Tx specific DMA descriptor bits */
#define DMA_TX_UNDERRUN			0x0200
#define DMA_TX_APPEND_CRC		0x0040
#define DMA_TX_OW_CRC			0x0020
#define DMA_TX_DO_CSUM			0x0010
#define DMA_TX_QTAG_SHIFT		7

/* DMA rings size */
#define DMA_RING_SIZE			0x40
#define DMA_RINGS_SIZE			(DMA_RING_SIZE * (DEFAULT_Q + 1))

/* DMA descriptor layout (three 32-bit words per descriptor) */
#define DMA_DESC_LENGTH_STATUS		0x00
#define DMA_DESC_ADDRESS_LO		0x04
#define DMA_DESC_ADDRESS_HI		0x08
#define DMA_DESC_SIZE			12

#define GENET_RX_OFF			0x2000
#define GENET_RDMA_REG_OFF \
	(GENET_RX_OFF + TOTAL_DESCS * DMA_DESC_SIZE)
#define GENET_TX_OFF			0x4000
#define GENET_TDMA_REG_OFF \
	(GENET_TX_OFF + TOTAL_DESCS * DMA_DESC_SIZE)

#define DMA_FC_THRESH_HI		(RX_DESCS >> 4)
#define DMA_FC_THRESH_LO		5
#define DMA_FC_THRESH_VALUE		((DMA_FC_THRESH_LO << 16) |	\
					  DMA_FC_THRESH_HI)

#define DMA_XOFF_THRESHOLD_SHIFT	16

#define TDMA_RING_REG_BASE \
	(GENET_TDMA_REG_OFF + DEFAULT_Q * DMA_RING_SIZE)
#define TDMA_READ_PTR			(TDMA_RING_REG_BASE + 0x00)
#define TDMA_CONS_INDEX			(TDMA_RING_REG_BASE + 0x08)
#define TDMA_PROD_INDEX			(TDMA_RING_REG_BASE + 0x0c)
#define DMA_RING_BUF_SIZE		0x10
#define DMA_START_ADDR			0x14
#define DMA_END_ADDR			0x1c
#define DMA_MBUF_DONE_THRESH		0x24
#define TDMA_FLOW_PERIOD		(TDMA_RING_REG_BASE + 0x28)
#define TDMA_WRITE_PTR			(TDMA_RING_REG_BASE + 0x2c)

#define RDMA_RING_REG_BASE \
	(GENET_RDMA_REG_OFF + DEFAULT_Q * DMA_RING_SIZE)
#define RDMA_WRITE_PTR			(RDMA_RING_REG_BASE + 0x00)
#define RDMA_PROD_INDEX			(RDMA_RING_REG_BASE + 0x08)
#define RDMA_CONS_INDEX			(RDMA_RING_REG_BASE + 0x0c)
#define RDMA_XON_XOFF_THRESH		(RDMA_RING_REG_BASE + 0x28)
#define RDMA_READ_PTR			(RDMA_RING_REG_BASE + 0x2c)

#define TDMA_REG_BASE			(GENET_TDMA_REG_OFF + DMA_RINGS_SIZE)
#define RDMA_REG_BASE			(GENET_RDMA_REG_OFF + DMA_RINGS_SIZE)
#define DMA_RING_CFG			0x00
#define DMA_CTRL			0x04
#define DMA_SCB_BURST_SIZE		0x0c

#define RX_BUF_LENGTH			2048
#define RX_TOTAL_BUFSIZE		(RX_BUF_LENGTH * RX_DESCS)
#define RX_BUF_OFFSET			2
181 struct bcmgenet_eth_priv {
182 char rxbuffer[RX_TOTAL_BUFSIZE] __aligned(ARCH_DMA_MINALIGN);
192 struct phy_device *phydev;
196 static void bcmgenet_umac_reset(struct bcmgenet_eth_priv *priv)
200 reg = readl(priv->mac_reg + SYS_RBUF_FLUSH_CTRL);
202 writel(reg, (priv->mac_reg + SYS_RBUF_FLUSH_CTRL));
206 writel(reg, (priv->mac_reg + SYS_RBUF_FLUSH_CTRL));
209 writel(0, (priv->mac_reg + SYS_RBUF_FLUSH_CTRL));
212 writel(0, priv->mac_reg + UMAC_CMD);
214 writel(CMD_SW_RESET | CMD_LCL_LOOP_EN, priv->mac_reg + UMAC_CMD);
216 writel(0, priv->mac_reg + UMAC_CMD);
218 /* clear tx/rx counter */
219 writel(MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
220 priv->mac_reg + UMAC_MIB_CTRL);
221 writel(0, priv->mac_reg + UMAC_MIB_CTRL);
223 writel(ENET_MAX_MTU_SIZE, priv->mac_reg + UMAC_MAX_FRAME_LEN);
225 /* init rx registers, enable ip header optimization */
226 reg = readl(priv->mac_reg + RBUF_CTRL);
227 reg |= RBUF_ALIGN_2B;
228 writel(reg, (priv->mac_reg + RBUF_CTRL));
230 writel(1, (priv->mac_reg + RBUF_TBUF_SIZE_CTRL));
233 static int bcmgenet_gmac_write_hwaddr(struct udevice *dev)
235 struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
236 struct eth_pdata *pdata = dev_get_platdata(dev);
237 uchar *addr = pdata->enetaddr;
240 reg = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
241 writel_relaxed(reg, priv->mac_reg + UMAC_MAC0);
243 reg = addr[4] << 8 | addr[5];
244 writel_relaxed(reg, priv->mac_reg + UMAC_MAC1);
249 static void bcmgenet_disable_dma(struct bcmgenet_eth_priv *priv)
251 clrbits_32(priv->mac_reg + TDMA_REG_BASE + DMA_CTRL, DMA_EN);
252 clrbits_32(priv->mac_reg + RDMA_REG_BASE + DMA_CTRL, DMA_EN);
254 writel(1, priv->mac_reg + UMAC_TX_FLUSH);
256 writel(0, priv->mac_reg + UMAC_TX_FLUSH);
259 static void bcmgenet_enable_dma(struct bcmgenet_eth_priv *priv)
261 u32 dma_ctrl = (1 << (DEFAULT_Q + DMA_RING_BUF_EN_SHIFT)) | DMA_EN;
263 writel(dma_ctrl, priv->mac_reg + TDMA_REG_BASE + DMA_CTRL);
265 setbits_32(priv->mac_reg + RDMA_REG_BASE + DMA_CTRL, dma_ctrl);
268 static int bcmgenet_gmac_eth_send(struct udevice *dev, void *packet, int length)
270 struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
271 void *desc_base = priv->tx_desc_base + priv->tx_index * DMA_DESC_SIZE;
272 u32 len_stat = length << DMA_BUFLENGTH_SHIFT;
273 ulong packet_aligned = rounddown((ulong)packet, ARCH_DMA_MINALIGN);
274 u32 prod_index, cons;
277 prod_index = readl(priv->mac_reg + TDMA_PROD_INDEX);
279 /* There is actually no reason for the rounding here, but the ARMv7
280 * implementation of flush_dcache_range() checks for aligned
281 * boundaries of the flushed range.
282 * Adjust them here to pass that check and avoid misleading messages.
284 flush_dcache_range(packet_aligned,
285 packet_aligned + roundup(length, ARCH_DMA_MINALIGN));
287 len_stat |= 0x3F << DMA_TX_QTAG_SHIFT;
288 len_stat |= DMA_TX_APPEND_CRC | DMA_SOP | DMA_EOP;
290 /* Set-up packet for transmission */
291 writel(lower_32_bits((ulong)packet), (desc_base + DMA_DESC_ADDRESS_LO));
292 writel(upper_32_bits((ulong)packet), (desc_base + DMA_DESC_ADDRESS_HI));
293 writel(len_stat, (desc_base + DMA_DESC_LENGTH_STATUS));
295 /* Increment index and start transmission */
296 if (++priv->tx_index >= TX_DESCS)
301 /* Start Transmisson */
302 writel(prod_index, priv->mac_reg + TDMA_PROD_INDEX);
305 cons = readl(priv->mac_reg + TDMA_CONS_INDEX);
306 } while ((cons & 0xffff) < prod_index && --tries);
313 /* Check whether all cache lines affected by an invalidate are within
314 * the buffer, to make sure we don't accidentally lose unrelated dirty
315 * data stored nearby.
316 * Alignment of the buffer start address will be checked in the implementation
317 * of invalidate_dcache_range().
319 static void invalidate_dcache_check(unsigned long addr, size_t size,
322 size_t inval_size = roundup(size, ARCH_DMA_MINALIGN);
324 if (unlikely(inval_size > buffer_size))
325 printf("WARNING: Cache invalidate area exceeds buffer size\n");
327 invalidate_dcache_range(addr, addr + inval_size);
330 static int bcmgenet_gmac_eth_recv(struct udevice *dev,
331 int flags, uchar **packetp)
333 struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
334 void *desc_base = priv->rx_desc_base + priv->rx_index * DMA_DESC_SIZE;
335 u32 prod_index = readl(priv->mac_reg + RDMA_PROD_INDEX);
338 if (prod_index == priv->c_index)
341 length = readl(desc_base + DMA_DESC_LENGTH_STATUS);
342 length = (length >> DMA_BUFLENGTH_SHIFT) & DMA_BUFLENGTH_MASK;
343 addr = readl(desc_base + DMA_DESC_ADDRESS_LO);
345 invalidate_dcache_check(addr, length, RX_BUF_LENGTH);
347 /* To cater for the IP header alignment the hardware does.
348 * This would actually not be needed if we don't program
351 *packetp = (uchar *)(ulong)addr + RX_BUF_OFFSET;
353 return length - RX_BUF_OFFSET;
356 static int bcmgenet_gmac_free_pkt(struct udevice *dev, uchar *packet,
359 struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
361 /* Tell the MAC we have consumed that last receive buffer. */
362 priv->c_index = (priv->c_index + 1) & 0xFFFF;
363 writel(priv->c_index, priv->mac_reg + RDMA_CONS_INDEX);
365 /* Forward our descriptor pointer, wrapping around if needed. */
366 if (++priv->rx_index >= RX_DESCS)
372 static void rx_descs_init(struct bcmgenet_eth_priv *priv)
374 char *rxbuffs = &priv->rxbuffer[0];
376 void *desc_base = priv->rx_desc_base;
380 len_stat = (RX_BUF_LENGTH << DMA_BUFLENGTH_SHIFT) | DMA_OWN;
382 for (i = 0; i < RX_DESCS; i++) {
383 writel(lower_32_bits((uintptr_t)&rxbuffs[i * RX_BUF_LENGTH]),
384 desc_base + i * DMA_DESC_SIZE + DMA_DESC_ADDRESS_LO);
385 writel(upper_32_bits((uintptr_t)&rxbuffs[i * RX_BUF_LENGTH]),
386 desc_base + i * DMA_DESC_SIZE + DMA_DESC_ADDRESS_HI);
388 desc_base + i * DMA_DESC_SIZE + DMA_DESC_LENGTH_STATUS);
392 static void rx_ring_init(struct bcmgenet_eth_priv *priv)
394 writel(DMA_MAX_BURST_LENGTH,
395 priv->mac_reg + RDMA_REG_BASE + DMA_SCB_BURST_SIZE);
397 writel(0x0, priv->mac_reg + RDMA_RING_REG_BASE + DMA_START_ADDR);
398 writel(0x0, priv->mac_reg + RDMA_READ_PTR);
399 writel(0x0, priv->mac_reg + RDMA_WRITE_PTR);
400 writel(RX_DESCS * DMA_DESC_SIZE / 4 - 1,
401 priv->mac_reg + RDMA_RING_REG_BASE + DMA_END_ADDR);
403 writel(0x0, priv->mac_reg + RDMA_PROD_INDEX);
404 writel(0x0, priv->mac_reg + RDMA_CONS_INDEX);
405 writel((RX_DESCS << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH,
406 priv->mac_reg + RDMA_RING_REG_BASE + DMA_RING_BUF_SIZE);
407 writel(DMA_FC_THRESH_VALUE, priv->mac_reg + RDMA_XON_XOFF_THRESH);
408 writel(1 << DEFAULT_Q, priv->mac_reg + RDMA_REG_BASE + DMA_RING_CFG);
411 static void tx_ring_init(struct bcmgenet_eth_priv *priv)
413 writel(DMA_MAX_BURST_LENGTH,
414 priv->mac_reg + TDMA_REG_BASE + DMA_SCB_BURST_SIZE);
416 writel(0x0, priv->mac_reg + TDMA_RING_REG_BASE + DMA_START_ADDR);
417 writel(0x0, priv->mac_reg + TDMA_READ_PTR);
418 writel(0x0, priv->mac_reg + TDMA_WRITE_PTR);
419 writel(TX_DESCS * DMA_DESC_SIZE / 4 - 1,
420 priv->mac_reg + TDMA_RING_REG_BASE + DMA_END_ADDR);
421 writel(0x0, priv->mac_reg + TDMA_PROD_INDEX);
422 writel(0x0, priv->mac_reg + TDMA_CONS_INDEX);
423 writel(0x1, priv->mac_reg + TDMA_RING_REG_BASE + DMA_MBUF_DONE_THRESH);
424 writel(0x0, priv->mac_reg + TDMA_FLOW_PERIOD);
425 writel((TX_DESCS << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH,
426 priv->mac_reg + TDMA_RING_REG_BASE + DMA_RING_BUF_SIZE);
428 writel(1 << DEFAULT_Q, priv->mac_reg + TDMA_REG_BASE + DMA_RING_CFG);
431 static int bcmgenet_adjust_link(struct bcmgenet_eth_priv *priv)
433 struct phy_device *phy_dev = priv->phydev;
436 switch (phy_dev->speed) {
438 speed = UMAC_SPEED_1000;
441 speed = UMAC_SPEED_100;
444 speed = UMAC_SPEED_10;
447 printf("bcmgenet: Unsupported PHY speed: %d\n", phy_dev->speed);
451 clrsetbits_32(priv->mac_reg + EXT_RGMII_OOB_CTRL, OOB_DISABLE,
452 RGMII_LINK | RGMII_MODE_EN);
454 if (phy_dev->interface == PHY_INTERFACE_MODE_RGMII)
455 setbits_32(priv->mac_reg + EXT_RGMII_OOB_CTRL, ID_MODE_DIS);
457 writel(speed << CMD_SPEED_SHIFT, (priv->mac_reg + UMAC_CMD));
462 static int bcmgenet_gmac_eth_start(struct udevice *dev)
464 struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
467 priv->tx_desc_base = priv->mac_reg + GENET_TX_OFF;
468 priv->rx_desc_base = priv->mac_reg + GENET_RX_OFF;
469 priv->tx_index = 0x0;
470 priv->rx_index = 0x0;
472 bcmgenet_umac_reset(priv);
474 bcmgenet_gmac_write_hwaddr(dev);
476 /* Disable RX/TX DMA and flush TX queues */
477 bcmgenet_disable_dma(priv);
484 /* Enable RX/TX DMA */
485 bcmgenet_enable_dma(priv);
487 /* read PHY properties over the wire from generic PHY set-up */
488 ret = phy_startup(priv->phydev);
490 printf("bcmgenet: PHY startup failed: %d\n", ret);
494 /* Update MAC registers based on PHY property */
495 ret = bcmgenet_adjust_link(priv);
497 printf("bcmgenet: adjust PHY link failed: %d\n", ret);
502 setbits_32(priv->mac_reg + UMAC_CMD, CMD_TX_EN | CMD_RX_EN);
507 static int bcmgenet_phy_init(struct bcmgenet_eth_priv *priv, void *dev)
509 struct phy_device *phydev;
512 phydev = phy_connect(priv->bus, priv->phyaddr, dev, priv->interface);
516 phydev->supported &= PHY_GBIT_FEATURES;
518 ret = phy_set_supported(priv->phydev, priv->speed);
522 phydev->advertising = phydev->supported;
524 phy_connect_dev(phydev, dev);
526 priv->phydev = phydev;
527 phy_config(priv->phydev);
532 static void bcmgenet_mdio_start(struct bcmgenet_eth_priv *priv)
534 setbits_32(priv->mac_reg + MDIO_CMD, MDIO_START_BUSY);
537 static int bcmgenet_mdio_write(struct mii_dev *bus, int addr, int devad,
540 struct udevice *dev = bus->priv;
541 struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
544 /* Prepare the read operation */
545 val = MDIO_WR | (addr << MDIO_PMD_SHIFT) |
546 (reg << MDIO_REG_SHIFT) | (0xffff & value);
547 writel_relaxed(val, priv->mac_reg + MDIO_CMD);
549 /* Start MDIO transaction */
550 bcmgenet_mdio_start(priv);
552 return wait_for_bit_32(priv->mac_reg + MDIO_CMD,
553 MDIO_START_BUSY, false, 20, true);
556 static int bcmgenet_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
558 struct udevice *dev = bus->priv;
559 struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
563 /* Prepare the read operation */
564 val = MDIO_RD | (addr << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT);
565 writel_relaxed(val, priv->mac_reg + MDIO_CMD);
567 /* Start MDIO transaction */
568 bcmgenet_mdio_start(priv);
570 ret = wait_for_bit_32(priv->mac_reg + MDIO_CMD,
571 MDIO_START_BUSY, false, 20, true);
575 val = readl_relaxed(priv->mac_reg + MDIO_CMD);
580 static int bcmgenet_mdio_init(const char *name, struct udevice *priv)
582 struct mii_dev *bus = mdio_alloc();
585 debug("Failed to allocate MDIO bus\n");
589 bus->read = bcmgenet_mdio_read;
590 bus->write = bcmgenet_mdio_write;
591 snprintf(bus->name, sizeof(bus->name), name);
592 bus->priv = (void *)priv;
594 return mdio_register(bus);
597 /* We only support RGMII (as used on the RPi4). */
598 static int bcmgenet_interface_set(struct bcmgenet_eth_priv *priv)
600 phy_interface_t phy_mode = priv->interface;
603 case PHY_INTERFACE_MODE_RGMII:
604 case PHY_INTERFACE_MODE_RGMII_RXID:
605 writel(PORT_MODE_EXT_GPHY, priv->mac_reg + SYS_PORT_CTRL);
608 printf("unknown phy mode: %d\n", priv->interface);
615 static int bcmgenet_eth_probe(struct udevice *dev)
617 struct eth_pdata *pdata = dev_get_platdata(dev);
618 struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
625 priv->mac_reg = map_physmem(pdata->iobase, SZ_64K, MAP_NOCACHE);
626 priv->interface = pdata->phy_interface;
627 priv->speed = pdata->max_speed;
629 /* Read GENET HW version */
630 reg = readl_relaxed(priv->mac_reg + SYS_REV_CTRL);
631 major = (reg >> 24) & 0x0f;
638 printf("Unsupported GENETv%d.%d\n", major, (reg >> 16) & 0x0f);
642 ret = bcmgenet_interface_set(priv);
646 writel(0, priv->mac_reg + SYS_RBUF_FLUSH_CTRL);
648 /* disable MAC while updating its registers */
649 writel(0, priv->mac_reg + UMAC_CMD);
650 /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
651 writel(CMD_SW_RESET | CMD_LCL_LOOP_EN, priv->mac_reg + UMAC_CMD);
653 mdio_node = dev_read_first_subnode(dev);
654 name = ofnode_get_name(mdio_node);
656 ret = bcmgenet_mdio_init(name, dev);
660 priv->bus = miiphy_get_dev_by_name(name);
662 return bcmgenet_phy_init(priv, dev);
665 static void bcmgenet_gmac_eth_stop(struct udevice *dev)
667 struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
669 clrbits_32(priv->mac_reg + UMAC_CMD, CMD_TX_EN | CMD_RX_EN);
671 bcmgenet_disable_dma(priv);
674 static const struct eth_ops bcmgenet_gmac_eth_ops = {
675 .start = bcmgenet_gmac_eth_start,
676 .write_hwaddr = bcmgenet_gmac_write_hwaddr,
677 .send = bcmgenet_gmac_eth_send,
678 .recv = bcmgenet_gmac_eth_recv,
679 .free_pkt = bcmgenet_gmac_free_pkt,
680 .stop = bcmgenet_gmac_eth_stop,
683 static int bcmgenet_eth_ofdata_to_platdata(struct udevice *dev)
685 struct eth_pdata *pdata = dev_get_platdata(dev);
686 struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
687 struct ofnode_phandle_args phy_node;
688 const char *phy_mode;
691 pdata->iobase = dev_read_addr(dev);
693 /* Get phy mode from DT */
694 pdata->phy_interface = -1;
695 phy_mode = dev_read_string(dev, "phy-mode");
697 pdata->phy_interface = phy_get_interface_by_name(phy_mode);
698 if (pdata->phy_interface == -1) {
699 debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
703 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
706 ofnode_read_s32(phy_node.node, "reg", &priv->phyaddr);
707 ofnode_read_s32(phy_node.node, "max-speed", &pdata->max_speed);
713 /* The BCM2711 implementation has a limited burst length compared to a generic
714 * GENETv5 version, but we go with that shorter value (8) in both cases, for
715 * the sake of simplicity.
717 static const struct udevice_id bcmgenet_eth_ids[] = {
718 {.compatible = "brcm,genet-v5"},
719 {.compatible = "brcm,bcm2711-genet-v5"},
723 U_BOOT_DRIVER(eth_bcmgenet) = {
724 .name = "eth_bcmgenet",
726 .of_match = bcmgenet_eth_ids,
727 .ofdata_to_platdata = bcmgenet_eth_ofdata_to_platdata,
728 .probe = bcmgenet_eth_probe,
729 .ops = &bcmgenet_gmac_eth_ops,
730 .priv_auto_alloc_size = sizeof(struct bcmgenet_eth_priv),
731 .platdata_auto_alloc_size = sizeof(struct eth_pdata),
732 .flags = DM_FLAG_ALLOC_PRIV_DMA,