// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2014-2015 Stefan Roese <sr@denx.de>
 *
 * Based on the Linux version which is:
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 */
#include <common.h>
#include <dm.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>
DECLARE_GLOBAL_DATA_PTR;

#if !defined(CONFIG_PHYLIB)
# error Marvell mvneta requires PHYLIB
#endif

#define CONFIG_NR_CPUS			1
#define ETH_HLEN			14	/* Total octets in header */
/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP				(2 + ETH_HLEN + 4 + 32)
#define MTU				1500
#define RX_BUFFER_SIZE			(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))

#define MVNETA_SMI_TIMEOUT		10000
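/*
 * Illustrative sizing (not from the original source): WRAP is
 * 2 + 14 + 4 + 32 = 52 bytes, so assuming an ARCH_DMA_MINALIGN of 64,
 * RX_BUFFER_SIZE = ALIGN(1500 + 52, 64) = 1600 bytes per RX buffer.
 */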
/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)		(0x1400 + ((q) << 2))
#define      MVNETA_RXQ_HW_BUF_ALLOC		BIT(1)
#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf    << 8)
#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)		(0x14c0 + ((q) << 2))
#define      MVNETA_RXQ_NON_OCCUPIED(v)		((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)		(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)			(0x14a0 + ((q) << 2))
#define      MVNETA_RXQ_BUF_SIZE_SHIFT		19
#define      MVNETA_RXQ_BUF_SIZE_MASK		(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)		(0x14e0 + ((q) << 2))
#define      MVNETA_RXQ_OCCUPIED_ALL_MASK	0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)		(0x1500 + ((q) << 2))
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX	255
#define MVNETA_PORT_RX_RESET			0x1cc0
#define      MVNETA_PORT_RX_DMA_RESET		BIT(0)
#define MVNETA_PHY_ADDR				0x2000
#define      MVNETA_PHY_ADDR_MASK		0x1f
#define MVNETA_SMI				0x2004
#define      MVNETA_PHY_REG_MASK		0x1f
/* SMI register fields */
#define      MVNETA_SMI_DATA_OFFS		0	/* Data */
#define      MVNETA_SMI_DATA_MASK		(0xffff << MVNETA_SMI_DATA_OFFS)
#define      MVNETA_SMI_DEV_ADDR_OFFS		16	/* PHY device address */
#define      MVNETA_SMI_REG_ADDR_OFFS		21	/* PHY device reg addr*/
#define      MVNETA_SMI_OPCODE_OFFS		26	/* Write/Read opcode */
#define      MVNETA_SMI_OPCODE_READ		(1 << MVNETA_SMI_OPCODE_OFFS)
#define      MVNETA_SMI_READ_VALID		(1 << 27)	/* Read Valid */
#define      MVNETA_SMI_BUSY			(1 << 28)	/* Busy */
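/*
 * Illustrative use of the SMI fields (values are hypothetical): a read
 * of register 1 on PHY address 0 is started by writing
 * (0 << MVNETA_SMI_DEV_ADDR_OFFS) | (1 << MVNETA_SMI_REG_ADDR_OFFS) |
 * MVNETA_SMI_OPCODE_READ to MVNETA_SMI, then polling until
 * MVNETA_SMI_READ_VALID is set and masking the result with
 * MVNETA_SMI_DATA_MASK (see mvneta_mdio_read() below).
 */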
#define MVNETA_MBUS_RETRY			0x2010
#define MVNETA_UNIT_INTR_CAUSE			0x2080
#define MVNETA_UNIT_CONTROL			0x20B0
#define      MVNETA_PHY_POLLING_ENABLE		BIT(1)
#define MVNETA_WIN_BASE(w)			(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)			(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)			(0x2280 + ((w) << 2))
#define MVNETA_WIN_SIZE_MASK			(0xffff0000)
#define MVNETA_BASE_ADDR_ENABLE			0x2290
#define      MVNETA_BASE_ADDR_ENABLE_BIT	0x1
#define MVNETA_PORT_ACCESS_PROTECT		0x2294
#define      MVNETA_PORT_ACCESS_PROTECT_WIN0_RW	0x3
#define MVNETA_PORT_CONFIG			0x2400
#define      MVNETA_UNI_PROMISC_MODE		BIT(0)
#define      MVNETA_DEF_RXQ(q)			((q) << 1)
#define      MVNETA_DEF_RXQ_ARP(q)		((q) << 4)
#define      MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define      MVNETA_DEF_RXQ_TCP(q)		((q) << 16)
#define      MVNETA_DEF_RXQ_UDP(q)		((q) << 19)
#define      MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)   | \
						 MVNETA_DEF_RXQ_TCP(q)   | \
						 MVNETA_DEF_RXQ_UDP(q)   | \
						 MVNETA_DEF_RXQ_BPDU(q)  | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND		0x2404
#define MVNETA_MAC_ADDR_LOW			0x2414
#define MVNETA_MAC_ADDR_HIGH			0x2418
#define MVNETA_SDMA_CONFIG			0x241c
#define      MVNETA_SDMA_BRST_SIZE_16		4
#define      MVNETA_RX_BRST_SZ_MASK(burst)	((burst) << 1)
#define      MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define      MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define      MVNETA_DESC_SWAP			BIT(6)
#define      MVNETA_TX_BRST_SZ_MASK(burst)	((burst) << 22)
#define MVNETA_PORT_STATUS			0x2444
#define      MVNETA_TX_IN_PRGRS			BIT(1)
#define      MVNETA_TX_FIFO_EMPTY		BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE		0x247c
#define MVNETA_SERDES_CFG			0x24A0
#define      MVNETA_SGMII_SERDES_PROTO		0x0cc7
#define      MVNETA_QSGMII_SERDES_PROTO		0x0667
#define MVNETA_TYPE_PRIO			0x24bc
#define      MVNETA_FORCE_UNI			BIT(21)
#define MVNETA_TXQ_CMD_1			0x24e4
#define MVNETA_TXQ_CMD				0x2448
#define      MVNETA_TXQ_DISABLE_SHIFT		8
#define      MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_ACC_MODE				0x2500
#define MVNETA_CPU_MAP(cpu)			(0x2540 + ((cpu) << 2))
#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q)		(0x2580 + ((q) << 2))
/* Exception Interrupt Port/Queue Cause register */

#define MVNETA_INTR_NEW_CAUSE			0x25a0
#define MVNETA_INTR_NEW_MASK			0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, see old reg ?
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM,   one bit for 4 ports
 */
#define      MVNETA_TX_INTR_MASK(nr_txqs)	(((1 << nr_txqs) - 1) << 0)
#define      MVNETA_TX_INTR_MASK_ALL		(0xff << 0)
#define      MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << nr_rxqs) - 1) << 8)
#define      MVNETA_RX_INTR_MASK_ALL		(0xff << 8)

#define MVNETA_INTR_OLD_CAUSE			0x25a8
#define MVNETA_INTR_OLD_MASK			0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE			0x25b0
#define MVNETA_INTR_MISC_MASK			0x25b4
#define MVNETA_INTR_ENABLE			0x25b8

#define MVNETA_RXQ_CMD				0x2680
#define      MVNETA_RXQ_DISABLE_SHIFT		8
#define      MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)		(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)		(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0			0x2c00
#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT	2
#define      MVNETA_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define      MVNETA_GMAC0_PORT_ENABLE		BIT(0)
#define MVNETA_GMAC_CTRL_2			0x2c08
#define      MVNETA_GMAC2_PCS_ENABLE		BIT(3)
#define      MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define      MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS			0x2c10
#define      MVNETA_GMAC_LINK_UP		BIT(0)
#define      MVNETA_GMAC_SPEED_1000		BIT(1)
#define      MVNETA_GMAC_SPEED_100		BIT(2)
#define      MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG		0x2c0c
#define      MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define      MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define      MVNETA_GMAC_FORCE_LINK_UP		(BIT(0) | BIT(1))
#define      MVNETA_GMAC_IB_BYPASS_AN_EN	BIT(3)
#define      MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define      MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define      MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define      MVNETA_GMAC_SET_FC_EN		BIT(8)
#define      MVNETA_GMAC_ADVERT_FC_EN		BIT(9)
#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define      MVNETA_GMAC_AN_DUPLEX_EN		BIT(13)
#define      MVNETA_GMAC_SAMPLE_TX_CFG_EN	BIT(15)
#define MVNETA_MIB_COUNTERS_BASE		0x3080
#define      MVNETA_MIB_LATE_COLLISION		0x7c
#define MVNETA_DA_FILT_SPEC_MCAST		0x3400
#define MVNETA_DA_FILT_OTH_MCAST		0x3500
#define MVNETA_DA_FILT_UCAST_BASE		0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)		(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)			(0x3c20 + ((q) << 2))
#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)		(0x3c60 + ((q) << 2))
#define      MVNETA_TXQ_DEC_SENT_SHIFT		16
#define MVNETA_TXQ_STATUS_REG(q)		(0x3c40 + ((q) << 2))
#define      MVNETA_TXQ_SENT_DESC_SHIFT		16
#define      MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
#define MVNETA_PORT_TX_RESET			0x3cf0
#define      MVNETA_PORT_TX_DMA_RESET		BIT(0)
#define MVNETA_TX_MTU				0x3e0c
#define MVNETA_TX_TOKEN_SIZE			0x3e14
#define      MVNETA_TX_TOKEN_SIZE_MAX		0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)		(0x3e40 + ((q) << 2))
#define      MVNETA_TXQ_TOKEN_SIZE_MAX		0x7fffffff
/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
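/*
 * Illustrative wraparound (not from the original source): with a
 * 16-entry ring (last_desc == 15), MVNETA_QUEUE_NEXT_DESC(q, 14) is 15
 * and MVNETA_QUEUE_NEXT_DESC(q, 15) wraps back to 0.
 */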
/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		16
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow to have the IP header aligned on a 4 bytes
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVNETA_MH_SIZE			2
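/*
 * Worked alignment example (illustrative): the 14-byte Ethernet header
 * alone would leave the IP header at offset 14. With the 2-byte Marvell
 * header in front, the IP header starts at offset 2 + 14 = 16, which is
 * 4-byte aligned.
 */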
#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT		1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			16

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			16

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32
struct mvneta_port {
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;

	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	phy_interface_t phy_interface;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	int init;
	int phyaddr;
	struct phy_device *phydev;
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct gpio_desc phy_reset_gpio;
#endif
	struct mii_dev *bus;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT		0
#define MVNETA_TX_IP_HLEN_SHIFT		8
#define MVNETA_TX_L4_UDP		BIT(16)
#define MVNETA_TX_L3_IP6		BIT(17)
#define MVNETA_TXD_IP_CSUM		BIT(18)
#define MVNETA_TXD_Z_PAD		BIT(19)
#define MVNETA_TXD_L_DESC		BIT(20)
#define MVNETA_TXD_F_DESC		BIT(21)
#define MVNETA_TXD_FLZ_DESC		(MVNETA_TXD_Z_PAD  | \
					 MVNETA_TXD_L_DESC | \
					 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL		BIT(30)
#define MVNETA_TX_L4_CSUM_NOT		BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserved1;		/* csum_l4 (for future use) */
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u16  data_size;		/* Size of received packet in bytes */

	u32  buf_phys_addr;	/* Physical address of the buffer */
	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u16  reserved4;		/* csum_l4 - (for future use, PnC) */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
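/*
 * Both descriptor layouts add up to 32 bytes (4 + 2 + 2 + 4 + 4 + 16 for
 * TX, 4 + 2 + 2 + 4 + 4 + 4 + 2 + 2 + 4 + 4 for RX), matching
 * MVNETA_DESC_ALIGNED_SIZE above.
 */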
struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

/* U-Boot doesn't use the queues, so set the number to 1 */
static int rxq_number = 1;
static int txq_number = 1;
static int rxq_def;

struct buffer_location {
	struct mvneta_tx_desc *tx_descs;
	struct mvneta_rx_desc *rx_descs;
	u32 rx_buffers;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). driver uses less bd's so use 1MB bdspace.
 */
#define BD_SPACE	(1 << 20)
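/*
 * Rough budget of this 1 MiB area (illustrative, assuming the 16/16
 * descriptor rings above and an RX_BUFFER_SIZE of about 1600 bytes):
 * 16 * 32 = 512 bytes of TX descriptors, 512 bytes of RX descriptors
 * and roughly 25 KiB of RX buffers, so BD_SPACE leaves ample headroom.
 */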
/*
 * Dummy implementation that can be overwritten by a board
 * specific function
 */
__weak int board_network_enable(struct mii_dev *bus)
{
	return 0;
}
/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}
/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
			(rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}

		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}

		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	return rxq->descs + rx_desc;
}

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once ; Assume caller
	 * process TX descriptors in quanta less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

static int mvneta_port_is_fixed_link(struct mvneta_port *pp)
{
	/* phy_addr is set to invalid value for fixed link */
	return pp->phyaddr > PHY_MAX_ADDR;
}
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}
/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}
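/*
 * Illustrative encoding (not from the original source): for queue 0 the
 * per-entry byte is 0x1 | (0 << 1) = 0x01, so each filtering register is
 * written with 0x01010101, i.e. "accept and route to RXQ 0" for all four
 * entries it holds.
 */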
/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Enable PHY polling in hardware if not in fixed-link mode */
	if (!mvneta_port_is_fixed_link(pp)) {
		val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
		val |= MVNETA_PHY_POLLING_ENABLE;
		mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
	}

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}
/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

static int mvneta_write_hwaddr(struct udevice *dev)
{
	mvneta_mac_addr_set(dev_get_priv(dev),
			    ((struct eth_pdata *)dev_get_platdata(dev))->enetaddr,
			    rxq_def);

	return 0;
}
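/*
 * Illustrative packing (example address 00:50:43:02:02:03): mac_h is
 * 0x00504302 and mac_l is 0x00000203, and the last nibble (0x3) picks
 * the unicast filter table entry via mvneta_set_ucast_addr().
 */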
/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}
static struct mvneta_rx_queue *mvneta_rxq_handle_get(struct mvneta_port *pp,
						     int rxq)
{
	return &pp->rxqs[rxq];
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		u32 addr;

		/* U-Boot special: Fill in the rx buffer addresses */
		addr = buffer_loc.rx_buffers + (i * RX_BUFFER_SIZE);
		mvneta_rx_desc_fill(rxq->descs + i, addr, addr);
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return 0;
}

/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs_phys = (dma_addr_t)rxq->descs;
	if (rxq->descs == NULL)
		return -ENOMEM;

	WARN_ON(rxq->descs != PTR_ALIGN(rxq->descs, ARCH_DMA_MINALIGN));

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Fill RXQ with buffers from RX pool */
	mvneta_rxq_buf_size_set(pp, rxq, RX_BUFFER_SIZE);
	mvneta_rxq_fill(pp, rxq, rxq->size);

	return 0;
}
/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys = 0;
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	txq->size = pp->tx_ring_size;

	/* Allocate memory for TX descriptors */
	txq->descs_phys = (dma_addr_t)txq->descs;
	if (txq->descs == NULL)
		return -ENOMEM;

	WARN_ON(txq->descs != PTR_ALIGN(txq->descs, ARCH_DMA_MINALIGN));

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	return 0;
}

/* Free allocated resources when mvneta_txq_init() fails to allocate memory*/
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}

/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}
static void mvneta_start_dev(struct mvneta_port *pp)
{
	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);
}

static void mvneta_adjust_link(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct phy_device *phydev = pp->phydev;
	int status_change = 0;

	if (mvneta_port_is_fixed_link(pp)) {
		debug("Using fixed link, skip link adjust\n");
		return;
	}

	if (phydev->link) {
		if ((pp->speed != phydev->speed) ||
		    (pp->duplex != phydev->duplex)) {
			u32 val;

			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
				 MVNETA_GMAC_AN_SPEED_EN |
				 MVNETA_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

			pp->duplex = phydev->duplex;
			pp->speed = phydev->speed;
		}
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val |= (MVNETA_GMAC_FORCE_LINK_PASS |
				MVNETA_GMAC_FORCE_LINK_DOWN);
			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
			mvneta_port_up(pp);
		} else {
			mvneta_port_down(pp);
		}
	}
}

static int mvneta_open(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	int ret;

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		return ret;

	mvneta_adjust_link(dev);

	mvneta_start_dev(pp);

	return 0;
}
/* Initialize hw */
static int mvneta_init2(struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
			   GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* U-Boot special: use preallocated area */
	pp->txqs[0].descs = buffer_loc.tx_descs;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
	}

	pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
			   GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	/* U-Boot special: use preallocated area */
	pp->rxqs[0].descs = buffer_loc.rx_descs;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
	}

	return 0;
}
/* platform glue : initialize decoding windows */

/*
 * Not like A380, in Armada3700, there are two layers of decode windows for GBE:
 * First layer is:  GbE Address window that resides inside the GBE unit,
 * Second layer is: Fabric address window which is located in the NIC400
 *                  (South Fabric).
 * To simplify the address decode configuration for Armada3700, we bypass the
 * first layer of GBE decode window by setting the first window to 4GB.
 */
static void mvneta_bypass_mbus_windows(struct mvneta_port *pp)
{
	/*
	 * Set window size to 4GB, to bypass GBE address decode, leave the
	 * work to MBUS decode window
	 */
	mvreg_write(pp, MVNETA_WIN_SIZE(0), MVNETA_WIN_SIZE_MASK);

	/* Enable GBE address decode window 0 by set bit 0 to 0 */
	clrbits_le32(pp->base + MVNETA_BASE_ADDR_ENABLE,
		     MVNETA_BASE_ADDR_ENABLE_BIT);

	/* Set GBE address decode window 0 to full Access (read or write) */
	setbits_le32(pp->base + MVNETA_PORT_ACCESS_PROTECT,
		     MVNETA_PORT_ACCESS_PROTECT_WIN0_RW);
}

static void mvneta_conf_mbus_windows(struct mvneta_port *pp)
{
	const struct mbus_dram_target_info *dram;
	u32 win_enable;
	u32 win_protect;
	int i;

	dram = mvebu_mbus_dram_info();
	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}
/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 ctrl;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	switch (phy_mode) {
	case PHY_INTERFACE_MODE_QSGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		ctrl |= MVNETA_GMAC2_PORT_RGMII;
		break;
	default:
		return -EINVAL;
	}

	/* Cancel Port Reset */
	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);

	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;

	return 0;
}

/* Device initialization routine */
static int mvneta_init(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvneta_port *pp = dev_get_priv(dev);
	int err;

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	err = mvneta_init2(pp);
	if (err < 0) {
		dev_err(dev, "can't init eth hal\n");
		return err;
	}

	mvneta_mac_addr_set(pp, pdata->enetaddr, rxq_def);

	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(dev, "can't power up port\n");
		return err;
	}

	/* Call open() now as it needs to be done before running send() */
	mvneta_open(dev);

	return 0;
}

/* U-Boot only functions follow here */
/* SMI / MDIO functions */

static int smi_wait_ready(struct mvneta_port *pp)
{
	u32 timeout = MVNETA_SMI_TIMEOUT;
	u32 smi_reg;

	/* wait till the SMI is not busy */
	do {
		/* read smi register */
		smi_reg = mvreg_read(pp, MVNETA_SMI);
		if (timeout-- == 0) {
			printf("Error: SMI busy timeout\n");
			return -EFAULT;
		}
	} while (smi_reg & MVNETA_SMI_BUSY);

	return 0;
}

/*
 * mvneta_mdio_read - miiphy_read callback function.
 *
 * Returns 16bit phy register value, or 0xffff on error
 */
static int mvneta_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct mvneta_port *pp = bus->priv;
	u32 smi_reg;
	u32 timeout;

	/* check parameters */
	if (addr > MVNETA_PHY_ADDR_MASK) {
		printf("Error: Invalid PHY address %d\n", addr);
		return -EFAULT;
	}

	if (reg > MVNETA_PHY_REG_MASK) {
		printf("Err: Invalid register offset %d\n", reg);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(pp) < 0)
		return -EFAULT;

	/* fill the phy address and register offset and read opcode */
	smi_reg = (addr << MVNETA_SMI_DEV_ADDR_OFFS)
		| (reg << MVNETA_SMI_REG_ADDR_OFFS)
		| MVNETA_SMI_OPCODE_READ;

	/* write the smi register */
	mvreg_write(pp, MVNETA_SMI, smi_reg);

	/* wait till read value is ready */
	timeout = MVNETA_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = mvreg_read(pp, MVNETA_SMI);
		if (timeout-- == 0) {
			printf("Err: SMI read ready timeout\n");
			return -EFAULT;
		}
	} while (!(smi_reg & MVNETA_SMI_READ_VALID));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVNETA_SMI_TIMEOUT; timeout++)
		;

	return mvreg_read(pp, MVNETA_SMI) & MVNETA_SMI_DATA_MASK;
}

/*
 * mvneta_mdio_write - miiphy_write callback function.
 *
 * Returns 0 if write succeeded, -EFAULT on bad parameters or busy SMI
 */
static int mvneta_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			     u16 value)
{
	struct mvneta_port *pp = bus->priv;
	u32 smi_reg;

	/* check parameters */
	if (addr > MVNETA_PHY_ADDR_MASK) {
		printf("Error: Invalid PHY address %d\n", addr);
		return -EFAULT;
	}

	if (reg > MVNETA_PHY_REG_MASK) {
		printf("Err: Invalid register offset %d\n", reg);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(pp) < 0)
		return -EFAULT;

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = value << MVNETA_SMI_DATA_OFFS;
	smi_reg |= (addr << MVNETA_SMI_DEV_ADDR_OFFS)
		| (reg << MVNETA_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVNETA_SMI_OPCODE_READ;

	/* write the smi register */
	mvreg_write(pp, MVNETA_SMI, smi_reg);

	return 0;
}
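/*
 * Illustrative write transaction (hypothetical values): writing 0x1140
 * to register 0 of PHY address 0 composes smi_reg as
 * (0x1140 << MVNETA_SMI_DATA_OFFS) | (0 << MVNETA_SMI_DEV_ADDR_OFFS) |
 * (0 << MVNETA_SMI_REG_ADDR_OFFS) with the read opcode bit cleared.
 */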
static int mvneta_start(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct phy_device *phydev;

	mvneta_port_power_up(pp, pp->phy_interface);

	if (!pp->init || pp->link == 0) {
		if (mvneta_port_is_fixed_link(pp)) {
			u32 val;

			pp->init = 1;
			pp->link = 1;
			mvneta_init(dev);

			val = MVNETA_GMAC_FORCE_LINK_UP |
			      MVNETA_GMAC_IB_BYPASS_AN_EN |
			      MVNETA_GMAC_SET_FC_EN |
			      MVNETA_GMAC_ADVERT_FC_EN |
			      MVNETA_GMAC_SAMPLE_TX_CFG_EN;

			if (pp->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (pp->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else if (pp->speed == SPEED_100)
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
		} else {
			/* Set phy address of the port */
			mvreg_write(pp, MVNETA_PHY_ADDR, pp->phyaddr);

			phydev = phy_connect(pp->bus, pp->phyaddr, dev,
					     pp->phy_interface);
			if (!phydev) {
				printf("phy_connect failed\n");
				return -ENODEV;
			}

			pp->phydev = phydev;

			phy_startup(phydev);
			if (!phydev->link) {
				printf("%s: No link.\n", phydev->dev->name);
				return -1;
			}

			/* Full init on first call */
			mvneta_init(dev);
			pp->init = 1;
			return 0;
		}
	}

	/* Upon all following calls, this is enough */
	mvneta_port_up(pp);
	mvneta_port_enable(pp);

	return 0;
}
static int mvneta_send(struct udevice *dev, void *packet, int length)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct mvneta_tx_queue *txq = &pp->txqs[0];
	struct mvneta_tx_desc *tx_desc;
	int sent_desc;
	u32 timeout = 0;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_desc->buf_phys_addr = (u32)(uintptr_t)packet;
	tx_desc->data_size = length;
	flush_dcache_range((ulong)packet,
			   (ulong)packet + ALIGN(length, PKTALIGN));

	/* First and Last descriptor */
	tx_desc->command = MVNETA_TX_L4_CSUM_NOT | MVNETA_TXD_FLZ_DESC;
	mvneta_txq_pend_desc_add(pp, txq, 1);

	/* Wait for packet to be sent (queue might help with speed here) */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
	while (!sent_desc) {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return -1;
		}
		sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
	}

	/* txDone has increased - hw sent packet */
	mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return 0;
}
static int mvneta_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	int rx_done;
	struct mvneta_rx_queue *rxq;
	int rx_bytes = 0;

	/* get rx queue */
	rxq = mvneta_rxq_handle_get(pp, rxq_def);
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_done) {
		struct mvneta_rx_desc *rx_desc;
		unsigned char *data;
		u32 rx_status;

		/*
		 * No cache invalidation needed here, since the desc's are
		 * located in a uncached memory region
		 */
		rx_desc = mvneta_rxq_next_desc_get(rxq);

		rx_status = rx_desc->status;
		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			return -EIO;
		}

		/* 2 bytes for marvell header. 4 bytes for crc */
		rx_bytes = rx_desc->data_size - 6;

		/* give packet to stack - skip on first 2 bytes */
		data = (u8 *)(uintptr_t)rx_desc->buf_cookie + 2;
		/*
		 * No cache invalidation needed here, since the rx_buffer's are
		 * located in a uncached memory region
		 */
		*packetp = data;

		/*
		 * Only mark one descriptor as free
		 * since only one was processed
		 */
		mvneta_rxq_desc_num_update(pp, rxq, 1, 1);
	}

	return rx_bytes;
}
static int mvneta_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvneta_port *pp = dev_get_priv(dev);
	void *blob = (void *)gd->fdt_blob;
	int node = dev_of_offset(dev);
	struct mii_dev *bus;
	unsigned long addr;
	void *bd_space;
	int ret;
	int fl_node;

	/*
	 * Allocate buffer area for descs and rx_buffers. This is only
	 * done once for all interfaces. As only one interface can
	 * be active. Make this area DMA safe by disabling the D-cache
	 */
	if (!buffer_loc.tx_descs) {
		u32 size;

		/* Align buffer area for descs and rx_buffers to 1MiB */
		bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
		flush_dcache_range((ulong)bd_space, (ulong)bd_space + BD_SPACE);
		mmu_set_region_dcache_behaviour((phys_addr_t)bd_space, BD_SPACE,
						DCACHE_OFF);
		buffer_loc.tx_descs = (struct mvneta_tx_desc *)bd_space;
		size = roundup(MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc),
				ARCH_DMA_MINALIGN);
		memset(buffer_loc.tx_descs, 0, size);
		buffer_loc.rx_descs = (struct mvneta_rx_desc *)
			((phys_addr_t)bd_space + size);
		size += roundup(MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc),
				ARCH_DMA_MINALIGN);
		buffer_loc.rx_buffers = (phys_addr_t)(bd_space + size);
	}

	pp->base = (void __iomem *)pdata->iobase;

	/* Configure MBUS address windows */
	if (device_is_compatible(dev, "marvell,armada-3700-neta"))
		mvneta_bypass_mbus_windows(pp);
	else
		mvneta_conf_mbus_windows(pp);

	/* PHY interface is already decoded in mvneta_ofdata_to_platdata() */
	pp->phy_interface = pdata->phy_interface;

	/* fetch 'fixed-link' property from 'neta' node */
	fl_node = fdt_subnode_offset(blob, node, "fixed-link");
	if (fl_node != -FDT_ERR_NOTFOUND) {
		/* set phy_addr to invalid value for fixed link */
		pp->phyaddr = PHY_MAX_ADDR + 1;
		pp->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex");
		pp->speed = fdtdec_get_int(blob, fl_node, "speed", 0);
	} else {
		/* Now read phyaddr from DT */
		addr = fdtdec_get_int(blob, node, "phy", 0);
		addr = fdt_node_offset_by_phandle(blob, addr);
		pp->phyaddr = fdtdec_get_int(blob, addr, "reg", 0);
	}

	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = mvneta_mdio_read;
	bus->write = mvneta_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = (void *)pp;
	pp->bus = bus;

	ret = mdio_register(bus);
	if (ret)
		return ret;

#if CONFIG_IS_ENABLED(DM_GPIO)
	gpio_request_by_name(dev, "phy-reset-gpios", 0,
			     &pp->phy_reset_gpio, GPIOD_IS_OUT);

	if (dm_gpio_is_valid(&pp->phy_reset_gpio)) {
		dm_gpio_set_value(&pp->phy_reset_gpio, 1);
		mdelay(10);
		dm_gpio_set_value(&pp->phy_reset_gpio, 0);
	}
#endif

	return board_network_enable(bus);
}
static void mvneta_stop(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);

	mvneta_port_down(pp);
	mvneta_port_disable(pp);
}
static const struct eth_ops mvneta_ops = {
	.start		= mvneta_start,
	.send		= mvneta_send,
	.recv		= mvneta_recv,
	.stop		= mvneta_stop,
	.write_hwaddr	= mvneta_write_hwaddr,
};
static int mvneta_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	const char *phy_mode;

	pdata->iobase = devfdt_get_addr(dev);

	/* Get phy-mode / phy_interface from DT */
	pdata->phy_interface = -1;
	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	return 0;
}
static const struct udevice_id mvneta_ids[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ }
};
U_BOOT_DRIVER(mvneta) = {
	.name	= "mvneta",
	.id	= UCLASS_ETH,
	.of_match = mvneta_ids,
	.ofdata_to_platdata = mvneta_ofdata_to_platdata,
	.probe	= mvneta_probe,
	.ops	= &mvneta_ops,
	.priv_auto_alloc_size = sizeof(struct mvneta_port),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};