/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * U-Boot version:
 * Copyright (C) 2016 Stefan Roese <sr@denx.de>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <net.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>

DECLARE_GLOBAL_DATA_PTR;
/* Some linux -> U-Boot compatibility stuff */
#define netdev_err(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_warn(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_info(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_dbg(dev, fmt, args...)		\
	printf(fmt, ##args)

#define ETH_ALEN	6		/* Octets in one ethernet addr */

#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)

#define VERIFY_PERCPU_PTR(__p)						\
({									\
	__verify_pcpu_ptr(__p);						\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define smp_processor_id()	0
#define num_present_cpus()	1
#define for_each_present_cpu(cpu)			\
	for ((cpu) = 0; (cpu) < 1; (cpu)++)

#define NET_SKB_PAD	max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define CONFIG_NR_CPUS		1
#define ETH_HLEN		ETHER_HDR_SIZE	/* Total octets in header */
/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP			(2 + ETH_HLEN + 4 + 32)
#define MTU			1500
#define RX_BUFFER_SIZE		(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
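/*
 * Worked example (assuming MTU 1500 and ARCH_DMA_MINALIGN 64, both of
 * which are platform dependent): WRAP = 2 + 14 + 4 + 32 = 52 bytes, so
 * RX_BUFFER_SIZE = ALIGN(1500 + 52, 64) = 1600 bytes per receive buffer.
 */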

#define MVPP2_SMI_TIMEOUT	10000
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP2_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP2_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET		16
#define MVPP2_TRANSMITTED_THRESH_MASK		0x3fff0000
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP2_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))
#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP2_BM_MC_RLS_REG			0x64c4
#define MVPP2_BM_MC_ID_MASK			0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff
/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))
/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index)		\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
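/*
 * Example: with a 16-descriptor ring (last_desc = 15), index 14 advances
 * to 15, and index 15 wraps back to 0.
 */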
/* SMI: 0xc0054 -> offset 0x54 to lms_base */
#define MVPP2_SMI			0x0054
#define MVPP2_PHY_REG_MASK		0x1f
/* SMI register fields */
#define MVPP2_SMI_DATA_OFFS		0	/* Data */
#define MVPP2_SMI_DATA_MASK		(0xffff << MVPP2_SMI_DATA_OFFS)
#define MVPP2_SMI_DEV_ADDR_OFFS		16	/* PHY device address */
#define MVPP2_SMI_REG_ADDR_OFFS		21	/* PHY device reg addr*/
#define MVPP2_SMI_OPCODE_OFFS		26	/* Write/Read opcode */
#define MVPP2_SMI_OPCODE_READ		(1 << MVPP2_SMI_OPCODE_OFFS)
#define MVPP2_SMI_READ_VALID		(1 << 27)	/* Read Valid */
#define MVPP2_SMI_BUSY			(1 << 28)	/* Busy */

#define MVPP2_PHY_ADDR_MASK		0x1f
#define MVPP2_PHY_REG_MASK		0x1f
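/*
 * A minimal sketch (not part of the original driver) of how the SMI fields
 * above compose a PHY register read; the function name and the smi_base
 * parameter are hypothetical.
 */
static int mvpp2_smi_read_sketch(void __iomem *smi_base, u32 phy_addr, u32 reg)
{
	u32 timeout = MVPP2_SMI_TIMEOUT;

	/* Wait until any previous SMI transaction has finished */
	while (readl(smi_base) & MVPP2_SMI_BUSY) {
		if (--timeout == 0)
			return -ETIME;
		udelay(10);
	}

	/* Issue a read opcode for the given PHY device and register */
	writel(((phy_addr & MVPP2_PHY_ADDR_MASK) << MVPP2_SMI_DEV_ADDR_OFFS) |
	       ((reg & MVPP2_PHY_REG_MASK) << MVPP2_SMI_REG_ADDR_OFFS) |
	       MVPP2_SMI_OPCODE_READ, smi_base);

	/* Wait for the read data to become valid */
	timeout = MVPP2_SMI_TIMEOUT;
	while (!(readl(smi_base) & MVPP2_SMI_READ_VALID)) {
		if (--timeout == 0)
			return -ETIME;
		udelay(10);
	}

	return readl(smi_base) & MVPP2_SMI_DATA_MASK;
}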
/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow to have the IP header aligned on a 4 bytes
 * boundary automatically: the hardware skips those two bytes on its
 * way to the buffer.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Maximum number of RXQs used by single port */
#define MVPP2_MAX_RXQ			8

/* Default number of TXQs in use */
#define MVPP2_DEFAULT_TXQ		1
/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		1
#define CONFIG_MV_ETH_RXQ		8	/* increment by 8 */
/* Total number of RXQs available to all ports */
#define MVPP2_RXQ_TOTAL_NUM	(MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			16

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			16

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE	0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE	0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	0	/* no skb_shared_info in U-Boot */

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size)	\
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
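/*
 * Worked example (ETH_FCS_LEN is 4): MVPP2_RX_PKT_SIZE(1500) =
 * ALIGN(1500 + 2 + 4 + 14 + 4, 32) = ALIGN(1524, 32) = 1536 bytes.
 */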
#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16
/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS			8
#define MVPP2_PRS_PORT_MASK			0xff
#define MVPP2_PRS_LU_MASK			0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
	(((offs) * 2) - ((offs) % 2) + 2)
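/*
 * Example of the interleaved layout these two macros produce: data bytes
 * for offsets 0, 1, 2, 3 land in tcam.byte[0], [1], [4], [5], while their
 * enable bytes land in tcam.byte[2], [3], [6], [7] - i.e. each pair of
 * data bytes is followed by its pair of enable bytes.
 */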
#define MVPP2_PRS_TCAM_AI_BYTE			16
#define MVPP2_PRS_TCAM_PORT_BYTE		17
#define MVPP2_PRS_TCAM_LU_BYTE			20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD			5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111
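/*
 * Example of the flat bit addressing used above: MVPP2_PRS_SRAM_AI_OFFS
 * is bit 90, which MVPP2_BIT_TO_BYTE() maps to sram.byte[11], bit 2
 * within that byte (90 = 11 * 8 + 2).
 */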
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)
/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false
/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM constants */
#define MVPP2_BM_POOLS_NUM		1
#define MVPP2_BM_LONG_BUF_NUM		16
#define MVPP2_BM_SHORT_BUF_NUM		16
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	0
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24
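/*
 * Illustrative composition (inferred from the offsets above, not a quote
 * of the driver's helper): a cookie is built as
 * (cpu << MVPP2_BM_COOKIE_CPU_OFFS) | (pool << MVPP2_BM_COOKIE_POOL_OFFS),
 * letting RX code recover the originating pool and CPU from one word.
 */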
/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE	MVPP2_RX_MAX_PKT_SIZE(512)
enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;
};
struct mvpp2_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};
struct mvpp2_port {
	u8 id;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)
struct mvpp2_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

struct mvpp2_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC) */
	u32 reserved8;
};
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};
struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};
struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	unsigned char udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};
struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;

	/* Occupied buffers indicator */
	int in_use;
};
/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;
/*
 * Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). The driver uses fewer buffer descriptors, so
 * 1MB of BD space is sufficient.
 */
#define BD_SPACE	(1 << 20)
/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	tx_desc->buf_dma_addr = dma_addr;
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	tx_desc->data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	tx_desc->phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	tx_desc->command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	tx_desc->packet_offset = offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	return rx_desc->buf_dma_addr;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	return rx_desc->buf_cookie;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	return rx_desc->data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	return rx_desc->status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
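/*
 * Worked example: for port 0, txq 0, the physical TXQ is
 * (16 + 0) * 8 + 0 = 128; egress "port" numbers 0-15 are reserved for
 * the PON T-CONTs, hence the MVPP2_MAX_TCONT offset in both helpers.
 */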
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}
/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram store classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int index, int port,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}
/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Set default entries (placeholders) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Placeholders only - no ports */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
}
1681 /* Match basic ethertypes */
1682 static int mvpp2_prs_etype_init(struct mvpp2 *priv)
1684 struct mvpp2_prs_entry pe;
1687 /* Ethertype: PPPoE */
1688 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1689 MVPP2_PE_LAST_FREE_TID);
1693 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1694 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1697 mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);
1699 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
1700 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1701 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1702 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
1703 MVPP2_PRS_RI_PPPOE_MASK);
1705 /* Update shadow table and hw entry */
1706 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1707 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1708 priv->prs_shadow[pe.index].finish = false;
1709 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
1710 MVPP2_PRS_RI_PPPOE_MASK);
1711 mvpp2_prs_hw_write(priv, &pe);
1713 /* Ethertype: ARP */
1714 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1715 MVPP2_PE_LAST_FREE_TID);
1719 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1720 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1723 mvpp2_prs_match_etype(&pe, 0, PROT_ARP);
1725 /* Generate flow in the next iteration*/
1726 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1727 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1728 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
1729 MVPP2_PRS_RI_L3_PROTO_MASK);
1731 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1733 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1735 /* Update shadow table and hw entry */
1736 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1737 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1738 priv->prs_shadow[pe.index].finish = true;
1739 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
1740 MVPP2_PRS_RI_L3_PROTO_MASK);
1741 mvpp2_prs_hw_write(priv, &pe);
1743 /* Ethertype: LBTD */
1744 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1745 MVPP2_PE_LAST_FREE_TID);
1749 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1750 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1753 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
1755 /* Generate flow in the next iteration*/
1756 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1757 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1758 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1759 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1760 MVPP2_PRS_RI_CPU_CODE_MASK |
1761 MVPP2_PRS_RI_UDF3_MASK);
1763 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1765 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1767 /* Update shadow table and hw entry */
1768 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1769 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1770 priv->prs_shadow[pe.index].finish = true;
1771 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1772 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1773 MVPP2_PRS_RI_CPU_CODE_MASK |
1774 MVPP2_PRS_RI_UDF3_MASK);
1775 mvpp2_prs_hw_write(priv, &pe);
1777 /* Ethertype: IPv4 without options */
1778 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1779 MVPP2_PE_LAST_FREE_TID);
1783 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1784 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1787 mvpp2_prs_match_etype(&pe, 0, PROT_IP);
1788 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1789 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
1790 MVPP2_PRS_IPV4_HEAD_MASK |
1791 MVPP2_PRS_IPV4_IHL_MASK);
1793 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1794 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
1795 MVPP2_PRS_RI_L3_PROTO_MASK);
1796 /* Skip eth_type + 4 bytes of IP header */
1797 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
1798 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1800 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1802 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1804 /* Update shadow table and hw entry */
1805 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1806 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1807 priv->prs_shadow[pe.index].finish = false;
1808 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
1809 MVPP2_PRS_RI_L3_PROTO_MASK);
1810 mvpp2_prs_hw_write(priv, &pe);
1812 /* Ethertype: IPv4 with options */
1813 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1814 MVPP2_PE_LAST_FREE_TID);
1820 /* Clear tcam data before updating */
1821 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
1822 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
1824 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1825 MVPP2_PRS_IPV4_HEAD,
1826 MVPP2_PRS_IPV4_HEAD_MASK);
1828 /* Clear ri before updating */
1829 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1830 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1831 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
1832 MVPP2_PRS_RI_L3_PROTO_MASK);
1834 /* Update shadow table and hw entry */
1835 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1836 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1837 priv->prs_shadow[pe.index].finish = false;
1838 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
1839 MVPP2_PRS_RI_L3_PROTO_MASK);
1840 mvpp2_prs_hw_write(priv, &pe);
1842 /* Ethertype: IPv6 without options */
1843 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1844 MVPP2_PE_LAST_FREE_TID);
1848 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1849 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1852 mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);
1854 /* Skip DIP of IPV6 header */
1855 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
1856 MVPP2_MAX_L3_ADDR_SIZE,
1857 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1858 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1859 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
1860 MVPP2_PRS_RI_L3_PROTO_MASK);
1862 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1863 MVPP2_ETH_TYPE_LEN,
1864 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1866 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1867 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1868 priv->prs_shadow[pe.index].finish = false;
1869 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
1870 MVPP2_PRS_RI_L3_PROTO_MASK);
1871 mvpp2_prs_hw_write(priv, &pe);
1873 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
1874 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1875 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1876 pe.index = MVPP2_PE_ETH_TYPE_UN;
1878 /* Unmask all ports */
1879 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1881 /* Generate flow in the next iteration */
1882 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1883 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1884 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
1885 MVPP2_PRS_RI_L3_PROTO_MASK);
1886 /* Set L3 offset even though the L3 protocol is unknown */
1887 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1888 MVPP2_ETH_TYPE_LEN,
1889 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1891 /* Update shadow table and hw entry */
1892 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1893 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1894 priv->prs_shadow[pe.index].finish = true;
1895 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
1896 MVPP2_PRS_RI_L3_PROTO_MASK);
1897 mvpp2_prs_hw_write(priv, &pe);
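/*
 * All of the ethertype entries above follow the same recipe: grab a
 * free TCAM index, describe the match in the TCAM half, program the
 * SRAM half (result info, next lookup, shifts/offsets), mirror the
 * entry in the shadow table and commit it to hardware. A condensed
 * sketch of that pattern (illustrative only; 0x88f7 is an arbitrary
 * example ethertype and error handling is omitted):
 *
 *	struct mvpp2_prs_entry pe;
 *	int tid;
 *
 *	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 *					MVPP2_PE_LAST_FREE_TID);
 *	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
 *	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);	// L2 lookup stage
 *	pe.index = tid;					// chosen TCAM slot
 *	mvpp2_prs_match_etype(&pe, 0, 0x88f7);		// match ethertype
 *	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
 *	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
 *	mvpp2_prs_hw_write(priv, &pe);			// commit to HW
 */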
1902 /* Parser default initialization */
1903 static int mvpp2_prs_default_init(struct udevice *dev,
1908 /* Enable tcam table */
1909 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
1911 /* Clear all tcam and sram entries */
1912 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
1913 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1914 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1915 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
1917 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
1918 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1919 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
1922 /* Invalidate all tcam entries */
1923 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
1924 mvpp2_prs_hw_inv(priv, index);
1926 priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
1927 sizeof(struct mvpp2_prs_shadow),
1929 if (!priv->prs_shadow)
1932 /* Always start from lookup = 0 */
1933 for (index = 0; index < MVPP2_MAX_PORTS; index++)
1934 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
1935 MVPP2_PRS_PORT_LU_MAX, 0);
1937 mvpp2_prs_def_flow_init(priv);
1939 mvpp2_prs_mh_init(priv);
1941 mvpp2_prs_mac_init(priv);
1943 err = mvpp2_prs_etype_init(priv);
1950 /* Compare MAC DA with tcam entry data */
1951 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
1952 const u8 *da, unsigned char *mask)
1954 unsigned char tcam_byte, tcam_mask;
1957 for (index = 0; index < ETH_ALEN; index++) {
1958 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
1959 if (tcam_mask != mask[index])
1962 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
1969 /* Find tcam entry with matched pair <MAC DA, port> */
1970 static struct mvpp2_prs_entry *
1971 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
1972 unsigned char *mask, int udf_type)
1974 struct mvpp2_prs_entry *pe;
1977 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1980 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
1982 /* Go through all entries with MVPP2_PRS_LU_MAC */
1983 for (tid = MVPP2_PE_FIRST_FREE_TID;
1984 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1985 unsigned int entry_pmap;
1987 if (!priv->prs_shadow[tid].valid ||
1988 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
1989 (priv->prs_shadow[tid].udf != udf_type))
1993 mvpp2_prs_hw_read(priv, pe);
1994 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
1996 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
2005 /* Update parser's mac da entry */
2006 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
2007 const u8 *da, bool add)
2009 struct mvpp2_prs_entry *pe;
2010 unsigned int pmap, len, ri;
2011 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2014 /* Scan TCAM and see if entry with this <MAC DA, port> already exists */
2015 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
2016 MVPP2_PRS_UDF_MAC_DEF);
2023 /* Create new TCAM entry */
2024 /* Find the first range MAC entry */
2025 for (tid = MVPP2_PE_FIRST_FREE_TID;
2026 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
2027 if (priv->prs_shadow[tid].valid &&
2028 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
2029 (priv->prs_shadow[tid].udf ==
2030 MVPP2_PRS_UDF_MAC_RANGE))
2033 /* Go through all entries from first to last */
2034 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2039 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2042 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2045 /* Mask all ports */
2046 mvpp2_prs_tcam_port_map_set(pe, 0);
2049 /* Update port mask */
2050 mvpp2_prs_tcam_port_set(pe, port, add);
2052 /* Invalidate the entry if no ports are left enabled */
2053 pmap = mvpp2_prs_tcam_port_map_get(pe);
2059 mvpp2_prs_hw_inv(priv, pe->index);
2060 priv->prs_shadow[pe->index].valid = false;
2065 /* Continue - set next lookup */
2066 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
2068 /* Set match on DA */
2071 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
2073 /* Set result info bits */
2074 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
2076 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2077 MVPP2_PRS_RI_MAC_ME_MASK);
2078 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2079 MVPP2_PRS_RI_MAC_ME_MASK);
2081 /* Shift to ethertype */
2082 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
2083 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2085 /* Update shadow table and hw entry */
2086 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
2087 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
2088 mvpp2_prs_hw_write(priv, pe);
2095 static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
2099 /* Remove old parser entry */
2100 err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
2105 /* Add new parser entry */
2106 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
2110 /* Set addr in the device */
2111 memcpy(port->dev_addr, da, ETH_ALEN);
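/*
 * Usage sketch (illustrative, not a new driver path): on a MAC address
 * change the caller only supplies the new address; the helper above
 * removes the old parser entry and installs the new one. The address
 * below is an arbitrary example value:
 *
 *	u8 new_addr[ETH_ALEN] = { 0x00, 0x50, 0x43, 0x12, 0x34, 0x56 };
 *
 *	if (mvpp2_prs_update_mac_da(port, new_addr) < 0)
 *		netdev_err(port->dev, "failed to update MAC DA\n");
 */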
2116 /* Set prs flow for the port */
2117 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
2119 struct mvpp2_prs_entry *pe;
2122 pe = mvpp2_prs_flow_find(port->priv, port->id);
2124 /* No such entry exists */
2126 /* Go through all entries from last to first */
2127 tid = mvpp2_prs_tcam_first_free(port->priv,
2128 MVPP2_PE_LAST_FREE_TID,
2129 MVPP2_PE_FIRST_FREE_TID);
2133 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2137 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
2141 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2142 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2144 /* Update shadow table */
2145 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
2148 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
2149 mvpp2_prs_hw_write(port->priv, pe);
2155 /* Classifier configuration routines */
2157 /* Update classification flow table registers */
2158 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
2159 struct mvpp2_cls_flow_entry *fe)
2161 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
2162 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
2163 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
2164 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
2167 /* Update classification lookup table register */
2168 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
2169 struct mvpp2_cls_lookup_entry *le)
2173 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
2174 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
2175 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
2178 /* Classifier default initialization */
2179 static void mvpp2_cls_init(struct mvpp2 *priv)
2181 struct mvpp2_cls_lookup_entry le;
2182 struct mvpp2_cls_flow_entry fe;
2185 /* Enable classifier */
2186 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
2188 /* Clear classifier flow table */
2189 memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS);
2190 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
2192 mvpp2_cls_flow_write(priv, &fe);
2195 /* Clear classifier lookup table */
2197 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
2200 mvpp2_cls_lookup_write(priv, &le);
2203 mvpp2_cls_lookup_write(priv, &le);
2207 static void mvpp2_cls_port_config(struct mvpp2_port *port)
2209 struct mvpp2_cls_lookup_entry le;
2212 /* Set way for the port */
2213 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
2214 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
2215 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
2217 /* Pick the entry to be accessed in lookup ID decoding table
2218 * according to the way and lkpid.
2220 le.lkpid = port->id;
2224 /* Set initial CPU queue for receiving packets */
2225 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
2226 le.data |= port->first_rxq;
2228 /* Disable classification engines */
2229 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
2231 /* Update lookup ID table entry */
2232 mvpp2_cls_lookup_write(port->priv, &le);
2235 /* Set CPU queue number for oversize packets */
2236 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
2240 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
2241 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
2243 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
2244 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
2246 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
2247 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
2248 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
2251 /* Buffer Manager configuration routines */
2254 static int mvpp2_bm_pool_create(struct udevice *dev,
2256 struct mvpp2_bm_pool *bm_pool, int size)
2260 bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
2261 bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
2262 if (!bm_pool->virt_addr)
2265 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
2266 MVPP2_BM_POOL_PTR_ALIGN)) {
2267 dev_err(dev, "BM pool %d is not %d bytes aligned\n",
2268 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
2272 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
2274 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
2276 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2277 val |= MVPP2_BM_START_MASK;
2278 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2280 bm_pool->type = MVPP2_BM_FREE;
2281 bm_pool->size = size;
2282 bm_pool->pkt_size = 0;
2283 bm_pool->buf_num = 0;
2288 /* Set pool buffer size */
2289 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
2290 struct mvpp2_bm_pool *bm_pool,
2295 bm_pool->buf_size = buf_size;
2297 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
2298 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
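/*
 * The pool buffer size must be a multiple of 32 bytes
 * (1 << MVPP2_POOL_BUF_SIZE_OFFSET), so the requested size is rounded
 * up before being programmed. Worked example (illustrative values):
 *
 *	ALIGN(1518, 32) == 1536	// a 1518-byte request programs 1536
 */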
2301 /* Free all buffers from the pool */
2302 static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
2303 struct mvpp2_bm_pool *bm_pool)
2305 bm_pool->buf_num = 0;
2309 static int mvpp2_bm_pool_destroy(struct udevice *dev,
2311 struct mvpp2_bm_pool *bm_pool)
2315 mvpp2_bm_bufs_free(dev, priv, bm_pool);
2316 if (bm_pool->buf_num) {
2317 dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id);
2321 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2322 val |= MVPP2_BM_STOP_MASK;
2323 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2328 static int mvpp2_bm_pools_init(struct udevice *dev,
2332 struct mvpp2_bm_pool *bm_pool;
2334 /* Create all pools with maximum size */
2335 size = MVPP2_BM_POOL_SIZE_MAX;
2336 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2337 bm_pool = &priv->bm_pools[i];
2339 err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
2341 goto err_unroll_pools;
2342 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
2347 dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
2348 for (i = i - 1; i >= 0; i--)
2349 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
2353 static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
2357 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2358 /* Mask BM all interrupts */
2359 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
2360 /* Clear BM cause register */
2361 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
2364 /* Allocate and initialize BM pools */
2365 priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
2366 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
2367 if (!priv->bm_pools)
2370 err = mvpp2_bm_pools_init(dev, priv);
2376 /* Attach long pool to rxq */
2377 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
2378 int lrxq, int long_pool)
2383 /* Get queue physical ID */
2384 prxq = port->rxqs[lrxq]->id;
2386 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2387 val &= ~MVPP2_RXQ_POOL_LONG_MASK;
2388 val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
2389 MVPP2_RXQ_POOL_LONG_MASK);
2391 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2394 /* Set pool number in a BM cookie */
2395 static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
2399 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
2400 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
2405 /* Get pool number from a BM cookie */
2406 static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
2408 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
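/*
 * The cookie is a packed 32-bit word with the pool and CPU ids in
 * separate byte fields, so the set/get helpers round-trip cleanly.
 * Illustrative sketch:
 *
 *	u32 bm = mvpp2_bm_cookie_pool_set(0, 3);
 *	int pool = mvpp2_bm_cookie_pool_get(bm);	// pool == 3
 */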
2411 /* Release buffer to BM */
2412 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
2413 dma_addr_t buf_dma_addr,
2414 unsigned long buf_phys_addr)
2416 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
2417 * returned in the "cookie" field of the RX
2418 * descriptor. Instead of storing the virtual address, we
2419 * store the physical address
2421 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
2422 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
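/*
 * Note the ordering above: the cookie word is written first, and the
 * write to MVPP2_BM_PHY_RLS_REG is what actually hands the buffer back
 * to the pool. A caller releasing one uncached buffer would do
 * (sketch, assuming "addr" points to a DMA-safe buffer):
 *
 *	mvpp2_bm_pool_put(port, bm_pool->id, (dma_addr_t)addr,
 *			  (unsigned long)addr);
 */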
2425 /* Refill BM pool */
2426 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
2427 dma_addr_t dma_addr,
2428 phys_addr_t phys_addr)
2430 int pool = mvpp2_bm_cookie_pool_get(bm);
2432 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
2435 /* Allocate buffers for the pool */
2436 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
2437 struct mvpp2_bm_pool *bm_pool, int buf_num)
2442 (buf_num + bm_pool->buf_num > bm_pool->size)) {
2443 netdev_err(port->dev,
2444 "cannot allocate %d buffers for pool %d\n",
2445 buf_num, bm_pool->id);
2449 for (i = 0; i < buf_num; i++) {
2450 mvpp2_bm_pool_put(port, bm_pool->id,
2451 (dma_addr_t)buffer_loc.rx_buffer[i],
2452 (unsigned long)buffer_loc.rx_buffer[i]);
2456 /* Update BM driver with number of buffers added to pool */
2457 bm_pool->buf_num += i;
2458 bm_pool->in_use_thresh = bm_pool->buf_num / 4;
2463 /* Notify the driver that BM pool is being used as a specific type and return the
2464 * pool pointer on success
2466 static struct mvpp2_bm_pool *
2467 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
2470 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
2473 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
2474 netdev_err(port->dev, "mixing pool types is forbidden\n");
2478 if (new_pool->type == MVPP2_BM_FREE)
2479 new_pool->type = type;
2481 /* Allocate buffers in case BM pool is used as long pool, but packet
2482 * size doesn't match MTU or BM pool hasn't been used yet
2484 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
2485 (new_pool->pkt_size == 0)) {
2488 /* Set default buffer number or free all the buffers in case
2489 * the pool is not empty
2491 pkts_num = new_pool->buf_num;
2493 pkts_num = type == MVPP2_BM_SWF_LONG ?
2494 MVPP2_BM_LONG_BUF_NUM :
2495 MVPP2_BM_SHORT_BUF_NUM;
2497 mvpp2_bm_bufs_free(NULL,
2498 port->priv, new_pool);
2500 new_pool->pkt_size = pkt_size;
2502 /* Allocate buffers for this pool */
2503 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
2504 if (num != pkts_num) {
2505 dev_err(dev, "pool %d: %d of %d allocated\n",
2506 new_pool->id, num, pkts_num);
2511 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
2512 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
2517 /* Initialize pools for swf */
2518 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
2522 if (!port->pool_long) {
2524 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
2527 if (!port->pool_long)
2530 port->pool_long->port_map |= (1 << port->id);
2532 for (rxq = 0; rxq < rxq_number; rxq++)
2533 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
2539 /* Port configuration routines */
2541 static void mvpp2_port_mii_set(struct mvpp2_port *port)
2545 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2547 switch (port->phy_interface) {
2548 case PHY_INTERFACE_MODE_SGMII:
2549 val |= MVPP2_GMAC_INBAND_AN_MASK;
2551 case PHY_INTERFACE_MODE_RGMII:
2552 val |= MVPP2_GMAC_PORT_RGMII_MASK;
2554 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
2557 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2560 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
2564 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2565 val |= MVPP2_GMAC_FC_ADV_EN;
2566 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2569 static void mvpp2_port_enable(struct mvpp2_port *port)
2573 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2574 val |= MVPP2_GMAC_PORT_EN_MASK;
2575 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
2576 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2579 static void mvpp2_port_disable(struct mvpp2_port *port)
2583 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2584 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
2585 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2588 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
2589 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
2593 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
2594 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
2595 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2598 /* Configure loopback port */
2599 static void mvpp2_port_loopback_set(struct mvpp2_port *port)
2603 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
2605 if (port->speed == 1000)
2606 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
2608 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
2610 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
2611 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
2613 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
2615 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2618 static void mvpp2_port_reset(struct mvpp2_port *port)
2622 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2623 ~MVPP2_GMAC_PORT_RESET_MASK;
2624 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2626 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2627 MVPP2_GMAC_PORT_RESET_MASK)
2631 /* Change maximum receive size of the port */
2632 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2636 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2637 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
2638 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2639 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2640 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
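/*
 * The MAX_RX_SIZE field is programmed in units of 2 bytes, after
 * subtracting the Marvell header. Worked example, assuming
 * MVPP2_MH_SIZE == 2: for pkt_size = 1536 the field value is
 * (1536 - 2) / 2 = 767.
 */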
2643 /* Set defaults to the MVPP2 port */
2644 static void mvpp2_defaults_set(struct mvpp2_port *port)
2646 int tx_port_num, val, queue, ptxq, lrxq;
2648 /* Configure port to loopback if needed */
2649 if (port->flags & MVPP2_F_LOOPBACK)
2650 mvpp2_port_loopback_set(port);
2652 /* Update TX FIFO MIN Threshold */
2653 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2654 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
2655 /* Min. TX threshold must be less than minimal packet length */
2656 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
2657 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2659 /* Disable Legacy WRR, Disable EJP, Release from reset */
2660 tx_port_num = mvpp2_egress_port(port);
2661 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
2663 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
2665 /* Close bandwidth for all queues */
2666 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
2667 ptxq = mvpp2_txq_phys(port->id, queue);
2668 mvpp2_write(port->priv,
2669 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
2672 /* Set refill period to 1 usec, refill tokens
2673 * and bucket size to maximum
2675 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
2676 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
2677 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
2678 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
2679 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
2680 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
2681 val = MVPP2_TXP_TOKEN_SIZE_MAX;
2682 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2684 /* Set MaximumLowLatencyPacketSize value to 256 */
2685 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
2686 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
2687 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
2689 /* Enable Rx cache snoop */
2690 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2691 queue = port->rxqs[lrxq]->id;
2692 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2693 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
2694 MVPP2_SNOOP_BUF_HDR_MASK;
2695 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2699 /* Enable/disable receiving packets */
2700 static void mvpp2_ingress_enable(struct mvpp2_port *port)
2705 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2706 queue = port->rxqs[lrxq]->id;
2707 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2708 val &= ~MVPP2_RXQ_DISABLE_MASK;
2709 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2713 static void mvpp2_ingress_disable(struct mvpp2_port *port)
2718 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2719 queue = port->rxqs[lrxq]->id;
2720 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2721 val |= MVPP2_RXQ_DISABLE_MASK;
2722 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2726 /* Enable transmit via physical egress queue
2727 * - HW starts taking descriptors from DRAM
2729 static void mvpp2_egress_enable(struct mvpp2_port *port)
2733 int tx_port_num = mvpp2_egress_port(port);
2735 /* Enable all initialized TXs. */
2737 for (queue = 0; queue < txq_number; queue++) {
2738 struct mvpp2_tx_queue *txq = port->txqs[queue];
2740 if (txq->descs != NULL)
2741 qmap |= (1 << queue);
2744 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2745 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2748 /* Disable transmit via physical egress queue
2749 * - HW doesn't take descriptors from DRAM
2751 static void mvpp2_egress_disable(struct mvpp2_port *port)
2755 int tx_port_num = mvpp2_egress_port(port);
2757 /* Issue stop command for active channels only */
2758 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2759 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
2760 MVPP2_TXP_SCHED_ENQ_MASK;
2762 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
2763 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
2765 /* Wait for all Tx activity to terminate. */
2768 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
2769 netdev_warn(port->dev,
2770 "Tx stop timed out, status=0x%08x\n",
2777 /* Check the port TX Command register to verify that
2778 * all Tx queues are stopped
2780 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
2781 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
2784 /* Rx descriptors helper methods */
2786 /* Get number of Rx descriptors occupied by received packets */
2788 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2790 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2792 return val & MVPP2_RXQ_OCCUPIED_MASK;
2795 /* Update Rx queue status with the number of occupied and available
2796 * Rx descriptor slots.
2799 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2800 int used_count, int free_count)
2802 /* Decrement the number of used descriptors and increment the
2803 * number of free descriptors.
2805 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2807 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
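/*
 * Both counts share one register write. For example, after handing a
 * single received packet back to the pool, the receive path below
 * calls mvpp2_rxq_status_update(port, rxq->id, 1, 1), which writes
 * 1 | (1 << MVPP2_RXQ_NUM_NEW_OFFSET): one descriptor leaves the
 * occupied count and one is returned to hardware.
 */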
2810 /* Get pointer to next RX descriptor to be processed by SW */
2811 static inline struct mvpp2_rx_desc *
2812 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2814 int rx_desc = rxq->next_desc_to_proc;
2816 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2817 prefetch(rxq->descs + rxq->next_desc_to_proc);
2818 return rxq->descs + rx_desc;
2821 /* Set rx queue offset */
2822 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2823 int prxq, int offset)
2827 /* Convert offset from bytes to units of 32 bytes */
2828 offset = offset >> 5;
2830 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2831 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2834 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2835 MVPP2_RXQ_PACKET_OFFSET_MASK);
2837 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
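/*
 * Worked example: mvpp2_rxq_init() below passes NET_SKB_PAD as the
 * offset; with the minimum 32-byte pad this is programmed as
 * 32 >> 5 == 1, i.e. one 32-byte unit in the PACKET_OFFSET field.
 */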
2840 /* Obtain BM cookie information from descriptor */
2841 static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
2842 struct mvpp2_rx_desc *rx_desc)
2844 int cpu = smp_processor_id();
2847 pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
2848 MVPP2_RXD_BM_POOL_ID_MASK) >>
2849 MVPP2_RXD_BM_POOL_ID_OFFS;
2851 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
2852 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
2855 /* Tx descriptors helper methods */
2857 /* Get number of Tx descriptors waiting to be transmitted by HW */
2858 static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
2859 struct mvpp2_tx_queue *txq)
2863 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
2864 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
2866 return val & MVPP2_TXQ_PENDING_MASK;
2869 /* Get pointer to next Tx descriptor to be processed (send) by HW */
2870 static struct mvpp2_tx_desc *
2871 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
2873 int tx_desc = txq->next_desc_to_proc;
2875 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
2876 return txq->descs + tx_desc;
2879 /* Update HW with number of aggregated Tx descriptors to be sent */
2880 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
2882 /* aggregated access - relevant TXQ number is written in TX desc */
2883 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
2886 /* Get number of sent descriptors and decrement counter.
2887 * The number of sent descriptors is returned.
2890 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
2891 struct mvpp2_tx_queue *txq)
2895 /* Reading status reg resets transmitted descriptor counter */
2896 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
2898 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
2899 MVPP2_TRANSMITTED_COUNT_OFFSET;
2902 static void mvpp2_txq_sent_counter_clear(void *arg)
2904 struct mvpp2_port *port = arg;
2907 for (queue = 0; queue < txq_number; queue++) {
2908 int id = port->txqs[queue]->id;
2910 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
2914 /* Set max sizes for Tx queues */
2915 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
2918 int txq, tx_port_num;
2920 mtu = port->pkt_size * 8;
2921 if (mtu > MVPP2_TXP_MTU_MAX)
2922 mtu = MVPP2_TXP_MTU_MAX;
2924 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
2927 /* Indirect access to registers */
2928 tx_port_num = mvpp2_egress_port(port);
2929 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2932 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
2933 val &= ~MVPP2_TXP_MTU_MAX;
2935 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
2937 /* TXP token size and all TXQs token size must be larger than the MTU */
2938 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
2939 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
2942 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
2944 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2947 for (txq = 0; txq < txq_number; txq++) {
2948 val = mvpp2_read(port->priv,
2949 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
2950 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
2954 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
2956 mvpp2_write(port->priv,
2957 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
2963 /* Free Tx queue skbuffs */
2964 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
2965 struct mvpp2_tx_queue *txq,
2966 struct mvpp2_txq_pcpu *txq_pcpu, int num)
2970 for (i = 0; i < num; i++)
2971 mvpp2_txq_inc_get(txq_pcpu);
2974 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
2977 int queue = fls(cause) - 1;
2979 return port->rxqs[queue];
2982 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
2985 int queue = fls(cause) - 1;
2987 return port->txqs[queue];
2990 /* Rx/Tx queue initialization/cleanup methods */
2992 /* Allocate and initialize descriptors for aggr TXQ */
2993 static int mvpp2_aggr_txq_init(struct udevice *dev,
2994 struct mvpp2_tx_queue *aggr_txq,
2995 int desc_num, int cpu,
2998 /* Allocate memory for TX descriptors */
2999 aggr_txq->descs = buffer_loc.aggr_tx_descs;
3000 aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
3001 if (!aggr_txq->descs)
3004 /* Make sure descriptor address is cache line size aligned */
3005 BUG_ON(aggr_txq->descs !=
3006 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3008 aggr_txq->last_desc = aggr_txq->size - 1;
3010 /* Aggr TXQ no reset WA */
3011 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
3012 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
3014 /* Set Tx descriptors queue starting address */
3015 /* indirect access */
3016 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
3017 aggr_txq->descs_dma);
3018 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
3023 /* Create a specified Rx queue */
3024 static int mvpp2_rxq_init(struct mvpp2_port *port,
3025 struct mvpp2_rx_queue *rxq)
3028 rxq->size = port->rx_ring_size;
3030 /* Allocate memory for RX descriptors */
3031 rxq->descs = buffer_loc.rx_descs;
3032 rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
3036 BUG_ON(rxq->descs !=
3037 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3039 rxq->last_desc = rxq->size - 1;
3041 /* Zero occupied and non-occupied counters - direct access */
3042 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3044 /* Set Rx descriptors queue starting address - indirect access */
3045 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
3046 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_dma);
3047 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
3048 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
3051 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
3053 /* Add number of descriptors ready for receiving packets */
3054 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
3059 /* Push packets received by the RXQ to BM pool */
3060 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
3061 struct mvpp2_rx_queue *rxq)
3065 rx_received = mvpp2_rxq_received(port, rxq->id);
3069 for (i = 0; i < rx_received; i++) {
3070 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3071 u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
3073 mvpp2_pool_refill(port, bm,
3074 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
3075 mvpp2_rxdesc_cookie_get(port, rx_desc));
3077 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
3080 /* Cleanup Rx queue */
3081 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
3082 struct mvpp2_rx_queue *rxq)
3084 mvpp2_rxq_drop_pkts(port, rxq);
3088 rxq->next_desc_to_proc = 0;
3091 /* Clear Rx descriptors queue starting address and size;
3092 * free descriptor number
3094 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3095 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
3096 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
3097 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
3100 /* Create and initialize a Tx queue */
3101 static int mvpp2_txq_init(struct mvpp2_port *port,
3102 struct mvpp2_tx_queue *txq)
3105 int cpu, desc, desc_per_txq, tx_port_num;
3106 struct mvpp2_txq_pcpu *txq_pcpu;
3108 txq->size = port->tx_ring_size;
3110 /* Allocate memory for Tx descriptors */
3111 txq->descs = buffer_loc.tx_descs;
3112 txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
3116 /* Make sure descriptor address is cache line size aligned */
3117 BUG_ON(txq->descs !=
3118 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3120 txq->last_desc = txq->size - 1;
3122 /* Set Tx descriptors queue starting address - indirect access */
3123 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3124 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
3125 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
3126 MVPP2_TXQ_DESC_SIZE_MASK);
3127 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
3128 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
3129 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
3130 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
3131 val &= ~MVPP2_TXQ_PENDING_MASK;
3132 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
3134 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
3135 * for each existing TXQ.
3136 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
3137 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
3140 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
3141 (txq->log_id * desc_per_txq);
3143 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
3144 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
3145 MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
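/*
 * Worked example of the prefetch-buffer base, using the 16 descriptors
 * per TXQ reserved above and assuming MVPP2_MAX_TXQ == 8 (an
 * illustrative assumption): port 1, logical queue 2 gets
 * desc = 1 * 8 * 16 + 2 * 16 = 160, with the threshold at half a
 * queue's reservation (8 descriptors).
 */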
3147 /* WRR / EJP configuration - indirect access */
3148 tx_port_num = mvpp2_egress_port(port);
3149 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3151 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
3152 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
3153 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
3154 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
3155 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
3157 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
3158 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
3161 for_each_present_cpu(cpu) {
3162 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3163 txq_pcpu->size = txq->size;
3169 /* Free allocated TXQ resources */
3170 static void mvpp2_txq_deinit(struct mvpp2_port *port,
3171 struct mvpp2_tx_queue *txq)
3175 txq->next_desc_to_proc = 0;
3178 /* Set minimum bandwidth for disabled TXQs */
3179 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
3181 /* Set Tx descriptors queue starting address and size */
3182 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3183 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
3184 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
3187 /* Cleanup Tx ports */
3188 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
3190 struct mvpp2_txq_pcpu *txq_pcpu;
3191 int delay, pending, cpu;
3194 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3195 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
3196 val |= MVPP2_TXQ_DRAIN_EN_MASK;
3197 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3199 /* The queue has been stopped, so wait for all packets
3200 * to be transmitted.
3204 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
3205 netdev_warn(port->dev,
3206 "port %d: cleaning queue %d timed out\n",
3207 port->id, txq->log_id);
3213 pending = mvpp2_txq_pend_desc_num_get(port, txq);
3216 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3217 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3219 for_each_present_cpu(cpu) {
3220 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3222 /* Release all packets */
3223 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
3226 txq_pcpu->count = 0;
3227 txq_pcpu->txq_put_index = 0;
3228 txq_pcpu->txq_get_index = 0;
3232 /* Cleanup all Tx queues */
3233 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
3235 struct mvpp2_tx_queue *txq;
3239 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
3241 /* Reset Tx ports and delete Tx queues */
3242 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
3243 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3245 for (queue = 0; queue < txq_number; queue++) {
3246 txq = port->txqs[queue];
3247 mvpp2_txq_clean(port, txq);
3248 mvpp2_txq_deinit(port, txq);
3251 mvpp2_txq_sent_counter_clear(port);
3253 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
3254 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3257 /* Cleanup all Rx queues */
3258 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
3262 for (queue = 0; queue < rxq_number; queue++)
3263 mvpp2_rxq_deinit(port, port->rxqs[queue]);
3266 /* Init all Rx queues for port */
3267 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
3271 for (queue = 0; queue < rxq_number; queue++) {
3272 err = mvpp2_rxq_init(port, port->rxqs[queue]);
3279 mvpp2_cleanup_rxqs(port);
3283 /* Init all tx queues for port */
3284 static int mvpp2_setup_txqs(struct mvpp2_port *port)
3286 struct mvpp2_tx_queue *txq;
3289 for (queue = 0; queue < txq_number; queue++) {
3290 txq = port->txqs[queue];
3291 err = mvpp2_txq_init(port, txq);
3296 mvpp2_txq_sent_counter_clear(port);
3300 mvpp2_cleanup_txqs(port);
3305 static void mvpp2_link_event(struct mvpp2_port *port)
3307 struct phy_device *phydev = port->phy_dev;
3308 int status_change = 0;
3312 if ((port->speed != phydev->speed) ||
3313 (port->duplex != phydev->duplex)) {
3316 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3317 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
3318 MVPP2_GMAC_CONFIG_GMII_SPEED |
3319 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
3320 MVPP2_GMAC_AN_SPEED_EN |
3321 MVPP2_GMAC_AN_DUPLEX_EN);
3324 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
3326 if (phydev->speed == SPEED_1000)
3327 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
3328 else if (phydev->speed == SPEED_100)
3329 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
3331 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3333 port->duplex = phydev->duplex;
3334 port->speed = phydev->speed;
3338 if (phydev->link != port->link) {
3339 if (!phydev->link) {
3344 port->link = phydev->link;
3348 if (status_change) {
3350 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3351 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
3352 MVPP2_GMAC_FORCE_LINK_DOWN);
3353 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3354 mvpp2_egress_enable(port);
3355 mvpp2_ingress_enable(port);
3357 mvpp2_ingress_disable(port);
3358 mvpp2_egress_disable(port);
3363 /* Main RX/TX processing routines */
3365 /* Display more error info */
3366 static void mvpp2_rx_error(struct mvpp2_port *port,
3367 struct mvpp2_rx_desc *rx_desc)
3369 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3370 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
3372 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3373 case MVPP2_RXD_ERR_CRC:
3374 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
3377 case MVPP2_RXD_ERR_OVERRUN:
3378 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
3381 case MVPP2_RXD_ERR_RESOURCE:
3382 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
3388 /* Refill the BM pool with the received buffer (no skb handling in U-Boot) */
3389 static int mvpp2_rx_refill(struct mvpp2_port *port,
3390 struct mvpp2_bm_pool *bm_pool,
3391 u32 bm, dma_addr_t dma_addr)
3393 mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
3397 /* Set hw internals when starting port */
3398 static void mvpp2_start_dev(struct mvpp2_port *port)
3400 mvpp2_gmac_max_rx_size_set(port);
3401 mvpp2_txp_max_tx_size_set(port);
3403 mvpp2_port_enable(port);
3406 /* Set hw internals when stopping port */
3407 static void mvpp2_stop_dev(struct mvpp2_port *port)
3409 /* Stop new packets from arriving to RXQs */
3410 mvpp2_ingress_disable(port);
3412 mvpp2_egress_disable(port);
3413 mvpp2_port_disable(port);
3416 static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
3418 struct phy_device *phy_dev;
3420 if (!port->init || port->link == 0) {
3421 phy_dev = phy_connect(port->priv->bus, port->phyaddr, dev,
3422 port->phy_interface);
3423 port->phy_dev = phy_dev;
3425 netdev_err(port->dev, "cannot connect to phy\n");
3428 phy_dev->supported &= PHY_GBIT_FEATURES;
3429 phy_dev->advertising = phy_dev->supported;
3431 port->phy_dev = phy_dev;
3436 phy_config(phy_dev);
3437 phy_startup(phy_dev);
3438 if (!phy_dev->link) {
3439 printf("%s: No link\n", phy_dev->dev->name);
3445 mvpp2_egress_enable(port);
3446 mvpp2_ingress_enable(port);
3452 static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
3454 unsigned char mac_bcast[ETH_ALEN] = {
3455 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3458 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
3460 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
3463 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
3464 port->dev_addr, true);
3466 netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
3469 err = mvpp2_prs_def_flow(port);
3471 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
3475 /* Allocate the Rx/Tx queues */
3476 err = mvpp2_setup_rxqs(port);
3478 netdev_err(port->dev, "cannot allocate Rx queues\n");
3482 err = mvpp2_setup_txqs(port);
3484 netdev_err(port->dev, "cannot allocate Tx queues\n");
3488 err = mvpp2_phy_connect(dev, port);
3492 mvpp2_link_event(port);
3494 mvpp2_start_dev(port);
3499 /* No Device ops here in U-Boot */
3501 /* Driver initialization */
3503 static void mvpp2_port_power_up(struct mvpp2_port *port)
3505 mvpp2_port_mii_set(port);
3506 mvpp2_port_periodic_xon_disable(port);
3507 mvpp2_port_fc_adv_enable(port);
3508 mvpp2_port_reset(port);
3511 /* Initialize port HW */
3512 static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
3514 struct mvpp2 *priv = port->priv;
3515 struct mvpp2_txq_pcpu *txq_pcpu;
3516 int queue, cpu, err;
3518 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
3522 mvpp2_egress_disable(port);
3523 mvpp2_port_disable(port);
3525 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
3530 /* Associate physical Tx queues to this port and initialize.
3531 * The mapping is predefined.
3533 for (queue = 0; queue < txq_number; queue++) {
3534 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
3535 struct mvpp2_tx_queue *txq;
3537 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
3541 txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
3546 txq->id = queue_phy_id;
3547 txq->log_id = queue;
3548 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
3549 for_each_present_cpu(cpu) {
3550 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3551 txq_pcpu->cpu = cpu;
3554 port->txqs[queue] = txq;
3557 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
3562 /* Allocate and initialize Rx queue for this port */
3563 for (queue = 0; queue < rxq_number; queue++) {
3564 struct mvpp2_rx_queue *rxq;
3566 /* Map physical Rx queue to port's logical Rx queue */
3567 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
3570 /* Map this Rx queue to a physical queue */
3571 rxq->id = port->first_rxq + queue;
3572 rxq->port = port->id;
3573 rxq->logic_rxq = queue;
3575 port->rxqs[queue] = rxq;
3578 /* Configure Rx queue group interrupt for this port */
3579 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), CONFIG_MV_ETH_RXQ);
3581 /* Create Rx descriptor rings */
3582 for (queue = 0; queue < rxq_number; queue++) {
3583 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
3585 rxq->size = port->rx_ring_size;
3586 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
3587 rxq->time_coal = MVPP2_RX_COAL_USEC;
3590 mvpp2_ingress_disable(port);
3592 /* Port default configuration */
3593 mvpp2_defaults_set(port);
3595 /* Port's classifier configuration */
3596 mvpp2_cls_oversize_rxq_set(port);
3597 mvpp2_cls_port_config(port);
3599 /* Provide an initial Rx packet size */
3600 port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);
3602 /* Initialize pools for swf */
3603 err = mvpp2_swf_bm_pool_init(port);
3610 /* Ports initialization */
3611 static int mvpp2_port_probe(struct udevice *dev,
3612 struct mvpp2_port *port,
3615 int *next_first_rxq)
3620 const char *phy_mode_str;
3622 int priv_common_regs_num = 2;
3625 phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");
3627 dev_err(dev, "missing phy\n");
3631 phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL);
3633 phy_mode = phy_get_interface_by_name(phy_mode_str);
3634 if (phy_mode == -1) {
3635 dev_err(dev, "incorrect phy mode\n");
3639 id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
3641 dev_err(dev, "missing port-id value\n");
3645 phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0);
3649 port->first_rxq = *next_first_rxq;
3650 port->phy_node = phy_node;
3651 port->phy_interface = phy_mode;
3652 port->phyaddr = phyaddr;
3654 port->base = (void __iomem *)dev_get_addr_index(dev->parent,
3655 priv_common_regs_num
3657 if (IS_ERR(port->base))
3658 return PTR_ERR(port->base);
3660 port->tx_ring_size = MVPP2_MAX_TXD;
3661 port->rx_ring_size = MVPP2_MAX_RXD;
3663 err = mvpp2_port_init(dev, port);
3665 dev_err(dev, "failed to init port %d\n", id);
3668 mvpp2_port_power_up(port);
3670 /* Increment the first Rx queue number to be used by the next port */
3671 *next_first_rxq += CONFIG_MV_ETH_RXQ;
3672 priv->port_list[id] = port;
3676 /* Initialize decoding windows */
3677 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
3683 for (i = 0; i < 6; i++) {
3684 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
3685 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
3688 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
3693 for (i = 0; i < dram->num_cs; i++) {
3694 const struct mbus_dram_window *cs = dram->cs + i;
3696 mvpp2_write(priv, MVPP2_WIN_BASE(i),
3697 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
3698 dram->mbus_dram_target_id);
3700 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
3701 (cs->size - 1) & 0xffff0000);
3703 win_enable |= (1 << i);
3706 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
3709 /* Initialize Rx FIFOs */
3710 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
3714 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
3715 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
3716 MVPP2_RX_FIFO_PORT_DATA_SIZE);
3717 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
3718 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
3721 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
3722 MVPP2_RX_FIFO_PORT_MIN_PKT);
3723 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
3726 /* Initialize network controller common part HW */
3727 static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
3729 const struct mbus_dram_target_info *dram_target_info;
3733 /* Checks for hardware constraints (U-Boot uses only one rxq) */
3734 if ((rxq_number > MVPP2_MAX_RXQ) || (txq_number > MVPP2_MAX_TXQ)) {
3735 dev_err(dev, "invalid queue size parameter\n");
3739 /* MBUS windows configuration */
3740 dram_target_info = mvebu_mbus_dram_info();
3741 if (dram_target_info)
3742 mvpp2_conf_mbus_windows(dram_target_info, priv);
3744 /* Disable HW PHY polling */
3745 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
3746 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
3747 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
3749 /* Allocate and initialize aggregated TXQs */
3750 priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
3751 sizeof(struct mvpp2_tx_queue),
3753 if (!priv->aggr_txqs)
3756 for_each_present_cpu(i) {
3757 priv->aggr_txqs[i].id = i;
3758 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
3759 err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
3760 MVPP2_AGGR_TXQ_SIZE, i, priv);
3766 mvpp2_rx_fifo_init(priv);
3768 /* Reset Rx queue group interrupt configuration */
3769 for (i = 0; i < MVPP2_MAX_PORTS; i++)
3770 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i),
3773 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
3774 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
3776 /* Allow cache snoop when transmitting packets */
3777 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
3779 /* Buffer Manager initialization */
3780 err = mvpp2_bm_init(dev, priv);
3784 /* Parser default initialization */
3785 err = mvpp2_prs_default_init(dev, priv);
3789 /* Classifier default initialization */
3790 mvpp2_cls_init(priv);
3795 /* SMI / MDIO functions */
3797 static int smi_wait_ready(struct mvpp2 *priv)
3799 u32 timeout = MVPP2_SMI_TIMEOUT;
3802 /* wait till the SMI is not busy */
3804 /* read smi register */
3805 smi_reg = readl(priv->lms_base + MVPP2_SMI);
3806 if (timeout-- == 0) {
3807 printf("Error: SMI busy timeout\n");
3810 } while (smi_reg & MVPP2_SMI_BUSY);
3816 * mpp2_mdio_read - miiphy_read callback function.
3818 * Returns the 16-bit PHY register value, or 0xffff on error
3820 static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
3822 struct mvpp2 *priv = bus->priv;
3826 /* check parameters */
3827 if (addr > MVPP2_PHY_ADDR_MASK) {
3828 printf("Error: Invalid PHY address %d\n", addr);
3832 if (reg > MVPP2_PHY_REG_MASK) {
3833 printf("Err: Invalid register offset %d\n", reg);
3837 /* wait till the SMI is not busy */
3838 if (smi_wait_ready(priv) < 0)
3841 /* fill the phy address and register offset and read opcode */
3842 smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS)
3843 | (reg << MVPP2_SMI_REG_ADDR_OFFS)
3844 | MVPP2_SMI_OPCODE_READ;
3846 /* write the smi register */
3847 writel(smi_reg, priv->lms_base + MVPP2_SMI);
3849 /* wait till read value is ready */
3850 timeout = MVPP2_SMI_TIMEOUT;
3853 /* read smi register */
3854 smi_reg = readl(priv->lms_base + MVPP2_SMI);
3855 if (timeout-- == 0) {
3856 printf("Err: SMI read ready timeout\n");
3859 } while (!(smi_reg & MVPP2_SMI_READ_VALID));
3861 /* Wait for the data to update in the SMI register */
3862 for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++)
3865 return readl(priv->lms_base + MVPP2_SMI) & MVPP2_SMI_DATA_MASK;
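/*
 * Usage sketch (illustrative): after mdio_register() below, these
 * callbacks are reached through the generic miiphy/PHY layer, e.g.:
 *
 *	int bmsr = bus->read(bus, phyaddr, MDIO_DEVAD_NONE, MII_BMSR);
 *
 * where "phyaddr" is the PHY address from the device tree and
 * MII_BMSR is the standard PHY status register.
 */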
3869 * mpp2_mdio_write - miiphy_write callback function.
3871 * Returns 0 if the write succeeds, -EINVAL on bad parameters
3874 static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
3877 struct mvpp2 *priv = bus->priv;
3880 /* check parameters */
3881 if (addr > MVPP2_PHY_ADDR_MASK) {
3882 printf("Error: Invalid PHY address %d\n", addr);
3886 if (reg > MVPP2_PHY_REG_MASK) {
3887 printf("Err: Invalid register offset %d\n", reg);
3891 /* wait till the SMI is not busy */
3892 if (smi_wait_ready(priv) < 0)
3895 /* fill the phy addr and reg offset and write opcode and data */
3896 smi_reg = value << MVPP2_SMI_DATA_OFFS;
3897 smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS)
3898 | (reg << MVPP2_SMI_REG_ADDR_OFFS);
3899 smi_reg &= ~MVPP2_SMI_OPCODE_READ;
3901 /* write the smi register */
3902 writel(smi_reg, priv->lms_base + MVPP2_SMI);
3907 static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
3909 struct mvpp2_port *port = dev_get_priv(dev);
3910 struct mvpp2_rx_desc *rx_desc;
3911 struct mvpp2_bm_pool *bm_pool;
3912 dma_addr_t dma_addr;
3914 int pool, rx_bytes, err;
3916 struct mvpp2_rx_queue *rxq;
3917 u32 cause_rx_tx, cause_rx, cause_misc;
3920 cause_rx_tx = mvpp2_read(port->priv,
3921 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
3922 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
3923 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
3924 if (!cause_rx_tx && !cause_misc)
3927 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
3929 /* Process RX packets */
3930 cause_rx |= port->pending_cause_rx;
3931 rxq = mvpp2_get_rx_queue(port, cause_rx);
3933 /* Get the number of received packets and clamp the to-do count */
3934 rx_received = mvpp2_rxq_received(port, rxq->id);
3936 /* Return if no packets are received */
3940 rx_desc = mvpp2_rxq_next_desc_get(rxq);
3941 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
3942 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
3943 rx_bytes -= MVPP2_MH_SIZE;
3944 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3946 bm = mvpp2_bm_cookie_build(port, rx_desc);
3947 pool = mvpp2_bm_cookie_pool_get(bm);
3948 bm_pool = &port->priv->bm_pools[pool];
3950 /* In case of an error, release the requested buffer pointer
3951 * to the Buffer Manager. This request process is controlled
3952 * by the hardware, and the information about the buffer is
3953 * contained in the RX descriptor.
3955 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
3956 mvpp2_rx_error(port, rx_desc);
3957 /* Return the buffer to the pool */
3958 mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
3962 err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
3964 netdev_err(port->dev, "failed to refill BM pools\n");
3968 /* Update Rx queue management counters */
3970 mvpp2_rxq_status_update(port, rxq->id, 1, 1);
3972 /* give packet to stack - skip the first bytes (MH + padding) */
3973 data = (u8 *)dma_addr + 2 + 32;
3979 * No cache invalidation needed here, since the rx_buffers are
3980 * located in an uncached memory region
3988 static void mvpp2_txq_drain(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
3993 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3994 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
3996 val |= MVPP2_TXQ_DRAIN_EN_MASK;
3998 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3999 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4002 static int mvpp2_send(struct udevice *dev, void *packet, int length)
4004 struct mvpp2_port *port = dev_get_priv(dev);
4005 struct mvpp2_tx_queue *txq, *aggr_txq;
4006 struct mvpp2_tx_desc *tx_desc;
4010 txq = port->txqs[0];
4011 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
4013 /* Get a descriptor for the first part of the packet */
4014 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4015 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4016 mvpp2_txdesc_size_set(port, tx_desc, length);
4017 mvpp2_txdesc_offset_set(port, tx_desc,
4018 (dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
4019 mvpp2_txdesc_dma_addr_set(port, tx_desc,
4020 (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
4021 /* First and Last descriptor */
4022 mvpp2_txdesc_cmd_set(port, tx_desc,
4023 MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
4024 | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);
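/*
 * The buffer address is split around MVPP2_TX_DESC_ALIGN above: the
 * low bits go into the descriptor's offset field, the aligned
 * remainder into its address field. E.g. assuming a 0x3f alignment
 * mask, a packet at 0x10000042 is programmed as address 0x10000040
 * with offset 0x2.
 */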
4027 flush_dcache_range((unsigned long)packet,
4028 (unsigned long)packet + ALIGN(length, PKTALIGN));
4030 /* Enable transmit */
4032 mvpp2_aggr_txq_pend_desc_add(port, 1);
4034 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4038 if (timeout++ > 10000) {
4039 printf("timeout: packet not sent from aggregated to phys TXQ\n");
4042 tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
4045 /* Enable TXQ drain */
4046 mvpp2_txq_drain(port, txq, 1);
4050 if (timeout++ > 10000) {
4051 printf("timeout: packet not sent\n");
4054 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4057 /* Disable TXQ drain */
4058 mvpp2_txq_drain(port, txq, 0);
4063 static int mvpp2_start(struct udevice *dev)
4065 struct eth_pdata *pdata = dev_get_platdata(dev);
4066 struct mvpp2_port *port = dev_get_priv(dev);
4068 /* Load current MAC address */
4069 memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);
4071 /* Reconfigure parser to accept the original MAC address */
4072 mvpp2_prs_update_mac_da(port, port->dev_addr);
4074 mvpp2_port_power_up(port);
4076 mvpp2_open(dev, port);
4081 static void mvpp2_stop(struct udevice *dev)
4083 struct mvpp2_port *port = dev_get_priv(dev);
4085 mvpp2_stop_dev(port);
4086 mvpp2_cleanup_rxqs(port);
4087 mvpp2_cleanup_txqs(port);
4090 static int mvpp2_probe(struct udevice *dev)
4092 struct mvpp2_port *port = dev_get_priv(dev);
4093 struct mvpp2 *priv = dev_get_priv(dev->parent);
4096 /* Initialize network controller */
4097 err = mvpp2_init(dev, priv);
4099 dev_err(dev, "failed to initialize controller\n");
4103 return mvpp2_port_probe(dev, port, dev_of_offset(dev), priv,
4104 &buffer_loc.first_rxq);
4107 static const struct eth_ops mvpp2_ops = {
4108 .start = mvpp2_start,
4114 static struct driver mvpp2_driver = {
4117 .probe = mvpp2_probe,
4119 .priv_auto_alloc_size = sizeof(struct mvpp2_port),
4120 .platdata_auto_alloc_size = sizeof(struct eth_pdata),
4124 * Use a MISC device to bind the n instances (child nodes) of the
4125 * network base controller in UCLASS_ETH.
4127 static int mvpp2_base_probe(struct udevice *dev)
4129 struct mvpp2 *priv = dev_get_priv(dev);
4130 struct mii_dev *bus;
4136 * U-Boot special buffer handling:
4138 * Allocate buffer area for descs and rx_buffers. This is only
4139 * done once for all interfaces, as only one interface can be
4140 * active at a time. Make this area DMA-safe by disabling the D-cache.
4143 /* Align buffer area for descs and rx_buffers to 1MiB */
4144 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
4145 mmu_set_region_dcache_behaviour((unsigned long)bd_space,
4146 BD_SPACE, DCACHE_OFF);
4148 buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
4149 size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;
4151 buffer_loc.tx_descs =
4152 (struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
4153 size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;
4155 buffer_loc.rx_descs =
4156 (struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
4157 size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;
4159 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
4160 buffer_loc.bm_pool[i] =
4161 (unsigned long *)((unsigned long)bd_space + size);
4162 size += MVPP2_BM_POOL_SIZE_MAX * sizeof(u32);
4165 for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
4166 buffer_loc.rx_buffer[i] =
4167 (unsigned long *)((unsigned long)bd_space + size);
4168 size += RX_BUFFER_SIZE;
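/*
 * The resulting bd_space layout, in allocation order: aggregated TX
 * descriptors, the TX and RX descriptor rings, the BM pool arrays and
 * finally the RX buffers. Keeping all of it in one uncached region is
 * what makes the "no cache invalidation needed" shortcut in
 * mvpp2_recv() valid.
 */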
4171 /* Save base addresses for later use */
4172 priv->base = (void *)dev_get_addr_index(dev, 0);
4173 if (IS_ERR(priv->base))
4174 return PTR_ERR(priv->base);
4176 priv->lms_base = (void *)dev_get_addr_index(dev, 1);
4177 if (IS_ERR(priv->lms_base))
4178 return PTR_ERR(priv->lms_base);
4180 /* Finally create and register the MDIO bus driver */
4183 printf("Failed to allocate MDIO bus\n");
4187 bus->read = mpp2_mdio_read;
4188 bus->write = mpp2_mdio_write;
4189 snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
4190 bus->priv = (void *)priv;
4193 return mdio_register(bus);
4196 static int mvpp2_base_bind(struct udevice *parent)
4198 const void *blob = gd->fdt_blob;
4199 int node = dev_of_offset(parent);
4200 struct uclass_driver *drv;
4201 struct udevice *dev;
4202 struct eth_pdata *plat;
4207 /* Lookup eth driver */
4208 drv = lists_uclass_lookup(UCLASS_ETH);
4210 puts("Cannot find eth driver\n");
4214 fdt_for_each_subnode(subnode, blob, node) {
4215 /* Skip disabled ports */
4216 if (!fdtdec_get_is_enabled(blob, subnode))
4219 plat = calloc(1, sizeof(*plat));
4223 id = fdtdec_get_int(blob, subnode, "port-id", -1);
4225 name = calloc(1, 16);
4226 sprintf(name, "mvpp2-%d", id);
4228 /* Create child device UCLASS_ETH and bind it */
4229 device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev);
4230 dev_set_of_offset(dev, subnode);
4236 static const struct udevice_id mvpp2_ids[] = {
4237 { .compatible = "marvell,armada-375-pp2" },
4241 U_BOOT_DRIVER(mvpp2_base) = {
4242 .name = "mvpp2_base",
4244 .of_match = mvpp2_ids,
4245 .bind = mvpp2_base_bind,
4246 .probe = mvpp2_base_probe,
4247 .priv_auto_alloc_size = sizeof(struct mvpp2),