target/linux/ipq806x/patches-4.9/710-net-add-qualcomm-essedma-ethernet-driver.patch
1 From 12e9319da1adacac92930c899c99f0e1970cac11 Mon Sep 17 00:00:00 2001
2 From: Christian Lamparter <chunkeey@googlemail.com>
3 Date: Thu, 19 Jan 2017 02:01:31 +0100
4 Subject: [PATCH 33/38] NET: add qualcomm essedma ethernet driver
5
6 Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
7 ---
8  drivers/net/ethernet/qualcomm/Kconfig  | 9 +++++++++
9  drivers/net/ethernet/qualcomm/Makefile | 1 +
10  2 files changed, 10 insertions(+)
11
12 --- a/drivers/net/ethernet/qualcomm/Kconfig
13 +++ b/drivers/net/ethernet/qualcomm/Kconfig
14 @@ -37,4 +37,13 @@ config QCOM_EMAC
15           low power, Receive-Side Scaling (RSS), and IEEE 1588-2008
16           Precision Clock Synchronization Protocol.
17  
18 +config ESSEDMA
19 +       tristate "Qualcomm Atheros ESS Edma support"
20 +       ---help---
21 +         This driver supports the Qualcomm Atheros ESS EDMA ethernet adapter.
22 +         Say Y to build this driver.
23 +
24 +         To compile this driver as a module, choose M here. The module
25 +         will be called essedma.ko.
26 +
27  endif # NET_VENDOR_QUALCOMM
28 --- a/drivers/net/ethernet/qualcomm/Makefile
29 +++ b/drivers/net/ethernet/qualcomm/Makefile
30 @@ -6,3 +6,4 @@ obj-$(CONFIG_QCA7000) += qcaspi.o
31  qcaspi-objs := qca_spi.o qca_framing.o qca_7k.o qca_debug.o
32  
33  obj-y += emac/
34 +obj-$(CONFIG_ESSEDMA) += essedma/
35 --- /dev/null
36 +++ b/drivers/net/ethernet/qualcomm/essedma/Makefile
37 @@ -0,0 +1,9 @@
38 +#
39 +## Makefile for the Qualcomm Atheros ethernet edma driver
40 +#
41 +
42 +
43 +obj-$(CONFIG_ESSEDMA) += essedma.o
44 +
45 +essedma-objs := edma_axi.o edma.o edma_ethtool.o
46 +
47 --- /dev/null
48 +++ b/drivers/net/ethernet/qualcomm/essedma/edma.c
49 @@ -0,0 +1,2168 @@
50 +/*
51 + * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
52 + *
53 + * Permission to use, copy, modify, and/or distribute this software for
54 + * any purpose with or without fee is hereby granted, provided that the
55 + * above copyright notice and this permission notice appear in all copies.
56 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
57 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
58 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
59 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
60 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
61 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
62 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
63 + */
64 +
65 +#include <linux/platform_device.h>
66 +#include <linux/if_vlan.h>
67 +#include "ess_edma.h"
68 +#include "edma.h"
69 +
70 +extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
71 +bool edma_stp_rstp;
72 +u16 edma_ath_eth_type;
73 +
74 +/* edma_skb_priority_offset()
75 + *     get edma skb priority
76 + */
77 +static unsigned int edma_skb_priority_offset(struct sk_buff *skb)
78 +{
79 +       return (skb->priority >> 2) & 1;
80 +}
81 +
82 +/* edma_alloc_tx_ring()
83 + *     Allocate Tx descriptors ring
84 + */
85 +static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo,
86 +                             struct edma_tx_desc_ring *etdr)
87 +{
88 +       struct platform_device *pdev = edma_cinfo->pdev;
89 +
90 +       /* Initialize ring */
91 +       etdr->size = sizeof(struct edma_sw_desc) * etdr->count;
92 +       etdr->sw_next_to_fill = 0;
93 +       etdr->sw_next_to_clean = 0;
94 +
95 +       /* Allocate SW descriptors */
96 +       etdr->sw_desc = vzalloc(etdr->size);
97 +       if (!etdr->sw_desc) {
98 +               dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr);
99 +               return -ENOMEM;
100 +       }
101 +
102 +       /* Allocate HW descriptors */
103 +       etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma,
104 +                                         GFP_KERNEL);
105 +       if (!etdr->hw_desc) {
106 +               dev_err(&pdev->dev, "descriptor allocation for tx ring failed");
107 +               vfree(etdr->sw_desc);
108 +               return -ENOMEM;
109 +       }
110 +
111 +       return 0;
112 +}
113 +
114 +/* edma_free_tx_ring()
115 + *     Free tx ring allocated by edma_alloc_tx_ring
116 + */
117 +static void edma_free_tx_ring(struct edma_common_info *edma_cinfo,
118 +                             struct edma_tx_desc_ring *etdr)
119 +{
120 +       struct platform_device *pdev = edma_cinfo->pdev;
121 +
122 +       if (likely(etdr->dma))
123 +               dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc,
124 +                                etdr->dma);
125 +
126 +       vfree(etdr->sw_desc);
127 +       etdr->sw_desc = NULL;
128 +}
129 +
130 +/* edma_alloc_rx_ring()
131 + *     allocate rx descriptor ring
132 + */
133 +static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo,
134 +                            struct edma_rfd_desc_ring *erxd)
135 +{
136 +       struct platform_device *pdev = edma_cinfo->pdev;
137 +
138 +       erxd->size = sizeof(struct edma_sw_desc) * erxd->count;
139 +       erxd->sw_next_to_fill = 0;
140 +       erxd->sw_next_to_clean = 0;
141 +
142 +       /* Allocate SW descriptors */
143 +       erxd->sw_desc = vzalloc(erxd->size);
144 +       if (!erxd->sw_desc)
145 +               return -ENOMEM;
146 +
147 +       /* Alloc HW descriptors */
148 +       erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma,
149 +                       GFP_KERNEL);
150 +       if (!erxd->hw_desc) {
151 +               vfree(erxd->sw_desc);
152 +               return -ENOMEM;
153 +       }
154 +
155 +       return 0;
156 +}
157 +
158 +/* edma_free_rx_ring()
159 + *     Free rx ring allocated by edma_alloc_rx_ring
160 + */
161 +static void edma_free_rx_ring(struct edma_common_info *edma_cinfo,
162 +                            struct edma_rfd_desc_ring *rxdr)
163 +{
164 +       struct platform_device *pdev = edma_cinfo->pdev;
165 +
166 +       if (likely(rxdr->dma))
167 +               dma_free_coherent(&pdev->dev, rxdr->size, rxdr->hw_desc,
168 +                                rxdr->dma);
169 +
170 +       vfree(rxdr->sw_desc);
171 +       rxdr->sw_desc = NULL;
172 +}
173 +
174 +/* edma_configure_tx()
175 + *     Configure transmission control data
176 + */
177 +static void edma_configure_tx(struct edma_common_info *edma_cinfo)
178 +{
179 +       u32 txq_ctrl_data;
180 +
181 +       txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
182 +       txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN;
183 +       txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
184 +       edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data);
185 +}
186 +
187 +
188 +/* edma_configure_rx()
189 + *     configure reception control data
190 + */
191 +static void edma_configure_rx(struct edma_common_info *edma_cinfo)
192 +{
193 +       struct edma_hw *hw = &edma_cinfo->hw;
194 +       u32 rss_type, rx_desc1, rxq_ctrl_data;
195 +
196 +       /* Set RSS type */
197 +       rss_type = hw->rss_type;
198 +       edma_write_reg(EDMA_REG_RSS_TYPE, rss_type);
199 +
200 +       /* Set RFD burst number */
201 +       rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);
202 +
203 +       /* Set RFD prefetch threshold */
204 +       rx_desc1 |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);
205 +
206 +       /* Set RFD in host ring low threshold to generate interrupt */
207 +       rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
208 +       edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1);
209 +
210 +       /* Set Rx FIFO threshold to start to DMA data to host */
211 +       rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE;
212 +
213 +       /* Set RX remove vlan bit */
214 +       rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN;
215 +
216 +       edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data);
217 +}
218 +
219 +/* edma_alloc_rx_buf()
220 + *     does skb allocation for the received packets.
221 + */
222 +static int edma_alloc_rx_buf(struct edma_common_info
223 +                            *edma_cinfo,
224 +                            struct edma_rfd_desc_ring *erdr,
225 +                            int cleaned_count, int queue_id)
226 +{
227 +       struct platform_device *pdev = edma_cinfo->pdev;
228 +       struct edma_rx_free_desc *rx_desc;
229 +       struct edma_sw_desc *sw_desc;
230 +       struct sk_buff *skb;
231 +       unsigned int i;
232 +       u16 prod_idx, length;
233 +       u32 reg_data;
234 +
235 +       if (cleaned_count > erdr->count) {
236 +               dev_err(&pdev->dev, "Incorrect cleaned_count %d",
237 +                      cleaned_count);
238 +               return -1;
239 +       }
240 +
241 +       i = erdr->sw_next_to_fill;
242 +
243 +       while (cleaned_count) {
244 +               sw_desc = &erdr->sw_desc[i];
245 +               length = edma_cinfo->rx_head_buffer_len;
246 +
247 +               if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) {
248 +                       skb = sw_desc->skb;
249 +               } else {
250 +                       /* alloc skb */
251 +                       skb = netdev_alloc_skb(edma_netdev[0], length);
252 +                       if (!skb) {
253 +                               /* Better luck next round */
254 +                               break;
255 +                       }
256 +               }
257 +
258 +               if (edma_cinfo->page_mode) {
259 +                       struct page *pg = alloc_page(GFP_ATOMIC);
260 +
261 +                       if (!pg) {
262 +                               dev_kfree_skb_any(skb);
263 +                               break;
264 +                       }
265 +
266 +                       sw_desc->dma = dma_map_page(&pdev->dev, pg, 0,
267 +                                                  edma_cinfo->rx_page_buffer_len,
268 +                                                  DMA_FROM_DEVICE);
269 +                       if (dma_mapping_error(&pdev->dev,
270 +                                   sw_desc->dma)) {
271 +                               __free_page(pg);
272 +                               dev_kfree_skb_any(skb);
273 +                               break;
274 +                       }
275 +
276 +                       skb_fill_page_desc(skb, 0, pg, 0,
277 +                                          edma_cinfo->rx_page_buffer_len);
278 +                       sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG;
279 +                       sw_desc->length = edma_cinfo->rx_page_buffer_len;
280 +               } else {
281 +                       sw_desc->dma = dma_map_single(&pdev->dev, skb->data,
282 +                                                    length, DMA_FROM_DEVICE);
283 +                       if (dma_mapping_error(&pdev->dev,
284 +                          sw_desc->dma)) {
285 +                               dev_kfree_skb_any(skb);
286 +                               break;
287 +                       }
288 +
289 +                       sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD;
290 +                       sw_desc->length = length;
291 +               }
292 +
293 +               /* Update the buffer info */
294 +               sw_desc->skb = skb;
295 +               rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[i]);
296 +               rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma);
297 +               if (++i == erdr->count)
298 +                       i = 0;
299 +               cleaned_count--;
300 +       }
301 +
302 +       erdr->sw_next_to_fill = i;
303 +
304 +       if (i == 0)
305 +               prod_idx = erdr->count - 1;
306 +       else
307 +               prod_idx = i - 1;
308 +
309 +       /* Update the producer index */
310 +       edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &reg_data);
311 +       reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
312 +       reg_data |= prod_idx;
313 +       edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);
314 +       return cleaned_count;
315 +}
316 +
317 +/* edma_init_desc()
318 + *     update descriptor ring size, buffer and producer/consumer index
319 + */
320 +static void edma_init_desc(struct edma_common_info *edma_cinfo)
321 +{
322 +       struct edma_rfd_desc_ring *rfd_ring;
323 +       struct edma_tx_desc_ring *etdr;
324 +       int i = 0, j = 0;
325 +       u32 data = 0;
326 +       u16 hw_cons_idx = 0;
327 +
328 +       /* Set the base address of every TPD ring. */
329 +       for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
330 +               etdr = edma_cinfo->tpd_ring[i];
331 +
332 +               /* Update descriptor ring base address */
333 +               edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma);
334 +               edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data);
335 +
336 +               /* Calculate hardware consumer index */
337 +               hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff;
338 +               etdr->sw_next_to_fill = hw_cons_idx;
339 +               etdr->sw_next_to_clean = hw_cons_idx;
340 +               data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT);
341 +               data |= hw_cons_idx;
342 +
343 +               /* update producer index */
344 +               edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data);
345 +
346 +               /* update SW consumer index register */
347 +               edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx);
348 +
349 +               /* Set TPD ring size */
350 +               edma_write_reg(EDMA_REG_TPD_RING_SIZE,
351 +                              edma_cinfo->tx_ring_count &
352 +                                   EDMA_TPD_RING_SIZE_MASK);
353 +       }
354 +
355 +       for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
356 +               rfd_ring = edma_cinfo->rfd_ring[j];
357 +               /* Update Receive Free descriptor ring base address */
358 +               edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j),
359 +                       (u32)(rfd_ring->dma));
360 +               j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
361 +       }
362 +
363 +       data = edma_cinfo->rx_head_buffer_len;
364 +       if (edma_cinfo->page_mode)
365 +               data = edma_cinfo->rx_page_buffer_len;
366 +
367 +       data &= EDMA_RX_BUF_SIZE_MASK;
368 +       data <<= EDMA_RX_BUF_SIZE_SHIFT;
369 +
370 +       /* Update RFD ring size and RX buffer size */
371 +       data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK)
372 +               << EDMA_RFD_RING_SIZE_SHIFT;
373 +
374 +       edma_write_reg(EDMA_REG_RX_DESC0, data);
375 +
376 +       /* Disable TX FIFO low watermark and high watermark */
377 +       edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0);
378 +
379 +       /* Load all of the base addresses above */
380 +       edma_read_reg(EDMA_REG_TX_SRAM_PART, &data);
381 +       data |= 1 << EDMA_LOAD_PTR_SHIFT;
382 +       edma_write_reg(EDMA_REG_TX_SRAM_PART, data);
383 +}
384 +
385 +/* edma_receive_checksum
386 + *     API to check the checksum of received packets
387 + */
388 +static void edma_receive_checksum(struct edma_rx_return_desc *rd,
389 +                                                struct sk_buff *skb)
390 +{
391 +       skb_checksum_none_assert(skb);
392 +
393 +       /* check the RRD IP/L4 checksum bit to see if
394 +        * it is set, which in turn indicates checksum
395 +        * failure.
396 +        */
397 +       if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK)
398 +               return;
399 +
400 +       skb->ip_summed = CHECKSUM_UNNECESSARY;
401 +}
402 +
403 +/* edma_clean_rfd()
404 + *     clean up rx resources on error
405 + */
406 +static void edma_clean_rfd(struct edma_rfd_desc_ring *erdr, u16 index)
407 +{
408 +       struct edma_rx_free_desc *rx_desc;
409 +       struct edma_sw_desc *sw_desc;
410 +
411 +       rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[index]);
412 +       sw_desc = &erdr->sw_desc[index];
413 +       if (sw_desc->skb) {
414 +               dev_kfree_skb_any(sw_desc->skb);
415 +               sw_desc->skb = NULL;
416 +       }
417 +
418 +       memset(rx_desc, 0, sizeof(struct edma_rx_free_desc));
419 +}
420 +
421 +/* edma_rx_complete_stp_rstp()
422 + *     Complete Rx processing for STP/RSTP packets
423 + */
424 +static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd)
425 +{
426 +       int i;
427 +       u32 priority;
428 +       u16 port_type;
429 +       u8 mac_addr[EDMA_ETH_HDR_LEN];
430 +
431 +       port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT)
432 +                               & EDMA_RRD_PORT_TYPE_MASK;
433 +       /* proceed with the stp/rstp calculation only if
434 +        * the port type is 0x4
435 +        */
436 +       if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) {
437 +               u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
438 +
439 +               /* calculate the frame priority */
440 +               priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
441 +                       & EDMA_RRD_PRIORITY_MASK;
442 +
443 +               for (i = 0; i < EDMA_ETH_HDR_LEN; i++)
444 +                       mac_addr[i] = skb->data[i];
445 +
446 +               /* Check if destination mac addr is bpdu addr */
447 +               if (!memcmp(mac_addr, bpdu_mac, 6)) {
448 +                       /* the destination mac address is the
449 +                        * BPDU address, so add the atheros
450 +                        * header to the packet.
451 +                        */
452 +                       u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) |
453 +                               (priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) |
454 +                               (EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id;
455 +                       skb_push(skb, 4);
456 +                       memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
457 +                       *(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type);
458 +                       *(uint16_t *)&skb->data[14] = htons(athr_hdr);
459 +               }
460 +       }
461 +}
462 +
463 +/*
464 + * edma_rx_complete_fraglist()
465 + *     Complete Rx processing for fraglist skbs
466 + */
467 +static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
468 +                                       u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
469 +{
470 +       struct platform_device *pdev = edma_cinfo->pdev;
471 +       struct edma_hw *hw = &edma_cinfo->hw;
472 +       struct sk_buff *skb_temp;
473 +       struct edma_sw_desc *sw_desc;
474 +       int i;
475 +       u16 size_remaining;
476 +
477 +       skb->data_len = 0;
478 +       skb->tail += (hw->rx_head_buff_size - 16);
479 +       skb->len = skb->truesize = length;
480 +       size_remaining = length - (hw->rx_head_buff_size - 16);
481 +
482 +       /* clean-up all related sw_descs */
483 +       for (i = 1; i < num_rfds; i++) {
484 +               struct sk_buff *skb_prev;
485 +               sw_desc = &erdr->sw_desc[sw_next_to_clean];
486 +               skb_temp = sw_desc->skb;
487 +
488 +               dma_unmap_single(&pdev->dev, sw_desc->dma,
489 +                       sw_desc->length, DMA_FROM_DEVICE);
490 +
491 +               if (size_remaining < hw->rx_head_buff_size)
492 +                       skb_put(skb_temp, size_remaining);
493 +               else
494 +                       skb_put(skb_temp, hw->rx_head_buff_size);
495 +
496 +               /*
497 +                * If we are processing the first rfd, we link
498 +                * skb->frag_list to the skb corresponding to the
499 +                * first RFD
500 +                */
501 +               if (i == 1)
502 +                       skb_shinfo(skb)->frag_list = skb_temp;
503 +               else
504 +                       skb_prev->next = skb_temp;
505 +               skb_prev = skb_temp;
506 +               skb_temp->next = NULL;
507 +
508 +               skb->data_len += skb_temp->len;
509 +               size_remaining -= skb_temp->len;
510 +
511 +               /* Increment SW index */
512 +               sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
513 +               (*cleaned_count)++;
514 +       }
515 +
516 +       return sw_next_to_clean;
517 +}
518 +
519 +/* edma_rx_complete_paged()
520 + *     Complete Rx processing for paged skbs
521 + */
522 +static int edma_rx_complete_paged(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
523 +                                       u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
524 +{
525 +       struct platform_device *pdev = edma_cinfo->pdev;
526 +       struct sk_buff *skb_temp;
527 +       struct edma_sw_desc *sw_desc;
528 +       int i;
529 +       u16 size_remaining;
530 +
531 +       skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
532 +
533 +       /* Setup skbuff fields */
534 +       skb->len = length;
535 +
536 +       if (likely(num_rfds <= 1)) {
537 +               skb->data_len = length;
538 +               skb->truesize += edma_cinfo->rx_page_buffer_len;
539 +               skb_fill_page_desc(skb, 0, skb_frag_page(frag),
540 +                               16, length);
541 +       } else {
542 +               frag->size -= 16;
543 +               skb->data_len = frag->size;
544 +               skb->truesize += edma_cinfo->rx_page_buffer_len;
545 +               size_remaining = length - frag->size;
546 +
547 +               skb_fill_page_desc(skb, 0, skb_frag_page(frag),
548 +                               16, frag->size);
549 +
550 +               /* clean-up all related sw_descs */
551 +               for (i = 1; i < num_rfds; i++) {
552 +                       sw_desc = &erdr->sw_desc[sw_next_to_clean];
553 +                       skb_temp = sw_desc->skb;
554 +                       frag = &skb_shinfo(skb_temp)->frags[0];
555 +                       dma_unmap_page(&pdev->dev, sw_desc->dma,
556 +                               sw_desc->length, DMA_FROM_DEVICE);
557 +
558 +                       if (size_remaining < edma_cinfo->rx_page_buffer_len)
559 +                               frag->size = size_remaining;
560 +
561 +                       skb_fill_page_desc(skb, i, skb_frag_page(frag),
562 +                                       0, frag->size);
563 +
564 +                       skb_shinfo(skb_temp)->nr_frags = 0;
565 +                       dev_kfree_skb_any(skb_temp);
566 +
567 +                       skb->data_len += frag->size;
568 +                       skb->truesize += edma_cinfo->rx_page_buffer_len;
569 +                       size_remaining -= frag->size;
570 +
571 +                       /* Increment SW index */
572 +                       sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
573 +                       (*cleaned_count)++;
574 +               }
575 +       }
576 +
577 +       return sw_next_to_clean;
578 +}
579 +
580 +/*
581 + * edma_rx_complete()
582 + *     Main API called from the poll function to process rx packets.
583 + */
584 +static void edma_rx_complete(struct edma_common_info *edma_cinfo,
585 +                           int *work_done, int work_to_do, int queue_id,
586 +                           struct napi_struct *napi)
587 +{
588 +       struct platform_device *pdev = edma_cinfo->pdev;
589 +       struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id];
590 +       struct net_device *netdev;
591 +       struct edma_adapter *adapter;
592 +       struct edma_sw_desc *sw_desc;
593 +       struct sk_buff *skb;
594 +       struct edma_rx_return_desc *rd;
595 +       u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1,
596 +           sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0;
597 +       u32 data = 0;
598 +       u8 *vaddr;
599 +       int port_id, i, drop_count = 0;
600 +       u32 priority;
601 +       u16 count = erdr->count, rfd_avail;
602 +       u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};
603 +
604 +       sw_next_to_clean = erdr->sw_next_to_clean;
605 +
606 +       edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
607 +       hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
608 +                          EDMA_RFD_CONS_IDX_MASK;
609 +
610 +       do {
611 +               while (sw_next_to_clean != hw_next_to_clean) {
612 +                       if (!work_to_do)
613 +                               break;
614 +
615 +                       sw_desc = &erdr->sw_desc[sw_next_to_clean];
616 +                       skb = sw_desc->skb;
617 +
618 +                       /* Unmap the allocated buffer */
619 +                       if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD))
620 +                               dma_unmap_single(&pdev->dev, sw_desc->dma,
621 +                                               sw_desc->length, DMA_FROM_DEVICE);
622 +                       else
623 +                               dma_unmap_page(&pdev->dev, sw_desc->dma,
624 +                                             sw_desc->length, DMA_FROM_DEVICE);
625 +
626 +                       /* Get RRD */
627 +                       if (edma_cinfo->page_mode) {
628 +                               vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0]));
629 +                               memcpy((uint8_t *)&rrd[0], vaddr, 16);
630 +                               rd = (struct edma_rx_return_desc *)rrd;
631 +                               kunmap_atomic(vaddr);
632 +                       } else {
633 +                               rd = (struct edma_rx_return_desc *)skb->data;
634 +                       }
635 +
636 +                       /* Check if RRD is valid */
637 +                       if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) {
638 +                               edma_clean_rfd(erdr, sw_next_to_clean);
639 +                               sw_next_to_clean = (sw_next_to_clean + 1) &
640 +                                                  (erdr->count - 1);
641 +                               cleaned_count++;
642 +                               continue;
643 +                       }
644 +
645 +                       /* Get the number of RFDs from RRD */
646 +                       num_rfds = rd->rrd1 & EDMA_RRD_NUM_RFD_MASK;
647 +
648 +                       /* Get Rx port ID from switch */
649 +                       port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK;
650 +                       if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) {
651 +                               dev_err(&pdev->dev, "Invalid RRD source port bit set");
652 +                               for (i = 0; i < num_rfds; i++) {
653 +                                       edma_clean_rfd(erdr, sw_next_to_clean);
654 +                                       sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
655 +                                       cleaned_count++;
656 +                               }
657 +                               continue;
658 +                       }
659 +
660 +                       /* check if we have a sink for the data we receive.
661 +                        * If the interface isn't setup, we have to drop the
662 +                        * incoming data for now.
663 +                        */
664 +                       netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id];
665 +                       if (!netdev) {
666 +                               edma_clean_rfd(erdr, sw_next_to_clean);
667 +                               sw_next_to_clean = (sw_next_to_clean + 1) &
668 +                                                  (erdr->count - 1);
669 +                               cleaned_count++;
670 +                               continue;
671 +                       }
672 +                       adapter = netdev_priv(netdev);
673 +
674 +                       /* This code is added to handle a use case where high
675 +                        * priority stream and a low priority stream are
676 +                        * received simultaneously on DUT. The problem occurs
677 +                        * if one of the  Rx rings is full and the corresponding
678 +                        * core is busy with other stuff. This causes ESS CPU
679 +                        * port to backpressure all incoming traffic including
680 +                        * high priority one. We monitor free descriptor count
681 +                        * on each CPU and whenever it reaches threshold (< 80),
682 +                        * we drop all low priority traffic and let only high
683 +                        * priority traffic pass through. We can hence avoid
684 +                        * the ESS CPU port sending backpressure on the high priority
685 +                        * stream.
686 +                        */
687 +                       priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
688 +                               & EDMA_RRD_PRIORITY_MASK;
689 +                       if (likely(!priority && !edma_cinfo->page_mode && (num_rfds <= 1))) {
690 +                               rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1);
691 +                               if (rfd_avail < EDMA_RFD_AVAIL_THR) {
692 +                                       sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_REUSE;
693 +                                       sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
694 +                                       adapter->stats.rx_dropped++;
695 +                                       cleaned_count++;
696 +                                       drop_count++;
697 +                                       if (drop_count == 3) {
698 +                                               work_to_do--;
699 +                                               (*work_done)++;
700 +                                               drop_count = 0;
701 +                                       }
702 +                                       if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
703 +                                               /* If buffer clean count reaches 16, we replenish HW buffers. */
704 +                                               ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
705 +                                               edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
706 +                                                             sw_next_to_clean);
707 +                                               cleaned_count = ret_count;
708 +                                       }
709 +                                       continue;
710 +                               }
711 +                       }
712 +
713 +                       work_to_do--;
714 +                       (*work_done)++;
715 +
716 +                       /* Increment SW index */
717 +                       sw_next_to_clean = (sw_next_to_clean + 1) &
718 +                                          (erdr->count - 1);
719 +
720 +                       cleaned_count++;
721 +
722 +                       /* Get the packet size and allocate buffer */
723 +                       length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;
724 +
725 +                       if (edma_cinfo->page_mode) {
726 +                               /* paged skb */
727 +                               sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
728 +                               if (!pskb_may_pull(skb, ETH_HLEN)) {
729 +                                       dev_kfree_skb_any(skb);
730 +                                       continue;
731 +                               }
732 +                       } else {
733 +                               /* single or fraglist skb */
734 +
735 +                               /* A 16 byte offset is required, as the first
736 +                                * 16 bytes of the packet are the rrd descriptor,
737 +                                * so the actual data starts at an offset of 16.
738 +                                */
739 +                               skb_reserve(skb, 16);
740 +                               if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode)) {
741 +                                       skb_put(skb, length);
742 +                               } else {
743 +                                       sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
744 +                               }
745 +                       }
746 +
747 +                       if (edma_stp_rstp) {
748 +                               edma_rx_complete_stp_rstp(skb, port_id, rd);
749 +                       }
750 +
751 +                       skb->protocol = eth_type_trans(skb, netdev);
752 +
753 +                       /* Record Rx queue for RFS/RPS and fill flow hash from HW */
754 +                       skb_record_rx_queue(skb, queue_to_rxid[queue_id]);
755 +                       if (netdev->features & NETIF_F_RXHASH) {
756 +                               hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT);
757 +                               if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END))
758 +                                       skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4);
759 +                       }
760 +
761 +#ifdef CONFIG_NF_FLOW_COOKIE
762 +                       skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
763 +#endif
764 +                       edma_receive_checksum(rd, skb);
765 +
766 +                       /* Process VLAN HW acceleration indication provided by HW */
767 +                       if (unlikely(adapter->default_vlan_tag != rd->rrd4)) {
768 +                               vlan = rd->rrd4;
769 +                               if (likely(rd->rrd7 & EDMA_RRD_CVLAN))
770 +                                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
771 +                               else if (rd->rrd1 & EDMA_RRD_SVLAN)
772 +                                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan);
773 +                       }
774 +
775 +                       /* Update rx statistics */
776 +                       adapter->stats.rx_packets++;
777 +                       adapter->stats.rx_bytes += length;
778 +
779 +                       /* Check if we reached refill threshold */
780 +                       if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
781 +                               ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
782 +                               edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
783 +                                             sw_next_to_clean);
784 +                               cleaned_count = ret_count;
785 +                       }
786 +
787 +                       /* At this point skb should go to stack */
788 +                       napi_gro_receive(napi, skb);
789 +               }
790 +
791 +               /* Check if we still have NAPI budget */
792 +               if (!work_to_do)
793 +                       break;
794 +
795 +               /* Read index once again since we still have NAPI budget */
796 +               edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
797 +               hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
798 +                       EDMA_RFD_CONS_IDX_MASK;
799 +       } while (hw_next_to_clean != sw_next_to_clean);
800 +
801 +       erdr->sw_next_to_clean = sw_next_to_clean;
802 +
803 +       /* Refill here in case refill threshold wasn't reached */
804 +       if (likely(cleaned_count)) {
805 +               ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
806 +               if (ret_count)
807 +                       dev_dbg(&pdev->dev, "Not all buffers was reallocated");
808 +               edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
809 +                             erdr->sw_next_to_clean);
810 +       }
811 +}
812 +
813 +/* edma_delete_rfs_filter()
814 + *     Remove RFS filter from switch
815 + */
816 +static int edma_delete_rfs_filter(struct edma_adapter *adapter,
817 +                                struct edma_rfs_filter_node *filter_node)
818 +{
819 +       int res = -1;
820 +
821 +       struct flow_keys *keys = &filter_node->keys;
822 +
823 +       if (likely(adapter->set_rfs_rule))
824 +               res = (*adapter->set_rfs_rule)(adapter->netdev,
825 +                       flow_get_u32_src(keys), flow_get_u32_dst(keys),
826 +                       keys->ports.src, keys->ports.dst,
827 +                       keys->basic.ip_proto, filter_node->rq_id, 0);
828 +
829 +       return res;
830 +}
831 +
832 +/* edma_add_rfs_filter()
833 + *     Add RFS filter to switch
834 + */
835 +static int edma_add_rfs_filter(struct edma_adapter *adapter,
836 +                              struct flow_keys *keys, u16 rq,
837 +                              struct edma_rfs_filter_node *filter_node)
838 +{
839 +       int res = -1;
840 +
841 +       struct flow_keys *dest_keys = &filter_node->keys;
842 +
843 +       memcpy(dest_keys, keys, sizeof(*dest_keys));
844 +/*
845 +       dest_keys->control = keys->control;
846 +       dest_keys->basic = keys->basic;
847 +       dest_keys->addrs = keys->addrs;
848 +       dest_keys->ports = keys->ports;
849 +       dest_keys.ip_proto = keys->ip_proto;
850 +*/
851 +       /* Call callback registered by ESS driver */
852 +       if (likely(adapter->set_rfs_rule))
853 +               res = (*adapter->set_rfs_rule)(adapter->netdev, flow_get_u32_src(keys),
854 +                     flow_get_u32_dst(keys), keys->ports.src, keys->ports.dst,
855 +                     keys->basic.ip_proto, rq, 1);
856 +
857 +       return res;
858 +}
859 +
860 +/* edma_rfs_key_search()
861 + *     Look for existing RFS entry
862 + */
863 +static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h,
864 +                                                      struct flow_keys *key)
865 +{
866 +       struct edma_rfs_filter_node *p;
867 +
868 +       hlist_for_each_entry(p, h, node)
869 +               if (flow_get_u32_src(&p->keys) == flow_get_u32_src(key) &&
870 +                   flow_get_u32_dst(&p->keys) == flow_get_u32_dst(key) &&
871 +                   p->keys.ports.src == key->ports.src &&
872 +                   p->keys.ports.dst == key->ports.dst &&
873 +                   p->keys.basic.ip_proto == key->basic.ip_proto)
874 +                       return p;
875 +       return NULL;
876 +}
877 +
878 +/* edma_initialise_rfs_flow_table()
879 + *     Initialise EDMA RFS flow table
880 + */
881 +static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter)
882 +{
883 +       int i;
884 +
885 +       spin_lock_init(&adapter->rfs.rfs_ftab_lock);
886 +
887 +       /* Initialize EDMA flow hash table */
888 +       for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++)
889 +               INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]);
890 +
891 +       adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES;
892 +       adapter->rfs.filter_available = adapter->rfs.max_num_filter;
893 +       adapter->rfs.hashtoclean = 0;
894 +
895 +       /* Add timer to get periodic RFS updates from OS */
896 +       init_timer(&adapter->rfs.expire_rfs);
897 +       adapter->rfs.expire_rfs.function = edma_flow_may_expire;
898 +       adapter->rfs.expire_rfs.data = (unsigned long)adapter;
899 +       mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
900 +}
901 +
902 +/* edma_free_rfs_flow_table()
903 + *     Free EDMA RFS flow table
904 + */
905 +static void edma_free_rfs_flow_table(struct edma_adapter *adapter)
906 +{
907 +       int i;
908 +
909 +       /* Remove sync timer */
910 +       del_timer_sync(&adapter->rfs.expire_rfs);
911 +       spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
912 +
913 +       /* Free EDMA RFS table entries */
914 +       adapter->rfs.filter_available = 0;
915 +
916 +       /* Clean-up EDMA flow hash table */
917 +       for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) {
918 +               struct hlist_head *hhead;
919 +               struct hlist_node *tmp;
920 +               struct edma_rfs_filter_node *filter_node;
921 +               int res;
922 +
923 +               hhead = &adapter->rfs.hlist_head[i];
924 +               hlist_for_each_entry_safe(filter_node, tmp, hhead, node) {
925 +                       res  = edma_delete_rfs_filter(adapter, filter_node);
926 +                       if (res < 0)
927 +                               dev_warn(&adapter->netdev->dev,
928 +                                       "EDMA going down but RFS entry %d not allowed to be flushed by Switch",
929 +                                       filter_node->flow_id);
930 +                       hlist_del(&filter_node->node);
931 +                       kfree(filter_node);
932 +               }
933 +       }
934 +       spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
935 +}
936 +
937 +/* edma_tx_unmap_and_free()
938 + *     clean TX buffer
939 + */
940 +static inline void edma_tx_unmap_and_free(struct platform_device *pdev,
941 +                                        struct edma_sw_desc *sw_desc)
942 +{
943 +       struct sk_buff *skb = sw_desc->skb;
944 +
945 +       if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) ||
946 +                       (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST)))
947 +               /* unmap_single for skb head area */
948 +               dma_unmap_single(&pdev->dev, sw_desc->dma,
949 +                               sw_desc->length, DMA_TO_DEVICE);
950 +       else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)
951 +               /* unmap page for paged fragments */
952 +               dma_unmap_page(&pdev->dev, sw_desc->dma,
953 +                             sw_desc->length, DMA_TO_DEVICE);
954 +
955 +       if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST))
956 +               dev_kfree_skb_any(skb);
957 +
958 +       sw_desc->flags = 0;
959 +}
960 +
961 +/* edma_tx_complete()
962 + *     Used to clean tx queues and update hardware and consumer index
963 + */
964 +static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id)
965 +{
966 +       struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
967 +       struct edma_sw_desc *sw_desc;
968 +       struct platform_device *pdev = edma_cinfo->pdev;
969 +       int i;
970 +
971 +       u16 sw_next_to_clean = etdr->sw_next_to_clean;
972 +       u16 hw_next_to_clean;
973 +       u32 data = 0;
974 +
975 +       edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data);
976 +       hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;
977 +
978 +       /* clean the buffer here */
979 +       while (sw_next_to_clean != hw_next_to_clean) {
980 +               sw_desc = &etdr->sw_desc[sw_next_to_clean];
981 +               edma_tx_unmap_and_free(pdev, sw_desc);
982 +               sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1);
983 +       }
984 +
985 +       etdr->sw_next_to_clean = sw_next_to_clean;
986 +
987 +       /* update the TPD consumer index register */
988 +       edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean);
989 +
990 +       /* Wake the queue if queue is stopped and netdev link is up */
991 +       for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i] ; i++) {
992 +               if (netif_tx_queue_stopped(etdr->nq[i])) {
993 +                       if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i]))
994 +                               netif_tx_wake_queue(etdr->nq[i]);
995 +               }
996 +       }
997 +}
998 +
999 +/* edma_get_tx_buffer()
1000 + *     Get sw_desc corresponding to the TPD
1001 + */
1002 +static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo,
1003 +                                              struct edma_tx_desc *tpd, int queue_id)
1004 +{
1005 +       struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1006 +       return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc];
1007 +}
1008 +
1009 +/* edma_get_next_tpd()
1010 + *     Return a TPD descriptor for transfer
1011 + */
1012 +static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo,
1013 +                                            int queue_id)
1014 +{
1015 +       struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1016 +       u16 sw_next_to_fill = etdr->sw_next_to_fill;
1017 +       struct edma_tx_desc *tpd_desc =
1018 +               (&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]);
1019 +
1020 +       etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1);
1021 +
1022 +       return tpd_desc;
1023 +}
1024 +
1025 +/* edma_tpd_available()
1026 + *     Check number of free TPDs
1027 + */
1028 +static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo,
1029 +                                   int queue_id)
1030 +{
1031 +       struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1032 +
1033 +       u16 sw_next_to_fill;
1034 +       u16 sw_next_to_clean;
1035 +       u16 count = 0;
1036 +
1037 +       sw_next_to_clean = etdr->sw_next_to_clean;
1038 +       sw_next_to_fill = etdr->sw_next_to_fill;
1039 +
1040 +       if (likely(sw_next_to_clean <= sw_next_to_fill))
1041 +               count = etdr->count;
1042 +
1043 +       return count + sw_next_to_clean - sw_next_to_fill - 1;
1044 +}
1045 +
1046 +/* edma_tx_queue_get()
1047 + *     Get the starting index of the queue
1048 + */
1049 +static inline int edma_tx_queue_get(struct edma_adapter *adapter,
1050 +                                  struct sk_buff *skb, int txq_id)
1051 +{
1052 +       /* skb->priority is used as an index to skb priority table
1053 +        * and based on packet priority, correspong queue is assigned.
1054 +        */
1055 +       return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb);
1056 +}
1057 +
1058 +/* edma_tx_update_hw_idx()
1059 + *     update the producer index for the ring transmitted
1060 + */
1061 +static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo,
1062 +                                struct sk_buff *skb, int queue_id)
1063 +{
1064 +       struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
1065 +       u32 tpd_idx_data;
1066 +
1067 +       /* Read and update the producer index */
1068 +       edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data);
1069 +       tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS;
1070 +       tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK)
1071 +               << EDMA_TPD_PROD_IDX_SHIFT;
1072 +
1073 +       edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data);
1074 +}
1075 +
1076 +/* edma_rollback_tx()
1077 + *     Function to retrieve tx resources in case of error
1078 + */
1079 +static void edma_rollback_tx(struct edma_adapter *adapter,
1080 +                           struct edma_tx_desc *start_tpd, int queue_id)
1081 +{
1082 +       struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id];
1083 +       struct edma_sw_desc *sw_desc;
1084 +       struct edma_tx_desc *tpd = NULL;
1085 +       u16 start_index, index;
1086 +
1087 +       start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc);
1088 +
1089 +       index = start_index;
1090 +       while (index != etdr->sw_next_to_fill) {
1091 +               tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]);
1092 +               sw_desc = &etdr->sw_desc[index];
1093 +               edma_tx_unmap_and_free(adapter->pdev, sw_desc);
1094 +               memset(tpd, 0, sizeof(struct edma_tx_desc));
1095 +               if (++index == etdr->count)
1096 +                       index = 0;
1097 +       }
1098 +       etdr->sw_next_to_fill = start_index;
1099 +}
1100 +
1101 +/* edma_tx_map_and_fill()
1102 + *     gets called from edma_xmit
1103 + *
1104 + * This is where the dma of the buffer to be transmitted
1105 + * gets mapped
1106 + */
1107 +static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo,
1108 +                              struct edma_adapter *adapter, struct sk_buff *skb, int queue_id,
1109 +                              unsigned int flags_transmit, u16 from_cpu, u16 dp_bitmap,
1110 +                              bool packet_is_rstp, int nr_frags)
1111 +{
1112 +       struct edma_sw_desc *sw_desc = NULL;
1113 +       struct platform_device *pdev = edma_cinfo->pdev;
1114 +       struct edma_tx_desc *tpd = NULL, *start_tpd = NULL;
1115 +       struct sk_buff *iter_skb;
1116 +       int i = 0;
1117 +       u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
1118 +       u16 buf_len, lso_desc_len = 0;
1119 +
1120 +       /* It should either be a nr_frags skb or fraglist skb but not both */
1121 +       BUG_ON(nr_frags && skb_has_frag_list(skb));
1122 +
1123 +       if (skb_is_gso(skb)) {
1124 +               /* TODO: What additional checks need to be performed here */
1125 +               if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
1126 +                       lso_word1 |= EDMA_TPD_IPV4_EN;
1127 +                       ip_hdr(skb)->check = 0;
1128 +                       tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1129 +                               ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
1130 +               } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
1131 +                       lso_word1 |= EDMA_TPD_LSO_V2_EN;
1132 +                       ipv6_hdr(skb)->payload_len = 0;
1133 +                       tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1134 +                               &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
1135 +               } else
1136 +                       return -EINVAL;
1137 +
1138 +               lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) |
1139 +                               (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT);
1140 +       } else if (flags_transmit & EDMA_HW_CHECKSUM) {
1141 +                       u8 css, cso;
1142 +                       cso = skb_checksum_start_offset(skb);
1143 +                       css = cso  + skb->csum_offset;
1144 +
1145 +                       word1 |= (EDMA_TPD_CUSTOM_CSUM_EN);
1146 +                       word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT;
1147 +                       word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT);
1148 +       }
1149 +
1150 +       if (skb->protocol == htons(ETH_P_PPP_SES))
1151 +               word1 |= EDMA_TPD_PPPOE_EN;
1152 +
1153 +       if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) {
1154 +               switch(skb->vlan_proto) {
1155 +               case htons(ETH_P_8021Q):
1156 +                       word3 |= (1 << EDMA_TX_INS_CVLAN);
1157 +                       word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT;
1158 +                       break;
1159 +               case htons(ETH_P_8021AD):
1160 +                       word1 |= (1 << EDMA_TX_INS_SVLAN);
1161 +                       svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT;
1162 +                       break;
1163 +               default:
1164 +                       dev_err(&pdev->dev, "no ctag or stag present\n");
1165 +                       goto vlan_tag_error;
1166 +               }
1167 +       } else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) {
1168 +               word3 |= (1 << EDMA_TX_INS_CVLAN);
1169 +               word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT;
1170 +       }
1171 +
1172 +       if (packet_is_rstp) {
1173 +               word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
1174 +               word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT;
1175 +       } else {
1176 +               word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT;
1177 +       }
1178 +
1179 +       buf_len = skb_headlen(skb);
1180 +
1181 +       if (lso_word1) {
1182 +               if (lso_word1 & EDMA_TPD_LSO_V2_EN) {
1183 +
1184 +                       /* IPv6 LSOv2 descriptor */
1185 +                       start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1186 +                       sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1187 +                       sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE;
1188 +
1189 +                       /* LSOv2 descriptor overrides addr field to pass length */
1190 +                       tpd->addr = cpu_to_le16(skb->len);
1191 +                       tpd->svlan_tag = svlan_tag;
1192 +                       tpd->word1 = word1 | lso_word1;
1193 +                       tpd->word3 = word3;
1194 +               }
1195 +
1196 +               tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1197 +               if (!start_tpd)
1198 +                       start_tpd = tpd;
1199 +               sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1200 +
1201 +               /* The last buffer info contains the skb address,
1202 +                * so skb will be freed after unmap
1203 +                */
1204 +               sw_desc->length = lso_desc_len;
1205 +               sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1206 +
1207 +               sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1208 +                                       skb->data, buf_len, DMA_TO_DEVICE);
1209 +               if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1210 +                       goto dma_error;
1211 +
1212 +               tpd->addr = cpu_to_le32(sw_desc->dma);
1213 +               tpd->len  = cpu_to_le16(buf_len);
1214 +
1215 +               tpd->svlan_tag = svlan_tag;
1216 +               tpd->word1 = word1 | lso_word1;
1217 +               tpd->word3 = word3;
1218 +
1219 +               /* The last buffer info contains the skb address,
1220 +                * so it will be freed after unmap
1221 +                */
1222 +               sw_desc->length = lso_desc_len;
1223 +               sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1224 +
1225 +               buf_len = 0;
1226 +       }
1227 +
1228 +       if (likely(buf_len)) {
1229 +
1230 +               /* TODO Do not dequeue descriptor if there is a potential error */
1231 +               tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1232 +
1233 +               if (!start_tpd)
1234 +                       start_tpd = tpd;
1235 +
1236 +               sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1237 +
1238 +               /* The last buffer info contains the skb address,
1239 +                * so it will be freed after unmap
1240 +                */
1241 +               sw_desc->length = buf_len;
1242 +               sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD;
1243 +               sw_desc->dma = dma_map_single(&adapter->pdev->dev,
1244 +                       skb->data, buf_len, DMA_TO_DEVICE);
1245 +               if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1246 +                       goto dma_error;
1247 +
1248 +               tpd->addr = cpu_to_le32(sw_desc->dma);
1249 +               tpd->len  = cpu_to_le16(buf_len);
1250 +
1251 +               tpd->svlan_tag = svlan_tag;
1252 +               tpd->word1 = word1 | lso_word1;
1253 +               tpd->word3 = word3;
1254 +       }
1255 +
1256 +       /* Walk through all paged fragments */
1257 +       while (nr_frags--) {
1258 +               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1259 +               buf_len = skb_frag_size(frag);
1260 +               tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1261 +               sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1262 +               sw_desc->length = buf_len;
1263 +               sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG;
1264 +
1265 +               sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE);
1266 +
1267 +               if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1268 +                       goto dma_error;
1269 +
1270 +               tpd->addr = cpu_to_le32(sw_desc->dma);
1271 +               tpd->len  = cpu_to_le16(buf_len);
1272 +
1273 +               tpd->svlan_tag = svlan_tag;
1274 +               tpd->word1 = word1 | lso_word1;
1275 +               tpd->word3 = word3;
1276 +               i++;
1277 +       }
1278 +
1279 +       /* Walk through all fraglist skbs */
1280 +       skb_walk_frags(skb, iter_skb) {
1281 +               buf_len = iter_skb->len;
1282 +               tpd = edma_get_next_tpd(edma_cinfo, queue_id);
1283 +               sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id);
1284 +               sw_desc->length = buf_len;
1285 +               sw_desc->dma =  dma_map_single(&adapter->pdev->dev,
1286 +                               iter_skb->data, buf_len, DMA_TO_DEVICE);
1287 +
1288 +               if (dma_mapping_error(&pdev->dev, sw_desc->dma))
1289 +                       goto dma_error;
1290 +
1291 +               tpd->addr = cpu_to_le32(sw_desc->dma);
1292 +               tpd->len  = cpu_to_le16(buf_len);
1293 +               tpd->svlan_tag = svlan_tag;
1294 +               tpd->word1 = word1 | lso_word1;
1295 +               tpd->word3 = word3;
1296 +               sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST;
1297 +       }
1298 +
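+       /* Mark the last tpd as end-of-packet and attach the skb only to the
+        * final sw descriptor (FLAG_LAST), so the skb is freed exactly once
+        * after all of its buffers have been unmapped.
+        */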
1299 +       if (tpd)
1300 +               tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT;
1301 +
1302 +       sw_desc->skb = skb;
1303 +       sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST;
1304 +
1305 +       return 0;
1306 +
1307 +dma_error:
1308 +       edma_rollback_tx(adapter, start_tpd, queue_id);
1309 +       dev_err(&pdev->dev, "TX DMA map failed\n");
1310 +vlan_tag_error:
1311 +       return -ENOMEM;
1312 +}
1313 +
1314 +/* edma_check_link()
1315 + *     check Link status
1316 + */
1317 +static int edma_check_link(struct edma_adapter *adapter)
1318 +{
1319 +       struct phy_device *phydev = adapter->phydev;
1320 +
1321 +       if (!(adapter->poll_required))
1322 +               return __EDMA_LINKUP;
1323 +
1324 +       if (phydev->link)
1325 +               return __EDMA_LINKUP;
1326 +
1327 +       return __EDMA_LINKDOWN;
1328 +}
1329 +
1330 +/* edma_adjust_link()
1331 + *     check for edma link status
1332 + */
1333 +void edma_adjust_link(struct net_device *netdev)
1334 +{
1335 +       int status;
1336 +       struct edma_adapter *adapter = netdev_priv(netdev);
1337 +       struct phy_device *phydev = adapter->phydev;
1338 +
1339 +       if (!test_bit(__EDMA_UP, &adapter->state_flags))
1340 +               return;
1341 +
1342 +       status = edma_check_link(adapter);
1343 +
1344 +       if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) {
1345 +               dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed);
1346 +               adapter->link_state = __EDMA_LINKUP;
1347 +               netif_carrier_on(netdev);
1348 +               if (netif_running(netdev))
1349 +                       netif_tx_wake_all_queues(netdev);
1350 +       } else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) {
1351 +               dev_info(&adapter->pdev->dev, "%s: GMAC Link is down\n", netdev->name);
1352 +               adapter->link_state = __EDMA_LINKDOWN;
1353 +               netif_carrier_off(netdev);
1354 +               netif_tx_stop_all_queues(netdev);
1355 +       }
1356 +}
1357 +
1358 +/* edma_get_stats()
1359 + *     Statistics API used to retrieve the tx/rx statistics
1360 + */
1361 +struct net_device_stats *edma_get_stats(struct net_device *netdev)
1362 +{
1363 +       struct edma_adapter *adapter = netdev_priv(netdev);
1364 +
1365 +       return &adapter->stats;
1366 +}
1367 +
1368 +/* edma_xmit()
1369 + *     Main api to be called by the core for packet transmission
1370 + */
1371 +netdev_tx_t edma_xmit(struct sk_buff *skb,
1372 +                    struct net_device *net_dev)
1373 +{
1374 +       struct edma_adapter *adapter = netdev_priv(net_dev);
1375 +       struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
1376 +       struct edma_tx_desc_ring *etdr;
1377 +       u16 from_cpu, dp_bitmap, txq_id;
1378 +       int ret, nr_frags = 0, num_tpds_needed = 1, queue_id;
1379 +       unsigned int flags_transmit = 0;
1380 +       bool packet_is_rstp = false;
1381 +       struct netdev_queue *nq = NULL;
1382 +
1383 +       if (skb_shinfo(skb)->nr_frags) {
1384 +               nr_frags = skb_shinfo(skb)->nr_frags;
1385 +               num_tpds_needed += nr_frags;
1386 +       } else if (skb_has_frag_list(skb)) {
1387 +               struct sk_buff *iter_skb;
1388 +
1389 +               skb_walk_frags(skb, iter_skb)
1390 +                       num_tpds_needed++;
1391 +       }
1392 +
1393 +       if (num_tpds_needed > EDMA_MAX_SKB_FRAGS) {
1394 +               dev_err(&net_dev->dev,
1395 +                       "skb received with fragments %d which is more than %lu",
1396 +                       num_tpds_needed, EDMA_MAX_SKB_FRAGS);
1397 +               dev_kfree_skb_any(skb);
1398 +               adapter->stats.tx_errors++;
1399 +               return NETDEV_TX_OK;
1400 +       }
1401 +
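+       /* With STP/RSTP offload enabled, frames may carry a 4-byte Atheros
+        * header after the MAC addresses: bytes 12-13 hold the Atheros
+        * ethertype and bytes 14-15 the header itself (port bitmap and
+        * from_cpu bit). Extract those fields and strip the header before
+        * the frame is handed to the hardware.
+        */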
1402 +       if (edma_stp_rstp) {
1403 +               u16 ath_hdr, ath_eth_type;
1404 +               u8 mac_addr[EDMA_ETH_HDR_LEN];
1405 +               ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]);
1406 +               if (ath_eth_type == edma_ath_eth_type) {
1407 +                       packet_is_rstp = true;
1408 +                       ath_hdr = htons(*(uint16_t *)&skb->data[14]);
1409 +                       dp_bitmap = ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK;
1410 +                       from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT;
1411 +                       memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN);
1412 +
1413 +                       skb_pull(skb, 4);
1414 +
1415 +                       memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
1416 +               }
1417 +       }
1418 +
1419 +       /* this will be one of the 4 TX queues exposed to the Linux kernel */
1420 +       txq_id = skb_get_queue_mapping(skb);
1421 +       queue_id = edma_tx_queue_get(adapter, skb, txq_id);
1422 +       etdr = edma_cinfo->tpd_ring[queue_id];
1423 +       nq = netdev_get_tx_queue(net_dev, txq_id);
1424 +
1425 +       local_bh_disable();
1426 +       /* Tx is not handled in bottom half context. Hence, we need to protect
1427 +        * Tx from tasks and bottom half
1428 +        */
1429 +
1430 +       if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) {
1431 +               /* not enough descriptors, just stop the queue */
1432 +               netif_tx_stop_queue(nq);
1433 +               local_bh_enable();
1434 +               dev_dbg(&net_dev->dev, "Not enough descriptors available");
1435 +               edma_cinfo->edma_ethstats.tx_desc_error++;
1436 +               return NETDEV_TX_BUSY;
1437 +       }
1438 +
1439 +       /* Check and mark VLAN tag offload */
1440 +       if (skb_vlan_tag_present(skb))
1441 +               flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG;
1442 +       else if (adapter->default_vlan_tag)
1443 +               flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG;
1444 +
1445 +       /* Check and mark checksum offload */
1446 +       if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
1447 +               flags_transmit |= EDMA_HW_CHECKSUM;
1448 +
1449 +       /* Map and fill descriptor for Tx */
1450 +       ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id,
1451 +               flags_transmit, from_cpu, dp_bitmap, packet_is_rstp, nr_frags);
1452 +       if (ret) {
1453 +               dev_kfree_skb_any(skb);
1454 +               adapter->stats.tx_errors++;
1455 +               goto netdev_okay;
1456 +       }
1457 +
1458 +       /* Update SW producer index */
1459 +       edma_tx_update_hw_idx(edma_cinfo, skb, queue_id);
1460 +
1461 +       /* update tx statistics */
1462 +       adapter->stats.tx_packets++;
1463 +       adapter->stats.tx_bytes += skb->len;
1464 +
1465 +netdev_okay:
1466 +       local_bh_enable();
1467 +       return NETDEV_TX_OK;
1468 +}
1469 +
1470 +/*
1471 + * edma_flow_may_expire()
1472 + *     Timer function called periodically to delete the node
1473 + */
1474 +void edma_flow_may_expire(unsigned long data)
1475 +{
1476 +       struct edma_adapter *adapter = (struct edma_adapter *)data;
1477 +       int j;
1478 +
1479 +       spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1480 +       for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) {
1481 +               struct hlist_head *hhead;
1482 +               struct hlist_node *tmp;
1483 +               struct edma_rfs_filter_node *n;
1484 +               bool res;
1485 +
1486 +               hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++];
1487 +               hlist_for_each_entry_safe(n, tmp, hhead, node) {
1488 +                       res = rps_may_expire_flow(adapter->netdev, n->rq_id,
1489 +                                       n->flow_id, n->filter_id);
1490 +                       if (res) {
1491 +                               int ret;
1492 +                               ret = edma_delete_rfs_filter(adapter, n);
1493 +                               if (ret < 0)
1494 +                                       dev_dbg(&adapter->netdev->dev,
1495 +                                                       "RFS entry %d not allowed to be flushed by Switch",
1496 +                                                       n->flow_id);
1497 +                               else {
1498 +                                       hlist_del(&n->node);
1499 +                                       kfree(n);
1500 +                                       adapter->rfs.filter_available++;
1501 +                               }
1502 +                       }
1503 +               }
1504 +       }
1505 +
1506 +       adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1);
1507 +       spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1508 +       mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4);
1509 +}
1510 +
1511 +/* edma_rx_flow_steer()
1512 + *     Called by the core to steer the flow to a CPU
1513 + */
1514 +int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1515 +                      u16 rxq, u32 flow_id)
1516 +{
1517 +       struct flow_keys keys;
1518 +       struct edma_rfs_filter_node *filter_node;
1519 +       struct edma_adapter *adapter = netdev_priv(dev);
1520 +       u16 hash_tblid;
1521 +       int res;
1522 +
1523 +       if (skb->protocol == htons(ETH_P_IPV6)) {
1524 +               dev_err(&adapter->pdev->dev, "IPv6 not supported\n");
1525 +               res = -EINVAL;
1526 +               goto no_protocol_err;
1527 +       }
1528 +
1529 +       /* Dissect flow parameters
1530 +        * We only support IPv4 + TCP/UDP
1531 +        */
1532 +       res = skb_flow_dissect_flow_keys(skb, &keys, 0);
1533 +       if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) {
1534 +               res = -EPROTONOSUPPORT;
1535 +               goto no_protocol_err;
1536 +       }
1537 +
1538 +       /* Check if table entry exists */
1539 +       hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK;
1540 +
1541 +       spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1542 +       filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys);
1543 +
1544 +       if (filter_node) {
1545 +               if (rxq == filter_node->rq_id) {
1546 +                       res = -EEXIST;
1547 +                       goto out;
1548 +               } else {
1549 +                       res = edma_delete_rfs_filter(adapter, filter_node);
1550 +                       if (res < 0)
1551 +                               dev_warn(&adapter->netdev->dev,
1552 +                                               "Cannot steer flow %d to different queue",
1553 +                                               filter_node->flow_id);
1554 +                       else {
1555 +                               adapter->rfs.filter_available++;
1556 +                               res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
1557 +                               if (res < 0) {
1558 +                                       dev_warn(&adapter->netdev->dev,
1559 +                                                       "Cannot steer flow %d to different queue",
1560 +                                                       filter_node->flow_id);
1561 +                               } else {
1562 +                                       adapter->rfs.filter_available--;
1563 +                                       filter_node->rq_id = rxq;
1564 +                                       filter_node->filter_id = res;
1565 +                               }
1566 +                       }
1567 +               }
1568 +       } else {
1569 +               if (adapter->rfs.filter_available == 0) {
1570 +                       res = -EBUSY;
1571 +                       goto out;
1572 +               }
1573 +
1574 +               filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC);
1575 +               if (!filter_node) {
1576 +                       res = -ENOMEM;
1577 +                       goto out;
1578 +               }
1579 +
1580 +               res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node);
1581 +               if (res < 0) {
1582 +                       kfree(filter_node);
1583 +                       goto out;
1584 +               }
1585 +
1586 +               adapter->rfs.filter_available--;
1587 +               filter_node->rq_id = rxq;
1588 +               filter_node->filter_id = res;
1589 +               filter_node->flow_id = flow_id;
1590 +               filter_node->keys = keys;
1591 +               INIT_HLIST_NODE(&filter_node->node);
1592 +               hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]);
1593 +       }
1594 +
1595 +out:
1596 +       spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1597 +no_protocol_err:
1598 +       return res;
1599 +}
1600 +
1601 +/* edma_register_rfs_filter()
1602 + *     Add RFS filter callback
1603 + */
1604 +int edma_register_rfs_filter(struct net_device *netdev,
1605 +                           set_rfs_filter_callback_t set_filter)
1606 +{
1607 +       struct edma_adapter *adapter = netdev_priv(netdev);
1608 +
1609 +       spin_lock_bh(&adapter->rfs.rfs_ftab_lock);
1610 +
1611 +       if (adapter->set_rfs_rule) {
1612 +               spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1613 +               return -1;
1614 +       }
1615 +
1616 +       adapter->set_rfs_rule = set_filter;
1617 +       spin_unlock_bh(&adapter->rfs.rfs_ftab_lock);
1618 +
1619 +       return 0;
1620 +}
1621 +
1622 +/* edma_alloc_tx_rings()
1623 + *     Allocate tx rings
1624 + */
1625 +int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo)
1626 +{
1627 +       struct platform_device *pdev = edma_cinfo->pdev;
1628 +       int i, err = 0;
1629 +
1630 +       for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1631 +               err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
1632 +               if (err) {
1633 +                       dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i);
1634 +                       return err;
1635 +               }
1636 +       }
1637 +
1638 +       return 0;
1639 +}
1640 +
1641 +/* edma_free_tx_rings()
1642 + *     Free tx rings
1643 + */
1644 +void edma_free_tx_rings(struct edma_common_info *edma_cinfo)
1645 +{
1646 +       int i;
1647 +
1648 +       for (i = 0; i < edma_cinfo->num_tx_queues; i++)
1649 +               edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]);
1650 +}
1651 +
1652 +/* edma_free_tx_resources()
1653 + *     Free buffers associated with tx rings
1654 + */
1655 +void edma_free_tx_resources(struct edma_common_info *edma_cinfo)
1656 +{
1657 +       struct edma_tx_desc_ring *etdr;
1658 +       struct edma_sw_desc *sw_desc;
1659 +       struct platform_device *pdev = edma_cinfo->pdev;
1660 +       int i, j;
1661 +
1662 +       for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1663 +               etdr = edma_cinfo->tpd_ring[i];
1664 +               for (j = 0; j < EDMA_TX_RING_SIZE; j++) {
1665 +                       sw_desc = &etdr->sw_desc[j];
1666 +                       if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD |
1667 +                               EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST))
1668 +                               edma_tx_unmap_and_free(pdev, sw_desc);
1669 +               }
1670 +       }
1671 +}
1672 +
1673 +/* edma_alloc_rx_rings()
1674 + *     Allocate rx rings
1675 + */
1676 +int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo)
1677 +{
1678 +       struct platform_device *pdev = edma_cinfo->pdev;
1679 +       int i, j, err = 0;
1680 +
1681 +       for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1682 +               err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
1683 +               if (err) {
1684 +                       dev_err(&pdev->dev, "Rx Queue alloc%u failed\n", i);
1685 +                       return err;
1686 +               }
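+               /* With 4 rx queues only every other rfd ring (even index)
+                * is used; with 8 rx queues the rings are used consecutively.
+                */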
1687 +               j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1688 +       }
1689 +
1690 +       return 0;
1691 +}
1692 +
1693 +/* edma_free_rx_rings()
1694 + *     free rx rings
1695 + */
1696 +void edma_free_rx_rings(struct edma_common_info *edma_cinfo)
1697 +{
1698 +       int i, j;
1699 +
1700 +       for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1701 +               edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
1702 +               j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1703 +       }
1704 +}
1705 +
1706 +/* edma_free_queues()
1707 + *     Free the allocated queues
1708 + */
1709 +void edma_free_queues(struct edma_common_info *edma_cinfo)
1710 +{
1711 +       int i, j;
1712 +
1713 +       for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1714 +               if (edma_cinfo->tpd_ring[i])
1715 +                       kfree(edma_cinfo->tpd_ring[i]);
1716 +               edma_cinfo->tpd_ring[i] = NULL;
1717 +       }
1718 +
1719 +       for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1720 +               if (edma_cinfo->rfd_ring[j])
1721 +                       kfree(edma_cinfo->rfd_ring[j]);
1722 +               edma_cinfo->rfd_ring[j] = NULL;
1723 +               j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1724 +       }
1725 +
1726 +       edma_cinfo->num_rx_queues = 0;
1727 +       edma_cinfo->num_tx_queues = 0;
1728 +
1729 +       return;
1730 +}
1731 +
1732 +/* edma_free_rx_resources()
1733 + *     Free buffers associated with rx rings
1734 + */
1735 +void edma_free_rx_resources(struct edma_common_info *edma_cinfo)
1736 +{
1737 +       struct edma_rfd_desc_ring *erdr;
1738 +       struct edma_sw_desc *sw_desc;
1739 +       struct platform_device *pdev = edma_cinfo->pdev;
1740 +       int i, j, k;
1741 +
1742 +       for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) {
1743 +               erdr = edma_cinfo->rfd_ring[k];
1744 +               for (j = 0; j < EDMA_RX_RING_SIZE; j++) {
1745 +                       sw_desc = &erdr->sw_desc[j];
1746 +                       if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) {
1747 +                               dma_unmap_single(&pdev->dev, sw_desc->dma,
1748 +                                       sw_desc->length, DMA_FROM_DEVICE);
1749 +                               edma_clean_rfd(erdr, j);
1750 +                       } else if ((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)) {
1751 +                               dma_unmap_page(&pdev->dev, sw_desc->dma,
1752 +                                       sw_desc->length, DMA_FROM_DEVICE);
1753 +                               edma_clean_rfd(erdr, j);
1754 +                       }
1755 +               }
1756 +               k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1757 +
1758 +       }
1759 +}
1760 +
1761 +/* edma_alloc_queues_tx()
1762 + *     Allocate memory for all rings
1763 + */
1764 +int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo)
1765 +{
1766 +       int i;
1767 +
1768 +       for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
1769 +               struct edma_tx_desc_ring *etdr;
1770 +               etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL);
1771 +               if (!etdr)
1772 +                       goto err;
1773 +               etdr->count = edma_cinfo->tx_ring_count;
1774 +               edma_cinfo->tpd_ring[i] = etdr;
1775 +       }
1776 +
1777 +       return 0;
1778 +err:
1779 +       edma_free_queues(edma_cinfo);
1780 +       return -1;
1781 +}
1782 +
1783 +/* edma_alloc_queues_rx()
1784 + *     Allocate memory for all rings
1785 + */
1786 +int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo)
1787 +{
1788 +       int i, j;
1789 +
1790 +       for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1791 +               struct edma_rfd_desc_ring *rfd_ring;
1792 +               rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring),
1793 +                               GFP_KERNEL);
1794 +               if (!rfd_ring)
1795 +                       goto err;
1796 +               rfd_ring->count = edma_cinfo->rx_ring_count;
1797 +               edma_cinfo->rfd_ring[j] = rfd_ring;
1798 +               j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1799 +       }
1800 +       return 0;
1801 +err:
1802 +       edma_free_queues(edma_cinfo);
1803 +       return -1;
1804 +}
1805 +
1806 +/* edma_clear_irq_status()
1807 + *     Clear interrupt status
1808 + */
1809 +void edma_clear_irq_status(void)
1810 +{
1811 +       edma_write_reg(EDMA_REG_RX_ISR, 0xff);
1812 +       edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
1813 +       edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff);
1814 +       edma_write_reg(EDMA_REG_WOL_ISR, 0x1);
1815 +}
1816 +
1817 +/* edma_configure()
1818 + *     Configure skb, edma interrupts and control register.
1819 + */
1820 +int edma_configure(struct edma_common_info *edma_cinfo)
1821 +{
1822 +       struct edma_hw *hw = &edma_cinfo->hw;
1823 +       u32 intr_modrt_data;
1824 +       u32 intr_ctrl_data = 0;
1825 +       int i, j, ret_count;
1826 +
1827 +       edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data);
1828 +       intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT);
1829 +       intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT;
1830 +       edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data);
1831 +
1832 +       edma_clear_irq_status();
1833 +
1834 +       /* Clear any WOL status */
1835 +       edma_write_reg(EDMA_REG_WOL_CTRL, 0);
1836 +       intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
1837 +       intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
1838 +       edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
1839 +       edma_configure_tx(edma_cinfo);
1840 +       edma_configure_rx(edma_cinfo);
1841 +
1842 +       /* Allocate the RX buffer */
1843 +       for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1844 +               struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j];
1845 +               ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j);
1846 +               if (ret_count) {
1847 +                       dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n");
1848 +               }
1849 +               j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1850 +       }
1851 +
1852 +       /* Configure descriptor Ring */
1853 +       edma_init_desc(edma_cinfo);
1854 +       return 0;
1855 +}
1856 +
1857 +/* edma_irq_enable()
1858 + *     Enable default interrupt generation settings
1859 + */
1860 +void edma_irq_enable(struct edma_common_info *edma_cinfo)
1861 +{
1862 +       struct edma_hw *hw = &edma_cinfo->hw;
1863 +       int i, j;
1864 +
1865 +       edma_write_reg(EDMA_REG_RX_ISR, 0xff);
1866 +       for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
1867 +               edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask);
1868 +               j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
1869 +       }
1870 +       edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
1871 +       for (i = 0; i < edma_cinfo->num_tx_queues; i++)
1872 +               edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask);
1873 +}
1874 +
1875 +/* edma_irq_disable()
1876 + *     Disable Interrupt
1877 + */
1878 +void edma_irq_disable(struct edma_common_info *edma_cinfo)
1879 +{
1880 +       int i;
1881 +
1882 +       for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++)
1883 +               edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0);
1884 +
1885 +       for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++)
1886 +               edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0);
1887 +       edma_write_reg(EDMA_REG_MISC_IMR, 0);
1888 +       edma_write_reg(EDMA_REG_WOL_IMR, 0);
1889 +}
1890 +
1891 +/* edma_free_irqs()
1892 + *     Free All IRQs
1893 + */
1894 +void edma_free_irqs(struct edma_adapter *adapter)
1895 +{
1896 +       struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
1897 +       int i, j;
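+       /* rx irqs handled per core: one when 4 rx queues are used,
+        * two when 8 are used
+        */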
1898 +       int k = ((edma_cinfo->num_rx_queues == 4) ? 1 : 2);
1899 +
1900 +       for (i = 0; i < CONFIG_NR_CPUS; i++) {
1901 +               for (j = edma_cinfo->edma_percpu_info[i].tx_start; j < (edma_cinfo->edma_percpu_info[i].tx_start + 4); j++)
1902 +                       free_irq(edma_cinfo->tx_irq[j], &edma_cinfo->edma_percpu_info[i]);
1903 +
1904 +               for (j = edma_cinfo->edma_percpu_info[i].rx_start; j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++)
1905 +                       free_irq(edma_cinfo->rx_irq[j], &edma_cinfo->edma_percpu_info[i]);
1906 +       }
1907 +}
1908 +
1909 +/* edma_enable_rx_ctrl()
1910 + *     Enable RX queue control
1911 + */
1912 +void edma_enable_rx_ctrl(struct edma_hw *hw)
1913 +{
1914 +       u32 data;
1915 +
1916 +       edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
1917 +       data |= EDMA_RXQ_CTRL_EN;
1918 +       edma_write_reg(EDMA_REG_RXQ_CTRL, data);
1919 +}
1920 +
1921 +
1922 +/* edma_enable_tx_ctrl()
1923 + *     Enable TX queue control
1924 + */
1925 +void edma_enable_tx_ctrl(struct edma_hw *hw)
1926 +{
1927 +       u32 data;
1928 +
1929 +       edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
1930 +       data |= EDMA_TXQ_CTRL_TXQ_EN;
1931 +       edma_write_reg(EDMA_REG_TXQ_CTRL, data);
1932 +}
1933 +
1934 +/* edma_stop_rx_tx()
1935 + *     Disable RX/TX queue control
1936 + */
1937 +void edma_stop_rx_tx(struct edma_hw *hw)
1938 +{
1939 +       u32 data;
1940 +
1941 +       edma_read_reg(EDMA_REG_RXQ_CTRL, &data);
1942 +       data &= ~EDMA_RXQ_CTRL_EN;
1943 +       edma_write_reg(EDMA_REG_RXQ_CTRL, data);
1944 +       edma_read_reg(EDMA_REG_TXQ_CTRL, &data);
1945 +       data &= ~EDMA_TXQ_CTRL_TXQ_EN;
1946 +       edma_write_reg(EDMA_REG_TXQ_CTRL, data);
1947 +}
1948 +
1949 +/* edma_reset()
1950 + *     Reset the EDMA
1951 + */
1952 +int edma_reset(struct edma_common_info *edma_cinfo)
1953 +{
1954 +       struct edma_hw *hw = &edma_cinfo->hw;
1955 +
1956 +       edma_irq_disable(edma_cinfo);
1957 +
1958 +       edma_clear_irq_status();
1959 +
1960 +       edma_stop_rx_tx(hw);
1961 +
1962 +       return 0;
1963 +}
1964 +
1965 +/* edma_fill_netdev()
1966 + *     Fill netdev for each etdr
1967 + */
1968 +int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id,
1969 +                   int dev, int txq_id)
1970 +{
1971 +       struct edma_tx_desc_ring *etdr;
1972 +       int i = 0;
1973 +
1974 +       etdr = edma_cinfo->tpd_ring[queue_id];
1975 +
1976 +       while (etdr->netdev[i])
1977 +               i++;
1978 +
1979 +       if (i >= EDMA_MAX_NETDEV_PER_QUEUE)
1980 +               return -1;
1981 +
1982 +       /* Populate the netdev associated with the tpd ring */
1983 +       etdr->netdev[i] = edma_netdev[dev];
1984 +       etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id);
1985 +
1986 +       return 0;
1987 +}
1988 +
1989 +/* edma_change_mtu()
1990 + *     change the MTU of the NIC.
1991 + */
1992 +int edma_change_mtu(struct net_device *netdev, int new_mtu)
1993 +{
1994 +       struct edma_adapter *adapter = netdev_priv(netdev);
1995 +       struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
1996 +       int old_mtu = netdev->mtu;
1997 +       int max_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + (2 * VLAN_HLEN);
1998 +
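+       /* e.g. new_mtu = 1500 gives 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
+        * 2 * 4 (two VLAN tags) = 1526 bytes on the wire
+        */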
1999 +       if ((max_frame_size < ETH_ZLEN + ETH_FCS_LEN) ||
2000 +               (max_frame_size > EDMA_MAX_JUMBO_FRAME_SIZE)) {
2001 +                       dev_err(&edma_cinfo->pdev->dev, "MTU setting not correct\n");
2002 +                       return -EINVAL;
2003 +       }
2004 +
2005 +       /* set MTU */
2006 +       if (old_mtu != new_mtu) {
2007 +               netdev->mtu = new_mtu;
2008 +               netdev_update_features(netdev);
2009 +       }
2010 +
2011 +       return 0;
2012 +}
2013 +
2014 +/* edma_set_mac_addr()
2015 + *     Change the Ethernet Address of the NIC
2016 + */
2017 +int edma_set_mac_addr(struct net_device *netdev, void *p)
2018 +{
2019 +       struct sockaddr *addr = p;
2020 +
2021 +       if (!is_valid_ether_addr(addr->sa_data))
2022 +               return -EINVAL;
2023 +
2024 +       if (netif_running(netdev))
2025 +               return -EBUSY;
2026 +
2027 +       memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2028 +       return 0;
2029 +}
2030 +
2031 +/* edma_set_stp_rstp()
2032 + *     set stp/rstp
2033 + */
2034 +void edma_set_stp_rstp(bool rstp)
2035 +{
2036 +       edma_stp_rstp = rstp;
2037 +}
2038 +
2039 +/* edma_assign_ath_hdr_type()
2040 + *     assign atheros header eth type
2041 + */
2042 +void edma_assign_ath_hdr_type(int eth_type)
2043 +{
2044 +       edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK;
2045 +}
2046 +
2047 +/* edma_get_default_vlan_tag()
2048 + *     Used by other modules to get the default vlan tag
2049 + */
2050 +int edma_get_default_vlan_tag(struct net_device *netdev)
2051 +{
2052 +       struct edma_adapter *adapter = netdev_priv(netdev);
2053 +
2054 +       if (adapter->default_vlan_tag)
2055 +               return adapter->default_vlan_tag;
2056 +
2057 +       return 0;
2058 +}
2059 +
2060 +/* edma_open()
2061 + *     gets called when the netdevice is brought up; starts the queues.
2062 + */
2063 +int edma_open(struct net_device *netdev)
2064 +{
2065 +       struct edma_adapter *adapter = netdev_priv(netdev);
2066 +       struct platform_device *pdev = adapter->edma_cinfo->pdev;
2067 +
2068 +       netif_tx_start_all_queues(netdev);
2069 +       edma_initialise_rfs_flow_table(adapter);
2070 +       set_bit(__EDMA_UP, &adapter->state_flags);
2071 +
2072 +       /* If link polling is enabled (in our case, for the WAN port),
2073 +        * start the PHY; otherwise always mark the link as UP
2074 +        */
2075 +       if (adapter->poll_required) {
2076 +               if (!IS_ERR(adapter->phydev)) {
2077 +                       phy_start(adapter->phydev);
2078 +                       phy_start_aneg(adapter->phydev);
2079 +                       adapter->link_state = __EDMA_LINKDOWN;
2080 +               } else {
2081 +                       dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n");
2082 +               }
2083 +       } else {
2084 +               adapter->link_state = __EDMA_LINKUP;
2085 +               netif_carrier_on(netdev);
2086 +       }
2087 +
2088 +       return 0;
2089 +}
2090 +
2091 +
2092 +/* edma_close()
2093 + *     gets called when the netdevice is brought down; stops the queues.
2094 + */
2095 +int edma_close(struct net_device *netdev)
2096 +{
2097 +       struct edma_adapter *adapter = netdev_priv(netdev);
2098 +
2099 +       edma_free_rfs_flow_table(adapter);
2100 +       netif_carrier_off(netdev);
2101 +       netif_tx_stop_all_queues(netdev);
2102 +
2103 +       if (adapter->poll_required) {
2104 +               if (!IS_ERR(adapter->phydev))
2105 +                       phy_stop(adapter->phydev);
2106 +       }
2107 +
2108 +       adapter->link_state = __EDMA_LINKDOWN;
2109 +
2110 +       /* Clear the GMAC UP state now that the interface is going down
2111 +        */
2112 +       clear_bit(__EDMA_UP, &adapter->state_flags);
2113 +
2114 +       return 0;
2115 +}
2116 +
2117 +/* edma_poll
2118 + *     polling function that gets called when the napi gets scheduled.
2119 + *
2120 + * Main sequence of tasks performed in this api:
2121 + * read irq status -> clean tx irq -> clean rx irq ->
2122 + * clear irq status -> re-enable interrupts.
2123 + */
2124 +int edma_poll(struct napi_struct *napi, int budget)
2125 +{
2126 +       struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi,
2127 +               struct edma_per_cpu_queues_info, napi);
2128 +       struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
2129 +       u32 reg_data;
2130 +       u32 shadow_rx_status, shadow_tx_status;
2131 +       int queue_id;
2132 +       int i, work_done = 0;
2133 +
2134 +       /* Store the Rx/Tx status by ANDing it with
2135 +        * appropriate CPU RX?TX mask
2136 +        * the appropriate CPU RX/TX mask
2137 +       edma_read_reg(EDMA_REG_RX_ISR, &reg_data);
2138 +       edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask;
2139 +       shadow_rx_status = edma_percpu_info->rx_status;
2140 +       edma_read_reg(EDMA_REG_TX_ISR, &reg_data);
2141 +       edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask;
2142 +       shadow_tx_status = edma_percpu_info->tx_status;
2143 +
2144 +       /* Every core will have a start, which will be computed
2145 +        * in probe and stored in edma_percpu_info->tx_start variable.
2146 +        * We will shift the status bit by tx_start to obtain
2147 +        * status bits for the core on which the current processing
2148 +        * is happening. Since there are 4 tx queues per core,
2149 +        * we will run the loop till we get the correct queue to clear.
2150 +        */
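+       /* For example, a core with tx_start = 8 and tx_mask = 0x0F00
+        * (EDMA_TXQ_START_CORE0 / EDMA_TXQ_IRQ_MASK_CORE0) sees only bits
+        * 8-11 set in tx_status, so ffs() - 1 yields queue ids 8..11.
+        */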
2151 +       while (edma_percpu_info->tx_status) {
2152 +               queue_id = ffs(edma_percpu_info->tx_status) - 1;
2153 +               edma_tx_complete(edma_cinfo, queue_id);
2154 +               edma_percpu_info->tx_status &= ~(1 << queue_id);
2155 +       }
2156 +
2157 +       /* Every core will have a start, which will be computed
2158 +        * in probe and stored in edma_percpu_info->rx_start variable.
2159 +        * We will shift the status bit by rx_start to obtain
2160 +        * status bits for the core on which the current processing
2161 +        * is happening. Since there are up to 2 rx queues per core, we
2162 +        * will run the loop till we get the correct queue to clear.
2163 +        */
2164 +       while (edma_percpu_info->rx_status) {
2165 +               queue_id = ffs(edma_percpu_info->rx_status) - 1;
2166 +               edma_rx_complete(edma_cinfo, &work_done,
2167 +                               budget, queue_id, napi);
2168 +
2169 +               if (likely(work_done < budget))
2170 +                       edma_percpu_info->rx_status &= ~(1 << queue_id);
2171 +               else
2172 +                       break;
2173 +       }
2174 +
2175 +       /* Clear the status register to prevent the interrupts from
2176 +        * reoccurring. This clearing of the interrupt status register is
2177 +        * done here as writing to the status register only takes place
2178 +        * once the producer/consumer index has been updated to
2179 +        * reflect that the packet transmission/reception went fine.
2180 +        */
2181 +       edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status);
2182 +       edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status);
2183 +
2184 +       /* If budget not fully consumed, exit the polling mode */
2185 +       if (likely(work_done < budget)) {
2186 +               napi_complete(napi);
2187 +
2188 +               /* re-enable the interrupts */
2189 +               for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
2190 +                       edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1);
2191 +               for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
2192 +                       edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x1);
2193 +       }
2194 +
2195 +       return work_done;
2196 +}
2197 +
2198 +/* edma interrupt()
2199 + *     interrupt handler
2200 + */
2201 +irqreturn_t edma_interrupt(int irq, void *dev)
2202 +{
2203 +       struct edma_per_cpu_queues_info *edma_percpu_info = (struct edma_per_cpu_queues_info *) dev;
2204 +       struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
2205 +       int i;
2206 +
2207 +       /* Unmask the TX/RX interrupt register */
2208 +       for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
2209 +               edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0);
2210 +
2211 +       for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
2212 +               edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x0);
2213 +
2214 +       napi_schedule(&edma_percpu_info->napi);
2215 +
2216 +       return IRQ_HANDLED;
2217 +}
2218 --- /dev/null
2219 +++ b/drivers/net/ethernet/qualcomm/essedma/edma.h
2220 @@ -0,0 +1,447 @@
2221 +/*
2222 + * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
2223 + *
2224 + * Permission to use, copy, modify, and/or distribute this software for
2225 + * any purpose with or without fee is hereby granted, provided that the
2226 + * above copyright notice and this permission notice appear in all copies.
2227 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
2228 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
2229 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
2230 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
2231 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
2232 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
2233 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
2234 + */
2235 +
2236 +#ifndef _EDMA_H_
2237 +#define _EDMA_H_
2238 +
2239 +#include <linux/init.h>
2240 +#include <linux/interrupt.h>
2241 +#include <linux/types.h>
2242 +#include <linux/errno.h>
2243 +#include <linux/module.h>
2244 +#include <linux/netdevice.h>
2245 +#include <linux/etherdevice.h>
2246 +#include <linux/skbuff.h>
2247 +#include <linux/io.h>
2248 +#include <linux/vmalloc.h>
2249 +#include <linux/pagemap.h>
2250 +#include <linux/smp.h>
2251 +#include <linux/platform_device.h>
2252 +#include <linux/of.h>
2253 +#include <linux/of_device.h>
2254 +#include <linux/kernel.h>
2255 +#include <linux/device.h>
2256 +#include <linux/sysctl.h>
2257 +#include <linux/phy.h>
2258 +#include <linux/of_net.h>
2259 +#include <net/checksum.h>
2260 +#include <net/ip6_checksum.h>
2261 +#include <asm-generic/bug.h>
2262 +#include "ess_edma.h"
2263 +
2264 +#define EDMA_CPU_CORES_SUPPORTED 4
2265 +#define EDMA_MAX_PORTID_SUPPORTED 5
2266 +#define EDMA_MAX_VLAN_SUPPORTED  EDMA_MAX_PORTID_SUPPORTED
2267 +#define EDMA_MAX_PORTID_BITMAP_INDEX (EDMA_MAX_PORTID_SUPPORTED + 1)
2268 +#define EDMA_MAX_PORTID_BITMAP_SUPPORTED 0x1f  /* 0001_1111 = 0x1f */
2269 +#define EDMA_MAX_NETDEV_PER_QUEUE 4 /* 3 Netdev per queue, 1 space for indexing */
2270 +
2271 +#define EDMA_MAX_RECEIVE_QUEUE 8
2272 +#define EDMA_MAX_TRANSMIT_QUEUE 16
2273 +
2274 +/* WAN/LAN adapter number */
2275 +#define EDMA_WAN 0
2276 +#define EDMA_LAN 1
2277 +
2278 +/* VLAN tag */
2279 +#define EDMA_LAN_DEFAULT_VLAN 1
2280 +#define EDMA_WAN_DEFAULT_VLAN 2
2281 +
2282 +#define EDMA_DEFAULT_GROUP1_VLAN 1
2283 +#define EDMA_DEFAULT_GROUP2_VLAN 2
2284 +#define EDMA_DEFAULT_GROUP3_VLAN 3
2285 +#define EDMA_DEFAULT_GROUP4_VLAN 4
2286 +#define EDMA_DEFAULT_GROUP5_VLAN 5
2287 +
2288 +/* Queues exposed to linux kernel */
2289 +#define EDMA_NETDEV_TX_QUEUE 4
2290 +#define EDMA_NETDEV_RX_QUEUE 4
2291 +
2292 +/* Number of queues per core */
2293 +#define EDMA_NUM_TXQ_PER_CORE 4
2294 +#define EDMA_NUM_RXQ_PER_CORE 2
2295 +
2296 +#define EDMA_TPD_EOP_SHIFT 31
2297 +
2298 +#define EDMA_PORT_ID_SHIFT 12
2299 +#define EDMA_PORT_ID_MASK 0x7
2300 +
2301 +/* tpd word 3 bit 18-28 */
2302 +#define EDMA_TPD_PORT_BITMAP_SHIFT 18
2303 +
2304 +#define EDMA_TPD_FROM_CPU_SHIFT 25
2305 +
2306 +#define EDMA_FROM_CPU_MASK 0x80
2307 +#define EDMA_SKB_PRIORITY_MASK 0x38
2308 +
2309 +/* TX/RX descriptor ring count */
2310 +/* should be a power of 2 */
2311 +#define EDMA_RX_RING_SIZE 128
2312 +#define EDMA_TX_RING_SIZE 128
2313 +
2314 +/* Flags used in paged/non paged mode */
2315 +#define EDMA_RX_HEAD_BUFF_SIZE_JUMBO 256
2316 +#define EDMA_RX_HEAD_BUFF_SIZE 1540
2317 +
2318 +/* MAX frame size supported by switch */
2319 +#define EDMA_MAX_JUMBO_FRAME_SIZE 9216
2320 +
2321 +/* Configurations */
2322 +#define EDMA_INTR_CLEAR_TYPE 0
2323 +#define EDMA_INTR_SW_IDX_W_TYPE 0
2324 +#define EDMA_FIFO_THRESH_TYPE 0
2325 +#define EDMA_RSS_TYPE 0
2326 +#define EDMA_RX_IMT 0x0020
2327 +#define EDMA_TX_IMT 0x0050
2328 +#define EDMA_TPD_BURST 5
2329 +#define EDMA_TXF_BURST 0x100
2330 +#define EDMA_RFD_BURST 8
2331 +#define EDMA_RFD_THR 16
2332 +#define EDMA_RFD_LTHR 0
2333 +
2334 +/* RX/TX per CPU based mask/shift */
2335 +#define EDMA_TX_PER_CPU_MASK 0xF
2336 +#define EDMA_RX_PER_CPU_MASK 0x3
2337 +#define EDMA_TX_PER_CPU_MASK_SHIFT 0x2
2338 +#define EDMA_RX_PER_CPU_MASK_SHIFT 0x1
2339 +#define EDMA_TX_CPU_START_SHIFT 0x2
2340 +#define EDMA_RX_CPU_START_SHIFT 0x1
2341 +
2342 +/* Flags used in transmit direction */
2343 +#define EDMA_HW_CHECKSUM 0x00000001
2344 +#define EDMA_VLAN_TX_TAG_INSERT_FLAG 0x00000002
2345 +#define EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG 0x00000004
2346 +
2347 +#define EDMA_SW_DESC_FLAG_LAST 0x1
2348 +#define EDMA_SW_DESC_FLAG_SKB_HEAD 0x2
2349 +#define EDMA_SW_DESC_FLAG_SKB_FRAG 0x4
2350 +#define EDMA_SW_DESC_FLAG_SKB_FRAGLIST 0x8
2351 +#define EDMA_SW_DESC_FLAG_SKB_NONE 0x10
2352 +#define EDMA_SW_DESC_FLAG_SKB_REUSE 0x20
2353 +
2354 +
2355 +#define EDMA_MAX_SKB_FRAGS (MAX_SKB_FRAGS + 1)
2356 +
2357 +/* Ethtool specific list of EDMA supported features */
2358 +#define EDMA_SUPPORTED_FEATURES (SUPPORTED_10baseT_Half \
2359 +                                       | SUPPORTED_10baseT_Full \
2360 +                                       | SUPPORTED_100baseT_Half \
2361 +                                       | SUPPORTED_100baseT_Full \
2362 +                                       | SUPPORTED_1000baseT_Full)
2363 +
2364 +/* Receive side Atheros header */
2365 +#define EDMA_RX_ATH_HDR_VERSION 0x2
2366 +#define EDMA_RX_ATH_HDR_VERSION_SHIFT 14
2367 +#define EDMA_RX_ATH_HDR_PRIORITY_SHIFT 11
2368 +#define EDMA_RX_ATH_PORT_TYPE_SHIFT 6
2369 +#define EDMA_RX_ATH_HDR_RSTP_PORT_TYPE 0x4
2370 +
2371 +/* Transmit side Atheros header */
2372 +#define EDMA_TX_ATH_HDR_PORT_BITMAP_MASK 0x7F
2373 +#define EDMA_TX_ATH_HDR_FROM_CPU_MASK 0x80
2374 +#define EDMA_TX_ATH_HDR_FROM_CPU_SHIFT 7
2375 +
2376 +#define EDMA_TXQ_START_CORE0 8
2377 +#define EDMA_TXQ_START_CORE1 12
2378 +#define EDMA_TXQ_START_CORE2 0
2379 +#define EDMA_TXQ_START_CORE3 4
2380 +
2381 +#define EDMA_TXQ_IRQ_MASK_CORE0 0x0F00
2382 +#define EDMA_TXQ_IRQ_MASK_CORE1 0xF000
2383 +#define EDMA_TXQ_IRQ_MASK_CORE2 0x000F
2384 +#define EDMA_TXQ_IRQ_MASK_CORE3 0x00F0
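+/* Queue-to-core mapping implied by the defines above:
+ * core 2 -> txq 0-3, core 3 -> txq 4-7, core 0 -> txq 8-11, core 1 -> txq 12-15
+ */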
2385 +
2386 +#define EDMA_ETH_HDR_LEN 12
2387 +#define EDMA_ETH_TYPE_MASK 0xFFFF
2388 +
2389 +#define EDMA_RX_BUFFER_WRITE 16
2390 +#define EDMA_RFD_AVAIL_THR 80
2391 +
2392 +#define EDMA_GMAC_NO_MDIO_PHY  PHY_MAX_ADDR
2393 +
2394 +extern int ssdk_rfs_ipct_rule_set(__be32 ip_src, __be32 ip_dst,
2395 +                                 __be16 sport, __be16 dport,
2396 +                                 uint8_t proto, u16 loadbalance, bool action);
2397 +struct edma_ethtool_statistics {
2398 +       u32 tx_q0_pkt;
2399 +       u32 tx_q1_pkt;
2400 +       u32 tx_q2_pkt;
2401 +       u32 tx_q3_pkt;
2402 +       u32 tx_q4_pkt;
2403 +       u32 tx_q5_pkt;
2404 +       u32 tx_q6_pkt;
2405 +       u32 tx_q7_pkt;
2406 +       u32 tx_q8_pkt;
2407 +       u32 tx_q9_pkt;
2408 +       u32 tx_q10_pkt;
2409 +       u32 tx_q11_pkt;
2410 +       u32 tx_q12_pkt;
2411 +       u32 tx_q13_pkt;
2412 +       u32 tx_q14_pkt;
2413 +       u32 tx_q15_pkt;
2414 +       u32 tx_q0_byte;
2415 +       u32 tx_q1_byte;
2416 +       u32 tx_q2_byte;
2417 +       u32 tx_q3_byte;
2418 +       u32 tx_q4_byte;
2419 +       u32 tx_q5_byte;
2420 +       u32 tx_q6_byte;
2421 +       u32 tx_q7_byte;
2422 +       u32 tx_q8_byte;
2423 +       u32 tx_q9_byte;
2424 +       u32 tx_q10_byte;
2425 +       u32 tx_q11_byte;
2426 +       u32 tx_q12_byte;
2427 +       u32 tx_q13_byte;
2428 +       u32 tx_q14_byte;
2429 +       u32 tx_q15_byte;
2430 +       u32 rx_q0_pkt;
2431 +       u32 rx_q1_pkt;
2432 +       u32 rx_q2_pkt;
2433 +       u32 rx_q3_pkt;
2434 +       u32 rx_q4_pkt;
2435 +       u32 rx_q5_pkt;
2436 +       u32 rx_q6_pkt;
2437 +       u32 rx_q7_pkt;
2438 +       u32 rx_q0_byte;
2439 +       u32 rx_q1_byte;
2440 +       u32 rx_q2_byte;
2441 +       u32 rx_q3_byte;
2442 +       u32 rx_q4_byte;
2443 +       u32 rx_q5_byte;
2444 +       u32 rx_q6_byte;
2445 +       u32 rx_q7_byte;
2446 +       u32 tx_desc_error;
2447 +};
2448 +
2449 +struct edma_mdio_data {
2450 +       struct mii_bus  *mii_bus;
2451 +       void __iomem    *membase;
2452 +       int phy_irq[PHY_MAX_ADDR];
2453 +};
2454 +
2455 +/* EDMA LINK state */
2456 +enum edma_link_state {
2457 +       __EDMA_LINKUP, /* Indicate link is UP */
2458 +       __EDMA_LINKDOWN /* Indicate link is down */
2459 +};
2460 +
2461 +/* EDMA GMAC state */
2462 +enum edma_gmac_state {
2463 +       __EDMA_UP /* used to indicate GMAC is up */
2464 +};
2465 +
2466 +/* edma transmit descriptor */
2467 +struct edma_tx_desc {
2468 +       __le16  len; /* full packet including CRC */
2469 +       __le16  svlan_tag; /* vlan tag */
2470 +       __le32  word1; /* byte 4-7 */
2471 +       __le32  addr; /* address of buffer */
2472 +       __le32  word3; /* byte 12 */
2473 +};
2474 +
2475 +/* edma receive return descriptor */
2476 +struct edma_rx_return_desc {
2477 +       u16 rrd0;
2478 +       u16 rrd1;
2479 +       u16 rrd2;
2480 +       u16 rrd3;
2481 +       u16 rrd4;
2482 +       u16 rrd5;
2483 +       u16 rrd6;
2484 +       u16 rrd7;
2485 +};
2486 +
2487 +/* RFD descriptor */
2488 +struct edma_rx_free_desc {
2489 +       __le32  buffer_addr; /* buffer address */
2490 +};
2491 +
2492 +/* edma hw specific data */
2493 +struct edma_hw {
2494 +       u32  __iomem *hw_addr; /* inner register address */
2495 +       struct edma_adapter *adapter; /* netdevice adapter */
2496 +       u32 rx_intr_mask; /* rx interrupt mask */
2497 +       u32 tx_intr_mask; /* tx interrupt mask */
2498 +       u32 misc_intr_mask; /* misc interrupt mask */
2499 +       u32 wol_intr_mask; /* wake on lan interrupt mask */
2500 +       bool intr_clear_type; /* interrupt clear */
2501 +       bool intr_sw_idx_w; /* interrupt software index */
2502 +       u32 rx_head_buff_size; /* Rx buffer size */
2503 +       u8 rss_type; /* rss protocol type */
2504 +};
2505 +
2506 +/* edma_sw_desc stores software descriptor
2507 + * SW descriptor has 1:1 map with HW descriptor
2508 + */
2509 +struct edma_sw_desc {
2510 +       struct sk_buff *skb;
2511 +       dma_addr_t dma; /* dma address */
2512 +       u16 length; /* Tx/Rx buffer length */
2513 +       u32 flags;
2514 +};
2515 +
2516 +/* per core related information */
2517 +struct edma_per_cpu_queues_info {
2518 +       struct napi_struct napi; /* napi associated with the core */
2519 +       u32 tx_mask; /* tx interrupt mask */
2520 +       u32 rx_mask; /* rx interrupt mask */
2521 +       u32 tx_status; /* tx interrupt status */
2522 +       u32 rx_status; /* rx interrupt status */
2523 +       u32 tx_start; /* tx queue start */
2524 +       u32 rx_start; /* rx queue start */
2525 +       struct edma_common_info *edma_cinfo; /* edma common info */
2526 +};
2527 +
2528 +/* edma specific common info */
2529 +struct edma_common_info {
2530 +       struct edma_tx_desc_ring *tpd_ring[16]; /* 16 Tx queues */
2531 +       struct edma_rfd_desc_ring *rfd_ring[8]; /* 8 Rx queues */
2532 +       struct platform_device *pdev; /* device structure */
2533 +       struct net_device *netdev[EDMA_MAX_PORTID_SUPPORTED];
2534 +       struct net_device *portid_netdev_lookup_tbl[EDMA_MAX_PORTID_BITMAP_INDEX];
2535 +       struct ctl_table_header *edma_ctl_table_hdr;
2536 +       int num_gmac;
2537 +       struct edma_ethtool_statistics edma_ethstats; /* ethtool stats */
2538 +       int num_rx_queues; /* number of rx queue */
2539 +       u32 num_tx_queues; /* number of tx queue */
2540 +       u32 tx_irq[16]; /* number of tx irq */
2541 +       u32 rx_irq[8]; /* number of rx irq */
2542 +       u32 from_cpu; /* from CPU TPD field */
2543 +       u32 num_rxq_per_core; /* Rx queues per core */
2544 +       u32 num_txq_per_core; /* Tx queues per core */
2545 +       u16 tx_ring_count; /* Tx ring count */
2546 +       u16 rx_ring_count; /* Rx ring count */
2547 +       u16 rx_head_buffer_len; /* rx buffer length */
2548 +       u16 rx_page_buffer_len; /* rx buffer length */
2549 +       u32 page_mode; /* Jumbo frame supported flag */
2550 +       u32 fraglist_mode; /* fraglist supported flag */
2551 +       struct edma_hw hw; /* edma hw specific structure */
2552 +       struct edma_per_cpu_queues_info edma_percpu_info[CONFIG_NR_CPUS]; /* per cpu information */
2553 +       spinlock_t stats_lock; /* protect edma stats area during updates */
2554 +};
2555 +
2556 +/* transmit packet descriptor (tpd) ring */
2557 +struct edma_tx_desc_ring {
2558 +       struct netdev_queue *nq[EDMA_MAX_NETDEV_PER_QUEUE]; /* Linux queue index */
2559 +       struct net_device *netdev[EDMA_MAX_NETDEV_PER_QUEUE];
2560 +                       /* Array of netdevs associated with the tpd ring */
2561 +       void *hw_desc; /* descriptor ring virtual address */
2562 +       struct edma_sw_desc *sw_desc; /* buffer associated with ring */
2563 +       int netdev_bmp; /* Bitmap for per-ring netdevs */
2564 +       u32 size; /* descriptor ring length in bytes */
2565 +       u16 count; /* number of descriptors in the ring */
2566 +       dma_addr_t dma; /* descriptor ring physical address */
2567 +       u16 sw_next_to_fill; /* next Tx descriptor to fill */
2568 +       u16 sw_next_to_clean; /* next Tx descriptor to clean */
2569 +};
2570 +
2571 +/* receive free descriptor (rfd) ring */
2572 +struct edma_rfd_desc_ring {
2573 +       void *hw_desc; /* descriptor ring virtual address */
2574 +       struct edma_sw_desc *sw_desc; /* buffer associated with ring */
2575 +       u16 size; /* bytes allocated to sw_desc */
2576 +       u16 count; /* number of descriptors in the ring */
2577 +       dma_addr_t dma; /* descriptor ring physical address */
2578 +       u16 sw_next_to_fill; /* next descriptor to fill */
2579 +       u16 sw_next_to_clean; /* next descriptor to clean */
2580 +};
2581 +
2582 +/* edma_rfs_filter_node - rfs filter node in hash table */
2583 +struct edma_rfs_filter_node {
2584 +       struct flow_keys keys;
2585 +       u32 flow_id; /* flow_id of filter provided by kernel */
2586 +       u16 filter_id; /* filter id of filter returned by adaptor */
2587 +       u16 rq_id; /* desired rq index */
2588 +       struct hlist_node node; /* edma rfs list node */
2589 +};
2590 +
2591 +/* edma_rfs_flow_table - rfs flow table */
2592 +struct edma_rfs_flow_table {
2593 +       u16 max_num_filter; /* Maximum number of filters edma supports */
2594 +       u16 hashtoclean; /* hash table index to clean next */
2595 +       int filter_available; /* Number of free filters available */
2596 +       struct hlist_head hlist_head[EDMA_RFS_FLOW_ENTRIES];
2597 +       spinlock_t rfs_ftab_lock;
2598 +       struct timer_list expire_rfs; /* timer function for edma_rps_may_expire_flow */
2599 +};
2600 +
2601 +/* EDMA net device structure */
2602 +struct edma_adapter {
2603 +       struct net_device *netdev; /* netdevice */
2604 +       struct platform_device *pdev; /* platform device */
2605 +       struct edma_common_info *edma_cinfo; /* edma common info */
2606 +       struct phy_device *phydev; /* Phy device */
2607 +       struct edma_rfs_flow_table rfs; /* edma rfs flow table */
2608 +       struct net_device_stats stats; /* netdev statistics */
2609 +       set_rfs_filter_callback_t set_rfs_rule;
2610 +       u32 flags;/* status flags */
2611 +       unsigned long state_flags; /* GMAC up/down flags */
2612 +       u32 forced_speed; /* link force speed */
2613 +       u32 forced_duplex; /* link force duplex */
2614 +       u32 link_state; /* phy link state */
2615 +       u32 phy_mdio_addr; /* PHY device address on MII interface */
2616 +       u32 poll_required; /* check if link polling is required */
2617 +       u32 tx_start_offset[CONFIG_NR_CPUS]; /* tx queue start */
2618 +       u32 default_vlan_tag; /* vlan tag */
2619 +       u32 dp_bitmap;
2620 +       uint8_t phy_id[MII_BUS_ID_SIZE + 3];
2621 +};
2622 +
2623 +int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo);
2624 +int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo);
2625 +int edma_open(struct net_device *netdev);
2626 +int edma_close(struct net_device *netdev);
2627 +void edma_free_tx_resources(struct edma_common_info *edma_c_info);
2628 +void edma_free_rx_resources(struct edma_common_info *edma_c_info);
2629 +int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo);
2630 +int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo);
2631 +void edma_free_tx_rings(struct edma_common_info *edma_cinfo);
2632 +void edma_free_rx_rings(struct edma_common_info *edma_cinfo);
2633 +void edma_free_queues(struct edma_common_info *edma_cinfo);
2634 +void edma_irq_disable(struct edma_common_info *edma_cinfo);
2635 +int edma_reset(struct edma_common_info *edma_cinfo);
2636 +int edma_poll(struct napi_struct *napi, int budget);
2637 +netdev_tx_t edma_xmit(struct sk_buff *skb,
2638 +               struct net_device *netdev);
2639 +int edma_configure(struct edma_common_info *edma_cinfo);
2640 +void edma_irq_enable(struct edma_common_info *edma_cinfo);
2641 +void edma_enable_tx_ctrl(struct edma_hw *hw);
2642 +void edma_enable_rx_ctrl(struct edma_hw *hw);
2643 +void edma_stop_rx_tx(struct edma_hw *hw);
2644 +void edma_free_irqs(struct edma_adapter *adapter);
2645 +irqreturn_t edma_interrupt(int irq, void *dev);
2646 +void edma_write_reg(u16 reg_addr, u32 reg_value);
2647 +void edma_read_reg(u16 reg_addr, volatile u32 *reg_value);
2648 +struct net_device_stats *edma_get_stats(struct net_device *netdev);
2649 +int edma_set_mac_addr(struct net_device *netdev, void *p);
2650 +int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
2651 +               u16 rxq, u32 flow_id);
2652 +int edma_register_rfs_filter(struct net_device *netdev,
2653 +               set_rfs_filter_callback_t set_filter);
2654 +void edma_flow_may_expire(unsigned long data);
2655 +void edma_set_ethtool_ops(struct net_device *netdev);
2656 +int edma_change_mtu(struct net_device *netdev, int new_mtu);
2657 +void edma_set_stp_rstp(bool tag);
2658 +void edma_assign_ath_hdr_type(int tag);
2659 +int edma_get_default_vlan_tag(struct net_device *netdev);
2660 +void edma_adjust_link(struct net_device *netdev);
2661 +int edma_fill_netdev(struct edma_common_info *edma_cinfo, int qid, int num, int txq_id);
2662 +void edma_read_append_stats(struct edma_common_info *edma_cinfo);
2663 +void edma_change_tx_coalesce(int usecs);
2664 +void edma_change_rx_coalesce(int usecs);
2665 +void edma_get_tx_rx_coalesce(u32 *reg_val);
2666 +void edma_clear_irq_status(void);
2667 +#endif /* _EDMA_H_ */
2668 --- /dev/null
2669 +++ b/drivers/net/ethernet/qualcomm/essedma/edma_axi.c
2670 @@ -0,0 +1,1220 @@
2671 +/*
2672 + * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
2673 + *
2674 + * Permission to use, copy, modify, and/or distribute this software for
2675 + * any purpose with or without fee is hereby granted, provided that the
2676 + * above copyright notice and this permission notice appear in all copies.
2677 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
2678 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
2679 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
2680 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
2681 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
2682 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
2683 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
2684 + */
2685 +
2686 +#include <linux/cpu_rmap.h>
2687 +#include <linux/of.h>
2688 +#include <linux/of_net.h>
2689 +#include <linux/timer.h>
2690 +#include "edma.h"
2691 +#include "ess_edma.h"
2692 +
2693 +/* Weight round robin and virtual QID mask */
2694 +#define EDMA_WRR_VID_SCTL_MASK 0xffff
2695 +
2696 +/* Weight round robin and virtual QID shift */
2697 +#define EDMA_WRR_VID_SCTL_SHIFT 16
2698 +
2699 +char edma_axi_driver_name[] = "ess_edma";
2700 +static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
2701 +       NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
2702 +
2703 +static u32 edma_hw_addr;
2704 +
2705 +struct timer_list edma_stats_timer;
2706 +
2707 +char edma_tx_irq[16][64];
2708 +char edma_rx_irq[8][64];
2709 +struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
2710 +static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1,
2711 +                       EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3};
2712 +static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1,
2713 +                       EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3};
2714 +
2715 +static u32 edma_default_ltag  __read_mostly = EDMA_LAN_DEFAULT_VLAN;
2716 +static u32 edma_default_wtag  __read_mostly = EDMA_WAN_DEFAULT_VLAN;
2717 +static u32 edma_default_group1_vtag  __read_mostly = EDMA_DEFAULT_GROUP1_VLAN;
2718 +static u32 edma_default_group2_vtag  __read_mostly = EDMA_DEFAULT_GROUP2_VLAN;
2719 +static u32 edma_default_group3_vtag  __read_mostly = EDMA_DEFAULT_GROUP3_VLAN;
2720 +static u32 edma_default_group4_vtag  __read_mostly = EDMA_DEFAULT_GROUP4_VLAN;
2721 +static u32 edma_default_group5_vtag  __read_mostly = EDMA_DEFAULT_GROUP5_VLAN;
2722 +static u32 edma_rss_idt_val = EDMA_RSS_IDT_VALUE;
2723 +static u32 edma_rss_idt_idx;
2724 +
2725 +static int edma_weight_assigned_to_q __read_mostly;
2726 +static int edma_queue_to_virtual_q __read_mostly;
2727 +static bool edma_enable_rstp  __read_mostly;
2728 +static int edma_athr_hdr_eth_type __read_mostly;
2729 +
2730 +static int page_mode;
2731 +module_param(page_mode, int, 0);
2732 +MODULE_PARM_DESC(page_mode, "enable page mode");
2733 +
2734 +static int overwrite_mode;
2735 +module_param(overwrite_mode, int, 0);
2736 +MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting");
2737 +
2738 +static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE;
2739 +module_param(jumbo_mru, int, 0);
2740 +MODULE_PARM_DESC(jumbo_mru, "jumbo MRU size (enables fraglist support)");
2741 +
2742 +static int num_rxq = 4;
2743 +module_param(num_rxq, int, 0);
2744 +MODULE_PARM_DESC(num_rxq, "change the number of rx queues");
2745 +
2746 +void edma_write_reg(u16 reg_addr, u32 reg_value)
2747 +{
2748 +       writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr)));
2749 +}
2750 +
2751 +void edma_read_reg(u16 reg_addr, volatile u32 *reg_value)
2752 +{
2753 +       *reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr));
2754 +}
2755 +
2756 +/* edma_change_tx_coalesce()
2757 + *     change tx interrupt moderation timer
2758 + */
2759 +void edma_change_tx_coalesce(int usecs)
2760 +{
2761 +       u32 reg_value;
2762 +
2763 +       /* Right shift the value from the user by 1 because the IMT
2764 +        * resolution is 2 usecs: one count of this register
2765 +        * corresponds to 2 usecs.
2766 +        */
2767 +       edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
2768 +       reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16));
2769 +       edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
2770 +}
2771 +
2772 +/* edma_change_rx_coalesce()
2773 + *     change rx interrupt moderation timer
2774 + */
2775 +void edma_change_rx_coalesce(int usecs)
2776 +{
2777 +       u32 reg_value;
2778 +
2779 +       /* Right shift the value from the user by 1 because the IMT
2780 +        * resolution is 2 usecs: one count of this register
2781 +        * corresponds to 2 usecs.
2782 +        */
2783 +       edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
2784 +       reg_value = ((reg_value & 0xffff0000) | (usecs >> 1));
2785 +       edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
2786 +}
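+
+/* Illustrative note (editorial, not part of the original driver): with the
+ * 2 usec IMT resolution above, edma_change_tx_coalesce(100) stores
+ * 100 >> 1 = 50 counts in the upper 16 bits of EDMA_REG_IRQ_MODRT_TIMER_INIT,
+ * while edma_change_rx_coalesce(100) stores the same 50 counts in the lower
+ * 16 bits; each call leaves the other half-word untouched.
+ */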
2787 +
2788 +/* edma_get_tx_rx_coalesce()
2789 + *     Get tx/rx interrupt moderation value
2790 + */
2791 +void edma_get_tx_rx_coalesce(u32 *reg_val)
2792 +{
2793 +       edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val);
2794 +}
2795 +
2796 +void edma_read_append_stats(struct edma_common_info *edma_cinfo)
2797 +{
2798 +       uint32_t *p;
2799 +       int i;
2800 +       u32 stat;
2801 +
2802 +       spin_lock(&edma_cinfo->stats_lock);
2803 +       p = (uint32_t *)&(edma_cinfo->edma_ethstats);
2804 +
2805 +       for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
2806 +               edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat);
2807 +               *p += stat;
2808 +               p++;
2809 +       }
2810 +
2811 +       for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
2812 +               edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat);
2813 +               *p += stat;
2814 +               p++;
2815 +       }
2816 +
2817 +       for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
2818 +               edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat);
2819 +               *p += stat;
2820 +               p++;
2821 +       }
2822 +
2823 +       for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
2824 +               edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat);
2825 +               *p += stat;
2826 +               p++;
2827 +       }
2828 +
2829 +       spin_unlock(&edma_cinfo->stats_lock);
2830 +}
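+
+/* Editorial note on the layout assumed above (hedged): the walk with p
+ * assumes struct edma_ethstats packs its u32 counters in exactly this
+ * order - 16 tx packet counters, 16 tx byte counters, 8 rx packet counters,
+ * then 8 rx byte counters - which matches the order of the ethtool string
+ * table in edma_ethtool.c. If the struct layout changes, this pointer walk
+ * and the ethtool stat offsets must be updated together.
+ */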
2831 +
2832 +static void edma_statistics_timer(unsigned long data)
2833 +{
2834 +       struct edma_common_info *edma_cinfo = (struct edma_common_info *)data;
2835 +
2836 +       edma_read_append_stats(edma_cinfo);
2837 +
2838 +       mod_timer(&edma_stats_timer, jiffies + 1*HZ);
2839 +}
2840 +
2841 +static int edma_enable_stp_rstp(struct ctl_table *table, int write,
2842 +                               void __user *buffer, size_t *lenp,
2843 +                               loff_t *ppos)
2844 +{
2845 +       int ret;
2846 +
2847 +       ret = proc_dointvec(table, write, buffer, lenp, ppos);
2848 +       if (write)
2849 +               edma_set_stp_rstp(edma_enable_rstp);
2850 +
2851 +       return ret;
2852 +}
2853 +
2854 +static int edma_ath_hdr_eth_type(struct ctl_table *table, int write,
2855 +                                void __user *buffer, size_t *lenp,
2856 +                                loff_t *ppos)
2857 +{
2858 +       int ret;
2859 +
2860 +       ret = proc_dointvec(table, write, buffer, lenp, ppos);
2861 +       if (write)
2862 +               edma_assign_ath_hdr_type(edma_athr_hdr_eth_type);
2863 +
2864 +       return ret;
2865 +}
2866 +
2867 +static int edma_change_default_lan_vlan(struct ctl_table *table, int write,
2868 +                                       void __user *buffer, size_t *lenp,
2869 +                                       loff_t *ppos)
2870 +{
2871 +       struct edma_adapter *adapter;
2872 +       int ret;
2873 +
2874 +       if (!edma_netdev[1]) {
2875 +               pr_err("Netdevice for default_lan does not exist\n");
2876 +               return -1;
2877 +       }
2878 +
2879 +       adapter = netdev_priv(edma_netdev[1]);
2880 +
2881 +       ret = proc_dointvec(table, write, buffer, lenp, ppos);
2882 +
2883 +       if (write)
2884 +               adapter->default_vlan_tag = edma_default_ltag;
2885 +
2886 +       return ret;
2887 +}
2888 +
2889 +static int edma_change_default_wan_vlan(struct ctl_table *table, int write,
2890 +                                       void __user *buffer, size_t *lenp,
2891 +                                       loff_t *ppos)
2892 +{
2893 +       struct edma_adapter *adapter;
2894 +       int ret;
2895 +
2896 +       if (!edma_netdev[0]) {
2897 +               pr_err("Netdevice for default_wan does not exist\n");
2898 +               return -1;
2899 +       }
2900 +
2901 +       adapter = netdev_priv(edma_netdev[0]);
2902 +
2903 +       ret = proc_dointvec(table, write, buffer, lenp, ppos);
2904 +
2905 +       if (write)
2906 +               adapter->default_vlan_tag = edma_default_wtag;
2907 +
2908 +       return ret;
2909 +}
2910 +
2911 +static int edma_change_group1_vtag(struct ctl_table *table, int write,
2912 +                                  void __user *buffer, size_t *lenp,
2913 +                                  loff_t *ppos)
2914 +{
2915 +       struct edma_adapter *adapter;
2916 +       struct edma_common_info *edma_cinfo;
2917 +       int ret;
2918 +
2919 +       if (!edma_netdev[0]) {
2920 +               pr_err("Netdevice for Group 1 does not exist\n");
2921 +               return -1;
2922 +       }
2923 +
2924 +       adapter = netdev_priv(edma_netdev[0]);
2925 +       edma_cinfo = adapter->edma_cinfo;
2926 +
2927 +       ret = proc_dointvec(table, write, buffer, lenp, ppos);
2928 +
2929 +       if (write)
2930 +               adapter->default_vlan_tag = edma_default_group1_vtag;
2931 +
2932 +       return ret;
2933 +}
2934 +
2935 +static int edma_change_group2_vtag(struct ctl_table *table, int write,
2936 +                                  void __user *buffer, size_t *lenp,
2937 +                                  loff_t *ppos)
2938 +{
2939 +       struct edma_adapter *adapter;
2940 +       struct edma_common_info *edma_cinfo;
2941 +       int ret;
2942 +
2943 +       if (!edma_netdev[1]) {
2944 +               pr_err("Netdevice for Group 2 does not exist\n");
2945 +               return -1;
2946 +       }
2947 +
2948 +       adapter = netdev_priv(edma_netdev[1]);
2949 +       edma_cinfo = adapter->edma_cinfo;
2950 +
2951 +       ret = proc_dointvec(table, write, buffer, lenp, ppos);
2952 +
2953 +       if (write)
2954 +               adapter->default_vlan_tag = edma_default_group2_vtag;
2955 +
2956 +       return ret;
2957 +}
2958 +
2959 +static int edma_change_group3_vtag(struct ctl_table *table, int write,
2960 +                                  void __user *buffer, size_t *lenp,
2961 +                                  loff_t *ppos)
2962 +{
2963 +       struct edma_adapter *adapter;
2964 +       struct edma_common_info *edma_cinfo;
2965 +       int ret;
2966 +
2967 +       if (!edma_netdev[2]) {
2968 +               pr_err("Netdevice for Group 3 does not exist\n");
2969 +               return -1;
2970 +       }
2971 +
2972 +       adapter = netdev_priv(edma_netdev[2]);
2973 +       edma_cinfo = adapter->edma_cinfo;
2974 +
2975 +       ret = proc_dointvec(table, write, buffer, lenp, ppos);
2976 +
2977 +       if (write)
2978 +               adapter->default_vlan_tag = edma_default_group3_vtag;
2979 +
2980 +       return ret;
2981 +}
2982 +
2983 +static int edma_change_group4_vtag(struct ctl_table *table, int write,
2984 +                                  void __user *buffer, size_t *lenp,
2985 +                                  loff_t *ppos)
2986 +{
2987 +       struct edma_adapter *adapter;
2988 +       struct edma_common_info *edma_cinfo;
2989 +       int ret;
2990 +
2991 +       if (!edma_netdev[3]) {
2992 +               pr_err("Netdevice for Group 4 does not exist\n");
2993 +               return -1;
2994 +       }
2995 +
2996 +       adapter = netdev_priv(edma_netdev[3]);
2997 +       edma_cinfo = adapter->edma_cinfo;
2998 +
2999 +       ret = proc_dointvec(table, write, buffer, lenp, ppos);
3000 +
3001 +       if (write)
3002 +               adapter->default_vlan_tag = edma_default_group4_vtag;
3003 +
3004 +       return ret;
3005 +}
3006 +
3007 +static int edma_change_group5_vtag(struct ctl_table *table, int write,
3008 +                                  void __user *buffer, size_t *lenp,
3009 +                                  loff_t *ppos)
3010 +{
3011 +       struct edma_adapter *adapter;
3012 +       struct edma_common_info *edma_cinfo;
3013 +       int ret;
3014 +
3015 +       if (!edma_netdev[4]) {
3016 +               pr_err("Netdevice for Group 5 does not exist\n");
3017 +               return -1;
3018 +       }
3019 +
3020 +       adapter = netdev_priv(edma_netdev[4]);
3021 +       edma_cinfo = adapter->edma_cinfo;
3022 +
3023 +       ret = proc_dointvec(table, write, buffer, lenp, ppos);
3024 +
3025 +       if (write)
3026 +               adapter->default_vlan_tag = edma_default_group5_vtag;
3027 +
3028 +       return ret;
3029 +}
3030 +
3031 +static int edma_set_rss_idt_value(struct ctl_table *table, int write,
3032 +                                 void __user *buffer, size_t *lenp,
3033 +                                 loff_t *ppos)
3034 +{
3035 +       int ret;
3036 +
3037 +       ret = proc_dointvec(table, write, buffer, lenp, ppos);
3038 +       if (write && !ret)
3039 +               edma_write_reg(EDMA_REG_RSS_IDT(edma_rss_idt_idx),
3040 +                              edma_rss_idt_val);
3041 +       return ret;
3042 +}
3043 +
3044 +static int edma_set_rss_idt_idx(struct ctl_table *table, int write,
3045 +                               void __user *buffer, size_t *lenp,
3046 +                               loff_t *ppos)
3047 +{
3048 +       int ret;
3049 +       u32 old_value = edma_rss_idt_idx;
3050 +
3051 +       ret = proc_dointvec(table, write, buffer, lenp, ppos);
3052 +       if (!write || ret)
3053 +               return ret;
3054 +
3055 +       if (edma_rss_idt_idx >= EDMA_NUM_IDT) {
3056 +               pr_err("Invalid RSS indirection table index %d\n",
3057 +                      edma_rss_idt_idx);
3058 +               edma_rss_idt_idx = old_value;
3059 +               return -EINVAL;
3060 +       }
3061 +       return ret;
3062 +}
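+
+/* Usage sketch (an assumption, not stated in the original patch): the two RSS
+ * sysctls are used as a pair - write the table index through
+ * edma_rss_idt_idx first, then the 32-bit value through edma_rss_idt_value;
+ * only the value write triggers the
+ * edma_write_reg(EDMA_REG_RSS_IDT(idx), val) update above.
+ */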
3063 +
3064 +static int edma_weight_assigned_to_queues(struct ctl_table *table, int write,
3065 +                                         void __user *buffer, size_t *lenp,
3066 +                                         loff_t *ppos)
3067 +{
3068 +       int ret, queue_id, weight;
3069 +       u32 reg_data, data, reg_addr;
3070 +
3071 +       ret = proc_dointvec(table, write, buffer, lenp, ppos);
3072 +       if (write) {
3073 +               queue_id = edma_weight_assigned_to_q & EDMA_WRR_VID_SCTL_MASK;
3074 +               if (queue_id < 0 || queue_id > 15) {
3075 +                       pr_err("queue_id not within desired range\n");
3076 +                       return -EINVAL;
3077 +               }
3078 +
3079 +               weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT;
3080 +               if (weight < 0 || weight > 0xF) {
3081 +                       pr_err("weight not within desired range\n");
3082 +                       return -EINVAL;
3083 +               }
3084 +
3085 +               data = weight << EDMA_WRR_SHIFT(queue_id);
3086 +
3087 +               reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3);
3088 +               edma_read_reg(reg_addr, &reg_data);
3089 +               reg_data &= ~(1 << EDMA_WRR_SHIFT(queue_id));
3090 +               edma_write_reg(reg_addr, data | reg_data);
3091 +       }
3092 +
3093 +       return ret;
3094 +}
3095 +
3096 +static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write,
3097 +                                          void __user *buffer, size_t *lenp,
3098 +                                          loff_t *ppos)
3099 +{
3100 +       int ret, queue_id, virtual_qid;
3101 +       u32 reg_data, data, reg_addr;
3102 +
3103 +       ret = proc_dointvec(table, write, buffer, lenp, ppos);
3104 +       if (write) {
3105 +               queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK;
3106 +               if (queue_id < 0 || queue_id > 15) {
3107 +                       pr_err("queue_id not within desired range\n");
3108 +                       return -EINVAL;
3109 +               }
3110 +
3111 +               virtual_qid = edma_queue_to_virtual_q >>
3112 +                       EDMA_WRR_VID_SCTL_SHIFT;
3113 +               if (virtual_qid < 0 || virtual_qid > 8) {
3114 +                       pr_err("virtual_qid not within desired range\n");
3115 +                       return -EINVAL;
3116 +               }
3117 +
3118 +               data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id);
3119 +
3120 +               reg_addr = EDMA_REG_VQ_CTRL0 + (queue_id & ~0x3);
3121 +               edma_read_reg(reg_addr, &reg_data);
3122 +               reg_data &= ~(1 << EDMA_VQ_ID_SHIFT(queue_id));
3123 +               edma_write_reg(reg_addr, data | reg_data);
3124 +       }
3125 +
3126 +       return ret;
3127 +}
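+
+/* Minimal sketch of the packed sysctl encoding used by the two handlers
+ * above (illustrative only, not wired into the driver): the queue id sits
+ * in the low 16 bits and the weight or virtual queue id in the high 16 bits.
+ */
+static inline u32 edma_pack_q_sysctl_value(u32 queue_id, u32 hi_val)
+{
+       return (hi_val << EDMA_WRR_VID_SCTL_SHIFT) |
+              (queue_id & EDMA_WRR_VID_SCTL_MASK);
+}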
3128 +
3129 +static struct ctl_table edma_table[] = {
3130 +       {
3131 +               .procname       = "default_lan_tag",
3132 +               .data           = &edma_default_ltag,
3133 +               .maxlen         = sizeof(int),
3134 +               .mode           = 0644,
3135 +               .proc_handler   = edma_change_default_lan_vlan
3136 +       },
3137 +       {
3138 +               .procname       = "default_wan_tag",
3139 +               .data           = &edma_default_wtag,
3140 +               .maxlen         = sizeof(int),
3141 +               .mode           = 0644,
3142 +               .proc_handler   = edma_change_default_wan_vlan
3143 +       },
3144 +       {
3145 +               .procname       = "weight_assigned_to_queues",
3146 +               .data           = &edma_weight_assigned_to_q,
3147 +               .maxlen         = sizeof(int),
3148 +               .mode           = 0644,
3149 +               .proc_handler   = edma_weight_assigned_to_queues
3150 +       },
3151 +       {
3152 +               .procname       = "queue_to_virtual_queue_map",
3153 +               .data           = &edma_queue_to_virtual_q,
3154 +               .maxlen         = sizeof(int),
3155 +               .mode           = 0644,
3156 +               .proc_handler   = edma_queue_to_virtual_queue_map
3157 +       },
3158 +       {
3159 +               .procname       = "enable_stp_rstp",
3160 +               .data           = &edma_enable_rstp,
3161 +               .maxlen         = sizeof(int),
3162 +               .mode           = 0644,
3163 +               .proc_handler   = edma_enable_stp_rstp
3164 +       },
3165 +       {
3166 +               .procname       = "athr_hdr_eth_type",
3167 +               .data           = &edma_athr_hdr_eth_type,
3168 +               .maxlen         = sizeof(int),
3169 +               .mode           = 0644,
3170 +               .proc_handler   = edma_ath_hdr_eth_type
3171 +       },
3172 +       {
3173 +               .procname       = "default_group1_vlan_tag",
3174 +               .data           = &edma_default_group1_vtag,
3175 +               .maxlen         = sizeof(int),
3176 +               .mode           = 0644,
3177 +               .proc_handler   = edma_change_group1_vtag
3178 +       },
3179 +       {
3180 +               .procname       = "default_group2_vlan_tag",
3181 +               .data           = &edma_default_group2_vtag,
3182 +               .maxlen         = sizeof(int),
3183 +               .mode           = 0644,
3184 +               .proc_handler   = edma_change_group2_vtag
3185 +       },
3186 +       {
3187 +               .procname       = "default_group3_vlan_tag",
3188 +               .data           = &edma_default_group3_vtag,
3189 +               .maxlen         = sizeof(int),
3190 +               .mode           = 0644,
3191 +               .proc_handler   = edma_change_group3_vtag
3192 +       },
3193 +       {
3194 +               .procname       = "default_group4_vlan_tag",
3195 +               .data           = &edma_default_group4_vtag,
3196 +               .maxlen         = sizeof(int),
3197 +               .mode           = 0644,
3198 +               .proc_handler   = edma_change_group4_vtag
3199 +       },
3200 +       {
3201 +               .procname       = "default_group5_vlan_tag",
3202 +               .data           = &edma_default_group5_vtag,
3203 +               .maxlen         = sizeof(int),
3204 +               .mode           = 0644,
3205 +               .proc_handler   = edma_change_group5_vtag
3206 +       },
3207 +       {
3208 +               .procname       = "edma_rss_idt_value",
3209 +               .data           = &edma_rss_idt_val,
3210 +               .maxlen         = sizeof(int),
3211 +               .mode           = 0644,
3212 +               .proc_handler   = edma_set_rss_idt_value
3213 +       },
3214 +       {
3215 +               .procname       = "edma_rss_idt_idx",
3216 +               .data           = &edma_rss_idt_idx,
3217 +               .maxlen         = sizeof(int),
3218 +               .mode           = 0644,
3219 +               .proc_handler   = edma_set_rss_idt_idx
3220 +       },
3221 +       {}
3222 +};
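+
+/* Editorial note (assumption based on the register_net_sysctl() call in
+ * edma_axi_probe() below): each entry in this table should appear as
+ * /proc/sys/net/edma/<procname>, e.g. /proc/sys/net/edma/default_lan_tag,
+ * once the table has been registered.
+ */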
3223 +
3224 +/* edma_axi_netdev_ops
3225 + *     Describe the operations supported by registered netdevices
3226 + *
3227 + * static const struct net_device_ops edma_axi_netdev_ops = {
3228 + *     .ndo_open               = edma_open,
3229 + *     .ndo_stop               = edma_close,
3230 + *     .ndo_start_xmit         = edma_xmit,
3231 + *     .ndo_set_mac_address    = edma_set_mac_addr,
3232 + * }
3233 + */
3234 +static const struct net_device_ops edma_axi_netdev_ops = {
3235 +       .ndo_open               = edma_open,
3236 +       .ndo_stop               = edma_close,
3237 +       .ndo_start_xmit         = edma_xmit,
3238 +       .ndo_set_mac_address    = edma_set_mac_addr,
3239 +#ifdef CONFIG_RFS_ACCEL
3240 +       .ndo_rx_flow_steer      = edma_rx_flow_steer,
3241 +       .ndo_register_rfs_filter = edma_register_rfs_filter,
3242 +       .ndo_get_default_vlan_tag = edma_get_default_vlan_tag,
3243 +#endif
3244 +       .ndo_get_stats          = edma_get_stats,
3245 +       .ndo_change_mtu         = edma_change_mtu,
3246 +};
3247 +
3248 +/* edma_axi_probe()
3249 + *     Initialise an adapter identified by a platform_device structure.
3250 + *
3251 + * The OS initialization, configuring of the adapter private structure,
3252 + * and a hardware reset occur in the probe.
3253 + */
3254 +static int edma_axi_probe(struct platform_device *pdev)
3255 +{
3256 +       struct edma_common_info *edma_cinfo;
3257 +       struct edma_hw *hw;
3258 +       struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED];
3259 +       struct resource *res;
3260 +       struct device_node *np = pdev->dev.of_node;
3261 +       struct device_node *pnp;
3262 +       struct device_node *mdio_node = NULL;
3263 +       struct platform_device *mdio_plat = NULL;
3264 +       struct mii_bus *miibus = NULL;
3265 +       struct edma_mdio_data *mdio_data = NULL;
3266 +       int i, j, k, err = 0;
3267 +       int portid_bmp;
3268 +       int idx = 0, idx_mac = 0;
3269 +
3270 +       if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) {
3271 +               dev_err(&pdev->dev, "Invalid CPU Cores\n");
3272 +               return -EINVAL;
3273 +       }
3274 +
3275 +       if ((num_rxq != 4) && (num_rxq != 8)) {
3276 +               dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n");
3277 +               return -EINVAL;
3278 +       }
3279 +       edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL);
3280 +       if (!edma_cinfo) {
3281 +               err = -ENOMEM;
3282 +               goto err_alloc;
3283 +       }
3284 +
3285 +       edma_cinfo->pdev = pdev;
3286 +
3287 +       of_property_read_u32(np, "qcom,num_gmac", &edma_cinfo->num_gmac);
3288 +       if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) {
3289 +               pr_err("Invalid DTSI Entry for qcom,num_gmac\n");
3290 +               err = -EINVAL;
3291 +               goto err_cinfo;
3292 +       }
3293 +
3294 +       /* Initialize the netdev array before allocation
3295 +        * to avoid double free
3296 +        */
3297 +       for (i = 0 ; i < edma_cinfo->num_gmac ; i++)
3298 +               edma_netdev[i] = NULL;
3299 +
3300 +       for (i = 0 ; i < edma_cinfo->num_gmac ; i++) {
3301 +               edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter),
3302 +                       EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE);
3303 +
3304 +               if (!edma_netdev[i]) {
3305 +                       dev_err(&pdev->dev,
3306 +                               "net device alloc fails for index=%d\n", i);
3307 +                       err = -ENODEV;
3308 +                       goto err_ioremap;
3309 +               }
3310 +
3311 +               SET_NETDEV_DEV(edma_netdev[i], &pdev->dev);
3312 +               platform_set_drvdata(pdev, edma_netdev[i]);
3313 +               edma_cinfo->netdev[i] = edma_netdev[i];
3314 +       }
3315 +
3316 +       /* Fill ring details */
3317 +       edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE;
3318 +       edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4);
3319 +       edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE;
3320 +
3321 +       /* Update num rx queues based on module parameter */
3322 +       edma_cinfo->num_rx_queues = num_rxq;
3323 +       edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 1 : 2);
3324 +
3325 +       edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE;
3326 +
3327 +       hw = &edma_cinfo->hw;
3328 +
3329 +       /* Fill HW defaults */
3330 +       hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
3331 +       hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;
3332 +
3333 +       of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode);
3334 +       of_property_read_u32(np, "qcom,rx_head_buf_size",
3335 +                            &hw->rx_head_buff_size);
3336 +
3337 +       if (overwrite_mode) {
3338 +               dev_info(&pdev->dev, "page mode overwritten");
3339 +               edma_cinfo->page_mode = page_mode;
3340 +       }
3341 +
3342 +       if (jumbo_mru)
3343 +               edma_cinfo->fraglist_mode = 1;
3344 +
3345 +       if (edma_cinfo->page_mode)
3346 +               hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO;
3347 +       else if (edma_cinfo->fraglist_mode)
3348 +               hw->rx_head_buff_size = jumbo_mru;
3349 +       else if (!hw->rx_head_buff_size)
3350 +               hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE;
3351 +
3352 +       hw->misc_intr_mask = 0;
3353 +       hw->wol_intr_mask = 0;
3354 +
3355 +       hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE;
3356 +       hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;
3357 +
3358 +       /* Configure RSS type for the different protocols that can be
3359 +        * supported
3360 +        */
3361 +       hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP |
3362 +               EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP |
3363 +               EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;
3364 +
3365 +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3366 +
3367 +       edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res);
3368 +       if (IS_ERR(edma_cinfo->hw.hw_addr)) {
3369 +               err = PTR_ERR(edma_cinfo->hw.hw_addr);
3370 +               goto err_ioremap;
3371 +       }
3372 +
3373 +       edma_hw_addr = (u32)edma_cinfo->hw.hw_addr;
3374 +
3375 +       /* Parse tx queue interrupt number from device tree */
3376 +       for (i = 0; i < edma_cinfo->num_tx_queues; i++)
3377 +               edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i);
3378 +
3379 +       /* Parse rx queue interrupt number from device tree
3380 +        * Here we set j to the point where tx interrupt parsing
3381 +        * left off (i.e. 16) and run the loop from 0 to 7 to parse
3382 +        * the rx interrupt numbers.
3383 +        */
3384 +       for (i = 0, j = edma_cinfo->num_tx_queues, k = 0;
3385 +                       i < edma_cinfo->num_rx_queues; i++) {
3386 +               edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j);
3387 +               k += ((num_rxq == 4) ?  2 : 1);
3388 +               j += ((num_rxq == 4) ?  2 : 1);
3389 +       }
3390 +
3391 +       edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size;
3392 +       edma_cinfo->rx_page_buffer_len = PAGE_SIZE;
3393 +
3394 +       err = edma_alloc_queues_tx(edma_cinfo);
3395 +       if (err) {
3396 +               dev_err(&pdev->dev, "Allocation of TX queue failed\n");
3397 +               goto err_tx_qinit;
3398 +       }
3399 +
3400 +       err = edma_alloc_queues_rx(edma_cinfo);
3401 +       if (err) {
3402 +               dev_err(&pdev->dev, "Allocation of RX queue failed\n");
3403 +               goto err_rx_qinit;
3404 +       }
3405 +
3406 +       err = edma_alloc_tx_rings(edma_cinfo);
3407 +       if (err) {
3408 +               dev_err(&pdev->dev, "Allocation of TX resources failed\n");
3409 +               goto err_tx_rinit;
3410 +       }
3411 +
3412 +       err = edma_alloc_rx_rings(edma_cinfo);
3413 +       if (err) {
3414 +               dev_err(&pdev->dev, "Allocation of RX resources failed\n");
3415 +               goto err_rx_rinit;
3416 +       }
3417 +
3418 +       /* Initialize netdev and netdev bitmap for transmit descriptor rings */
3419 +       for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
3420 +               struct edma_tx_desc_ring *etdr =  edma_cinfo->tpd_ring[i];
3421 +               int j;
3422 +
3423 +               etdr->netdev_bmp = 0;
3424 +               for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) {
3425 +                       etdr->netdev[j] = NULL;
3426 +                       etdr->nq[j] = NULL;
3427 +               }
3428 +       }
3429 +
3430 +       if (of_property_read_bool(np, "qcom,mdio_supported")) {
3431 +               mdio_node = of_find_compatible_node(NULL, NULL,
3432 +                                                   "qcom,ipq4019-mdio");
3433 +               if (!mdio_node) {
3434 +                       dev_err(&pdev->dev, "cannot find mdio node by phandle");
3435 +                       err = -EIO;
3436 +                       goto err_mdiobus_init_fail;
3437 +               }
3438 +
3439 +               mdio_plat = of_find_device_by_node(mdio_node);
3440 +               if (!mdio_plat) {
3441 +                       dev_err(&pdev->dev,
3442 +                               "cannot find platform device from mdio node");
3443 +                       of_node_put(mdio_node);
3444 +                       err = -EIO;
3445 +                       goto err_mdiobus_init_fail;
3446 +               }
3447 +
3448 +               mdio_data = dev_get_drvdata(&mdio_plat->dev);
3449 +               if (!mdio_data) {
3450 +                       dev_err(&pdev->dev,
3451 +                               "cannot get mii bus reference from device data");
3452 +                       of_node_put(mdio_node);
3453 +                       err = -EIO;
3454 +                       goto err_mdiobus_init_fail;
3455 +               }
3456 +
3457 +               miibus = mdio_data->mii_bus;
3458 +       }
3459 +
3460 +       for_each_available_child_of_node(np, pnp) {
3461 +               const char *mac_addr;
3462 +
3463 +               /* this check is needed if parent and daughter dts have
3464 +                * a different number of gmac nodes
3465 +                */
3466 +               if (idx_mac == edma_cinfo->num_gmac) {
3467 +                       of_node_put(np);
3468 +                       break;
3469 +               }
3470 +
3471 +               mac_addr = of_get_mac_address(pnp);
3472 +               if (mac_addr)
3473 +                       memcpy(edma_netdev[idx_mac]->dev_addr, mac_addr, ETH_ALEN);
3474 +
3475 +               idx_mac++;
3476 +       }
3477 +
3478 +       /* Populate the adapter structure and register the netdevice */
3479 +       for (i = 0; i < edma_cinfo->num_gmac; i++) {
3480 +               int k, m;
3481 +
3482 +               adapter[i] = netdev_priv(edma_netdev[i]);
3483 +               adapter[i]->netdev = edma_netdev[i];
3484 +               adapter[i]->pdev = pdev;
3485 +               for (j = 0; j < CONFIG_NR_CPUS; j++) {
3486 +                       m = i % 2;
3487 +                       adapter[i]->tx_start_offset[j] =
3488 +                               ((j << EDMA_TX_CPU_START_SHIFT) + (m << 1));
3489 +                       /* Share the queues with available net-devices.
3490 +                        * For instance, with 5 net-devices
3491 +                        * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13
3492 +                        * and eth1/eth3 will get the remaining.
3493 +                        */
3494 +                       for (k = adapter[i]->tx_start_offset[j]; k <
3495 +                            (adapter[i]->tx_start_offset[j] + 2); k++) {
3496 +                               if (edma_fill_netdev(edma_cinfo, k, i, j)) {
3497 +                                       pr_err("Netdev overflow Error\n");
3498 +                                       goto err_register;
3499 +                               }
3500 +                       }
3501 +               }
3502 +
3503 +               adapter[i]->edma_cinfo = edma_cinfo;
3504 +               edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops;
3505 +               edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM
3506 +                                     | NETIF_F_HW_VLAN_CTAG_TX
3507 +                                     | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG |
3508 +                                     NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GRO;
3509 +               edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
3510 +                               NETIF_F_HW_VLAN_CTAG_RX
3511 +                               | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3512 +                               NETIF_F_GRO;
3513 +               edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG |
3514 +                                          NETIF_F_TSO | NETIF_F_TSO6 |
3515 +                                          NETIF_F_GRO;
3516 +               edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
3517 +                                            NETIF_F_TSO | NETIF_F_TSO6 |
3518 +                                            NETIF_F_GRO;
3519 +
3520 +#ifdef CONFIG_RFS_ACCEL
3521 +               edma_netdev[i]->features |=  NETIF_F_RXHASH | NETIF_F_NTUPLE;
3522 +               edma_netdev[i]->hw_features |=  NETIF_F_RXHASH | NETIF_F_NTUPLE;
3523 +               edma_netdev[i]->vlan_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
3524 +               edma_netdev[i]->wanted_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
3525 +#endif
3526 +               edma_set_ethtool_ops(edma_netdev[i]);
3527 +
3528 +               /* This just fills in a default MAC address
3529 +                */
3530 +               if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) {
3531 +                       random_ether_addr(edma_netdev[i]->dev_addr);
3532 +                       pr_info("EDMA: using random MAC address ");
3533 +                       pr_info("%02x:%02x:%02x:%02x:%02x:%02x\n",
3534 +                       *(edma_netdev[i]->dev_addr),
3535 +                       *(edma_netdev[i]->dev_addr + 1),
3536 +                       *(edma_netdev[i]->dev_addr + 2),
3537 +                       *(edma_netdev[i]->dev_addr + 3),
3538 +                       *(edma_netdev[i]->dev_addr + 4),
3539 +                       *(edma_netdev[i]->dev_addr + 5));
3540 +               }
3541 +
3542 +               err = register_netdev(edma_netdev[i]);
3543 +               if (err)
3544 +                       goto err_register;
3545 +
3546 +               /* carrier off reporting is important to
3547 +                * ethtool even BEFORE open
3548 +                */
3549 +               netif_carrier_off(edma_netdev[i]);
3550 +
3551 +               /* Allocate reverse irq cpu mapping structure for
3552 +                * receive queues
3553 +                */
3554 +#ifdef CONFIG_RFS_ACCEL
3555 +               edma_netdev[i]->rx_cpu_rmap =
3556 +                       alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE);
3557 +               if (!edma_netdev[i]->rx_cpu_rmap) {
3558 +                       err = -ENOMEM;
3559 +                       goto err_rmap_alloc_fail;
3560 +               }
3561 +#endif
3562 +       }
3563 +
3564 +       for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++)
3565 +               edma_cinfo->portid_netdev_lookup_tbl[i] = NULL;
3566 +
3567 +       for_each_available_child_of_node(np, pnp) {
3568 +               const uint32_t *vlan_tag = NULL;
3569 +               int len;
3570 +
3571 +               /* this check is needed if parent and daughter dts have
3572 +                * a different number of gmac nodes
3573 +                */
3574 +               if (idx == edma_cinfo->num_gmac)
3575 +                       break;
3576 +
3577 +               /* Populate port-id to netdev lookup table */
3578 +               vlan_tag = of_get_property(pnp, "vlan_tag", &len);
3579 +               if (!vlan_tag) {
3580 +                       pr_err("Vlan tag parsing Failed.\n");
3581 +                       goto err_rmap_alloc_fail;
3582 +               }
3583 +
3584 +               adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1);
3585 +               vlan_tag++;
3586 +               portid_bmp = of_read_number(vlan_tag, 1);
3587 +               adapter[idx]->dp_bitmap = portid_bmp;
3588 +
3589 +               portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */
3590 +               while (portid_bmp) {
3591 +                       int port_bit = ffs(portid_bmp);
3592 +
3593 +                       if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
3594 +                               goto err_rmap_alloc_fail;
3595 +                       edma_cinfo->portid_netdev_lookup_tbl[port_bit] =
3596 +                               edma_netdev[idx];
3597 +                       portid_bmp &= ~(1 << (port_bit - 1));
3598 +               }
3599 +
3600 +               if (!of_property_read_u32(pnp, "qcom,poll_required",
3601 +                                         &adapter[idx]->poll_required)) {
3602 +                       if (adapter[idx]->poll_required) {
3603 +                               of_property_read_u32(pnp, "qcom,phy_mdio_addr",
3604 +                                                    &adapter[idx]->phy_mdio_addr);
3605 +                               of_property_read_u32(pnp, "qcom,forced_speed",
3606 +                                                    &adapter[idx]->forced_speed);
3607 +                               of_property_read_u32(pnp, "qcom,forced_duplex",
3608 +                                                    &adapter[idx]->forced_duplex);
3609 +
3610 +                               /* create a phyid using MDIO bus id
3611 +                                * and MDIO bus address
3612 +                                */
3613 +                               snprintf(adapter[idx]->phy_id,
3614 +                                        MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
3615 +                                        miibus->id,
3616 +                                        adapter[idx]->phy_mdio_addr);
3617 +                       }
3618 +               } else {
3619 +                       adapter[idx]->poll_required = 0;
3620 +                       adapter[idx]->forced_speed = SPEED_1000;
3621 +                       adapter[idx]->forced_duplex = DUPLEX_FULL;
3622 +               }
3623 +
3624 +               idx++;
3625 +       }
3626 +
3627 +       edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net,
3628 +                                                            "net/edma",
3629 +                                                            edma_table);
3630 +       if (!edma_cinfo->edma_ctl_table_hdr) {
3631 +               dev_err(&pdev->dev, "edma sysctl table hdr not registered\n");
3632 +               goto err_unregister_sysctl_tbl;
3633 +       }
3634 +
3635 +       /* Disable all 16 Tx and 8 rx irqs */
3636 +       edma_irq_disable(edma_cinfo);
3637 +
3638 +       err = edma_reset(edma_cinfo);
3639 +       if (err) {
3640 +               err = -EIO;
3641 +               goto err_reset;
3642 +       }
3643 +
3644 +       /* Populate per_core_info, do a netif_napi_add, request 16 TX irqs
3645 +        * and 8 RX irqs, then enable napi
3646 +        */
3647 +       for (i = 0; i < CONFIG_NR_CPUS; i++) {
3648 +               u8 rx_start;
3649 +
3650 +               edma_cinfo->edma_percpu_info[i].napi.state = 0;
3651 +
3652 +               netif_napi_add(edma_netdev[0],
3653 +                              &edma_cinfo->edma_percpu_info[i].napi,
3654 +                              edma_poll, 64);
3655 +               napi_enable(&edma_cinfo->edma_percpu_info[i].napi);
3656 +               edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i];
3657 +               edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK
3658 +                               << (i << EDMA_RX_PER_CPU_MASK_SHIFT);
3659 +               edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i];
3660 +               edma_cinfo->edma_percpu_info[i].rx_start =
3661 +                       i << EDMA_RX_CPU_START_SHIFT;
3662 +               rx_start = i << EDMA_RX_CPU_START_SHIFT;
3663 +               edma_cinfo->edma_percpu_info[i].tx_status = 0;
3664 +               edma_cinfo->edma_percpu_info[i].rx_status = 0;
3665 +               edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo;
3666 +
3667 +               /* Request irq per core */
3668 +               for (j = edma_cinfo->edma_percpu_info[i].tx_start;
3669 +                    j < tx_start[i] + 4; j++) {
3670 +                       sprintf(&edma_tx_irq[j][0], "edma_eth_tx%d", j);
3671 +                       err = request_irq(edma_cinfo->tx_irq[j],
3672 +                                         edma_interrupt,
3673 +                                         0,
3674 +                                         &edma_tx_irq[j][0],
3675 +                                         &edma_cinfo->edma_percpu_info[i]);
3676 +                       if (err)
3677 +                               goto err_reset;
3678 +               }
3679 +
3680 +               for (j = edma_cinfo->edma_percpu_info[i].rx_start;
3681 +                    j < (rx_start +
3682 +                    ((edma_cinfo->num_rx_queues == 4) ? 1 : 2));
3683 +                    j++) {
3684 +                       sprintf(&edma_rx_irq[j][0], "edma_eth_rx%d", j);
3685 +                       err = request_irq(edma_cinfo->rx_irq[j],
3686 +                                         edma_interrupt,
3687 +                                         0,
3688 +                                         &edma_rx_irq[j][0],
3689 +                                         &edma_cinfo->edma_percpu_info[i]);
3690 +                       if (err)
3691 +                               goto err_reset;
3692 +               }
3693 +
3694 +#ifdef CONFIG_RFS_ACCEL
3695 +               for (j = edma_cinfo->edma_percpu_info[i].rx_start;
3696 +                    j < rx_start + 2; j += 2) {
3697 +                       err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap,
3698 +                                              edma_cinfo->rx_irq[j]);
3699 +                       if (err)
3700 +                               goto err_rmap_add_fail;
3701 +               }
3702 +#endif
3703 +       }
3704 +
3705 +       /* Used to clear interrupt status, allocate rx buffers and
3706 +        * configure the edma descriptor registers
3707 +        */
3708 +       err = edma_configure(edma_cinfo);
3709 +       if (err) {
3710 +               err = -EIO;
3711 +               goto err_configure;
3712 +       }
3713 +
3714 +       /* Configure RSS indirection table.
3715 +        * 128 hash entries will be configured in the following
3716 +        * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively
3717 +        * and so on
3718 +        */
3719 +       for (i = 0; i < EDMA_NUM_IDT; i++)
3720 +               edma_write_reg(EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);
3721 +
3722 +       /* Configure load balance mapping table.
3723 +        * 4 table entries will be configured according to the
3724 +        * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
3725 +        * respectively.
3726 +        */
3727 +       edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);
3728 +
3729 +       /* Configure Virtual queue for Tx rings
3730 +        * User can also change this value runtime through
3731 +        * a sysctl
3732 +        */
3733 +       edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
3734 +       edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);
3735 +
3736 +       /* Configure Max AXI Burst write size to 128 bytes */
3737 +       edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE,
3738 +                      EDMA_AXIW_MAXWRSIZE_VALUE);
3739 +
3740 +       /* Enable All 16 tx and 8 rx irq mask */
3741 +       edma_irq_enable(edma_cinfo);
3742 +       edma_enable_tx_ctrl(&edma_cinfo->hw);
3743 +       edma_enable_rx_ctrl(&edma_cinfo->hw);
3744 +
3745 +       for (i = 0; i < edma_cinfo->num_gmac; i++) {
3746 +               if (adapter[i]->poll_required) {
3747 +                       adapter[i]->phydev =
3748 +                               phy_connect(edma_netdev[i],
3749 +                                           (const char *)adapter[i]->phy_id,
3750 +                                           &edma_adjust_link,
3751 +                                           PHY_INTERFACE_MODE_SGMII);
3752 +                       if (IS_ERR(adapter[i]->phydev)) {
3753 +                               dev_dbg(&pdev->dev, "PHY attach FAIL");
3754 +                               err = -EIO;
3755 +                               goto edma_phy_attach_fail;
3756 +                       } else {
3757 +                               adapter[i]->phydev->advertising |=
3758 +                                       ADVERTISED_Pause |
3759 +                                       ADVERTISED_Asym_Pause;
3760 +                               adapter[i]->phydev->supported |=
3761 +                                       SUPPORTED_Pause |
3762 +                                       SUPPORTED_Asym_Pause;
3763 +                       }
3764 +               } else {
3765 +                       adapter[i]->phydev = NULL;
3766 +               }
3767 +       }
3768 +
3769 +       spin_lock_init(&edma_cinfo->stats_lock);
3770 +
3771 +       init_timer(&edma_stats_timer);
3772 +       edma_stats_timer.expires = jiffies + 1*HZ;
3773 +       edma_stats_timer.data = (unsigned long)edma_cinfo;
3774 +       edma_stats_timer.function = edma_statistics_timer; /* timer handler */
3775 +       add_timer(&edma_stats_timer);
3776 +
3777 +       return 0;
3778 +
3779 +edma_phy_attach_fail:
3780 +       miibus = NULL;
3781 +err_configure:
3782 +#ifdef CONFIG_RFS_ACCEL
3783 +       for (i = 0; i < edma_cinfo->num_gmac; i++) {
3784 +               free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap);
3785 +               adapter[i]->netdev->rx_cpu_rmap = NULL;
3786 +       }
3787 +#endif
3788 +err_rmap_add_fail:
3789 +       edma_free_irqs(adapter[0]);
3790 +       for (i = 0; i < CONFIG_NR_CPUS; i++)
3791 +               napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
3792 +err_reset:
3793 +err_unregister_sysctl_tbl:
3794 +err_rmap_alloc_fail:
3795 +       for (i = 0; i < edma_cinfo->num_gmac; i++)
3796 +               unregister_netdev(edma_netdev[i]);
3797 +err_register:
3798 +err_mdiobus_init_fail:
3799 +       edma_free_rx_rings(edma_cinfo);
3800 +err_rx_rinit:
3801 +       edma_free_tx_rings(edma_cinfo);
3802 +err_tx_rinit:
3803 +       edma_free_queues(edma_cinfo);
3804 +err_rx_qinit:
3805 +err_tx_qinit:
3806 +       iounmap(edma_cinfo->hw.hw_addr);
3807 +err_ioremap:
3808 +       for (i = 0; i < edma_cinfo->num_gmac; i++) {
3809 +               if (edma_netdev[i])
3810 +                       free_netdev(edma_netdev[i]);
3811 +       }
3812 +err_cinfo:
3813 +       kfree(edma_cinfo);
3814 +err_alloc:
3815 +       return err;
3816 +}
3817 +
3818 +/* edma_axi_remove()
3819 + *     Device Removal Routine
3820 + *
3821 + * edma_axi_remove is called by the platform subsystem to alert the driver
3822 + * that it should release a platform device.
3823 + */
3824 +static int edma_axi_remove(struct platform_device *pdev)
3825 +{
3826 +       struct edma_adapter *adapter = netdev_priv(edma_netdev[0]);
3827 +       struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
3828 +       struct edma_hw *hw = &edma_cinfo->hw;
3829 +       int i;
3830 +
3831 +       for (i = 0; i < edma_cinfo->num_gmac; i++)
3832 +               unregister_netdev(edma_netdev[i]);
3833 +
3834 +       edma_stop_rx_tx(hw);
3835 +       for (i = 0; i < CONFIG_NR_CPUS; i++)
3836 +               napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
3837 +
3838 +       edma_irq_disable(edma_cinfo);
3839 +       edma_write_reg(EDMA_REG_RX_ISR, 0xff);
3840 +       edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
3841 +#ifdef CONFIG_RFS_ACCEL
3842 +       for (i = 0; i < edma_cinfo->num_gmac; i++) {
3843 +               free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap);
3844 +               edma_netdev[i]->rx_cpu_rmap = NULL;
3845 +       }
3846 +#endif
3847 +
3848 +       for (i = 0; i < edma_cinfo->num_gmac; i++) {
3849 +               struct edma_adapter *adapter = netdev_priv(edma_netdev[i]);
3850 +
3851 +               if (adapter->phydev)
3852 +                       phy_disconnect(adapter->phydev);
3853 +       }
3854 +
3855 +       del_timer_sync(&edma_stats_timer);
3856 +       edma_free_irqs(adapter);
3857 +       unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr);
3858 +       edma_free_tx_resources(edma_cinfo);
3859 +       edma_free_rx_resources(edma_cinfo);
3860 +       edma_free_tx_rings(edma_cinfo);
3861 +       edma_free_rx_rings(edma_cinfo);
3862 +       edma_free_queues(edma_cinfo);
3863 +       for (i = 0; i < edma_cinfo->num_gmac; i++)
3864 +               free_netdev(edma_netdev[i]);
3865 +
3866 +       kfree(edma_cinfo);
3867 +
3868 +       return 0;
3869 +}
3870 +
3871 +static const struct of_device_id edma_of_mtable[] = {
3872 +       {.compatible = "qcom,ess-edma" },
3873 +       {}
3874 +};
3875 +MODULE_DEVICE_TABLE(of, edma_of_mtable);
3876 +
3877 +static struct platform_driver edma_axi_driver = {
3878 +       .driver = {
3879 +               .name    = edma_axi_driver_name,
3880 +               .of_match_table = edma_of_mtable,
3881 +       },
3882 +       .probe    = edma_axi_probe,
3883 +       .remove   = edma_axi_remove,
3884 +};
3885 +
3886 +module_platform_driver(edma_axi_driver);
3887 +
3888 +MODULE_AUTHOR("Qualcomm Atheros Inc");
3889 +MODULE_DESCRIPTION("QCA ESS EDMA driver");
3890 +MODULE_LICENSE("GPL");
3891 --- /dev/null
3892 +++ b/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c
3893 @@ -0,0 +1,374 @@
3894 +/*
3895 + * Copyright (c) 2015 - 2016, The Linux Foundation. All rights reserved.
3896 + *
3897 + * Permission to use, copy, modify, and/or distribute this software for
3898 + * any purpose with or without fee is hereby granted, provided that the
3899 + * above copyright notice and this permission notice appear in all copies.
3900 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
3901 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
3902 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
3903 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
3904 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
3905 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
3906 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
3907 + */
3908 +
3909 +#include <linux/ethtool.h>
3910 +#include <linux/netdevice.h>
3911 +#include <linux/string.h>
3912 +#include "edma.h"
3913 +
3914 +struct edma_ethtool_stats {
3915 +       uint8_t stat_string[ETH_GSTRING_LEN];
3916 +       uint32_t stat_offset;
3917 +};
3918 +
3919 +#define EDMA_STAT(m)    offsetof(struct edma_ethtool_statistics, m)
3920 +#define DRVINFO_LEN    32
3921 +
3922 +/* Array of strings describing statistics
3923 + */
3924 +static const struct edma_ethtool_stats edma_gstrings_stats[] = {
3925 +       {"tx_q0_pkt", EDMA_STAT(tx_q0_pkt)},
3926 +       {"tx_q1_pkt", EDMA_STAT(tx_q1_pkt)},
3927 +       {"tx_q2_pkt", EDMA_STAT(tx_q2_pkt)},
3928 +       {"tx_q3_pkt", EDMA_STAT(tx_q3_pkt)},
3929 +       {"tx_q4_pkt", EDMA_STAT(tx_q4_pkt)},
3930 +       {"tx_q5_pkt", EDMA_STAT(tx_q5_pkt)},
3931 +       {"tx_q6_pkt", EDMA_STAT(tx_q6_pkt)},
3932 +       {"tx_q7_pkt", EDMA_STAT(tx_q7_pkt)},
3933 +       {"tx_q8_pkt", EDMA_STAT(tx_q8_pkt)},
3934 +       {"tx_q9_pkt", EDMA_STAT(tx_q9_pkt)},
3935 +       {"tx_q10_pkt", EDMA_STAT(tx_q10_pkt)},
3936 +       {"tx_q11_pkt", EDMA_STAT(tx_q11_pkt)},
3937 +       {"tx_q12_pkt", EDMA_STAT(tx_q12_pkt)},
3938 +       {"tx_q13_pkt", EDMA_STAT(tx_q13_pkt)},
3939 +       {"tx_q14_pkt", EDMA_STAT(tx_q14_pkt)},
3940 +       {"tx_q15_pkt", EDMA_STAT(tx_q15_pkt)},
3941 +       {"tx_q0_byte", EDMA_STAT(tx_q0_byte)},
3942 +       {"tx_q1_byte", EDMA_STAT(tx_q1_byte)},
3943 +       {"tx_q2_byte", EDMA_STAT(tx_q2_byte)},
3944 +       {"tx_q3_byte", EDMA_STAT(tx_q3_byte)},
3945 +       {"tx_q4_byte", EDMA_STAT(tx_q4_byte)},
3946 +       {"tx_q5_byte", EDMA_STAT(tx_q5_byte)},
3947 +       {"tx_q6_byte", EDMA_STAT(tx_q6_byte)},
3948 +       {"tx_q7_byte", EDMA_STAT(tx_q7_byte)},
3949 +       {"tx_q8_byte", EDMA_STAT(tx_q8_byte)},
3950 +       {"tx_q9_byte", EDMA_STAT(tx_q9_byte)},
3951 +       {"tx_q10_byte", EDMA_STAT(tx_q10_byte)},
3952 +       {"tx_q11_byte", EDMA_STAT(tx_q11_byte)},
3953 +       {"tx_q12_byte", EDMA_STAT(tx_q12_byte)},
3954 +       {"tx_q13_byte", EDMA_STAT(tx_q13_byte)},
3955 +       {"tx_q14_byte", EDMA_STAT(tx_q14_byte)},
3956 +       {"tx_q15_byte", EDMA_STAT(tx_q15_byte)},
3957 +       {"rx_q0_pkt", EDMA_STAT(rx_q0_pkt)},
3958 +       {"rx_q1_pkt", EDMA_STAT(rx_q1_pkt)},
3959 +       {"rx_q2_pkt", EDMA_STAT(rx_q2_pkt)},
3960 +       {"rx_q3_pkt", EDMA_STAT(rx_q3_pkt)},
3961 +       {"rx_q4_pkt", EDMA_STAT(rx_q4_pkt)},
3962 +       {"rx_q5_pkt", EDMA_STAT(rx_q5_pkt)},
3963 +       {"rx_q6_pkt", EDMA_STAT(rx_q6_pkt)},
3964 +       {"rx_q7_pkt", EDMA_STAT(rx_q7_pkt)},
3965 +       {"rx_q0_byte", EDMA_STAT(rx_q0_byte)},
3966 +       {"rx_q1_byte", EDMA_STAT(rx_q1_byte)},
3967 +       {"rx_q2_byte", EDMA_STAT(rx_q2_byte)},
3968 +       {"rx_q3_byte", EDMA_STAT(rx_q3_byte)},
3969 +       {"rx_q4_byte", EDMA_STAT(rx_q4_byte)},
3970 +       {"rx_q5_byte", EDMA_STAT(rx_q5_byte)},
3971 +       {"rx_q6_byte", EDMA_STAT(rx_q6_byte)},
3972 +       {"rx_q7_byte", EDMA_STAT(rx_q7_byte)},
3973 +       {"tx_desc_error", EDMA_STAT(tx_desc_error)},
3974 +};
3975 +
3976 +#define EDMA_STATS_LEN ARRAY_SIZE(edma_gstrings_stats)
3977 +
3978 +/* edma_get_strset_count()
3979 + *     Get strset count
3980 + */
3981 +static int edma_get_strset_count(struct net_device *netdev,
3982 +                                int sset)
3983 +{
3984 +       switch (sset) {
3985 +       case ETH_SS_STATS:
3986 +               return EDMA_STATS_LEN;
3987 +       default:
3988 +               netdev_dbg(netdev, "%s: Invalid string set", __func__);
3989 +               return -EOPNOTSUPP;
3990 +       }
3991 +}
3992 +
3993 +
3994 +/* edma_get_strings()
3995 + *     get stats string
3996 + */
3997 +static void edma_get_strings(struct net_device *netdev, uint32_t stringset,
3998 +                            uint8_t *data)
3999 +{
4000 +       uint8_t *p = data;
4001 +       uint32_t i;
4002 +
4003 +       switch (stringset) {
4004 +       case ETH_SS_STATS:
4005 +               for (i = 0; i < EDMA_STATS_LEN; i++) {
4006 +                       memcpy(p, edma_gstrings_stats[i].stat_string,
4007 +                               min((size_t)ETH_GSTRING_LEN,
4008 +                                   strlen(edma_gstrings_stats[i].stat_string)
4009 +                                   + 1));
4010 +                       p += ETH_GSTRING_LEN;
4011 +               }
4012 +               break;
4013 +       }
4014 +}
4015 +
4016 +/* edma_get_ethtool_stats()
4017 + *     Get ethtool statistics
4018 + */
4019 +static void edma_get_ethtool_stats(struct net_device *netdev,
4020 +                                  struct ethtool_stats *stats, uint64_t *data)
4021 +{
4022 +       struct edma_adapter *adapter = netdev_priv(netdev);
4023 +       struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
4024 +       int i;
4025 +       uint8_t *p = NULL;
4026 +
4027 +       edma_read_append_stats(edma_cinfo);
4028 +
4029 +       for (i = 0; i < EDMA_STATS_LEN; i++) {
4030 +               p = (uint8_t *)&(edma_cinfo->edma_ethstats) +
4031 +                       edma_gstrings_stats[i].stat_offset;
4032 +               data[i] = *(uint32_t *)p;
4033 +       }
4034 +}
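+
+/* Usage note (editorial, hedged): these per-queue counters are the ones
+ * reported by "ethtool -S <interface>"; the values are cumulative because
+ * edma_read_append_stats() adds the hardware registers into
+ * edma_cinfo->edma_ethstats rather than overwriting it.
+ */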
4035 +
4036 +/* edma_get_drvinfo()
4037 + *     get edma driver info
4038 + */
4039 +static void edma_get_drvinfo(struct net_device *dev,
4040 +                            struct ethtool_drvinfo *info)
4041 +{
4042 +       strlcpy(info->driver, "ess_edma", DRVINFO_LEN);
4043 +       strlcpy(info->bus_info, "axi", ETHTOOL_BUSINFO_LEN);
4044 +}
4045 +
4046 +/* edma_nway_reset()
4047 + *     Restarting autonegotiation is not supported, so return -EINVAL.
4048 + */
4049 +static int edma_nway_reset(struct net_device *netdev)
4050 +{
4051 +       return -EINVAL;
4052 +}
4053 +
4054 +/* edma_get_wol()
4055 + *     get wake on lan info
4056 + */
4057 +static void edma_get_wol(struct net_device *netdev,
4058 +                        struct ethtool_wolinfo *wol)
4059 +{
4060 +       wol->supported = 0;
4061 +       wol->wolopts = 0;
4062 +}
4063 +
4064 +/* edma_get_msglevel()
4065 + *     get message level.
4066 + */
4067 +static uint32_t edma_get_msglevel(struct net_device *netdev)
4068 +{
4069 +       return 0;
4070 +}
4071 +
4072 +/* edma_get_settings()
4073 + *     Get edma settings
4074 + */
4075 +static int edma_get_settings(struct net_device *netdev,
4076 +                            struct ethtool_cmd *ecmd)
4077 +{
4078 +       struct edma_adapter *adapter = netdev_priv(netdev);
4079 +
4080 +       if (adapter->poll_required) {
4081 +               struct phy_device *phydev = NULL;
4082 +               uint16_t phyreg;
4083 +
4084 +               if ((adapter->forced_speed != SPEED_UNKNOWN)
4085 +                       && !(adapter->poll_required))
4086 +                       return -EPERM;
4087 +
4088 +               phydev = adapter->phydev;
4089 +
4090 +               ecmd->advertising = phydev->advertising;
4091 +               ecmd->autoneg = phydev->autoneg;
4092 +
4093 +               if (adapter->link_state == __EDMA_LINKDOWN) {
4094 +                       ecmd->speed = SPEED_UNKNOWN;
4095 +                       ecmd->duplex = DUPLEX_UNKNOWN;
4096 +               } else {
4097 +                       ecmd->speed = phydev->speed;
4098 +                       ecmd->duplex = phydev->duplex;
4099 +               }
4100 +
4101 +               ecmd->phy_address = adapter->phy_mdio_addr;
4102 +
4103 +               phyreg = (uint16_t)phy_read(adapter->phydev, MII_LPA);
4104 +               if (phyreg & LPA_10HALF)
4105 +                       ecmd->lp_advertising |= ADVERTISED_10baseT_Half;
4106 +
4107 +               if (phyreg & LPA_10FULL)
4108 +                       ecmd->lp_advertising |= ADVERTISED_10baseT_Full;
4109 +
4110 +               if (phyreg & LPA_100HALF)
4111 +                       ecmd->lp_advertising |= ADVERTISED_100baseT_Half;
4112 +
4113 +               if (phyreg & LPA_100FULL)
4114 +                       ecmd->lp_advertising |= ADVERTISED_100baseT_Full;
4115 +
4116 +               phyreg = (uint16_t)phy_read(adapter->phydev, MII_STAT1000);
4117 +               if (phyreg & LPA_1000HALF)
4118 +                       ecmd->lp_advertising |= ADVERTISED_1000baseT_Half;
4119 +
4120 +               if (phyreg & LPA_1000FULL)
4121 +                       ecmd->lp_advertising |= ADVERTISED_1000baseT_Full;
4122 +       } else {
4123 +               /* If the speed/duplex for this GMAC is forced and we
4124 +                * are not polling for link state changes, return the
4125 +                * values as specified by platform. This will be true
4126 +                * for GMACs connected to switch, and interfaces that
4127 +                * do not use a PHY.
4128 +                */
4129 +               if (!(adapter->poll_required)) {
4130 +                       if (adapter->forced_speed != SPEED_UNKNOWN) {
4131 +                               /* set speed and duplex */
4132 +                               ethtool_cmd_speed_set(ecmd, SPEED_1000);
4133 +                               ecmd->duplex = DUPLEX_FULL;
4134 +
4135 +                               /* Populate capabilities advertised by self */
4136 +                               ecmd->advertising = 0;
4137 +                               ecmd->autoneg = 0;
4138 +                               ecmd->port = PORT_TP;
4139 +                               ecmd->transceiver = XCVR_EXTERNAL;
4140 +                       } else {
4141 +                               /* non link polled and non
4142 +                                * forced speed/duplex interface
4143 +                                */
4144 +                               return -EIO;
4145 +                       }
4146 +               }
4147 +       }
4148 +
4149 +       return 0;
4150 +}
4151 +
4152 +/* edma_set_settings()
4153 + *     Set EDMA settings
4154 + */
4155 +static int edma_set_settings(struct net_device *netdev,
4156 +                            struct ethtool_cmd *ecmd)
4157 +{
4158 +       struct edma_adapter *adapter = netdev_priv(netdev);
4159 +       struct phy_device *phydev = NULL;
4160 +
4161 +       if ((adapter->forced_speed != SPEED_UNKNOWN) &&
4162 +            !adapter->poll_required)
4163 +               return -EPERM;
4164 +
4165 +       phydev = adapter->phydev;
4166 +       phydev->advertising = ecmd->advertising;
4167 +       phydev->autoneg = ecmd->autoneg;
4168 +       phydev->speed = ethtool_cmd_speed(ecmd);
4169 +       phydev->duplex = ecmd->duplex;
4170 +
4171 +       genphy_config_aneg(phydev);
4172 +
4173 +       return 0;
4174 +}
4175 +
4176 +/* edma_get_coalesce()
4177 + *     get interrupt mitigation
4178 + */
4179 +static int edma_get_coalesce(struct net_device *netdev,
4180 +                            struct ethtool_coalesce *ec)
4181 +{
4182 +       u32 reg_val;
4183 +
4184 +       edma_get_tx_rx_coalesce(&reg_val);
4185 +
4186 +       /* Read the Interrupt Moderation Timer (IMT) register value;
4187 +        * the lower 16 bits hold the Rx timer and the upper 16 bits
4188 +        * the Tx timer. The IMT resolution is 2 usecs, so the register
4189 +        * value is shifted left by 1 (multiplied by 2) to give the
4190 +        * actual time in usecs.
4191 +        */
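+       /* e.g. a raw timer field of 50 corresponds to 50 << 1 = 100 usecs */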
4192 +       ec->tx_coalesce_usecs = (((reg_val >> 16) & 0xffff) << 1);
4193 +       ec->rx_coalesce_usecs = ((reg_val & 0xffff) << 1);
4194 +
4195 +       return 0;
4196 +}
4197 +
4198 +/* edma_set_coalesce()
4199 + *     set interrupt mitigation
4200 + */
4201 +static int edma_set_coalesce(struct net_device *netdev,
4202 +                            struct ethtool_coalesce *ec)
4203 +{
4204 +       if (ec->tx_coalesce_usecs)
4205 +               edma_change_tx_coalesce(ec->tx_coalesce_usecs);
4206 +       if (ec->rx_coalesce_usecs)
4207 +               edma_change_rx_coalesce(ec->rx_coalesce_usecs);
4208 +
4209 +       return 0;
4210 +}
4211 +
4212 +/* edma_set_priv_flags()
4213 + *     Set EDMA private flags
4214 + */
4215 +static int edma_set_priv_flags(struct net_device *netdev, u32 flags)
4216 +{
4217 +       return 0;
4218 +}
4219 +
4220 +/* edma_get_priv_flags()
4221 + *     get edma driver flags
4222 + */
4223 +static u32 edma_get_priv_flags(struct net_device *netdev)
4224 +{
4225 +       return 0;
4226 +}
4227 +
4228 +/* edma_get_ringparam()
4229 + *     get ring size
4230 + */
4231 +static void edma_get_ringparam(struct net_device *netdev,
4232 +                              struct ethtool_ringparam *ring)
4233 +{
4234 +       struct edma_adapter *adapter = netdev_priv(netdev);
4235 +       struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
4236 +
4237 +       ring->tx_max_pending = edma_cinfo->tx_ring_count;
4238 +       ring->rx_max_pending = edma_cinfo->rx_ring_count;
4239 +}
4240 +
4241 +/* Ethtool operations
4242 + */
4243 +static const struct ethtool_ops edma_ethtool_ops = {
4244 +       .get_drvinfo = edma_get_drvinfo,
4245 +       .get_link = ethtool_op_get_link,
4246 +       .get_msglevel = edma_get_msglevel,
4247 +       .nway_reset = edma_nway_reset,
4248 +       .get_wol = edma_get_wol,
4249 +       .get_settings = edma_get_settings,
4250 +       .set_settings = edma_set_settings,
4251 +       .get_strings = edma_get_strings,
4252 +       .get_sset_count = edma_get_strset_count,
4253 +       .get_ethtool_stats = edma_get_ethtool_stats,
4254 +       .get_coalesce = edma_get_coalesce,
4255 +       .set_coalesce = edma_set_coalesce,
4256 +       .get_priv_flags = edma_get_priv_flags,
4257 +       .set_priv_flags = edma_set_priv_flags,
4258 +       .get_ringparam = edma_get_ringparam,
4259 +};
4260 +
4261 +/* edma_set_ethtool_ops
4262 + *     Set ethtool operations
4263 + */
4264 +void edma_set_ethtool_ops(struct net_device *netdev)
4265 +{
4266 +       netdev->ethtool_ops = &edma_ethtool_ops;
4267 +}
4268 --- /dev/null
4269 +++ b/drivers/net/ethernet/qualcomm/essedma/ess_edma.h
4270 @@ -0,0 +1,332 @@
4271 +/*
4272 + * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
4273 + *
4274 + * Permission to use, copy, modify, and/or distribute this software for
4275 + * any purpose with or without fee is hereby granted, provided that the
4276 + * above copyright notice and this permission notice appear in all copies.
4277 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
4278 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
4279 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
4280 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
4281 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
4282 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
4283 + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
4284 + */
4285 +
4286 +#ifndef _ESS_EDMA_H_
4287 +#define _ESS_EDMA_H_
4288 +
4289 +#include <linux/types.h>
4290 +
4291 +struct edma_adapter;
4292 +struct edma_hw;
4293 +
4294 +/* register definition */
4295 +#define EDMA_REG_MAS_CTRL 0x0
4296 +#define EDMA_REG_TIMEOUT_CTRL 0x004
4297 +#define EDMA_REG_DBG0 0x008
4298 +#define EDMA_REG_DBG1 0x00C
4299 +#define EDMA_REG_SW_CTRL0 0x100
4300 +#define EDMA_REG_SW_CTRL1 0x104
4301 +
4302 +/* Interrupt Status Register */
4303 +#define EDMA_REG_RX_ISR 0x200
4304 +#define EDMA_REG_TX_ISR 0x208
4305 +#define EDMA_REG_MISC_ISR 0x210
4306 +#define EDMA_REG_WOL_ISR 0x218
4307 +
4308 +#define EDMA_MISC_ISR_RX_URG_Q(x) (1 << (x))
4309 +
4310 +#define EDMA_MISC_ISR_AXIR_TIMEOUT 0x00000100
4311 +#define EDMA_MISC_ISR_AXIR_ERR 0x00000200
4312 +#define EDMA_MISC_ISR_TXF_DEAD 0x00000400
4313 +#define EDMA_MISC_ISR_AXIW_ERR 0x00000800
4314 +#define EDMA_MISC_ISR_AXIW_TIMEOUT 0x00001000
4315 +
4316 +#define EDMA_WOL_ISR 0x00000001
4317 +
4318 +/* Interrupt Mask Register */
4319 +#define EDMA_REG_MISC_IMR 0x214
4320 +#define EDMA_REG_WOL_IMR 0x218
4321 +
4322 +#define EDMA_RX_IMR_NORMAL_MASK 0x1
4323 +#define EDMA_TX_IMR_NORMAL_MASK 0x1
4324 +#define EDMA_MISC_IMR_NORMAL_MASK 0x80001FFF
4325 +#define EDMA_WOL_IMR_NORMAL_MASK 0x1
4326 +
4327 +/* Edma receive consumer index */
4328 +#define EDMA_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */
4329 +/* Edma transmit consumer index */
4330 +#define EDMA_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */
4331 +
4332 +/* IRQ Moderator Initial Timer Register */
4333 +#define EDMA_REG_IRQ_MODRT_TIMER_INIT 0x280
4334 +#define EDMA_IRQ_MODRT_TIMER_MASK 0xFFFF
4335 +#define EDMA_IRQ_MODRT_RX_TIMER_SHIFT 0
4336 +#define EDMA_IRQ_MODRT_TX_TIMER_SHIFT 16
4337 +
4338 +/* Interrupt Control Register */
4339 +#define EDMA_REG_INTR_CTRL 0x284
4340 +#define EDMA_INTR_CLR_TYP_SHIFT 0
4341 +#define EDMA_INTR_SW_IDX_W_TYP_SHIFT 1
4342 +#define EDMA_INTR_CLEAR_TYPE_W1 0
4343 +#define EDMA_INTR_CLEAR_TYPE_R 1
4344 +
4345 +/* RX Interrupt Mask Register */
4346 +#define EDMA_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */
4347 +
4348 +/* TX Interrupt mask register */
4349 +#define EDMA_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */
4350 +
4351 +/* Load Ptr Register
4352 + * Software sets this bit after the initialization of the head and tail
4353 + */
4354 +#define EDMA_REG_TX_SRAM_PART 0x400
4355 +#define EDMA_LOAD_PTR_SHIFT 16
4356 +
4357 +/* TXQ Control Register */
4358 +#define EDMA_REG_TXQ_CTRL 0x404
4359 +#define EDMA_TXQ_CTRL_IP_OPTION_EN 0x10
4360 +#define EDMA_TXQ_CTRL_TXQ_EN 0x20
4361 +#define EDMA_TXQ_CTRL_ENH_MODE 0x40
4362 +#define EDMA_TXQ_CTRL_LS_8023_EN 0x80
4363 +#define EDMA_TXQ_CTRL_TPD_BURST_EN 0x100
4364 +#define EDMA_TXQ_CTRL_LSO_BREAK_EN 0x200
4365 +#define EDMA_TXQ_NUM_TPD_BURST_MASK 0xF
4366 +#define EDMA_TXQ_TXF_BURST_NUM_MASK 0xFFFF
4367 +#define EDMA_TXQ_NUM_TPD_BURST_SHIFT 0
4368 +#define EDMA_TXQ_TXF_BURST_NUM_SHIFT 16
4369 +
4370 +#define EDMA_REG_TXF_WATER_MARK 0x408 /* in 8-byte units */
4371 +#define EDMA_TXF_WATER_MARK_MASK 0x0FFF
4372 +#define EDMA_TXF_LOW_WATER_MARK_SHIFT 0
4373 +#define EDMA_TXF_HIGH_WATER_MARK_SHIFT 16
4374 +#define EDMA_TXQ_CTRL_BURST_MODE_EN 0x80000000
4375 +
4376 +/* WRR Control Register */
4377 +#define EDMA_REG_WRR_CTRL_Q0_Q3 0x40c
4378 +#define EDMA_REG_WRR_CTRL_Q4_Q7 0x410
4379 +#define EDMA_REG_WRR_CTRL_Q8_Q11 0x414
4380 +#define EDMA_REG_WRR_CTRL_Q12_Q15 0x418
4381 +
4382 +/* Weighted round robin (WRR): given a queue number, compute the bit
4383 + * offset at which that queue's weight field starts within its WRR
4384 + * control register (fields are 5 bits apart, four queues per register)
4385 + */
4386 +#define EDMA_WRR_SHIFT(x) (((x) * 5) % 20)
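+/* e.g. queue 6 belongs to EDMA_REG_WRR_CTRL_Q4_Q7 and
+ * EDMA_WRR_SHIFT(6) = (6 * 5) % 20 = 10, so its weight field starts
+ * at bit 10 of that register
+ */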
4387 +
4388 +/* Tx Descriptor Control Register */
4389 +#define EDMA_REG_TPD_RING_SIZE 0x41C
4390 +#define EDMA_TPD_RING_SIZE_SHIFT 0
4391 +#define EDMA_TPD_RING_SIZE_MASK 0xFFFF
4392 +
4393 +/* Transmit descriptor base address */
4394 +#define EDMA_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */
4395 +
4396 +/* TPD Index Register */
4397 +#define EDMA_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */
4398 +
4399 +#define EDMA_TPD_PROD_IDX_BITS 0x0000FFFF
4400 +#define EDMA_TPD_CONS_IDX_BITS 0xFFFF0000
4401 +#define EDMA_TPD_PROD_IDX_MASK 0xFFFF
4402 +#define EDMA_TPD_CONS_IDX_MASK 0xFFFF
4403 +#define EDMA_TPD_PROD_IDX_SHIFT 0
4404 +#define EDMA_TPD_CONS_IDX_SHIFT 16
4405 +
4406 +/* TX Virtual Queue Mapping Control Register */
4407 +#define EDMA_REG_VQ_CTRL0 0x4A0
4408 +#define EDMA_REG_VQ_CTRL1 0x4A4
4409 +
4410 +/* Virtual QID shift: given a queue number, compute the bit position of
4411 + * that queue's virtual QID field within the virtual QID control registers
4412 + */
4413 +#define EDMA_VQ_ID_SHIFT(i) (((i) * 3) % 24)
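+/* e.g. queue 9 falls in EDMA_REG_VQ_CTRL1 and
+ * EDMA_VQ_ID_SHIFT(9) = (9 * 3) % 24 = 3, so its virtual QID field
+ * starts at bit 3 of that register
+ */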
4414 +
4415 +/* Virtual Queue Default Value */
4416 +#define EDMA_VQ_REG_VALUE 0x240240
4417 +
4418 +/* Tx side Port Interface Control Register */
4419 +#define EDMA_REG_PORT_CTRL 0x4A8
4420 +#define EDMA_PAD_EN_SHIFT 15
4421 +
4422 +/* Tx side VLAN Configuration Register */
4423 +#define EDMA_REG_VLAN_CFG 0x4AC
4424 +
4425 +#define EDMA_TX_CVLAN 16
4426 +#define EDMA_TX_INS_CVLAN 17
4427 +#define EDMA_TX_CVLAN_TAG_SHIFT 0
4428 +
4429 +#define EDMA_TX_SVLAN 14
4430 +#define EDMA_TX_INS_SVLAN 15
4431 +#define EDMA_TX_SVLAN_TAG_SHIFT 16
4432 +
4433 +/* Tx Queue Packet Statistic Register */
4434 +#define EDMA_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */
4435 +
4436 +#define EDMA_TX_STAT_PKT_MASK 0xFFFFFF
4437 +
4438 +/* Tx Queue Byte Statistic Register */
4439 +#define EDMA_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */
4440 +
4441 +/* Load Balance Based Ring Offset Register */
4442 +#define EDMA_REG_LB_RING 0x800
4443 +#define EDMA_LB_RING_ENTRY_MASK 0xff
4444 +#define EDMA_LB_RING_ID_MASK 0x7
4445 +#define EDMA_LB_RING_PROFILE_ID_MASK 0x3
4446 +#define EDMA_LB_RING_ENTRY_BIT_OFFSET 8
4447 +#define EDMA_LB_RING_ID_OFFSET 0
4448 +#define EDMA_LB_RING_PROFILE_ID_OFFSET 3
4449 +#define EDMA_LB_REG_VALUE 0x6040200
4450 +
4451 +/* Load Balance Priority Mapping Register */
4452 +#define EDMA_REG_LB_PRI_START 0x804
4453 +#define EDMA_REG_LB_PRI_END 0x810
4454 +#define EDMA_LB_PRI_REG_INC 4
4455 +#define EDMA_LB_PRI_ENTRY_BIT_OFFSET 4
4456 +#define EDMA_LB_PRI_ENTRY_MASK 0xf
4457 +
4458 +/* RSS Priority Mapping Register */
4459 +#define EDMA_REG_RSS_PRI 0x820
4460 +#define EDMA_RSS_PRI_ENTRY_MASK 0xf
4461 +#define EDMA_RSS_RING_ID_MASK 0x7
4462 +#define EDMA_RSS_PRI_ENTRY_BIT_OFFSET 4
4463 +
4464 +/* RSS Indirection Register */
4465 +#define EDMA_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = indirection table index */
4466 +#define EDMA_NUM_IDT 16
4467 +#define EDMA_RSS_IDT_VALUE 0x64206420
4468 +
4469 +/* Default RSS Ring Register */
4470 +#define EDMA_REG_DEF_RSS 0x890
4471 +#define EDMA_DEF_RSS_MASK 0x7
4472 +
4473 +/* RSS Hash Function Type Register */
4474 +#define EDMA_REG_RSS_TYPE 0x894
4475 +#define EDMA_RSS_TYPE_NONE 0x01
4476 +#define EDMA_RSS_TYPE_IPV4TCP 0x02
4477 +#define EDMA_RSS_TYPE_IPV6_TCP 0x04
4478 +#define EDMA_RSS_TYPE_IPV4_UDP 0x08
4479 +#define EDMA_RSS_TYPE_IPV6UDP 0x10
4480 +#define EDMA_RSS_TYPE_IPV4 0x20
4481 +#define EDMA_RSS_TYPE_IPV6 0x40
4482 +#define EDMA_RSS_HASH_MODE_MASK 0x7f
4483 +
4484 +#define EDMA_REG_RSS_HASH_VALUE 0x8C0
4485 +
4486 +#define EDMA_REG_RSS_TYPE_RESULT 0x8C4
4487 +
4488 +#define EDMA_HASH_TYPE_START 0
4489 +#define EDMA_HASH_TYPE_END 5
4490 +#define EDMA_HASH_TYPE_SHIFT 12
4491 +
4492 +#define EDMA_RFS_FLOW_ENTRIES 1024
4493 +#define EDMA_RFS_FLOW_ENTRIES_MASK (EDMA_RFS_FLOW_ENTRIES - 1)
4494 +#define EDMA_RFS_EXPIRE_COUNT_PER_CALL 128
4495 +
4496 +/* RFD Base Address Register */
4497 +#define EDMA_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */
4498 +
4499 +/* RFD Index Register */
4500 +#define EDMA_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2))
4501 +
4502 +#define EDMA_RFD_PROD_IDX_BITS 0x00000FFF
4503 +#define EDMA_RFD_CONS_IDX_BITS 0x0FFF0000
4504 +#define EDMA_RFD_PROD_IDX_MASK 0xFFF
4505 +#define EDMA_RFD_CONS_IDX_MASK 0xFFF
4506 +#define EDMA_RFD_PROD_IDX_SHIFT 0
4507 +#define EDMA_RFD_CONS_IDX_SHIFT 16
4508 +
4509 +/* Rx Descriptor Control Register */
4510 +#define EDMA_REG_RX_DESC0 0xA10
4511 +#define EDMA_RFD_RING_SIZE_MASK 0xFFF
4512 +#define EDMA_RX_BUF_SIZE_MASK 0xFFFF
4513 +#define EDMA_RFD_RING_SIZE_SHIFT 0
4514 +#define EDMA_RX_BUF_SIZE_SHIFT 16
4515 +
4516 +#define EDMA_REG_RX_DESC1 0xA14
4517 +#define EDMA_RXQ_RFD_BURST_NUM_MASK 0x3F
4518 +#define EDMA_RXQ_RFD_PF_THRESH_MASK 0x1F
4519 +#define EDMA_RXQ_RFD_LOW_THRESH_MASK 0xFFF
4520 +#define EDMA_RXQ_RFD_BURST_NUM_SHIFT 0
4521 +#define EDMA_RXQ_RFD_PF_THRESH_SHIFT 8
4522 +#define EDMA_RXQ_RFD_LOW_THRESH_SHIFT 16
4523 +
4524 +/* RXQ Control Register */
4525 +#define EDMA_REG_RXQ_CTRL 0xA18
4526 +#define EDMA_FIFO_THRESH_TYPE_SHIF 0
4527 +#define EDMA_FIFO_THRESH_128_BYTE 0x0
4528 +#define EDMA_FIFO_THRESH_64_BYTE 0x1
4529 +#define EDMA_RXQ_CTRL_RMV_VLAN 0x00000002
4530 +#define EDMA_RXQ_CTRL_EN 0x0000FF00
4531 +
4532 +/* AXI Burst Size Config */
4533 +#define EDMA_REG_AXIW_CTRL_MAXWRSIZE 0xA1C
4534 +#define EDMA_AXIW_MAXWRSIZE_VALUE 0x0
4535 +
4536 +/* Rx Statistics Register */
4537 +#define EDMA_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */
4538 +#define EDMA_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */
4539 +
4540 +/* WoL Pattern Length Register */
4541 +#define EDMA_REG_WOL_PATTERN_LEN0 0xC00
4542 +#define EDMA_WOL_PT_LEN_MASK 0xFF
4543 +#define EDMA_WOL_PT0_LEN_SHIFT 0
4544 +#define EDMA_WOL_PT1_LEN_SHIFT 8
4545 +#define EDMA_WOL_PT2_LEN_SHIFT 16
4546 +#define EDMA_WOL_PT3_LEN_SHIFT 24
4547 +
4548 +#define EDMA_REG_WOL_PATTERN_LEN1 0xC04
4549 +#define EDMA_WOL_PT4_LEN_SHIFT 0
4550 +#define EDMA_WOL_PT5_LEN_SHIFT 8
4551 +#define EDMA_WOL_PT6_LEN_SHIFT 16
4552 +
4553 +/* WoL Control Register */
4554 +#define EDMA_REG_WOL_CTRL 0xC08
4555 +#define EDMA_WOL_WK_EN 0x00000001
4556 +#define EDMA_WOL_MG_EN 0x00000002
4557 +#define EDMA_WOL_PT0_EN 0x00000004
4558 +#define EDMA_WOL_PT1_EN 0x00000008
4559 +#define EDMA_WOL_PT2_EN 0x00000010
4560 +#define EDMA_WOL_PT3_EN 0x00000020
4561 +#define EDMA_WOL_PT4_EN 0x00000040
4562 +#define EDMA_WOL_PT5_EN 0x00000080
4563 +#define EDMA_WOL_PT6_EN 0x00000100
4564 +
4565 +/* MAC Control Register */
4566 +#define EDMA_REG_MAC_CTRL0 0xC20
4567 +#define EDMA_REG_MAC_CTRL1 0xC24
4568 +
4569 +/* WoL Pattern Register */
4570 +#define EDMA_REG_WOL_PATTERN_START 0x5000
4571 +#define EDMA_PATTERN_PART_REG_OFFSET 0x40
4572 +
4573 +
4574 +/* TX descriptor fields */
4575 +#define EDMA_TPD_HDR_SHIFT 0
4576 +#define EDMA_TPD_PPPOE_EN 0x00000100
4577 +#define EDMA_TPD_IP_CSUM_EN 0x00000200
4578 +#define EDMA_TPD_TCP_CSUM_EN 0x00000400
4579 +#define EDMA_TPD_UDP_CSUM_EN 0x00000800
4580 +#define EDMA_TPD_CUSTOM_CSUM_EN 0x00000C00
4581 +#define EDMA_TPD_LSO_EN 0x00001000
4582 +#define EDMA_TPD_LSO_V2_EN 0x00002000
4583 +#define EDMA_TPD_IPV4_EN 0x00010000
4584 +#define EDMA_TPD_MSS_MASK 0x1FFF
4585 +#define EDMA_TPD_MSS_SHIFT 18
4586 +#define EDMA_TPD_CUSTOM_CSUM_SHIFT 18
4587 +
4588 +/* RRD descriptor fields */
4589 +#define EDMA_RRD_NUM_RFD_MASK 0x000F
4590 +#define EDMA_RRD_SVLAN 0x8000
4591 +#define EDMA_RRD_FLOW_COOKIE_MASK 0x07FF
4592 +
4593 +#define EDMA_RRD_PKT_SIZE_MASK 0x3FFF
4594 +#define EDMA_RRD_CSUM_FAIL_MASK 0xC000
4595 +#define EDMA_RRD_CVLAN 0x0001
4596 +#define EDMA_RRD_DESC_VALID 0x8000
4597 +
4598 +#define EDMA_RRD_PRIORITY_SHIFT 4
4599 +#define EDMA_RRD_PRIORITY_MASK 0x7
4600 +#define EDMA_RRD_PORT_TYPE_SHIFT 7
4601 +#define EDMA_RRD_PORT_TYPE_MASK 0x1F
4602 +#endif /* _ESS_EDMA_H_ */