/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>

#include "xgbe.h"
#include "xgbe-common.h"

static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);

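/* Channel setup: one xgbe_channel per DMA channel, allocated below as a
 * single array sized for max(tx_ring_count, rx_ring_count).  A channel
 * is given a Tx and/or Rx ring only while its index is below the
 * corresponding ring count, so when the counts differ the trailing
 * channels are Tx-only or Rx-only.
 */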
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel_mem, *channel;
	struct xgbe_ring *tx_ring, *rx_ring;
	unsigned int count, i;
	int ret = -ENOMEM;

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);

	channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
	if (!channel_mem)
		goto err_channel;

	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!tx_ring)
		goto err_tx_ring;

	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!rx_ring)
		goto err_rx_ring;

	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);

		if (pdata->per_channel_irq) {
			/* Get the DMA interrupt (offset 1) */
			ret = platform_get_irq(pdata->pdev, i + 1);
			if (ret < 0) {
				netdev_err(pdata->netdev,
					   "platform_get_irq %u failed\n",
					   i + 1);
				goto err_irq;
			}

			channel->dma_irq = ret;
		}

		if (i < pdata->tx_ring_count) {
			spin_lock_init(&tx_ring->lock);
			channel->tx_ring = tx_ring++;
		}

		if (i < pdata->rx_ring_count) {
			spin_lock_init(&rx_ring->lock);
			channel->rx_ring = rx_ring++;
		}

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
			  channel->name, channel->dma_regs, channel->dma_irq,
			  channel->tx_ring, channel->rx_ring);
	}

	pdata->channel = channel_mem;
	pdata->channel_count = count;

	return 0;

err_irq:
	kfree(rx_ring);

err_rx_ring:
	kfree(tx_ring);

err_tx_ring:
	kfree(channel_mem);

err_channel:
	return ret;
}

210 static void xgbe_free_channels(struct xgbe_prv_data *pdata)
215 kfree(pdata->channel->rx_ring);
216 kfree(pdata->channel->tx_ring);
217 kfree(pdata->channel);
219 pdata->channel = NULL;
220 pdata->channel_count = 0;
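/* Ring occupancy helpers.  cur and dirty are free-running unsigned
 * indices (masked only when looking up descriptor data), so their
 * difference stays valid across wrap-around: e.g. cur = 5 after
 * wrapping and dirty = 0xfffffffe still gives 5 - 0xfffffffe = 7
 * outstanding descriptors in 32-bit arithmetic.
 */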
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
	return (ring->cur - ring->dirty);
}

static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
				    struct xgbe_ring *ring, unsigned int count)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	if (count > xgbe_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_if.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

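/* Rx buffer sizing: frame data plus Ethernet overhead, clamped to at
 * least the minimum buffer size and at most a page, then rounded up to
 * the hardware alignment.  For example, assuming the usual 64-byte
 * XGBE_RX_BUF_ALIGN, a 1500-byte MTU gives 1500 + 14 + 4 + 4 = 1522
 * bytes, which rounds up to 1536.
 */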
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	enum xgbe_int int_id;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_if->enable_int(channel, int_id);
	}
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	enum xgbe_int int_id;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_if->disable_int(channel, int_id);
	}
}

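/* Interrupt handling: with a single device interrupt, xgbe_isr() fields
 * DMA, MAC and MTL events and schedules the one driver-wide NAPI
 * instance.  With per-channel interrupts, each DMA channel has its own
 * vector and xgbe_dma_isr() schedules that channel's NAPI instance
 * instead; masking is then done by disabling the IRQ line rather than
 * by touching the DMA interrupt enable registers.
 */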
static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_tssr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel + i;

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule(&pdata->napi);
			}
		}

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
			pdata->ext_stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear all interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				queue_work(pdata->dev_workqueue,
					   &pdata->tx_tstamp_work);
			}
		}
	}

isr_done:
	return IRQ_HANDLED;
}

static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
	struct xgbe_channel *channel = data;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule(&channel->napi);
	}

	return IRQ_HANDLED;
}

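/* Tx timer: when Tx interrupts are coalesced, this timer acts as a
 * backstop so completed descriptors are still cleaned in a timely
 * fashion.  It simply kicks the same NAPI instance the interrupt
 * handlers would, using the per-channel or driver-wide instance as
 * appropriate.
 */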
static void xgbe_tx_timer(unsigned long data)
{
	struct xgbe_channel *channel = (struct xgbe_channel *)data;
	struct xgbe_prv_data *pdata = channel->pdata;
	struct napi_struct *napi;

	DBGPR("-->xgbe_tx_timer\n");

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq_nosync(channel->dma_irq);
		else
			xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;

	DBGPR("<--xgbe_tx_timer\n");
}

static void xgbe_service(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   service_work);

	pdata->phy_if.phy_status(pdata);
}

static void xgbe_service_timer(unsigned long data)
{
	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;

	queue_work(pdata->dev_workqueue, &pdata->service_work);

	mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	setup_timer(&pdata->service_timer, xgbe_service_timer,
		    (unsigned long)pdata);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		setup_timer(&channel->tx_timer, xgbe_tx_timer,
			    (unsigned long)channel);
	}
}

static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
	mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	del_timer_sync(&pdata->service_timer);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}

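/* Hardware feature discovery: MAC_HWF0R/1R/2R describe what this
 * instance of the IP was synthesized with.  Several fields are encoded
 * (hash table size, DMA address width) or zero-based (queue, channel
 * and TC counts) and are translated to plain numbers below so the rest
 * of the driver can use them directly.
 */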
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	DBGPR("-->xgbe_get_all_hw_features\n");

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	DBGPR("<--xgbe_get_all_hw_features\n");
}

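/* NAPI lifecycle: the add/del flags let these helpers be used both from
 * open/close, where the NAPI instances must be (un)registered, and from
 * powerdown/powerup, where they are only disabled and re-enabled.
 */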
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xgbe_one_poll,
					       NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xgbe_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
			       netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xgbe_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_data\n");
}

static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;

	return pdata->phy_if.phy_reset(pdata);
}

int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	xgbe_napi_disable(pdata, 0);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	xgbe_napi_enable(pdata, 0);

	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}

static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct net_device *netdev = pdata->netdev;
	int ret;

	DBGPR("-->xgbe_start\n");

	ret = hw_if->init(pdata);
	if (ret)
		return ret;

	ret = phy_if->phy_start(pdata);
	if (ret)
		goto err_phy;

	xgbe_napi_enable(pdata, 1);

	ret = xgbe_request_irqs(pdata);
	if (ret)
		goto err_napi;

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);
	queue_work(pdata->dev_workqueue, &pdata->service_work);

	DBGPR("<--xgbe_start\n");

	return 0;

err_napi:
	xgbe_napi_disable(pdata, 1);

	phy_if->phy_stop(pdata);

err_phy:
	hw_if->exit(pdata);

	return ret;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	unsigned int i;

	DBGPR("-->xgbe_stop\n");

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	xgbe_free_irqs(pdata);

	xgbe_napi_disable(pdata, 1);

	phy_if->phy_stop(pdata);

	hw_if->exit(pdata);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}

	DBGPR("<--xgbe_stop\n");
}

static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_restart_dev\n");

	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	xgbe_start(pdata);

	DBGPR("<--xgbe_restart_dev\n");
}

static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata);

	rtnl_unlock();
}

static void xgbe_tx_tstamp(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   tx_tstamp_work);
	struct skb_shared_hwtstamps hwtstamps;
	unsigned long flags;
	u64 nsec;

	if (pdata->tx_tstamp) {
		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
					    pdata->tx_tstamp);

		memset(&hwtstamps, 0, sizeof(hwtstamps));
		hwtstamps.hwtstamp = ns_to_ktime(nsec);
		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
	}

	dev_kfree_skb_any(pdata->tx_tstamp_skb);

	spin_lock_irqsave(&pdata->tstamp_lock, flags);
	pdata->tx_tstamp_skb = NULL;
	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}

static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
			 sizeof(pdata->tstamp_config)))
		return -EFAULT;

	return 0;
}

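/* Note the deliberate case fall-through in the rx_filter switch below:
 * the PTP v2 filters only add TSVER2ENA on top of the corresponding v1
 * configuration, so each v2 case falls into its v1 counterpart rather
 * than duplicating the register setup.
 */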
static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	struct hwtstamp_config config;
	unsigned int mac_tscr;

	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	mac_tscr = 0;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;

	case HWTSTAMP_TX_ON:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;

	case HWTSTAMP_FILTER_ALL:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	pdata->hw_if.config_tstamp(pdata, mac_tscr);

	memcpy(&pdata->tstamp_config, &config, sizeof(config));

	return 0;
}

static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
				struct sk_buff *skb,
				struct xgbe_packet_data *packet)
{
	unsigned long flags;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
		spin_lock_irqsave(&pdata->tstamp_lock, flags);
		if (pdata->tx_tstamp_skb) {
			/* Another timestamp in progress, ignore this one */
			XGMAC_SET_BITS(packet->attributes,
				       TX_PACKET_ATTRIBUTES, PTP, 0);
		} else {
			pdata->tx_tstamp_skb = skb_get(skb);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
	}

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
		skb_tx_timestamp(skb);
}

static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (skb_vlan_tag_present(skb))
		packet->vlan_ctag = skb_vlan_tag_get(skb);
}

static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	packet->tcp_header_len = tcp_hdrlen(skb);
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;

	DBGPR("  packet->header_len=%u\n", packet->header_len);
	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR("  packet->mss=%u\n", packet->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	packet->tx_packets = skb_shinfo(skb)->gso_segs;
	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;

	return 0;
}

static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR("  TSO packet to be processed\n");

	return 1;
}

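/* Descriptor budgeting: xgbe_packet_info() counts one descriptor per
 * XGBE_TX_MAX_BUF_SIZE chunk of the linear data and of each fragment,
 * plus extras for a TSO header and for a context descriptor when the
 * MSS or VLAN tag differs from the values currently programmed in the
 * ring.  xgbe_maybe_stop_tx_queue() then checks this estimate against
 * the free descriptors before any buffer mapping is done.
 */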
static void xgbe_packet_info(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	context_desc = 0;
	packet->rdesc_count = 0;

	packet->tx_packets = 1;
	packet->tx_bytes = skb->len;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       PTP, 1);

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}

static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_open\n");

	/* Initialize the phy */
	ret = xgbe_phy_init(pdata);
	if (ret)
		return ret;

	/* Enable the clocks */
	ret = clk_prepare_enable(pdata->sysclk);
	if (ret) {
		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
		return ret;
	}

	ret = clk_prepare_enable(pdata->ptpclk);
	if (ret) {
		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
		goto err_sysclk;
	}

	/* Calculate the Rx buffer size before allocating rings */
	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		goto err_ptpclk;
	pdata->rx_buf_size = ret;

	/* Allocate the channel and ring structures */
	ret = xgbe_alloc_channels(pdata);
	if (ret)
		goto err_ptpclk;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_channels;

	INIT_WORK(&pdata->service_work, xgbe_service);
	INIT_WORK(&pdata->restart_work, xgbe_restart);
	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
	xgbe_init_timers(pdata);

	ret = xgbe_start(pdata);
	if (ret)
		goto err_rings;

	clear_bit(XGBE_DOWN, &pdata->dev_state);

	DBGPR("<--xgbe_open\n");

	return 0;

err_rings:
	desc_if->free_ring_resources(pdata);

err_channels:
	xgbe_free_channels(pdata);

err_ptpclk:
	clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
	clk_disable_unprepare(pdata->sysclk);

	return ret;
}

static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	DBGPR("-->xgbe_close\n");

	/* Stop the device */
	xgbe_stop(pdata);

	/* Free the ring descriptors and buffers */
	desc_if->free_ring_resources(pdata);

	/* Free the channel and ring structures */
	xgbe_free_channels(pdata);

	/* Disable the clocks */
	clk_disable_unprepare(pdata->ptpclk);
	clk_disable_unprepare(pdata->sysclk);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	DBGPR("<--xgbe_close\n");

	return 0;
}

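/* Transmit path: count the descriptors needed first so the queue can be
 * stopped before anything is mapped, then do TSO/VLAN preparation, map
 * the skb, and only then hand the descriptors to the hardware via
 * dev_xmit().  BQL is informed with the wire byte count (tx_bytes
 * includes the replicated headers of a TSO send).
 */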
static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	struct netdev_queue *txq;
	int ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(pdata, ring, skb, packet);

	/* Check that there are enough descriptors available */
	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
	if (ret)
		goto tx_netdev_return;

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	xgbe_prep_tx_tstamp(pdata, skb, packet);

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, packet->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_if->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xgbe_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);

	ret = NETDEV_TX_OK;

tx_netdev_return:
	return ret;
}

static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_set_rx_mode\n");

	hw_if->config_rx_mode(pdata);

	DBGPR("<--xgbe_set_rx_mode\n");
}

static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}

static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
		break;

	case SIOCSHWTSTAMP:
		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}

static void xgbe_tx_timeout(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}

static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
						  struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);

	return s;
}

static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_poll_controller\n");

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			xgbe_dma_isr(channel->dma_irq, channel);
	} else {
		disable_irq(pdata->dev_irq);
		xgbe_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */

static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	unsigned int offset, queue;
	u8 i;

	if (tc && (tc != pdata->hw_feat.tc_cnt))
		return -EINVAL;

	if (tc) {
		netdev_set_num_tc(netdev, tc);
		for (i = 0, queue = 0, offset = 0; i < tc; i++) {
			while ((queue < pdata->tx_q_count) &&
			       (pdata->q2tc_map[queue] == i))
				queue++;

			netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n",
				  i, offset, queue - 1);
			netdev_set_tc_queue(netdev, i, queue - offset, offset);
			offset = queue;
		}
	} else {
		netdev_reset_tc(netdev);
	}

	return 0;
}

static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_if->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_if->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_if->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_if->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}

static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open		= xgbe_open,
	.ndo_stop		= xgbe_close,
	.ndo_start_xmit		= xgbe_xmit,
	.ndo_set_rx_mode	= xgbe_set_rx_mode,
	.ndo_set_mac_address	= xgbe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= xgbe_ioctl,
	.ndo_change_mtu		= xgbe_change_mtu,
	.ndo_tx_timeout		= xgbe_tx_timeout,
	.ndo_get_stats64	= xgbe_get_stats64,
	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xgbe_poll_controller,
#endif
	.ndo_setup_tc		= xgbe_setup_tc,
	.ndo_set_features	= xgbe_set_features,
};

struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return (struct net_device_ops *)&xgbe_netdev_ops;
}

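/* Rx descriptor refresh: re-arm every cleaned entry with a fresh buffer,
 * then advance the ring tail pointer.  The wmb() orders the descriptor
 * writes against the tail-pointer MMIO write so the hardware never sees
 * a tail covering descriptors it could read half-initialized.
 */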
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	while (ring->dirty != ring->cur) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);

		/* Reset rdata values */
		desc_if->unmap_rdata(pdata, rdata);

		if (desc_if->map_rx_buffer(pdata, ring, rdata))
			break;

		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));
}

static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
				       struct napi_struct *napi,
				       struct xgbe_ring_data *rdata,
				       unsigned int len)
{
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Pull in the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
				      rdata->rx.hdr.dma_off,
				      rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);

	packet = page_address(rdata->rx.hdr.pa.pages) +
		 rdata->rx.hdr.pa.pages_offset;
	skb_copy_to_linear_data(skb, packet, len);
	skb_put(skb, len);

	return skb;
}

static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet)
{
	/* Always zero if not the first descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
		return 0;

	/* First descriptor with split header, return header length */
	if (rdata->rx.hdr_len)
		return rdata->rx.hdr_len;

	/* First descriptor but not the last descriptor and no split header,
	 * so the full buffer was used
	 */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.hdr.dma_len;

	/* First descriptor and last descriptor and no split header, so
	 * calculate how much of the buffer was used
	 */
	return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
}

static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet,
				     unsigned int len)
{
	/* Always the full buffer if not the last descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.buf.dma_len;

	/* Last descriptor so calculate how much of the buffer was used
	 * for the last bit of data
	 */
	return rdata->rx.len - len;
}

static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int tx_packets = 0, tx_bytes = 0;
	unsigned int cur;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

		/* Make sure descriptor fields are read after reading the OWN
		 * bit */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_if->is_last_desc(rdesc)) {
			tx_packets += rdata->tx.packets;
			tx_bytes += rdata->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_rdata(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	return processed;
}

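/* Rx poll: a packet can span several descriptors (split header, jumbo
 * frames, a trailing context descriptor for the timestamp), so if the
 * budget runs out mid-packet the partial skb, length and error state
 * are parked in the ring entry (state_saved) and restored on the next
 * poll.
 */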
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int last, error, context_next, context;
	unsigned int len, buf1_len, buf2_len, max_len;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	last = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (packet_count < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* First time in loop see if we need to restore state */
		if (!received && rdata->state_saved) {
			skb = rdata->state.skb;
			error = rdata->state.error;
			len = rdata->state.len;
		} else {
			memset(packet, 0, sizeof(*packet));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;

		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				      LAST);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);
		context = XGMAC_GET_BITS(packet->attributes,
					 RX_PACKET_ATTRIBUTES,
					 CONTEXT);

		/* Earlier error, just drain the remaining data */
		if ((!last || context_next) && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Get the data length in the descriptor buffers */
			buf1_len = xgbe_rx_buf1_len(rdata, packet);
			len += buf1_len;
			buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
			len += buf2_len;

			if (!skb) {
				skb = xgbe_create_skb(pdata, napi, rdata,
						      buf1_len);
				if (!skb) {
					error = 1;
					goto skip_data;
				}
			}

			if (buf2_len) {
				dma_sync_single_range_for_cpu(pdata->dev,
							rdata->rx.buf.dma_base,
							rdata->rx.buf.dma_off,
							rdata->rx.buf.dma_len,
							DMA_FROM_DEVICE);

				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						rdata->rx.buf.pa.pages,
						rdata->rx.buf.pa.pages_offset,
						buf2_len,
						rdata->rx.buf.dma_len);
				rdata->rx.buf.pa.pages = NULL;
			}
		}

skip_data:
		if (!last || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xgbe_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
			u64 nsec;

			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
			hwtstamps = skb_hwtstamps(skb);
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RSS_HASH))
			skb_set_hash(skb, packet->rss_hash,
				     packet->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);
		skb_mark_napi_id(skb, napi);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (!last || context_next)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdata->state_saved = 1;
		rdata->state.skb = skb;
		rdata->state.len = len;
		rdata->state.error = error;
	}

	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

	return packet_count;
}

static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
						    napi);
	int processed = 0;

	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = xgbe_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete(napi);

		/* Enable Tx and Rx interrupts */
		enable_irq(channel->dma_irq);
	}

	DBGPR("<--xgbe_one_poll: received = %d\n", processed);

	return processed;
}

static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete(napi);

		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_all_poll: received = %d\n", processed);

	return processed;
}

void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx, unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		netdev_dbg(pdata->netdev,
			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
			   le32_to_cpu(rdesc->desc0),
			   le32_to_cpu(rdesc->desc1),
			   le32_to_cpu(rdesc->desc2),
			   le32_to_cpu(rdesc->desc3));
		idx++;
	}
}

void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	rdata = XGBE_GET_DESC_DATA(ring, idx);
	rdesc = rdata->rdesc;
	netdev_dbg(pdata->netdev,
		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
		   idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		   le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
}

void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char *buf = skb->data;
	unsigned char buffer[128];
	unsigned int i, j;

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");

	netdev_dbg(netdev, "%s packet of %d bytes\n",
		   (tx_rx ? "TX" : "RX"), skb->len);

	netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));

	for (i = 0, j = 0; i < skb->len;) {
		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
			      buf[i++]);

		if ((i % 32) == 0) {
			netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
			j = 0;
		} else if ((i % 16) == 0) {
			buffer[j++] = ' ';
			buffer[j++] = ' ';
		} else if ((i % 4) == 0) {
			buffer[j++] = ' ';
		}
	}
	if (i % 32)
		netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}