/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of
 *     Synopsys, Inc. unless otherwise expressly agreed to in writing
 *     between Synopsys and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for
 *     Licensed Product with Synopsys or any supplement thereto.  Permission
 *     is hereby granted, free of charge, to any person obtaining a copy of
 *     this software annotated with this license and the Software, to deal
 *     in the Software without restriction, including without limitation
 *     the rights to use, copy, modify, merge, publish, distribute,
 *     sublicense, and/or sell copies of the Software, and to permit
 *     persons to whom the Software is furnished to do so, subject to the
 *     following conditions:
 *
 *     The above copyright notice and this permission notice shall be
 *     included in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *     FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL
 *     SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 *     EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 *     PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 *     PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 *     OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *     (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *     USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 *     DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of
 *     Synopsys, Inc. unless otherwise expressly agreed to in writing
 *     between Synopsys and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for
 *     Licensed Product with Synopsys or any supplement thereto.  Permission
 *     is hereby granted, free of charge, to any person obtaining a copy of
 *     this software annotated with this license and the Software, to deal
 *     in the Software without restriction, including without limitation
 *     the rights to use, copy, modify, merge, publish, distribute,
 *     sublicense, and/or sell copies of the Software, and to permit
 *     persons to whom the Software is furnished to do so, subject to the
 *     following conditions:
 *
 *     The above copyright notice and this permission notice shall be
 *     included in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *     FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL
 *     SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 *     EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 *     PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 *     PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 *     OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *     (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *     USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 *     DAMAGE.
 */

#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>

#include "xgbe.h"
#include "xgbe-common.h"

static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
				      unsigned int usec)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_usec_to_riwt\n");

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input usec value to the watchdog timer value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( usec * ( system_clock_mhz / 10^6 ) ) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;

	DBGPR("<--xgbe_usec_to_riwt\n");

	return ret;
}

static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
				      unsigned int riwt)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_riwt_to_usec\n");

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input watchdog timer value to the usec value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( riwt * 256 ) / ( system_clock_mhz / 10^6 )
	 */
	ret = (riwt * 256) / (rate / 1000000);

	DBGPR("<--xgbe_riwt_to_usec\n");

	return ret;
}
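
/* A worked example of the two conversions above, assuming a
 * hypothetical 125 MHz system clock (sysclk_rate = 125000000,
 * i.e. 125 cycles per usec):
 *
 *   xgbe_usec_to_riwt(pdata, 100) = (100 * 125) / 256 = 48
 *   xgbe_riwt_to_usec(pdata, 48)  = (48 * 256) / 125  = 98
 *
 * The round trip is inexact because both divisions truncate; the
 * watchdog granularity is 256 clock cycles (~2 usec at 125 MHz).
 */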
static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
				       pdata->pblx8);

	return 0;
}

static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
{
	return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
}

static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
				       pdata->tx_pbl);
	}

	return 0;
}

static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
{
	return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
}

static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
				       pdata->rx_pbl);
	}

	return 0;
}
static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
				       pdata->tx_osp_mode);
	}

	return 0;
}

static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return 0;
}

static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
				       pdata->rx_riwt);
	}

	return 0;
}

static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
	return 0;
}
static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
				       pdata->rx_buf_size);
	}
}

static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
	}
}

static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
	}

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
			      unsigned int index, unsigned int val)
{
	unsigned int wait;
	int ret = 0;

	mutex_lock(&pdata->rss_mutex);

	if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
		ret = -EBUSY;
		goto unlock;
	}

	XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

	wait = 1000;
	while (wait--) {
		if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
			goto unlock;

		usleep_range(1000, 1500);
	}

	ret = -EBUSY;

unlock:
	mutex_unlock(&pdata->rss_mutex);

	return ret;
}

static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
	unsigned int *key = (unsigned int *)&pdata->rss_key;
	int ret;

	while (key_regs--) {
		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
					 key_regs, *key++);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
		ret = xgbe_write_rss_reg(pdata,
					 XGBE_RSS_LOOKUP_TABLE_TYPE, i,
					 pdata->rss_table[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
{
	memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));

	return xgbe_write_rss_hash_key(pdata);
}

static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
				     const u32 *table)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);

	return xgbe_write_rss_lookup_table(pdata);
}
static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	/* Program the hash key */
	ret = xgbe_write_rss_hash_key(pdata);
	if (ret)
		return ret;

	/* Program the lookup table */
	ret = xgbe_write_rss_lookup_table(pdata);
	if (ret)
		return ret;

	/* Set the RSS options */
	XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	/* Enable RSS */
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

	return 0;
}

static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);

	return 0;
}

static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return;

	if (pdata->netdev->features & NETIF_F_RXHASH)
		ret = xgbe_enable_rss(pdata);
	else
		ret = xgbe_disable_rss(pdata);

	if (ret)
		netdev_err(pdata->netdev,
			   "error configuring RSS, RSS disabled\n");
}
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;
	struct ieee_ets *ets = pdata->ets;
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		unsigned int ehfc = 0;

		if (pfc && ets) {
			unsigned int prio;

			for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
				unsigned int tc;

				/* Does this queue handle the priority? */
				if (pdata->prio2q_map[prio] != i)
					continue;

				/* Get the Traffic Class for this priority */
				tc = ets->prio_tc[prio];

				/* Check if flow control should be enabled */
				if (pfc->pfc_en & (1 << tc)) {
					ehfc = 1;
					break;
				}
			}
		}

		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);

		netif_dbg(pdata, drv, pdata->netdev,
			  "flow control %s for RXq%u\n",
			  ehfc ? "enabled" : "disabled", i);
	}

	/* Set MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
		/* Set pause time */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}

static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}

static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->tx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_tx_flow_control(pdata);
	else
		xgbe_disable_tx_flow_control(pdata);

	return 0;
}

static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->rx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_rx_flow_control(pdata);
	else
		xgbe_disable_rx_flow_control(pdata);

	return 0;
}

static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	xgbe_config_tx_flow_control(pdata);
	xgbe_config_rx_flow_control(pdata);

	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
			   (pfc && pfc->pfc_en) ? 1 : 0);
}
static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int dma_ch_isr, dma_ch_ier;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Clear all the interrupts which are set */
		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);

		/* Clear all interrupt enable bits */
		dma_ch_ier = 0;

		/* Enable following interrupts
		 *   NIE  - Normal Interrupt Summary Enable
		 *   AIE  - Abnormal Interrupt Summary Enable
		 *   FBEE - Fatal Bus Error Enable
		 */
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

		if (channel->tx_ring) {
			/* Enable the following Tx interrupts
			 *   TIE - Transmit Interrupt Enable (unless using
			 *         per channel interrupts)
			 */
			if (!pdata->per_channel_irq)
				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
		}
		if (channel->rx_ring) {
			/* Enable following Rx interrupts
			 *   RBUE - Receive Buffer Unavailable Enable
			 *   RIE  - Receive Interrupt Enable (unless using
			 *          per channel interrupts)
			 */
			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
			if (!pdata->per_channel_irq)
				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
		}

		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
	}
}

static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mtl_q_isr;
	unsigned int q_count, i;

	q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}

static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mac_ier = 0;

	/* Enable Timestamp interrupt */
	XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

	/* Enable all counter interrupts */
	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
}
static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
{
	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
		return 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);

	return 0;
}

static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
{
	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
		return 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);

	return 0;
}

static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
{
	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
		return 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);

	return 0;
}
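
/* Summary of the MAC_TCR.SS encodings used by the three helpers above
 * (each helper first reads the field and skips the register write when
 * the requested speed is already selected):
 *
 *   SS = 0x0 - XGMII, 10 Gbps
 *   SS = 0x2 - GMII, 2.5 Gbps
 *   SS = 0x3 - GMII, 1 Gbps
 */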
static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	/* Put the VLAN tag in the Rx descriptor */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

	/* Don't check the VLAN type */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

	/* Check only C-TAG (0x8100) packets */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

	/* Enable VLAN tag stripping */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

	return 0;
}

static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

	return 0;
}

static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Enable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

	/* Enable VLAN Hash Table filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

	/* Disable VLAN tag inverse matching */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

	/* Only filter on the lower 12-bits of the VLAN tag */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero.  Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

	return 0;
}

static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Disable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

	return 0;
}
static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
	u32 poly = 0xedb88320;	/* CRCPOLY_LE */
	u32 crc = ~0;
	u32 temp = 0;
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= poly;
	}

	return crc;
}

static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
	u32 crc;
	u16 vid;
	__le16 vid_le;
	u16 vlan_hash_table = 0;

	/* Generate the VLAN Hash Table value */
	for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
		/* Get the CRC32 value of the VLAN ID */
		vid_le = cpu_to_le16(vid);
		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

		vlan_hash_table |= (1 << crc);
	}

	/* Set the VLAN Hash Table filtering register */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

	return 0;
}
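
/* Example of the hashing above: the hash is the top 4 bits of the
 * bit-reversed CRC-32 of the little-endian VID, so each active VID
 * sets one of the 16 VLHT bits.  If two VIDs hash to the same bit,
 * packets tagged with either VID pass the hash filter - the table
 * matches a superset of the active VIDs, not the exact set.
 */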
static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
				     unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
		  enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

	/* Hardware will still perform VLAN filtering in promiscuous mode */
	if (enable) {
		xgbe_disable_rx_vlan_filtering(pdata);
	} else {
		if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
			xgbe_enable_rx_vlan_filtering(pdata);
	}

	return 0;
}

static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
				       unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
		  enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

	return 0;
}
static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
			     struct netdev_hw_addr *ha, unsigned int *mac_reg)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	u8 *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (ha) {
		mac_addr = (u8 *)&mac_addr_lo;
		mac_addr[0] = ha->addr[0];
		mac_addr[1] = ha->addr[1];
		mac_addr[2] = ha->addr[2];
		mac_addr[3] = ha->addr[3];
		mac_addr = (u8 *)&mac_addr_hi;
		mac_addr[0] = ha->addr[4];
		mac_addr[1] = ha->addr[5];

		netif_dbg(pdata, drv, pdata->netdev,
			  "adding mac address %pM at %#x\n",
			  ha->addr, *mac_reg);

		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
	}

	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
	*mac_reg += MAC_MACA_INC;
	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
	*mac_reg += MAC_MACA_INC;
}

static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mac_reg;
	unsigned int addn_macs;

	mac_reg = MAC_MACA1HR;
	addn_macs = pdata->hw_feat.addn_mac;

	if (netdev_uc_count(netdev) > addn_macs) {
		xgbe_set_promiscuous_mode(pdata, 1);
	} else {
		netdev_for_each_uc_addr(ha, netdev) {
			xgbe_set_mac_reg(pdata, ha, &mac_reg);
			addn_macs--;
		}

		if (netdev_mc_count(netdev) > addn_macs) {
			xgbe_set_all_multicast_mode(pdata, 1);
		} else {
			netdev_for_each_mc_addr(ha, netdev) {
				xgbe_set_mac_reg(pdata, ha, &mac_reg);
				addn_macs--;
			}
		}
	}

	/* Clear remaining additional MAC address entries */
	while (addn_macs--)
		xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}
static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int hash_reg;
	unsigned int hash_table_shift, hash_table_count;
	u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
	u32 crc;
	unsigned int i;

	hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
	hash_table_count = pdata->hw_feat.hash_table_size / 32;
	memset(hash_table, 0, sizeof(hash_table));

	/* Build the MAC Hash Table register values */
	netdev_for_each_uc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	netdev_for_each_mc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	/* Set the MAC Hash Table registers */
	hash_reg = MAC_HTR0;
	for (i = 0; i < hash_table_count; i++) {
		XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
		hash_reg += MAC_HTR_INC;
	}
}
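
/* Worked example of the sizing math above for a 256-bit hardware hash
 * table (hw_feat.hash_table_size = 256):
 *
 *   hash_table_shift = 26 - (256 >> 7) = 24, keeping the upper 8 bits
 *   of the bit-reversed CRC-32 of the address
 *   hash_table_count = 256 / 32 = 8 MAC_HTR registers
 *
 * Bit N of the table is bit (N & 0x1f) of hash_table[N >> 5].
 */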
static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
	if (pdata->hw_feat.hash_table_size)
		xgbe_set_mac_hash_table(pdata);
	else
		xgbe_set_mac_addn_addrs(pdata);

	return 0;
}

static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
{
	unsigned int mac_addr_hi, mac_addr_lo;

	mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
		      (addr[1] <<  8) | (addr[0] <<  0);

	XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
	XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

	return 0;
}
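
/* Example of the packing above: for dev_addr 02:11:22:33:44:55,
 *
 *   mac_addr_lo = 0x33221102  (written to MAC_MACA0LR)
 *   mac_addr_hi = 0x00005544  (written to MAC_MACA0HR)
 *
 * i.e. the first four octets fill the low register and the last two
 * the high register, least significant byte first.
 */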
static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	unsigned int pr_mode, am_mode;

	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

	xgbe_set_promiscuous_mode(pdata, pr_mode);
	xgbe_set_all_multicast_mode(pdata, am_mode);

	xgbe_add_mac_addresses(pdata);

	return 0;
}
static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
			      int mmd_reg)
{
	unsigned long flags;
	unsigned int mmd_address;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and reading 32 bits of data.
	 */
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
	mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

	return mmd_data;
}

static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
				int mmd_reg, int mmd_data)
{
	unsigned int mmd_address;
	unsigned long flags;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and writing 32 bits of data.
	 */
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
	XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
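
/* A worked example of the two-phase access above, assuming
 * pdata->mdio_mmd selects MMD 3 (the PCS) and mmd_reg = 0x0010 in
 * non-clause-45 form:
 *
 *   mmd_address  = (3 << 16) | 0x0010 = 0x30010
 *   address phase: write 0x30010 >> 8 = 0x300 at PCS_MMD_SELECT << 2
 *   data phase:    access offset (0x10 & 0xff) << 2 = 0x40
 *
 * The low 8 bits of the MMD address therefore select a 32-bit word
 * inside the 256-register window chosen by the address phase.
 */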
static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
	return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
}

static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

	return 0;
}

static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

	return 0;
}

static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;

	/* Reset the Tx descriptor
	 *   Set buffer 1 (lo) address to zero
	 *   Set buffer 1 (hi) address to zero
	 *   Reset all other control bits (IC, TTSE, B2L & B1L)
	 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
	 */
	rdesc->desc0 = 0;
	rdesc->desc1 = 0;
	rdesc->desc2 = 0;
	rdesc->desc3 = 0;

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}
static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	int i;
	int start_index = ring->cur;

	DBGPR("-->tx_desc_init\n");

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Tx descriptor */
		xgbe_tx_desc_reset(rdata);
	}

	/* Update the total number of Tx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--tx_desc_init\n");
}
static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
			       struct xgbe_ring_data *rdata, unsigned int index)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;
	unsigned int rx_usecs = pdata->rx_usecs;
	unsigned int rx_frames = pdata->rx_frames;
	unsigned int inte;
	dma_addr_t hdr_dma, buf_dma;

	if (!rx_usecs && !rx_frames) {
		/* No coalescing, interrupt for every descriptor */
		inte = 1;
	} else {
		/* Set interrupt based on Rx frame coalescing setting */
		if (rx_frames && !((index + 1) % rx_frames))
			inte = 1;
		else
			inte = 0;
	}

	/* Reset the Rx descriptor
	 *   Set buffer 1 (lo) address to header dma address (lo)
	 *   Set buffer 1 (hi) address to header dma address (hi)
	 *   Set buffer 2 (lo) address to buffer dma address (lo)
	 *   Set buffer 2 (hi) address to buffer dma address (hi) and
	 *     set control bits OWN and INTE
	 */
	hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
	buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
	rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
	rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
	rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);

	/* Since the Rx DMA engine is likely running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the descriptor
	 */
	dma_wmb();

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}
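
/* Example of the INTE policy above, assuming rx_frames = 16: INTE is
 * set only on descriptors 15, 31, 47, ... ((index + 1) % 16 == 0), so
 * the Rx DMA raises a completion interrupt at most once per 16
 * descriptors; completions in between are picked up on the next
 * interrupt (or via the rx_usecs watchdog programmed into DMA_CH_RIWT
 * by xgbe_config_rx_coalesce).
 */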
static void xgbe_rx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	unsigned int start_index = ring->cur;
	unsigned int i;

	DBGPR("-->rx_desc_init\n");

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Rx descriptor */
		xgbe_rx_desc_reset(pdata, rdata, i);
	}

	/* Update the total number of Rx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Update the Rx Descriptor Tail Pointer */
	rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--rx_desc_init\n");
}
static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
				      unsigned int addend)
{
	/* Set the addend register value and tell the device */
	XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);

	/* Wait for addend update to complete */
	while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
		udelay(5);
}

static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
				 unsigned int nsec)
{
	/* Set the time values and tell the device */
	XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
	XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);

	/* Wait for time update to complete */
	while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
		udelay(5);
}

static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
{
	u64 nsec;

	nsec = XGMAC_IOREAD(pdata, MAC_STSR);
	nsec *= NSEC_PER_SEC;
	nsec += XGMAC_IOREAD(pdata, MAC_STNR);

	return nsec;
}

static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
{
	unsigned int tx_snr;
	u64 nsec;

	tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
	if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
		return 0;

	nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
	nsec *= NSEC_PER_SEC;
	nsec += tx_snr;

	return nsec;
}

static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
			       struct xgbe_ring_desc *rdesc)
{
	u64 nsec;

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
	    !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
		nsec = le32_to_cpu(rdesc->desc1);
		nsec <<= 32;
		nsec |= le32_to_cpu(rdesc->desc0);
		if (nsec != 0xffffffffffffffffULL) {
			packet->rx_tstamp = nsec;
			XGMAC_SET_BITS(packet->attributes,
				       RX_PACKET_ATTRIBUTES,
				       RX_TSTAMP, 1);
		}
	}
}
static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
			      unsigned int mac_tscr)
{
	/* Set one nano-second accuracy */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);

	/* Set fine timestamp update */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);

	/* Overwrite earlier timestamps */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);

	XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

	/* Exit if timestamping is not enabled */
	if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
		return 0;

	/* Initialize time registers */
	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
	xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
	xgbe_set_tstamp_time(pdata, 0, 0);

	/* Initialize the timecounter */
	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
			 ktime_to_ns(ktime_get_real()));

	return 0;
}
static void xgbe_config_tc(struct xgbe_prv_data *pdata)
{
	unsigned int offset, queue, prio;
	u8 i;

	netdev_reset_tc(pdata->netdev);
	if (!pdata->num_tcs)
		return;

	netdev_set_num_tc(pdata->netdev, pdata->num_tcs);

	for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
		while ((queue < pdata->tx_q_count) &&
		       (pdata->q2tc_map[queue] == i))
			queue++;

		netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
			  i, offset, queue - 1);
		netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
		offset = queue;
	}

	if (!pdata->ets)
		return;

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		netdev_set_prio_tc_map(pdata->netdev, prio,
				       pdata->ets->prio_tc[prio]);
}
static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
{
	struct ieee_ets *ets = pdata->ets;
	unsigned int total_weight, min_weight, weight;
	unsigned int mask, reg, reg_val;
	unsigned int i, prio;

	if (!ets)
		return;

	/* Set Tx to deficit weighted round robin scheduling algorithm (when
	 * traffic class is using ETS algorithm)
	 */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);

	/* Set Traffic Class algorithms */
	total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
	min_weight = total_weight / 100;
	if (!min_weight)
		min_weight = 1;

	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		/* Map the priorities to the traffic class */
		mask = 0;
		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
			if (ets->prio_tc[prio] == i)
				mask |= (1 << prio);
		}
		mask &= 0xff;

		netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
			  i, mask);
		reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
		reg_val = XGMAC_IOREAD(pdata, reg);

		reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
		reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));

		XGMAC_IOWRITE(pdata, reg, reg_val);

		/* Set the traffic class algorithm */
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			netif_dbg(pdata, drv, pdata->netdev,
				  "TC%u using SP\n", i);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					       MTL_TSA_SP);
			break;
		case IEEE_8021QAZ_TSA_ETS:
			weight = total_weight * ets->tc_tx_bw[i] / 100;
			weight = clamp(weight, min_weight, total_weight);

			netif_dbg(pdata, drv, pdata->netdev,
				  "TC%u using DWRR (weight %u)\n", i, weight);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					       MTL_TSA_ETS);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
					       weight / 16);
			break;
		}
	}

	xgbe_config_tc(pdata);
}

static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
{
	xgbe_config_flow_control(pdata);
}
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
			       struct xgbe_ring *ring)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring_data *rdata;

	/* Make sure everything is written before the register write */
	wmb();

	/* Issue a poll command to Tx DMA by writing address
	 * of next immediate free descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Start the Tx timer */
	if (pdata->tx_usecs && !channel->tx_timer_active) {
		channel->tx_timer_active = 1;
		mod_timer(&channel->tx_timer,
			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
	}

	ring->tx.xmit_more = 0;
}
static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	unsigned int csum, tso, vlan;
	unsigned int tso_context, vlan_context;
	unsigned int tx_set_ic;
	int start_index = ring->cur;
	int cur_index = ring->cur;
	int i;

	DBGPR("-->xgbe_dev_xmit\n");

	csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      CSUM_ENABLE);
	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	if (tso && (packet->mss != ring->tx.cur_mss))
		tso_context = 1;
	else
		tso_context = 0;

	if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
		vlan_context = 1;
	else
		vlan_context = 0;

	/* Determine if an interrupt should be generated for this Tx:
	 *   Interrupt:
	 *     - Tx frame count exceeds the frame count setting
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set exceeds the frame count setting
	 *   No interrupt:
	 *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set does not exceed the frame count setting
	 */
	ring->coalesce_count += packet->tx_packets;
	if (!pdata->tx_frames)
		tx_set_ic = 0;
	else if (packet->tx_packets > pdata->tx_frames)
		tx_set_ic = 1;
	else if ((ring->coalesce_count % pdata->tx_frames) <
		 packet->tx_packets)
		tx_set_ic = 1;
	else
		tx_set_ic = 0;
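
	/* A worked example of the coalescing rule above: with
	 * tx_frames = 25 and packets that each carry 10 frames,
	 * coalesce_count runs 10, 20, 30, 40, 50, ...; the modulo test
	 * fires when the count crosses a multiple of 25 (30 % 25 = 5 < 10,
	 * 50 % 25 = 0 < 10), so tx_set_ic requests an interrupt roughly
	 * once per 25 frames rather than on every packet.
	 */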

	rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	rdesc = rdata->rdesc;

	/* Create a context descriptor if this is a TSO packet */
	if (tso_context || vlan_context) {
		if (tso_context) {
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "TSO context descriptor, mss=%u\n",
				  packet->mss);

			/* Set the MSS size */
			XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
					  MSS, packet->mss);

			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Indicate this descriptor contains the MSS */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  TCMSSV, 1);

			ring->tx.cur_mss = packet->mss;
		}

		if (vlan_context) {
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "VLAN context descriptor, ctag=%u\n",
				  packet->vlan_ctag);

			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Set the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VT, packet->vlan_ctag);

			/* Indicate this descriptor contains the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VLTV, 1);

			ring->tx.cur_vlan_ctag = packet->vlan_ctag;
		}

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		rdesc = rdata->rdesc;
	}

	/* Update buffer address (for TSO this is the header) */
	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

	/* Update the buffer length */
	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
			  rdata->skb_dma_len);

	/* VLAN tag insertion check */
	if (vlan)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
				  TX_NORMAL_DESC2_VLAN_INSERT);

	/* Timestamp enablement check */
	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);

	/* Mark it as First Descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

	/* Mark it as a NORMAL descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

	/* Set OWN bit if not the first descriptor */
	if (cur_index != start_index)
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (tso) {
		/* Enable TSO */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
				  packet->tcp_payload_len);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
				  packet->tcp_header_len / 4);

		pdata->ext_stats.tx_tso_packets++;
	} else {
		/* Enable CRC and Pad Insertion */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);

		/* Set the total length to be transmitted */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
				  packet->length);
	}

	for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		rdesc = rdata->rdesc;

		/* Update buffer address */
		rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
		rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

		/* Update the buffer length */
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
				  rdata->skb_dma_len);

		/* Set OWN bit */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

		/* Mark it as NORMAL descriptor */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);
	}

	/* Set LAST bit for the last descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

	/* Set IC bit based on Tx coalescing settings */
	if (tx_set_ic)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);

	/* Save the Tx info to report back during cleanup */
	rdata->tx.packets = packet->tx_packets;
	rdata->tx.bytes = packet->tx_bytes;

	/* In case the Tx DMA engine is running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the first descriptor
	 */
	dma_wmb();

	/* Set OWN bit for the first descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	rdesc = rdata->rdesc;
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (netif_msg_tx_queued(pdata))
		xgbe_dump_tx_desc(pdata, ring, start_index,
				  packet->rdesc_count, 1);

	/* Make sure ownership is written to the descriptor */
	smp_wmb();

	ring->cur = cur_index + 1;
	if (!packet->skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
						   channel->queue_index)))
		xgbe_tx_start_xmit(channel, ring);
	else
		ring->tx.xmit_more = 1;

	DBGPR("  %s: descriptors %u to %u written\n",
	      channel->name, start_index & (ring->rdesc_count - 1),
	      (ring->cur - 1) & (ring->rdesc_count - 1));

	DBGPR("<--xgbe_dev_xmit\n");
}
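
/* Descriptor chain produced by xgbe_dev_xmit for a TSO packet whose
 * MSS changed and whose data spans three DMA buffers (a sketch):
 *
 *   desc 0: CONTEXT - new MSS (CTXT = 1, TCMSSV = 1)
 *   desc 1: FD      - header buffer, TSE/TCPPL/TCPHDRLEN set
 *   desc 2:         - payload buffer, OWN = 1
 *   desc 3: LD      - last payload buffer, OWN = 1, IC per coalescing
 *
 * The OWN bit of the first normal descriptor is written only after
 * dma_wmb(), so the DMA engine never sees a partially built chain.
 */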
static int xgbe_dev_read(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	struct net_device *netdev = pdata->netdev;
	unsigned int err, etlt, l34t;

	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdesc = rdata->rdesc;

	/* Check for data availability */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
		return 1;

	/* Make sure descriptor fields are read after reading the OWN bit */
	dma_rmb();

	if (netif_msg_rx_status(pdata))
		xgbe_dump_rx_desc(pdata, ring, ring->cur);

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
		/* Timestamp Context Descriptor */
		xgbe_get_rx_tstamp(packet, rdesc);

		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT, 1);
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT_NEXT, 0);
		return 0;
	}

	/* Normal Descriptor, be sure Context Descriptor bit is off */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);

	/* Indicate if a Context Descriptor is next */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT_NEXT, 1);

	/* Get the header length */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       FIRST, 1);
		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
						      RX_NORMAL_DESC2, HL);
		if (rdata->rx.hdr_len)
			pdata->ext_stats.rx_split_header_packets++;
	} else {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       FIRST, 0);
	}

	/* Get the RSS hash */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       RSS_HASH, 1);

		packet->rss_hash = le32_to_cpu(rdesc->desc1);

		l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
		switch (l34t) {
		case RX_DESC3_L34T_IPV4_TCP:
		case RX_DESC3_L34T_IPV4_UDP:
		case RX_DESC3_L34T_IPV6_TCP:
		case RX_DESC3_L34T_IPV6_UDP:
			packet->rss_hash_type = PKT_HASH_TYPE_L4;
			break;
		default:
			packet->rss_hash_type = PKT_HASH_TYPE_L3;
		}
	}

	/* Not all the data has been transferred for this packet */
	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
		return 0;

	/* This is the last of the data for this packet */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		       LAST, 1);

	/* Get the packet length */
	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);

	/* Set checksum done indicator as appropriate */
	if (netdev->features & NETIF_F_RXCSUM)
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CSUM_DONE, 1);

	/* Check for errors (only valid in last descriptor) */
	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
	netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);

	if (!err || !etlt) {
		/* No error if err is 0 or etlt is 0 */
		if ((etlt == 0x09) &&
		    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       VLAN_CTAG, 1);
			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
							      RX_NORMAL_DESC0,
							      OVT);
			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
				  packet->vlan_ctag);
		}
	} else {
		if ((etlt == 0x05) || (etlt == 0x06))
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       CSUM_DONE, 0);
		else
			XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
				       FRAME, 1);
	}

	DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
	      ring->cur & (ring->rdesc_count - 1), ring->cur);

	return 0;
}
static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
}

static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share LD bit, so check TDES3.LD bit */
	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
}

static int xgbe_enable_int(struct xgbe_channel *channel,
			   enum xgbe_int int_id)
{
	unsigned int dma_ch_ier;

	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);

	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
		break;
	case XGMAC_INT_DMA_ALL:
		dma_ch_ier |= channel->saved_ier;
		break;
	default:
		return -1;
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);

	return 0;
}

static int xgbe_disable_int(struct xgbe_channel *channel,
			    enum xgbe_int int_id)
{
	unsigned int dma_ch_ier;

	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);

	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
		break;
	case XGMAC_INT_DMA_ALL:
		channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
		dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
		break;
	default:
		return -1;
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);

	return 0;
}
static int xgbe_exit(struct xgbe_prv_data *pdata)
{
	unsigned int count = 2000;

	DBGPR("-->xgbe_exit\n");

	/* Issue a software reset */
	XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
	usleep_range(10, 15);

	/* Poll Until Poll Condition */
	while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
		usleep_range(500, 600);

	if (!count)
		return -EBUSY;

	DBGPR("<--xgbe_exit\n");

	return 0;
}

static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
	unsigned int i, count;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
		return 0;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

	/* Poll Until Poll Condition */
	for (i = 0; i < pdata->tx_q_count; i++) {
		count = 2000;
		while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
							MTL_Q_TQOMR, FTQ))
			usleep_range(500, 600);

		if (!count)
			return -EBUSY;
	}

	return 0;
}
static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
	/* Set enhanced addressing mode */
	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);

	/* Set the System Bus mode */
	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
}

static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
{
	unsigned int arcache, awcache;

	arcache = 0;
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
	XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

	awcache = 0;
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
	XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
}

static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Set Tx to weighted round robin scheduling algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
				       MTL_TSA_ETS);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
	}

	/* Set Rx to strict priority algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}
static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
						  unsigned int queue_count)
{
	unsigned int q_fifo_size;
	unsigned int p_fifo;

	/* Calculate the configured fifo size */
	q_fifo_size = 1 << (fifo_size + 7);

	/* The configured value may not be the actual amount of fifo RAM */
	q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);

	q_fifo_size = q_fifo_size / queue_count;

	/* Each increment in the queue fifo size represents 256 bytes of
	 * fifo, with 0 representing 256 bytes. Distribute the fifo equally
	 * between the queues.
	 */
	p_fifo = q_fifo_size / 256;
	if (p_fifo)
		p_fifo--;

	return p_fifo;
}
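
/* Worked example of the sizing above, assuming the XGBE_FIFO_MAX
 * clamp is not the limiting factor: a hardware-reported fifo_size of
 * 9 encodes 1 << (9 + 7) = 65536 bytes of fifo RAM.  Split across
 * queue_count = 8 queues, each queue gets 8192 bytes, so
 * p_fifo = 8192 / 256 - 1 = 31 is programmed into TQS/RQS and the
 * "(fifo_size + 1) * 256" log lines below report 8192 bytes per queue.
 */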
static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	unsigned int fifo_size;
	unsigned int i;

	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
						  pdata->tx_q_count);

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);

	netif_info(pdata, drv, pdata->netdev,
		   "%d Tx hardware queues, %d byte fifo per queue\n",
		   pdata->tx_q_count, ((fifo_size + 1) * 256));
}

static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	unsigned int fifo_size;
	unsigned int i;

	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
						  pdata->rx_q_count);

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);

	netif_info(pdata, drv, pdata->netdev,
		   "%d Rx hardware queues, %d byte fifo per queue\n",
		   pdata->rx_q_count, ((fifo_size + 1) * 256));
}
static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
{
	unsigned int qptc, qptc_extra, queue;
	unsigned int prio_queues;
	unsigned int ppq, ppq_extra, prio;
	unsigned int mask;
	unsigned int i, j, reg, reg_val;

	/* Map the MTL Tx Queues to Traffic Classes
	 *   Note: Tx Queues >= Traffic Classes
	 */
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
					       Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}

		if (i < qptc_extra) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
					       Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}
	}

	/* Map the 8 VLAN priority values to available MTL Rx queues */
	prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
			    pdata->rx_q_count);
	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

	reg = MAC_RQC2R;
	reg_val = 0;
	for (i = 0, prio = 0; i < prio_queues;) {
		mask = 0;
		for (j = 0; j < ppq; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		if (i < ppq_extra) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));

		if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);
		reg += MAC_RQC2_INC;
		reg_val = 0;
	}

	/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
	reg = MTL_RQDCM0R;
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count;) {
		reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

		if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MTL_RQDCM_INC;
		reg_val = 0;
	}
}
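
/* Example of the mappings above with tc_cnt = 3, tx_q_count = 8 and
 * rx_q_count = 4:
 *
 *   qptc = 2, qptc_extra = 2: TC0 -> TXq0-2, TC1 -> TXq3-5,
 *                             TC2 -> TXq6-7
 *   prio_queues = 4, ppq = 2: PRIO0-1 -> RXq0, PRIO2-3 -> RXq1,
 *                             PRIO4-5 -> RXq2, PRIO6-7 -> RXq3
 *
 * Every Rx queue is then given the dynamic (0x80) mapping, so received
 * packets are steered to DMA channels per packet (e.g. by RSS) rather
 * than through a fixed queue-to-channel table.
 */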
2144 static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
2148 for (i = 0; i < pdata->rx_q_count; i++) {
2149 /* Activate flow control when less than 4k left in fifo */
2150 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
2152 /* De-activate flow control when more than 6k left in fifo */
2153 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
2157 static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
2159 xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
2161 /* Filtering is done using perfect filtering and hash filtering */
2162 if (pdata->hw_feat.hash_table_size) {
2163 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
2164 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
2165 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
2169 static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
2173 val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
2175 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
{
	switch (pdata->phy_speed) {
	case SPEED_10000:
		xgbe_set_xgmii_speed(pdata);
		break;

	case SPEED_2500:
		xgbe_set_gmii_2500_speed(pdata);
		break;

	case SPEED_1000:
		xgbe_set_gmii_speed(pdata);
		break;
	}
}
static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
	if (pdata->netdev->features & NETIF_F_RXCSUM)
		xgbe_enable_rx_csum(pdata);
	else
		xgbe_disable_rx_csum(pdata);
}
static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
{
	/* Indicate that VLAN Tx CTAGs come from context descriptors */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

	/* Set the current VLAN Hash Table register value */
	xgbe_update_vlan_hash_table(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		xgbe_enable_rx_vlan_filtering(pdata);
	else
		xgbe_disable_rx_vlan_filtering(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		xgbe_enable_rx_vlan_stripping(pdata);
	else
		xgbe_disable_rx_vlan_stripping(pdata);
}
static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
	bool read_hi;
	u64 val;

	switch (reg_lo) {
	/* These registers are always 64 bit */
	case MMC_TXOCTETCOUNT_GB_LO:
	case MMC_TXOCTETCOUNT_G_LO:
	case MMC_RXOCTETCOUNT_GB_LO:
	case MMC_RXOCTETCOUNT_G_LO:
		read_hi = true;
		break;

	default:
		read_hi = false;
	}

	val = XGMAC_IOREAD(pdata, reg_lo);

	if (read_hi)
		val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);

	return val;
}
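/* The octet counters are the only MMC counters wide enough to need two
 * registers: the low word lives at reg_lo and the high word at
 * reg_lo + 4, so the 64-bit value is assembled as (hi << 32) | lo.
 * Every other counter is a single 32-bit read.
 */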
static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
		stats->txoctetcount_gb +=
			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
		stats->txframecount_gb +=
			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
		stats->txbroadcastframes_g +=
			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
		stats->txmulticastframes_g +=
			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
		stats->tx64octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
		stats->tx65to127octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
		stats->tx128to255octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
		stats->tx256to511octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
		stats->tx512to1023octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
		stats->tx1024tomaxoctets_gb +=
			xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
		stats->txunicastframes_gb +=
			xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
		stats->txmulticastframes_gb +=
			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
		stats->txbroadcastframes_g +=
			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
		stats->txunderflowerror +=
			xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
		stats->txoctetcount_g +=
			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
		stats->txframecount_g +=
			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
		stats->txpauseframes +=
			xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
		stats->txvlanframes_g +=
			xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}
static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
		stats->rxframecount_gb +=
			xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
		stats->rxoctetcount_gb +=
			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
		stats->rxoctetcount_g +=
			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
		stats->rxbroadcastframes_g +=
			xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
		stats->rxmulticastframes_g +=
			xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
		stats->rxcrcerror +=
			xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
		stats->rxrunterror +=
			xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
		stats->rxjabbererror +=
			xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
		stats->rxundersize_g +=
			xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
		stats->rxoversize_g +=
			xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
		stats->rx64octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
		stats->rx65to127octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
		stats->rx128to255octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
		stats->rx256to511octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
		stats->rx512to1023octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
		stats->rx1024tomaxoctets_gb +=
			xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
		stats->rxunicastframes_g +=
			xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
		stats->rxlengtherror +=
			xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
		stats->rxoutofrangetype +=
			xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
		stats->rxpauseframes +=
			xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
		stats->rxfifooverflow +=
			xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
		stats->rxvlanframes_gb +=
			xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
		stats->rxwatchdogerror +=
			xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}
static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	stats->txoctetcount_gb +=
		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	stats->txframecount_gb +=
		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	stats->txbroadcastframes_g +=
		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	stats->txmulticastframes_g +=
		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	stats->tx64octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	stats->tx65to127octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	stats->tx128to255octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	stats->tx256to511octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	stats->tx512to1023octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	stats->tx1024tomaxoctets_gb +=
		xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	stats->txunicastframes_gb +=
		xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	stats->txmulticastframes_gb +=
		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	stats->txbroadcastframes_g +=
		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	stats->txunderflowerror +=
		xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	stats->txoctetcount_g +=
		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	stats->txframecount_g +=
		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	stats->txpauseframes +=
		xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	stats->txvlanframes_g +=
		xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

	stats->rxframecount_gb +=
		xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	stats->rxoctetcount_gb +=
		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	stats->rxoctetcount_g +=
		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	stats->rxbroadcastframes_g +=
		xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	stats->rxmulticastframes_g +=
		xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	stats->rxcrcerror +=
		xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	stats->rxrunterror +=
		xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	stats->rx65to127octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	stats->rx128to255octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	stats->rx256to511octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	stats->rx512to1023octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	stats->rx1024tomaxoctets_gb +=
		xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	stats->rxunicastframes_g +=
		xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	stats->rxlengtherror +=
		xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	stats->rxoutofrangetype +=
		xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	stats->rxpauseframes +=
		xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	stats->rxfifooverflow +=
		xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	stats->rxvlanframes_gb +=
		xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	stats->rxwatchdogerror +=
		xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}
static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
{
	/* Set counters to reset on read */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

	/* Reset the counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}
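/* Because ROR makes every hardware counter clear on read, the read
 * paths above (xgbe_read_mmc_stats and the MMC interrupt handlers)
 * accumulate with += into the 64-bit software stats rather than
 * overwriting them, and xgbe_read_mmc_stats freezes the counters (MCF)
 * around the bulk read so the snapshot stays consistent.
 */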
static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
				 struct xgbe_channel *channel)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* Calculate the status register to read and the position within */
	if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) +
			 DMA_DSR0_TPS_START;
	} else {
		tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
			 DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state.  Don't wait forever though...
	 */
	tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, tx_timeout)) {
		tx_status = XGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, tx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Tx DMA channel %u to stop\n",
			    channel->queue_index);
}
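/* DSR layout, as implied by the arithmetic above: the first
 * DMA_DSRX_FIRST_QUEUE channels report status in DMA_DSR0, and later
 * channels are packed DMA_DSRX_QPR per register starting at DMA_DSR1,
 * each occupying a DMA_DSR_Q_WIDTH-bit field -- the divide selects the
 * register and the modulo selects the field within it.
 */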
static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Enable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
	}

	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
				       MTL_Q_ENABLED);

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		xgbe_prepare_tx_stop(pdata, channel);
	}

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);

	/* Disable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
	}
}
static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
				 unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
	 * wait forever though...
	 */
	rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, rx_timeout)) {
		rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, rx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Rx queue %u to empty\n",
			    queue);
}
static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int reg_val, i;

	/* Enable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
	}

	/* Enable each Rx queue */
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}
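/* MAC_RQC0R holds a 2-bit enable code per Rx queue (0x02 marks a queue
 * enabled for DCB/generic traffic in the XGMAC register layout), and
 * (i << 1) shifts the code into queue i's field.  Illustrative result,
 * not from the original source: with rx_q_count = 4 the loop produces
 * reg_val = 0xAA.
 */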
static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Disable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < pdata->rx_q_count; i++)
		xgbe_prepare_rx_stop(pdata, i);

	/* Disable each Rx queue */
	XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);

	/* Disable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
	}
}
static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Enable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
	}

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		xgbe_prepare_tx_stop(pdata, channel);
	}

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
	}
}
static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Enable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
	}
}
static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Disable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
	}
}
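/* Unlike the enable/disable pairs above, the powerup/powerdown paths
 * touch only the DMA channels (plus the MAC Tx enable on the Tx side)
 * and leave the MTL queue and MAC Rx configuration in place -- they
 * appear intended as the lighter-weight hooks for the driver's power
 * management paths, where a full quiesce/reprogram cycle is
 * unnecessary.
 */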
static int xgbe_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_init\n");

	/* Flush Tx queues */
	ret = xgbe_flush_tx_queues(pdata);
	if (ret) {
		netdev_err(pdata->netdev, "error flushing TX queues\n");
		return ret;
	}

	/*
	 * Initialize DMA related features
	 */
	xgbe_config_dma_bus(pdata);
	xgbe_config_dma_cache(pdata);
	xgbe_config_osp_mode(pdata);
	xgbe_config_pblx8(pdata);
	xgbe_config_tx_pbl_val(pdata);
	xgbe_config_rx_pbl_val(pdata);
	xgbe_config_rx_coalesce(pdata);
	xgbe_config_tx_coalesce(pdata);
	xgbe_config_rx_buffer_size(pdata);
	xgbe_config_tso_mode(pdata);
	xgbe_config_sph_mode(pdata);
	xgbe_config_rss(pdata);
	desc_if->wrapper_tx_desc_init(pdata);
	desc_if->wrapper_rx_desc_init(pdata);
	xgbe_enable_dma_interrupts(pdata);

	/*
	 * Initialize MTL related features
	 */
	xgbe_config_mtl_mode(pdata);
	xgbe_config_queue_mapping(pdata);
	xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	xgbe_config_tx_fifo_size(pdata);
	xgbe_config_rx_fifo_size(pdata);
	xgbe_config_flow_control_threshold(pdata);
	/*TODO: Error Packet and undersized good Packet forwarding enable
		(FEP and FUP)
	 */
	xgbe_config_dcb_tc(pdata);
	xgbe_config_dcb_pfc(pdata);
	xgbe_enable_mtl_interrupts(pdata);

	/*
	 * Initialize MAC related features
	 */
	xgbe_config_mac_address(pdata);
	xgbe_config_rx_mode(pdata);
	xgbe_config_jumbo_enable(pdata);
	xgbe_config_flow_control(pdata);
	xgbe_config_mac_speed(pdata);
	xgbe_config_checksum_offload(pdata);
	xgbe_config_vlan_support(pdata);
	xgbe_config_mmc(pdata);
	xgbe_enable_mac_interrupts(pdata);

	DBGPR("<--xgbe_init\n");

	return 0;
}
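/* The initialization order above follows the hardware's layering: the
 * DMA engine is configured first, then the MTL queue fabric that sits
 * between the DMA and the MAC, and finally the MAC itself, with each
 * block's interrupts enabled only once that block is fully programmed.
 */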
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
{
	DBGPR("-->xgbe_init_function_ptrs\n");

	hw_if->tx_complete = xgbe_tx_complete;

	hw_if->set_mac_address = xgbe_set_mac_address;
	hw_if->config_rx_mode = xgbe_config_rx_mode;

	hw_if->enable_rx_csum = xgbe_enable_rx_csum;
	hw_if->disable_rx_csum = xgbe_disable_rx_csum;

	hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
	hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
	hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
	hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
	hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;

	hw_if->read_mmd_regs = xgbe_read_mmd_regs;
	hw_if->write_mmd_regs = xgbe_write_mmd_regs;

	hw_if->set_gmii_speed = xgbe_set_gmii_speed;
	hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
	hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;

	hw_if->enable_tx = xgbe_enable_tx;
	hw_if->disable_tx = xgbe_disable_tx;
	hw_if->enable_rx = xgbe_enable_rx;
	hw_if->disable_rx = xgbe_disable_rx;

	hw_if->powerup_tx = xgbe_powerup_tx;
	hw_if->powerdown_tx = xgbe_powerdown_tx;
	hw_if->powerup_rx = xgbe_powerup_rx;
	hw_if->powerdown_rx = xgbe_powerdown_rx;

	hw_if->dev_xmit = xgbe_dev_xmit;
	hw_if->dev_read = xgbe_dev_read;
	hw_if->enable_int = xgbe_enable_int;
	hw_if->disable_int = xgbe_disable_int;
	hw_if->init = xgbe_init;
	hw_if->exit = xgbe_exit;

	/* Descriptor related Sequences have to be initialized here */
	hw_if->tx_desc_init = xgbe_tx_desc_init;
	hw_if->rx_desc_init = xgbe_rx_desc_init;
	hw_if->tx_desc_reset = xgbe_tx_desc_reset;
	hw_if->rx_desc_reset = xgbe_rx_desc_reset;
	hw_if->is_last_desc = xgbe_is_last_desc;
	hw_if->is_context_desc = xgbe_is_context_desc;
	hw_if->tx_start_xmit = xgbe_tx_start_xmit;

	/* For FLOW ctrl */
	hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;

	/* For RX coalescing */
	hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
	hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
	hw_if->usec_to_riwt = xgbe_usec_to_riwt;
	hw_if->riwt_to_usec = xgbe_riwt_to_usec;

	/* For RX and TX threshold config */
	hw_if->config_rx_threshold = xgbe_config_rx_threshold;
	hw_if->config_tx_threshold = xgbe_config_tx_threshold;

	/* For RX and TX Store and Forward Mode config */
	hw_if->config_rsf_mode = xgbe_config_rsf_mode;
	hw_if->config_tsf_mode = xgbe_config_tsf_mode;

	/* For TX DMA Operating on Second Frame config */
	hw_if->config_osp_mode = xgbe_config_osp_mode;

	/* For RX and TX PBL config */
	hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
	hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
	hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
	hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
	hw_if->config_pblx8 = xgbe_config_pblx8;

	/* For MMC statistics support */
	hw_if->tx_mmc_int = xgbe_tx_mmc_int;
	hw_if->rx_mmc_int = xgbe_rx_mmc_int;
	hw_if->read_mmc_stats = xgbe_read_mmc_stats;

	/* For PTP config */
	hw_if->config_tstamp = xgbe_config_tstamp;
	hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
	hw_if->set_tstamp_time = xgbe_set_tstamp_time;
	hw_if->get_tstamp_time = xgbe_get_tstamp_time;
	hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;

	/* For Data Center Bridging config */
	hw_if->config_tc = xgbe_config_tc;
	hw_if->config_dcb_tc = xgbe_config_dcb_tc;
	hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;

	/* For Receive Side Scaling */
	hw_if->enable_rss = xgbe_enable_rss;
	hw_if->disable_rss = xgbe_disable_rss;
	hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
	hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;

	DBGPR("<--xgbe_init_function_ptrs\n");
}