1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2017-2019 NXP */
7 #include <linux/of_mdio.h>
8 #include <linux/vmalloc.h>
10 /* ENETC overhead: optional extension BD + 1 BD gap */
11 #define ENETC_TXBDS_NEEDED(val) ((val) + 2)
12 /* max # of chained Tx BDs is 15, including head and extension BD */
13 #define ENETC_MAX_SKB_FRAGS 13
14 #define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
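/*
 * Worked example of the BD budget above: a maximally fragmented skb uses
 * ENETC_MAX_SKB_FRAGS (13) fragment BDs plus one head BD, so
 * ENETC_TXBDS_MAX_NEEDED = ENETC_TXBDS_NEEDED(13 + 1) = 14 + 2 = 16,
 * the extra two covering the optional extension BD and the one-BD gap
 * mentioned in the comment above.
 */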
16 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
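/*
 * Transmit entry point: skbs with more fragments than the 15-BD chain
 * limit allows are linearized first; the subqueue is stopped and
 * NETDEV_TX_BUSY returned when the ring cannot hold the needed BDs, and
 * it is stopped again preemptively after mapping if fewer than
 * ENETC_TXBDS_MAX_NEEDED BDs remain free.
 */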
19 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
21 struct enetc_ndev_priv *priv = netdev_priv(ndev);
22 struct enetc_bdr *tx_ring;
25 tx_ring = priv->tx_ring[skb->queue_mapping];
27 if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
28 if (unlikely(skb_linearize(skb)))
31 count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
32 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
33 netif_stop_subqueue(ndev, tx_ring->index);
34 return NETDEV_TX_BUSY;
37 count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
41 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
42 netif_stop_subqueue(ndev, tx_ring->index);
47 dev_kfree_skb_any(skb);
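/*
 * Prepare checksum offload hints in the Tx BD: the L3 header offset and
 * size plus IPv4/IPv6 and TCP/UDP flags are encoded into l3_csoff and
 * l4_csoff so the hardware can insert the checksums; csum_offset values
 * the hardware does not handle fall back to skb_checksum_help() in
 * software.
 */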
51 static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
53 int l3_start, l3_hsize;
54 u16 l3_flags, l4_flags;
56 if (skb->ip_summed != CHECKSUM_PARTIAL)
59 switch (skb->csum_offset) {
60 case offsetof(struct tcphdr, check):
61 l4_flags = ENETC_TXBD_L4_TCP;
63 case offsetof(struct udphdr, check):
64 l4_flags = ENETC_TXBD_L4_UDP;
67 skb_checksum_help(skb);
71 l3_start = skb_network_offset(skb);
72 l3_hsize = skb_network_header_len(skb);
75 if (skb->protocol == htons(ETH_P_IPV6))
76 l3_flags = ENETC_TXBD_L3_IPV6;
79 txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
80 txbd->l4_csoff = l4_flags;
85 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
86 struct enetc_tx_swbd *tx_swbd)
88 if (tx_swbd->is_dma_page)
89 dma_unmap_page(tx_ring->dev, tx_swbd->dma,
90 tx_swbd->len, DMA_TO_DEVICE);
92 dma_unmap_single(tx_ring->dev, tx_swbd->dma,
93 tx_swbd->len, DMA_TO_DEVICE);
97 static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
98 struct enetc_tx_swbd *tx_swbd)
101 enetc_unmap_tx_buff(tx_ring, tx_swbd);
104 dev_kfree_skb_any(tx_swbd->skb);
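/*
 * Map the skb head and each page fragment to consecutive Tx BDs. The
 * first BD carries frm_len and the offload flags, an extension BD is
 * chained in when VLAN insertion or a Tx timestamp is requested, the
 * last BD gets the 'F' (final) flag, and the producer index register
 * (tpir) is written to hand the chain to hardware. On a DMA mapping
 * error the BDs mapped so far are unwound.
 */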
109 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
112 struct enetc_tx_swbd *tx_swbd;
114 int len = skb_headlen(skb);
115 union enetc_tx_bd temp_bd;
116 union enetc_tx_bd *txbd;
117 bool do_vlan, do_tstamp;
123 i = tx_ring->next_to_use;
124 txbd = ENETC_TXBD(*tx_ring, i);
127 dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
128 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
131 temp_bd.addr = cpu_to_le64(dma);
132 temp_bd.buf_len = cpu_to_le16(len);
135 tx_swbd = &tx_ring->tx_swbd[i];
138 tx_swbd->is_dma_page = 0;
141 do_vlan = skb_vlan_tag_present(skb);
142 do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
143 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
144 tx_swbd->do_tstamp = do_tstamp;
145 tx_swbd->check_wb = tx_swbd->do_tstamp;
147 if (do_vlan || do_tstamp)
148 flags |= ENETC_TXBD_FLAGS_EX;
150 if (enetc_tx_csum(skb, &temp_bd))
151 flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
152 else if (tx_ring->tsd_enable)
153 flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
155 /* first BD needs frm_len and offload flags set */
156 temp_bd.frm_len = cpu_to_le16(skb->len);
157 temp_bd.flags = flags;
159 if (flags & ENETC_TXBD_FLAGS_TSE) {
162 temp = (skb->skb_mstamp_ns >> 5 & ENETC_TXBD_TXSTART_MASK)
163 | (flags << ENETC_TXBD_FLAGS_OFFSET);
164 temp_bd.txstart = cpu_to_le32(temp);
167 if (flags & ENETC_TXBD_FLAGS_EX) {
170 enetc_clear_tx_bd(&temp_bd);
172 /* add extension BD for VLAN and/or timestamping */
177 if (unlikely(i == tx_ring->bd_count)) {
179 tx_swbd = tx_ring->tx_swbd;
180 txbd = ENETC_TXBD(*tx_ring, 0);
185 temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
186 temp_bd.ext.tpid = 0; /* 0 selects the standard C-TAG TPID */
187 e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
191 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
192 e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
195 temp_bd.ext.e_flags = e_flags;
199 frag = &skb_shinfo(skb)->frags[0];
200 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
201 len = skb_frag_size(frag);
202 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
204 if (dma_mapping_error(tx_ring->dev, dma))
208 enetc_clear_tx_bd(&temp_bd);
214 if (unlikely(i == tx_ring->bd_count)) {
216 tx_swbd = tx_ring->tx_swbd;
217 txbd = ENETC_TXBD(*tx_ring, 0);
221 temp_bd.addr = cpu_to_le64(dma);
222 temp_bd.buf_len = cpu_to_le16(len);
226 tx_swbd->is_dma_page = 1;
230 /* last BD needs 'F' bit set */
231 flags |= ENETC_TXBD_FLAGS_F;
232 temp_bd.flags = flags;
235 tx_ring->tx_swbd[i].skb = skb;
237 enetc_bdr_idx_inc(tx_ring, &i);
238 tx_ring->next_to_use = i;
240 skb_tx_timestamp(skb);
242 /* let H/W know BD ring has been updated */
243 enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
248 dev_err(tx_ring->dev, "DMA map error");
251 tx_swbd = &tx_ring->tx_swbd[i];
252 enetc_free_tx_skb(tx_ring, tx_swbd);
254 i = tx_ring->bd_count;
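/*
 * MSI-X handler for one interrupt vector: mask the Rx ring interrupt and
 * every Tx ring interrupt mapped to this vector, then defer the actual
 * work to NAPI via napi_schedule_irqoff().
 */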
261 static irqreturn_t enetc_msix(int irq, void *data)
263 struct enetc_int_vector *v = data;
266 /* disable interrupts */
267 enetc_wr_reg(v->rbier, 0);
269 for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
270 enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
272 napi_schedule_irqoff(&v->napi);
277 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
278 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
279 struct napi_struct *napi, int work_limit);
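/*
 * NAPI poll: clean every Tx ring owned by this vector, then process up
 * to 'budget' Rx frames. While either direction still has work the full
 * budget is returned so NAPI keeps polling; otherwise the poll completes
 * and the Rx/Tx ring interrupts are re-enabled.
 */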
281 static int enetc_poll(struct napi_struct *napi, int budget)
283 struct enetc_int_vector
284 *v = container_of(napi, struct enetc_int_vector, napi);
285 bool complete = true;
289 for (i = 0; i < v->count_tx_rings; i++)
290 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
293 work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
294 if (work_done == budget)
300 napi_complete_done(napi, work_done);
302 /* enable interrupts */
303 enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
305 for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
306 enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
312 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
314 int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
316 return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
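/*
 * pi is the hardware consumer index read from tcir (masked with
 * ENETC_TBCIR_IDX_MASK) and ci the software next-to-clean index; the
 * difference wraps around bd_count. For example, with bd_count = 256,
 * ci = 250 and pi = 4, 256 - 250 + 4 = 10 BDs are ready to be cleaned.
 */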
319 static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
322 u32 lo, hi, tstamp_lo;
324 lo = enetc_rd(hw, ENETC_SICTR0);
325 hi = enetc_rd(hw, ENETC_SICTR1);
326 tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
329 *tstamp = (u64)hi << 32 | tstamp_lo;
332 static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
334 struct skb_shared_hwtstamps shhwtstamps;
336 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
337 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
338 shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
339 skb_tstamp_tx(skb, &shhwtstamps);
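/*
 * Reclaim completed Tx BDs: unmap each buffer, deliver the hardware Tx
 * timestamp when one was requested and written back, free the skb on the
 * final BD of a frame, and wake the subqueue once enough BDs are free
 * again. Returns false only when the ENETC_DEFAULT_TX_WORK limit was
 * hit, so the NAPI caller keeps polling.
 */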
343 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
345 struct net_device *ndev = tx_ring->ndev;
346 int tx_frm_cnt = 0, tx_byte_cnt = 0;
347 struct enetc_tx_swbd *tx_swbd;
352 i = tx_ring->next_to_clean;
353 tx_swbd = &tx_ring->tx_swbd[i];
354 bds_to_clean = enetc_bd_ready_count(tx_ring, i);
358 while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
359 bool is_eof = !!tx_swbd->skb;
361 if (unlikely(tx_swbd->check_wb)) {
362 struct enetc_ndev_priv *priv = netdev_priv(ndev);
363 union enetc_tx_bd *txbd;
365 txbd = ENETC_TXBD(*tx_ring, i);
367 if (txbd->flags & ENETC_TXBD_FLAGS_W &&
368 tx_swbd->do_tstamp) {
369 enetc_get_tx_tstamp(&priv->si->hw, txbd,
375 if (likely(tx_swbd->dma))
376 enetc_unmap_tx_buff(tx_ring, tx_swbd);
379 if (unlikely(do_tstamp)) {
380 enetc_tstamp_tx(tx_swbd->skb, tstamp);
383 napi_consume_skb(tx_swbd->skb, napi_budget);
387 tx_byte_cnt += tx_swbd->len;
392 if (unlikely(i == tx_ring->bd_count)) {
394 tx_swbd = tx_ring->tx_swbd;
397 /* BD iteration loop end */
400 /* re-arm interrupt source */
401 enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
402 BIT(16 + tx_ring->index));
405 if (unlikely(!bds_to_clean))
406 bds_to_clean = enetc_bd_ready_count(tx_ring, i);
409 tx_ring->next_to_clean = i;
410 tx_ring->stats.packets += tx_frm_cnt;
411 tx_ring->stats.bytes += tx_byte_cnt;
413 if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
414 __netif_subqueue_stopped(ndev, tx_ring->index) &&
415 (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
416 netif_wake_subqueue(ndev, tx_ring->index);
419 return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
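/*
 * Rx buffer model used below: each software BD owns one DMA-mapped page
 * split into two ENETC_RXB_TRUESIZE halves; page_offset selects the half
 * currently given to hardware. When the stack releases its half quickly
 * enough (page_ref_count() == 1) the offset is flipped and the page is
 * recycled through next_to_alloc instead of being unmapped and freed.
 */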
422 static bool enetc_new_page(struct enetc_bdr *rx_ring,
423 struct enetc_rx_swbd *rx_swbd)
428 page = dev_alloc_page();
432 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
433 if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
440 rx_swbd->page = page;
441 rx_swbd->page_offset = ENETC_RXB_PAD;
446 static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
448 struct enetc_rx_swbd *rx_swbd;
449 union enetc_rx_bd *rxbd;
452 i = rx_ring->next_to_use;
453 rx_swbd = &rx_ring->rx_swbd[i];
454 rxbd = enetc_rxbd(rx_ring, i);
456 for (j = 0; j < buff_cnt; j++) {
458 if (unlikely(!rx_swbd->page)) {
459 if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
460 rx_ring->stats.rx_alloc_errs++;
466 rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
467 rx_swbd->page_offset);
468 /* clear 'R' as well */
471 rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
474 if (unlikely(i == rx_ring->bd_count)) {
476 rx_swbd = rx_ring->rx_swbd;
481 rx_ring->next_to_alloc = i; /* keep track of the next slot for page reuse */
482 rx_ring->next_to_use = i;
483 /* update ENETC's consumer index */
484 enetc_wr_reg(rx_ring->rcir, i);
490 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
491 static void enetc_get_rx_tstamp(struct net_device *ndev,
492 union enetc_rx_bd *rxbd,
495 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
496 struct enetc_ndev_priv *priv = netdev_priv(ndev);
497 struct enetc_hw *hw = &priv->si->hw;
498 u32 lo, hi, tstamp_lo;
501 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
502 lo = enetc_rd(hw, ENETC_SICTR0);
503 hi = enetc_rd(hw, ENETC_SICTR1);
504 rxbd = enetc_rxbd_ext(rxbd);
505 tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
509 tstamp = (u64)hi << 32 | tstamp_lo;
510 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
511 shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
516 static void enetc_get_offloads(struct enetc_bdr *rx_ring,
517 union enetc_rx_bd *rxbd, struct sk_buff *skb)
519 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
520 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
523 if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
524 u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
526 skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
527 skb->ip_summed = CHECKSUM_COMPLETE;
530 /* copy VLAN to skb, if one is extracted, for now we assume it's a
531 * standard TPID, but HW also supports custom values
533 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
534 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
535 le16_to_cpu(rxbd->r.vlan_opt));
536 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
537 if (priv->active_offloads & ENETC_F_RX_TSTAMP)
538 enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
542 static void enetc_process_skb(struct enetc_bdr *rx_ring,
545 skb_record_rx_queue(skb, rx_ring->index);
546 skb->protocol = eth_type_trans(skb, rx_ring->ndev);
549 static bool enetc_page_reusable(struct page *page)
551 return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
554 static void enetc_reuse_page(struct enetc_bdr *rx_ring,
555 struct enetc_rx_swbd *old)
557 struct enetc_rx_swbd *new;
559 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
561 /* next buf that may reuse a page */
562 enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
564 /* copy page reference */
568 static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
571 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
573 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
574 rx_swbd->page_offset,
575 size, DMA_FROM_DEVICE);
579 static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
580 struct enetc_rx_swbd *rx_swbd)
582 if (likely(enetc_page_reusable(rx_swbd->page))) {
583 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
584 page_ref_inc(rx_swbd->page);
586 enetc_reuse_page(rx_ring, rx_swbd);
588 /* sync for use by the device */
589 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
590 rx_swbd->page_offset,
594 dma_unmap_page(rx_ring->dev, rx_swbd->dma,
595 PAGE_SIZE, DMA_FROM_DEVICE);
598 rx_swbd->page = NULL;
601 static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
604 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
608 ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
609 skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
610 if (unlikely(!skb)) {
611 rx_ring->stats.rx_alloc_errs++;
615 skb_reserve(skb, ENETC_RXB_PAD);
616 __skb_put(skb, size);
618 enetc_put_rx_buff(rx_ring, rx_swbd);
623 static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
624 u16 size, struct sk_buff *skb)
626 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
628 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
629 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
631 enetc_put_rx_buff(rx_ring, rx_swbd);
634 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
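/*
 * Rx cleaning loop: buffers are replenished once at least
 * ENETC_RXBD_BUNDLE of them have been consumed, an skb is built from the
 * first BD of each frame, additional BDs are attached as page fragments,
 * frames whose BDs report errors are dropped, and completed skbs go to
 * napi_gro_receive(). The loop stops after work_limit frames or when no
 * ready BD is left.
 */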
636 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
637 struct napi_struct *napi, int work_limit)
639 int rx_frm_cnt = 0, rx_byte_cnt = 0;
642 cleaned_cnt = enetc_bd_unused(rx_ring);
643 /* next descriptor to process */
644 i = rx_ring->next_to_clean;
646 while (likely(rx_frm_cnt < work_limit)) {
647 union enetc_rx_bd *rxbd;
652 if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
653 int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
655 cleaned_cnt -= count;
658 rxbd = enetc_rxbd(rx_ring, i);
659 bd_status = le32_to_cpu(rxbd->r.lstatus);
663 enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
664 dma_rmb(); /* for reading other rxbd fields */
665 size = le16_to_cpu(rxbd->r.buf_len);
666 skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
670 enetc_get_offloads(rx_ring, rxbd, skb);
674 rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
675 if (unlikely(++i == rx_ring->bd_count))
678 if (unlikely(bd_status &
679 ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
681 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
683 bd_status = le32_to_cpu(rxbd->r.lstatus);
685 rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
686 if (unlikely(++i == rx_ring->bd_count))
690 rx_ring->ndev->stats.rx_dropped++;
691 rx_ring->ndev->stats.rx_errors++;
696 /* not last BD in frame? */
697 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
698 bd_status = le32_to_cpu(rxbd->r.lstatus);
699 size = ENETC_RXB_DMA_SIZE;
701 if (bd_status & ENETC_RXBD_LSTATUS_F) {
703 size = le16_to_cpu(rxbd->r.buf_len);
706 enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
710 rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
711 if (unlikely(++i == rx_ring->bd_count))
715 rx_byte_cnt += skb->len;
717 enetc_process_skb(rx_ring, skb);
719 napi_gro_receive(napi, skb);
724 rx_ring->next_to_clean = i;
726 rx_ring->stats.packets += rx_frm_cnt;
727 rx_ring->stats.bytes += rx_byte_cnt;
732 /* Probing and Init */
733 #define ENETC_MAX_RFS_SIZE 64
734 void enetc_get_si_caps(struct enetc_si *si)
736 struct enetc_hw *hw = &si->hw;
739 /* find out how many of various resources we have to work with */
740 val = enetc_rd(hw, ENETC_SICAPR0);
741 si->num_rx_rings = (val >> 16) & 0xff;
742 si->num_tx_rings = val & 0xff;
744 val = enetc_rd(hw, ENETC_SIRFSCAPR);
745 si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
746 si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
749 val = enetc_rd(hw, ENETC_SIPCAPR0);
750 if (val & ENETC_SIPCAPR0_RSS) {
753 rss = enetc_rd(hw, ENETC_SIRSSCAPR);
754 si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
757 if (val & ENETC_SIPCAPR0_QBV)
758 si->hw_features |= ENETC_SI_F_QBV;
761 static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
763 r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
764 &r->bd_dma_base, GFP_KERNEL);
768 /* h/w requires 128B alignment */
769 if (!IS_ALIGNED(r->bd_dma_base, 128)) {
770 dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
778 static int enetc_alloc_txbdr(struct enetc_bdr *txr)
782 txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
786 err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
792 txr->next_to_clean = 0;
793 txr->next_to_use = 0;
798 static void enetc_free_txbdr(struct enetc_bdr *txr)
802 for (i = 0; i < txr->bd_count; i++)
803 enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
805 size = txr->bd_count * sizeof(union enetc_tx_bd);
807 dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
814 static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
818 for (i = 0; i < priv->num_tx_rings; i++) {
819 err = enetc_alloc_txbdr(priv->tx_ring[i]);
829 enetc_free_txbdr(priv->tx_ring[i]);
834 static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
838 for (i = 0; i < priv->num_tx_rings; i++)
839 enetc_free_txbdr(priv->tx_ring[i]);
842 static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
844 size_t size = sizeof(union enetc_rx_bd);
847 rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
854 err = enetc_dma_alloc_bdr(rxr, size);
860 rxr->next_to_clean = 0;
861 rxr->next_to_use = 0;
862 rxr->next_to_alloc = 0;
863 rxr->ext_en = extended;
868 static void enetc_free_rxbdr(struct enetc_bdr *rxr)
872 size = rxr->bd_count * sizeof(union enetc_rx_bd);
874 dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
881 static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
883 bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
886 for (i = 0; i < priv->num_rx_rings; i++) {
887 err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
897 enetc_free_rxbdr(priv->rx_ring[i]);
902 static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
906 for (i = 0; i < priv->num_rx_rings; i++)
907 enetc_free_rxbdr(priv->rx_ring[i]);
910 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
914 if (!tx_ring->tx_swbd)
917 for (i = 0; i < tx_ring->bd_count; i++) {
918 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
920 enetc_free_tx_skb(tx_ring, tx_swbd);
923 tx_ring->next_to_clean = 0;
924 tx_ring->next_to_use = 0;
927 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
931 if (!rx_ring->rx_swbd)
934 for (i = 0; i < rx_ring->bd_count; i++) {
935 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
940 dma_unmap_page(rx_ring->dev, rx_swbd->dma,
941 PAGE_SIZE, DMA_FROM_DEVICE);
942 __free_page(rx_swbd->page);
943 rx_swbd->page = NULL;
946 rx_ring->next_to_clean = 0;
947 rx_ring->next_to_use = 0;
948 rx_ring->next_to_alloc = 0;
951 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
955 for (i = 0; i < priv->num_rx_rings; i++)
956 enetc_free_rx_ring(priv->rx_ring[i]);
958 for (i = 0; i < priv->num_tx_rings; i++)
959 enetc_free_tx_ring(priv->tx_ring[i]);
962 static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
964 int size = cbdr->bd_count * sizeof(struct enetc_cbd);
966 cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
971 /* h/w requires 128B alignment */
972 if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
973 dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
977 cbdr->next_to_clean = 0;
978 cbdr->next_to_use = 0;
983 static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
985 int size = cbdr->bd_count * sizeof(struct enetc_cbd);
987 dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
988 cbdr->bd_base = NULL;
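/*
 * Program the control BD ring: DMA base address split across two
 * registers, ring length, producer/consumer indexes reset to 0, and the
 * enable bit (BIT(31)) in the mode register; the PIR/CIR register
 * pointers are cached for later use.
 */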
991 static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
993 /* set CBDR cache attributes */
994 enetc_wr(hw, ENETC_SICAR2,
995 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
997 enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
998 enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
999 enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
1001 enetc_wr(hw, ENETC_SICBDRPIR, 0);
1002 enetc_wr(hw, ENETC_SICBDRCIR, 0);
1005 enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
1007 cbdr->pir = hw->reg + ENETC_SICBDRPIR;
1008 cbdr->cir = hw->reg + ENETC_SICBDRCIR;
1011 static void enetc_clear_cbdr(struct enetc_hw *hw)
1013 enetc_wr(hw, ENETC_SICBDRMR, 0);
1016 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
1021 rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
1025 /* Set up RSS table defaults */
1026 for (i = 0; i < si->num_rss; i++)
1027 rss_table[i] = i % num_groups;
1029 enetc_set_rss_table(si, rss_table, si->num_rss);
1036 static int enetc_configure_si(struct enetc_ndev_priv *priv)
1038 struct enetc_si *si = priv->si;
1039 struct enetc_hw *hw = &si->hw;
1042 enetc_setup_cbdr(hw, &si->cbd_ring);
1043 /* set SI cache attributes */
1044 enetc_wr(hw, ENETC_SICAR0,
1045 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
1046 enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
1048 enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
1051 err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
1059 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
1061 struct enetc_si *si = priv->si;
1062 int cpus = num_online_cpus();
1064 priv->tx_bd_count = ENETC_BDR_DEFAULT_SIZE;
1065 priv->rx_bd_count = ENETC_BDR_DEFAULT_SIZE;
1067 /* Enable all available TX rings in order to configure as many
1068 * priorities as possible, when needed.
1069 * TODO: Make # of TX rings run-time configurable
1071 priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
1072 priv->num_tx_rings = si->num_tx_rings;
1073 priv->bdr_int_num = cpus;
1076 si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
1079 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
1081 struct enetc_si *si = priv->si;
1084 err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
1088 priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
1090 if (!priv->cls_rules) {
1095 err = enetc_configure_si(priv);
1102 kfree(priv->cls_rules);
1104 enetc_clear_cbdr(&si->hw);
1105 enetc_free_cbdr(priv->dev, &si->cbd_ring);
1110 void enetc_free_si_resources(struct enetc_ndev_priv *priv)
1112 struct enetc_si *si = priv->si;
1114 enetc_clear_cbdr(&si->hw);
1115 enetc_free_cbdr(priv->dev, &si->cbd_ring);
1117 kfree(priv->cls_rules);
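/*
 * Per-ring hardware setup below: the Tx/Rx BD ring base address is split
 * into BAR0/BAR1 writes, the ring length must be a multiple of 64 BDs,
 * the interrupt packet threshold is set to 1, and the doorbell
 * (producer/consumer index) and interrupt-detect register pointers are
 * cached in the ring structure.
 */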
1120 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1122 int idx = tx_ring->index;
1125 enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
1126 lower_32_bits(tx_ring->bd_dma_base));
1128 enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
1129 upper_32_bits(tx_ring->bd_dma_base));
1131 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
1132 enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
1133 ENETC_RTBLENR_LEN(tx_ring->bd_count));
1135 /* clearing PI/CI registers for Tx not supported, adjust sw indexes */
1136 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
1137 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
1139 /* enable Tx ints by setting pkt thr to 1 */
1140 enetc_txbdr_wr(hw, idx, ENETC_TBICIR0, ENETC_TBICIR0_ICEN | 0x1);
1142 tbmr = ENETC_TBMR_EN;
1143 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
1144 tbmr |= ENETC_TBMR_VIH;
1147 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
1149 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
1150 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
1151 tx_ring->idr = hw->reg + ENETC_SITXIDR;
1154 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1156 int idx = rx_ring->index;
1159 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
1160 lower_32_bits(rx_ring->bd_dma_base));
1162 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
1163 upper_32_bits(rx_ring->bd_dma_base));
1165 WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
1166 enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
1167 ENETC_RTBLENR_LEN(rx_ring->bd_count));
1169 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
1171 enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
1173 /* enable Rx ints by setting pkt thr to 1 */
1174 enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
1176 rbmr = ENETC_RBMR_EN;
1178 if (rx_ring->ext_en)
1179 rbmr |= ENETC_RBMR_BDS;
1181 if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1182 rbmr |= ENETC_RBMR_VTE;
1184 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
1185 rx_ring->idr = hw->reg + ENETC_SIRXIDR;
1187 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
1190 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
1193 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
1197 for (i = 0; i < priv->num_tx_rings; i++)
1198 enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
1200 for (i = 0; i < priv->num_rx_rings; i++)
1201 enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1204 static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1206 int idx = rx_ring->index;
1208 /* disable EN bit on ring */
1209 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
1212 static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1214 int delay = 8, timeout = 100;
1215 int idx = tx_ring->index;
1217 /* disable EN bit on ring */
1218 enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
1220 /* wait for busy to clear */
1221 while (delay < timeout &&
1222 enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
1227 if (delay >= timeout)
1228 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
1232 static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
1236 for (i = 0; i < priv->num_tx_rings; i++)
1237 enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
1239 for (i = 0; i < priv->num_rx_rings; i++)
1240 enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
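/*
 * Request one MSI-X interrupt per Rx/Tx vector, name it after the
 * netdev, route the Rx ring and all mapped Tx rings to that MSI-X entry
 * through the SIMSIRRV/SIMSITRV registers, and spread the vectors across
 * online CPUs with an affinity hint.
 */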
1245 static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
1247 struct pci_dev *pdev = priv->si->pdev;
1251 for (i = 0; i < priv->bdr_int_num; i++) {
1252 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1253 struct enetc_int_vector *v = priv->int_vector[i];
1254 int entry = ENETC_BDR_INT_BASE_IDX + i;
1255 struct enetc_hw *hw = &priv->si->hw;
1257 snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
1258 priv->ndev->name, i);
1259 err = request_irq(irq, enetc_msix, 0, v->name, v);
1261 dev_err(priv->dev, "request_irq() failed!\n");
1265 v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
1266 v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
1268 enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
1270 for (j = 0; j < v->count_tx_rings; j++) {
1271 int idx = v->tx_ring[j].index;
1273 enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
1275 cpumask_clear(&cpu_mask);
1276 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
1277 irq_set_affinity_hint(irq, &cpu_mask);
1284 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1286 irq_set_affinity_hint(irq, NULL);
1287 free_irq(irq, priv->int_vector[i]);
1293 static void enetc_free_irqs(struct enetc_ndev_priv *priv)
1295 struct pci_dev *pdev = priv->si->pdev;
1298 for (i = 0; i < priv->bdr_int_num; i++) {
1299 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1301 irq_set_affinity_hint(irq, NULL);
1302 free_irq(irq, priv->int_vector[i]);
1306 static void enetc_enable_interrupts(struct enetc_ndev_priv *priv)
1310 /* enable Tx & Rx event indication */
1311 for (i = 0; i < priv->num_rx_rings; i++) {
1312 enetc_rxbdr_wr(&priv->si->hw, i,
1313 ENETC_RBIER, ENETC_RBIER_RXTIE);
1316 for (i = 0; i < priv->num_tx_rings; i++) {
1317 enetc_txbdr_wr(&priv->si->hw, i,
1318 ENETC_TBIER, ENETC_TBIER_TXTIE);
1322 static void enetc_disable_interrupts(struct enetc_ndev_priv *priv)
1326 for (i = 0; i < priv->num_tx_rings; i++)
1327 enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
1329 for (i = 0; i < priv->num_rx_rings; i++)
1330 enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
1333 static void adjust_link(struct net_device *ndev)
1335 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1336 struct phy_device *phydev = ndev->phydev;
1338 if (priv->active_offloads & ENETC_F_QBV)
1339 enetc_sched_speed_set(ndev);
1341 phy_print_status(phydev);
1344 static int enetc_phy_connect(struct net_device *ndev)
1346 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1347 struct phy_device *phydev;
1348 struct ethtool_eee edata;
1350 if (!priv->phy_node)
1351 return 0; /* phy-less mode */
1353 phydev = of_phy_connect(ndev, priv->phy_node, &adjust_link,
1356 dev_err(&ndev->dev, "could not attach to PHY\n");
1360 phy_attached_info(phydev);
1362 /* disable EEE autoneg, until ENETC driver supports it */
1363 memset(&edata, 0, sizeof(struct ethtool_eee));
1364 phy_ethtool_set_eee(phydev, &edata);
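/*
 * ndo_open: IRQs are requested first, then the PHY is connected, Tx/Rx
 * ring memory is allocated and programmed into hardware, the real queue
 * counts are set, NAPI and interrupts are enabled, and finally the PHY
 * and the Tx queues are started. The error paths unwind in reverse
 * order.
 */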
1369 int enetc_open(struct net_device *ndev)
1371 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1374 err = enetc_setup_irqs(priv);
1378 err = enetc_phy_connect(ndev);
1380 goto err_phy_connect;
1382 err = enetc_alloc_tx_resources(priv);
1386 err = enetc_alloc_rx_resources(priv);
1390 enetc_setup_bdrs(priv);
1392 err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
1394 goto err_set_queues;
1396 err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
1398 goto err_set_queues;
1400 for (i = 0; i < priv->bdr_int_num; i++)
1401 napi_enable(&priv->int_vector[i]->napi);
1403 enetc_enable_interrupts(priv);
1406 phy_start(ndev->phydev);
1408 netif_carrier_on(ndev);
1410 netif_tx_start_all_queues(ndev);
1415 enetc_free_rx_resources(priv);
1417 enetc_free_tx_resources(priv);
1420 phy_disconnect(ndev->phydev);
1422 enetc_free_irqs(priv);
1427 int enetc_close(struct net_device *ndev)
1429 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1432 netif_tx_stop_all_queues(ndev);
1435 phy_stop(ndev->phydev);
1436 phy_disconnect(ndev->phydev);
1438 netif_carrier_off(ndev);
1441 for (i = 0; i < priv->bdr_int_num; i++) {
1442 napi_synchronize(&priv->int_vector[i]->napi);
1443 napi_disable(&priv->int_vector[i]->napi);
1446 enetc_disable_interrupts(priv);
1447 enetc_clear_bdrs(priv);
1449 enetc_free_rxtx_rings(priv);
1450 enetc_free_rx_resources(priv);
1451 enetc_free_tx_resources(priv);
1452 enetc_free_irqs(priv);
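/*
 * mqprio offload: with num_tc == 0 everything is reset (all ring
 * priorities back to 0, all Tx queues exposed again); otherwise one BD
 * ring is assigned per traffic class with increasing hardware priority
 * and the netdev is reduced to num_tc real Tx queues, one per TC.
 */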
1457 static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
1459 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1460 struct tc_mqprio_qopt *mqprio = type_data;
1461 struct enetc_bdr *tx_ring;
1465 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
1466 num_tc = mqprio->num_tc;
1469 netdev_reset_tc(ndev);
1470 netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
1472 /* Reset all ring priorities to 0 */
1473 for (i = 0; i < priv->num_tx_rings; i++) {
1474 tx_ring = priv->tx_ring[i];
1475 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
1481 /* Check if we have enough BD rings available to accommodate all TCs */
1482 if (num_tc > priv->num_tx_rings) {
1483 netdev_err(ndev, "Max %d traffic classes supported\n",
1484 priv->num_tx_rings);
1488 /* For the moment, we use only one BD ring per TC.
1490 * Configure num_tc BD rings with increasing priorities.
1492 for (i = 0; i < num_tc; i++) {
1493 tx_ring = priv->tx_ring[i];
1494 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
1497 /* Reset the number of netdev queues based on the TC count */
1498 netif_set_real_num_tx_queues(ndev, num_tc);
1500 netdev_set_num_tc(ndev, num_tc);
1502 /* Each TC is associated with one netdev queue */
1503 for (i = 0; i < num_tc; i++)
1504 netdev_set_tc_queue(ndev, i, 1, i);
1509 int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
1513 case TC_SETUP_QDISC_MQPRIO:
1514 return enetc_setup_tc_mqprio(ndev, type_data);
1515 case TC_SETUP_QDISC_TAPRIO:
1516 return enetc_setup_tc_taprio(ndev, type_data);
1517 case TC_SETUP_QDISC_CBS:
1518 return enetc_setup_tc_cbs(ndev, type_data);
1519 case TC_SETUP_QDISC_ETF:
1520 return enetc_setup_tc_txtime(ndev, type_data);
1526 struct net_device_stats *enetc_get_stats(struct net_device *ndev)
1528 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1529 struct net_device_stats *stats = &ndev->stats;
1530 unsigned long packets = 0, bytes = 0;
1533 for (i = 0; i < priv->num_rx_rings; i++) {
1534 packets += priv->rx_ring[i]->stats.packets;
1535 bytes += priv->rx_ring[i]->stats.bytes;
1538 stats->rx_packets = packets;
1539 stats->rx_bytes = bytes;
1543 for (i = 0; i < priv->num_tx_rings; i++) {
1544 packets += priv->tx_ring[i]->stats.packets;
1545 bytes += priv->tx_ring[i]->stats.bytes;
1548 stats->tx_packets = packets;
1549 stats->tx_bytes = bytes;
1554 static int enetc_set_rss(struct net_device *ndev, int en)
1556 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1557 struct enetc_hw *hw = &priv->si->hw;
1560 enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
1562 reg = enetc_rd(hw, ENETC_SIMR);
1563 reg &= ~ENETC_SIMR_RSSE;
1564 reg |= (en) ? ENETC_SIMR_RSSE : 0;
1565 enetc_wr(hw, ENETC_SIMR, reg);
1570 int enetc_set_features(struct net_device *ndev,
1571 netdev_features_t features)
1573 netdev_features_t changed = ndev->features ^ features;
1575 if (changed & NETIF_F_RXHASH)
1576 enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
1581 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
1582 static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
1584 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1585 struct hwtstamp_config config;
1588 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1591 switch (config.tx_type) {
1592 case HWTSTAMP_TX_OFF:
1593 priv->active_offloads &= ~ENETC_F_TX_TSTAMP;
1595 case HWTSTAMP_TX_ON:
1596 priv->active_offloads |= ENETC_F_TX_TSTAMP;
1602 ao = priv->active_offloads;
1603 switch (config.rx_filter) {
1604 case HWTSTAMP_FILTER_NONE:
1605 priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
1608 priv->active_offloads |= ENETC_F_RX_TSTAMP;
1609 config.rx_filter = HWTSTAMP_FILTER_ALL;
1612 if (netif_running(ndev) && ao != priv->active_offloads) {
1617 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1621 static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
1623 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1624 struct hwtstamp_config config;
1628 if (priv->active_offloads & ENETC_F_TX_TSTAMP)
1629 config.tx_type = HWTSTAMP_TX_ON;
1631 config.tx_type = HWTSTAMP_TX_OFF;
1633 config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
1634 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
1636 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1641 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1643 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
1644 if (cmd == SIOCSHWTSTAMP)
1645 return enetc_hwtstamp_set(ndev, rq);
1646 if (cmd == SIOCGHWTSTAMP)
1647 return enetc_hwtstamp_get(ndev, rq);
1652 return phy_mii_ioctl(ndev->phydev, rq, cmd);
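/*
 * Allocate MSI-X vectors and the per-vector structures, each of which
 * embeds its Tx ring descriptors. With the maximum number of vectors
 * (two, per the "2 CPUs" comment below) the default mapping interleaves
 * Tx rings across vectors (idx = 2 * j + i), e.g. vector 0 gets rings
 * 0, 2, 4, ... and vector 1 gets rings 1, 3, 5, ...; otherwise rings are
 * assigned in contiguous blocks (idx = j + i * v_tx_rings).
 */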
1655 int enetc_alloc_msix(struct enetc_ndev_priv *priv)
1657 struct pci_dev *pdev = priv->si->pdev;
1658 int size, v_tx_rings;
1659 int i, n, err, nvec;
1661 nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
1662 /* allocate MSIX for both messaging and Rx/Tx interrupts */
1663 n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
1671 /* # of tx rings per int vector */
1672 v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
1673 size = sizeof(struct enetc_int_vector) +
1674 sizeof(struct enetc_bdr) * v_tx_rings;
1676 for (i = 0; i < priv->bdr_int_num; i++) {
1677 struct enetc_int_vector *v;
1678 struct enetc_bdr *bdr;
1681 v = kzalloc(size, GFP_KERNEL);
1687 priv->int_vector[i] = v;
1689 netif_napi_add(priv->ndev, &v->napi, enetc_poll,
1691 v->count_tx_rings = v_tx_rings;
1693 for (j = 0; j < v_tx_rings; j++) {
1696 /* default tx ring mapping policy */
1697 if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
1698 idx = 2 * j + i; /* 2 CPUs */
1700 idx = j + i * v_tx_rings; /* default */
1702 __set_bit(idx, &v->tx_rings_map);
1703 bdr = &v->tx_ring[j];
1705 bdr->ndev = priv->ndev;
1706 bdr->dev = priv->dev;
1707 bdr->bd_count = priv->tx_bd_count;
1708 priv->tx_ring[idx] = bdr;
1713 bdr->ndev = priv->ndev;
1714 bdr->dev = priv->dev;
1715 bdr->bd_count = priv->rx_bd_count;
1716 priv->rx_ring[i] = bdr;
1723 netif_napi_del(&priv->int_vector[i]->napi);
1724 kfree(priv->int_vector[i]);
1727 pci_free_irq_vectors(pdev);
1732 void enetc_free_msix(struct enetc_ndev_priv *priv)
1736 for (i = 0; i < priv->bdr_int_num; i++) {
1737 struct enetc_int_vector *v = priv->int_vector[i];
1739 netif_napi_del(&v->napi);
1742 for (i = 0; i < priv->num_rx_rings; i++)
1743 priv->rx_ring[i] = NULL;
1745 for (i = 0; i < priv->num_tx_rings; i++)
1746 priv->tx_ring[i] = NULL;
1748 for (i = 0; i < priv->bdr_int_num; i++) {
1749 kfree(priv->int_vector[i]);
1750 priv->int_vector[i] = NULL;
1753 /* disable all MSIX for this device */
1754 pci_free_irq_vectors(priv->si->pdev);
1757 static void enetc_kfree_si(struct enetc_si *si)
1759 char *p = (char *)si - si->pad;
1764 static void enetc_detect_errata(struct enetc_si *si)
1766 if (si->pdev->revision == ENETC_REV1)
1767 si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL |
1771 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
1773 struct enetc_si *si, *p;
1774 struct enetc_hw *hw;
1779 err = pci_enable_device_mem(pdev);
1781 dev_err(&pdev->dev, "device enable failed\n");
1785 /* set up for high or low dma */
1786 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1788 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1791 "DMA configuration failed: 0x%x\n", err);
1796 err = pci_request_mem_regions(pdev, name);
1798 dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
1799 goto err_pci_mem_reg;
1802 pci_set_master(pdev);
1804 alloc_size = sizeof(struct enetc_si);
1806 /* align priv to 32B */
1807 alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
1808 alloc_size += sizeof_priv;
1810 /* force 32B alignment for enetc_si */
1811 alloc_size += ENETC_SI_ALIGN - 1;
1813 p = kzalloc(alloc_size, GFP_KERNEL);
1819 si = PTR_ALIGN(p, ENETC_SI_ALIGN);
1820 si->pad = (char *)si - (char *)p;
1822 pci_set_drvdata(pdev, si);
1826 len = pci_resource_len(pdev, ENETC_BAR_REGS);
1827 hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
1830 dev_err(&pdev->dev, "ioremap() failed\n");
1833 if (len > ENETC_PORT_BASE)
1834 hw->port = hw->reg + ENETC_PORT_BASE;
1835 if (len > ENETC_GLOBAL_BASE)
1836 hw->global = hw->reg + ENETC_GLOBAL_BASE;
1838 enetc_detect_errata(si);
1845 pci_release_mem_regions(pdev);
1848 pci_disable_device(pdev);
1853 void enetc_pci_remove(struct pci_dev *pdev)
1855 struct enetc_si *si = pci_get_drvdata(pdev);
1856 struct enetc_hw *hw = &si->hw;
1860 pci_release_mem_regions(pdev);
1861 pci_disable_device(pdev);