2 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
3 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 #include <linux/etherdevice.h>
19 #include <net/ieee80211_radiotap.h>
20 #include <linux/if_arp.h>
21 #include <linux/moduleparam.h>
23 #include <linux/ipv6.h>
25 #include <linux/prefetch.h>
31 #include "txrx_edma.h"
34 module_param(rx_align_2, bool, 0444);
35 MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
38 module_param(rx_large_buf, bool, 0444);
39 MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
41 /* Drop Tx packets in case Tx ring is full */
42 bool drop_if_ring_full;
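/* wil_rx_snaplen - extra bytes to account for in Rx buffers: 6 for the
 * SNAP header left in the frame when rx_align_2 is set, 0 otherwise
 */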
44 static inline uint wil_rx_snaplen(void)
46 return rx_align_2 ? 6 : 0;
49 /* wil_ring_wmark_low - low watermark for available descriptor space */
50 static inline int wil_ring_wmark_low(struct wil_ring *ring)
52 return ring->size / 8;
55 /* wil_ring_wmark_high - high watermark for available descriptor space */
56 static inline int wil_ring_wmark_high(struct wil_ring *ring)
58 return ring->size / 4;
61 /* returns true if num avail descriptors is lower than wmark_low */
62 static inline int wil_ring_avail_low(struct wil_ring *ring)
64 return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
67 /* returns true if num avail descriptors is higher than wmark_high */
68 static inline int wil_ring_avail_high(struct wil_ring *ring)
70 return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
73 /* returns true when all tx vrings are empty */
74 bool wil_is_tx_idle(struct wil6210_priv *wil)
77 unsigned long data_comp_to;
78 int min_ring_id = wil_get_min_tx_ring_id(wil);
80 for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
81 struct wil_ring *vring = &wil->ring_tx[i];
82 int vring_index = vring - wil->ring_tx;
83 struct wil_ring_tx_data *txdata =
84 &wil->ring_tx_data[vring_index];
86 spin_lock(&txdata->lock);
88 if (!vring->va || !txdata->enabled) {
89 spin_unlock(&txdata->lock);
93 data_comp_to = jiffies + msecs_to_jiffies(
94 WIL_DATA_COMPLETION_TO_MS);
95 if (test_bit(wil_status_napi_en, wil->status)) {
96 while (!wil_ring_is_empty(vring)) {
97 if (time_after(jiffies, data_comp_to)) {
99 "TO waiting for idle tx\n");
100 spin_unlock(&txdata->lock);
103 wil_dbg_ratelimited(wil,
104 "tx vring is not empty -> NAPI\n");
105 spin_unlock(&txdata->lock);
106 napi_synchronize(&wil->napi_tx);
108 spin_lock(&txdata->lock);
109 if (!vring->va || !txdata->enabled)
114 spin_unlock(&txdata->lock);
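/* wil_vring_alloc - allocate the DMA-coherent descriptor array and the
 * per-descriptor context array for a vring; all descriptors start out
 * SW-owned (DU bit set)
 */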
120 static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
122 struct device *dev = wil_to_dev(wil);
123 size_t sz = vring->size * sizeof(vring->va[0]);
126 wil_dbg_misc(wil, "vring_alloc:\n");
128 BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
132 vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
138 /* vring->va should be aligned on its size rounded up to a power of 2.
139 * This is guaranteed by dma_alloc_coherent.
141 * HW has a limitation that all vring addresses must share the same
142 * upper 16 MSBs of the 48-bit address. To work around that,
143 * if we are using more than 32-bit addresses, switch to a 32-bit
144 * DMA mask before allocating the vring memory.
146 * There's no check for the return value of dma_set_mask_and_coherent,
147 * since we assume that if we were able to set the mask during
148 * initialization on this system, it will not fail if we set it again
150 if (wil->dma_addr_size > 32)
151 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
153 vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
160 if (wil->dma_addr_size > 32)
161 dma_set_mask_and_coherent(dev,
162 DMA_BIT_MASK(wil->dma_addr_size));
164 /* initially, all descriptors are SW owned
165 * For Tx and Rx, ownership bit is at the same location, thus
168 for (i = 0; i < vring->size; i++) {
169 volatile struct vring_tx_desc *_d =
170 &vring->va[i].tx.legacy;
172 _d->dma.status = TX_DMA_STATUS_DU;
175 wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
176 vring->va, &vring->pa, vring->ctx);
181 static void wil_txdesc_unmap(struct device *dev, union wil_tx_desc *desc,
184 struct vring_tx_desc *d = &desc->legacy;
185 dma_addr_t pa = wil_desc_addr(&d->dma.addr);
186 u16 dmalen = le16_to_cpu(d->dma.length);
188 switch (ctx->mapped_as) {
189 case wil_mapped_as_single:
190 dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
192 case wil_mapped_as_page:
193 dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
200 static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
202 struct device *dev = wil_to_dev(wil);
203 size_t sz = vring->size * sizeof(vring->va[0]);
205 lockdep_assert_held(&wil->mutex);
207 int vring_index = vring - wil->ring_tx;
209 wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
210 vring_index, vring->size, vring->va,
211 &vring->pa, vring->ctx);
213 wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
214 vring->size, vring->va,
215 &vring->pa, vring->ctx);
218 while (!wil_ring_is_empty(vring)) {
224 struct vring_tx_desc dd, *d = &dd;
225 volatile struct vring_tx_desc *_d =
226 &vring->va[vring->swtail].tx.legacy;
228 ctx = &vring->ctx[vring->swtail];
231 "ctx(%d) was already completed\n",
233 vring->swtail = wil_ring_next_tail(vring);
237 wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
239 dev_kfree_skb_any(ctx->skb);
240 vring->swtail = wil_ring_next_tail(vring);
242 struct vring_rx_desc dd, *d = &dd;
243 volatile struct vring_rx_desc *_d =
244 &vring->va[vring->swhead].rx.legacy;
246 ctx = &vring->ctx[vring->swhead];
248 pa = wil_desc_addr(&d->dma.addr);
249 dmalen = le16_to_cpu(d->dma.length);
250 dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
252 wil_ring_advance_head(vring, 1);
255 dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
263 * Allocate one skb for Rx VRING
265 * Safe to call from IRQ
267 static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
270 struct device *dev = wil_to_dev(wil);
271 unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
272 struct vring_rx_desc dd, *d = &dd;
273 volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
275 struct sk_buff *skb = dev_alloc_skb(sz + headroom);
280 skb_reserve(skb, headroom);
284 * Make sure that the network stack calculates checksum for packets
285 * which failed the HW checksum calculation
287 skb->ip_summed = CHECKSUM_NONE;
289 pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
290 if (unlikely(dma_mapping_error(dev, pa))) {
295 d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
296 wil_desc_addr_set(&d->dma.addr, pa);
297 /* ip_length don't care */
299 /* error don't care */
300 d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
301 d->dma.length = cpu_to_le16(sz);
303 vring->ctx[i].skb = skb;
309 * Adds radiotap header
311 * Any error is indicated as "Bad FCS"
313 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
314 * - Rx descriptor: 32 bytes
317 static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
320 struct wil6210_rtap {
321 struct ieee80211_radiotap_header rthdr;
322 /* fields should be in the order of bits in rthdr.it_present */
326 __le16 chnl_freq __aligned(2);
333 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
334 struct wil6210_rtap *rtap;
335 int rtap_len = sizeof(struct wil6210_rtap);
336 struct ieee80211_channel *ch = wil->monitor_chandef.chan;
338 if (skb_headroom(skb) < rtap_len &&
339 pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
340 wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
344 rtap = skb_push(skb, rtap_len);
345 memset(rtap, 0, rtap_len);
347 rtap->rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
348 rtap->rthdr.it_len = cpu_to_le16(rtap_len);
349 rtap->rthdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
350 (1 << IEEE80211_RADIOTAP_CHANNEL) |
351 (1 << IEEE80211_RADIOTAP_MCS));
352 if (d->dma.status & RX_DMA_STATUS_ERROR)
353 rtap->flags |= IEEE80211_RADIOTAP_F_BADFCS;
355 rtap->chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
356 rtap->chnl_flags = cpu_to_le16(0);
358 rtap->mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
360 rtap->mcs_index = wil_rxdesc_mcs(d);
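/* wil_is_rx_idle - true when the descriptor at the Rx ring head has not
 * been completed by HW (no DU bit), i.e. there is nothing left to reap
 */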
363 static bool wil_is_rx_idle(struct wil6210_priv *wil)
365 struct vring_rx_desc *_d;
366 struct wil_ring *ring = &wil->ring_rx;
368 _d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy;
369 if (_d->dma.status & RX_DMA_STATUS_DU)
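/* wil_rx_get_cid_by_skb - resolve the full CID for a received frame.
 * The Rx descriptor carries only 3 CID bits; when more stations are
 * supported, the real CID is found by matching the transmitter address
 * against the sta array. Returns the CID, or -ENOENT if not found.
 */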
375 static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb)
377 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
378 int mid = wil_rxdesc_mid(d);
379 struct wil6210_vif *vif = wil->vifs[mid];
380 /* cid from DMA descriptor is limited to 3 bits.
381 * In case of cid>=8, the value would be cid modulo 8 and we need to
382 * find the real cid by locating the transmitter (ta) inside the sta array
384 int cid = wil_rxdesc_cid(d);
385 unsigned int snaplen = wil_rx_snaplen();
386 struct ieee80211_hdr_3addr *hdr;
391 /* in monitor mode there are no connections */
392 if (vif->wdev.iftype == NL80211_IFTYPE_MONITOR)
395 ftype = wil_rxdesc_ftype(d) << 2;
396 if (likely(ftype == IEEE80211_FTYPE_DATA)) {
397 if (unlikely(skb->len < ETH_HLEN + snaplen)) {
398 wil_err_ratelimited(wil,
399 "Short data frame, len = %d\n",
403 ta = wil_skb_get_sa(skb);
405 if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
406 wil_err_ratelimited(wil, "Short frame, len = %d\n",
410 hdr = (void *)skb->data;
414 if (wil->max_assoc_sta <= WIL6210_RX_DESC_MAX_CID)
417 /* assuming no concurrency between AP interfaces and STA interfaces.
418 * multista is used only in P2P_GO or AP mode. In other modes return
419 * cid from the rx descriptor
421 if (vif->wdev.iftype != NL80211_IFTYPE_P2P_GO &&
422 vif->wdev.iftype != NL80211_IFTYPE_AP)
425 /* For Rx packets the cid from the rx descriptor is limited to 3 bits (0..7);
426 * to find the real cid, compare the transmitter address with the
427 * stations' MAC addresses stored in the driver's sta array
429 for (i = cid; i < wil->max_assoc_sta; i += WIL6210_RX_DESC_MAX_CID) {
430 if (wil->sta[i].status != wil_sta_unused &&
431 ether_addr_equal(wil->sta[i].addr, ta)) {
436 if (i >= wil->max_assoc_sta) {
437 wil_err_ratelimited(wil, "Could not find cid for frame with transmit addr = %pM, iftype = %d, frametype = %d, len = %d\n",
438 ta, vif->wdev.iftype, ftype, skb->len);
446 * reap 1 frame from @swhead
448 * Rx descriptor copied to skb->cb
450 * Safe to call from IRQ
452 static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
453 struct wil_ring *vring)
455 struct device *dev = wil_to_dev(wil);
456 struct wil6210_vif *vif;
457 struct net_device *ndev;
458 volatile struct vring_rx_desc *_d;
459 struct vring_rx_desc *d;
462 unsigned int snaplen = wil_rx_snaplen();
463 unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
468 struct wil_net_stats *stats;
470 BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb));
473 if (unlikely(wil_ring_is_empty(vring)))
476 i = (int)vring->swhead;
477 _d = &vring->va[i].rx.legacy;
478 if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
479 /* it is not an error, we just reached the end of the Rx done area */
483 skb = vring->ctx[i].skb;
484 vring->ctx[i].skb = NULL;
485 wil_ring_advance_head(vring, 1);
487 wil_err(wil, "No Rx skb at [%d]\n", i);
490 d = wil_skb_rxdesc(skb);
492 pa = wil_desc_addr(&d->dma.addr);
494 dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
495 dmalen = le16_to_cpu(d->dma.length);
497 trace_wil6210_rx(i, d);
498 wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
499 wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
500 (const void *)d, sizeof(*d), false);
502 mid = wil_rxdesc_mid(d);
503 vif = wil->vifs[mid];
505 if (unlikely(!vif)) {
506 wil_dbg_txrx(wil, "skipped RX descriptor with invalid mid %d",
511 ndev = vif_to_ndev(vif);
512 if (unlikely(dmalen > sz)) {
513 wil_err_ratelimited(wil, "Rx size too large: %d bytes!\n",
518 skb_trim(skb, dmalen);
522 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
523 skb->data, skb_headlen(skb), false);
525 cid = wil_rx_get_cid_by_skb(wil, skb);
526 if (cid == -ENOENT) {
530 wil_skb_set_cid(skb, (u8)cid);
531 stats = &wil->sta[cid].stats;
533 stats->last_mcs_rx = wil_rxdesc_mcs(d);
534 if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
535 stats->rx_per_mcs[stats->last_mcs_rx]++;
537 /* use radiotap header only if required */
538 if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
539 wil_rx_add_radiotap_header(wil, skb);
541 /* no extra checks if in sniffer mode */
542 if (ndev->type != ARPHRD_ETHER)
544 /* Non-data frames may be delivered through the Rx DMA channel (e.g. BAR).
545 * The driver should recognize them by the frame type found
546 * in the Rx descriptor. If the type is not data, it is an 802.11 frame as is
548 ftype = wil_rxdesc_ftype(d) << 2;
549 if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
550 u8 fc1 = wil_rxdesc_fc1(d);
551 int tid = wil_rxdesc_tid(d);
552 u16 seq = wil_rxdesc_seq(d);
555 "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
556 fc1, mid, cid, tid, seq);
557 stats->rx_non_data_frame++;
558 if (wil_is_back_req(fc1)) {
560 "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
562 wil_rx_bar(wil, vif, cid, tid, seq);
564 /* print again all info. One can enable only this
565 * without overhead for printing every Rx frame
568 "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
569 fc1, mid, cid, tid, seq);
570 wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
571 (const void *)d, sizeof(*d), false);
572 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
573 skb->data, skb_headlen(skb), false);
579 /* L4 IDENT is on when HW calculated the checksum; check the status
580 * and in case of error drop the packet.
581 * Higher stack layers will handle retransmission (if required)
583 if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
584 /* L4 protocol identified, csum calculated */
585 if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
586 skb->ip_summed = CHECKSUM_UNNECESSARY;
587 /* If HW reports a bad checksum, let the IP stack re-check it.
588 * For example, HW doesn't understand the Microsoft IP stack, which
589 * mis-calculates the TCP checksum - if it should be 0x0,
590 * it writes 0xffff in violation of RFC 1624
593 stats->rx_csum_err++;
598 * +-------+-------+---------+------------+------+
599 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
600 * +-------+-------+---------+------------+------+
601 * Need to remove SNAP, shifting SA and DA forward
603 memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
604 skb_pull(skb, snaplen);
611 * allocate and fill up to @count buffers in rx ring
612 * buffers posted at @swtail
613 * Note: we have a single RX queue for servicing all VIFs, but we
614 * allocate skbs with headroom according to the main interface only. This
615 * means it will not work with a monitor interface together with other VIFs.
616 * Currently we only support a monitor interface on its own, without other VIFs,
617 * and we will need to fix this code once we add such support.
619 static int wil_rx_refill(struct wil6210_priv *wil, int count)
621 struct net_device *ndev = wil->main_ndev;
622 struct wil_ring *v = &wil->ring_rx;
625 int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
626 WIL6210_RTAP_SIZE : 0;
628 for (; next_tail = wil_ring_next_tail(v),
629 (next_tail != v->swhead) && (count-- > 0);
630 v->swtail = next_tail) {
631 rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
633 wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
639 /* make sure all writes to descriptors (shared memory) are done before
640 * committing them to HW
644 wil_w(wil, v->hwtail, v->swtail);
650 * reverse_memcmp - Compare two areas of memory, in reverse order
651 * @cs: One area of memory
652 * @ct: Another area of memory
653 * @count: The size of the area.
655 * Cut'n'paste from original memcmp (see lib/string.c)
656 * with minimal modifications
658 int reverse_memcmp(const void *cs, const void *ct, size_t count)
660 const unsigned char *su1, *su2;
663 for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
664 --su1, --su2, count--) {
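/* wil_rx_crypto_check - verify that the PN of a received frame advances
 * monotonically for the matching Rx key (replay protection) and store
 * the new PN on success
 */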
672 static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
674 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
675 int cid = wil_skb_get_cid(skb);
676 int tid = wil_rxdesc_tid(d);
677 int key_id = wil_rxdesc_key_id(d);
678 int mc = wil_rxdesc_mcast(d);
679 struct wil_sta_info *s = &wil->sta[cid];
680 struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
681 &s->tid_crypto_rx[tid];
682 struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
683 const u8 *pn = (u8 *)&d->mac.pn_15_0;
686 wil_err_ratelimited(wil,
687 "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
688 cid, tid, mc, key_id);
692 if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
693 wil_err_ratelimited(wil,
694 "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
695 cid, tid, mc, key_id, pn, cc->pn);
698 memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
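/* wil_rx_error_check - drop frames for which HW reported a MIC error
 * and account them in the per-station statistics
 */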
703 static int wil_rx_error_check(struct wil6210_priv *wil, struct sk_buff *skb,
704 struct wil_net_stats *stats)
706 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
708 if ((d->dma.status & RX_DMA_STATUS_ERROR) &&
709 (d->dma.error & RX_DMA_ERROR_MIC)) {
710 stats->rx_mic_error++;
711 wil_dbg_txrx(wil, "MIC error, dropping packet\n");
718 static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
721 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
723 *cid = wil_skb_get_cid(skb);
724 *security = wil_rxdesc_security(d);
728 * Pass Rx packet to the netif. Update statistics.
729 * Called in softirq context (NAPI poll).
731 void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
733 gro_result_t rc = GRO_NORMAL;
734 struct wil6210_vif *vif = ndev_to_vif(ndev);
735 struct wil6210_priv *wil = ndev_to_wil(ndev);
736 struct wireless_dev *wdev = vif_to_wdev(vif);
737 unsigned int len = skb->len;
740 u8 *sa, *da = wil_skb_get_da(skb);
741 /* here we are looking for the DA, not A1, so the Rxdesc's 'mcast' indication
742 * is not suitable; we need to look at the data
744 int mcast = is_multicast_ether_addr(da);
745 struct wil_net_stats *stats;
746 struct sk_buff *xmit_skb = NULL;
747 static const char * const gro_res_str[] = {
748 [GRO_MERGED] = "GRO_MERGED",
749 [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
750 [GRO_HELD] = "GRO_HELD",
751 [GRO_NORMAL] = "GRO_NORMAL",
752 [GRO_DROP] = "GRO_DROP",
753 [GRO_CONSUMED] = "GRO_CONSUMED",
756 wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
758 stats = &wil->sta[cid].stats;
762 if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
769 /* check errors reported by HW and update statistics */
770 if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) {
775 if (wdev->iftype == NL80211_IFTYPE_STATION) {
776 sa = wil_skb_get_sa(skb);
777 if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
778 /* mcast packet looped back to us */
783 } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
785 /* send multicast frames both to higher layers in
786 * local net stack and back to the wireless medium
788 xmit_skb = skb_copy(skb, GFP_ATOMIC);
790 int xmit_cid = wil_find_cid(wil, vif->mid, da);
793 /* The destination station is associated to
794 * this AP (in this VLAN), so send the frame
795 * directly to it and do not pass it to local
804 /* Send to wireless media and increase priority by 256 to
805 * keep the received priority instead of reclassifying
806 * the frame (see cfg80211_classify8021d).
808 xmit_skb->dev = ndev;
809 xmit_skb->priority += 256;
810 xmit_skb->protocol = htons(ETH_P_802_3);
811 skb_reset_network_header(xmit_skb);
812 skb_reset_mac_header(xmit_skb);
813 wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
814 dev_queue_xmit(xmit_skb);
817 if (skb) { /* deliver to local stack */
818 skb->protocol = eth_type_trans(skb, ndev);
820 rc = napi_gro_receive(&wil->napi_rx, skb);
821 wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
822 len, gro_res_str[rc]);
825 /* statistics. rc set to GRO_NORMAL for AP bridging */
826 if (unlikely(rc == GRO_DROP)) {
827 ndev->stats.rx_dropped++;
829 wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
831 ndev->stats.rx_packets++;
833 ndev->stats.rx_bytes += len;
834 stats->rx_bytes += len;
836 ndev->stats.multicast++;
841 * Process all completed skb's from the Rx VRING
843 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
845 void wil_rx_handle(struct wil6210_priv *wil, int *quota)
847 struct net_device *ndev = wil->main_ndev;
848 struct wireless_dev *wdev = ndev->ieee80211_ptr;
849 struct wil_ring *v = &wil->ring_rx;
852 if (unlikely(!v->va)) {
853 wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
856 wil_dbg_txrx(wil, "rx_handle\n");
857 while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
860 /* monitor is currently supported on main interface only */
861 if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
863 skb_reset_mac_header(skb);
864 skb->ip_summed = CHECKSUM_UNNECESSARY;
865 skb->pkt_type = PACKET_OTHERHOST;
866 skb->protocol = htons(ETH_P_802_2);
867 wil_netif_rx_any(skb, ndev);
869 wil_rx_reorder(wil, skb);
872 wil_rx_refill(wil, v->size);
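/* wil_rx_buf_len_init - select the Rx buffer length from the
 * rx_large_buf module parameter, never below mtu_max
 */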
875 static void wil_rx_buf_len_init(struct wil6210_priv *wil)
877 wil->rx_buf_len = rx_large_buf ?
878 WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
879 if (mtu_max > wil->rx_buf_len) {
880 /* do not allow RX buffers to be smaller than mtu_max, for
881 * backward compatibility (mtu_max parameter was also used
882 * to support receiving large packets)
884 wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
885 wil->rx_buf_len = mtu_max;
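/* wil_rx_init - allocate the Rx vring of 1 << order entries, register
 * the Rx chain with FW and pre-fill the ring with skbs
 */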
889 static int wil_rx_init(struct wil6210_priv *wil, uint order)
891 struct wil_ring *vring = &wil->ring_rx;
894 wil_dbg_misc(wil, "rx_init\n");
897 wil_err(wil, "Rx ring already allocated\n");
901 wil_rx_buf_len_init(wil);
903 vring->size = 1 << order;
905 rc = wil_vring_alloc(wil, vring);
909 rc = wmi_rx_chain_add(wil, vring);
913 rc = wil_rx_refill(wil, vring->size);
919 wil_vring_free(wil, vring);
924 static void wil_rx_fini(struct wil6210_priv *wil)
926 struct wil_ring *vring = &wil->ring_rx;
928 wil_dbg_misc(wil, "rx_fini\n");
931 wil_vring_free(wil, vring);
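/* wil_tx_desc_map - fill a legacy Tx descriptor with the DMA address,
 * length and ring index; the MAC descriptor fields select 802.3
 * encapsulation with SNAP header insertion
 */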
934 static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
935 u32 len, int vring_index)
937 struct vring_tx_desc *d = &desc->legacy;
939 wil_desc_addr_set(&d->dma.addr, pa);
940 d->dma.ip_length = 0;
941 /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
942 d->dma.b11 = 0/*14 | BIT(7)*/;
944 d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
945 d->dma.length = cpu_to_le16((u16)len);
946 d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
950 d->mac.ucode_cmd = 0;
951 /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
952 d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
953 (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
958 void wil_tx_data_init(struct wil_ring_tx_data *txdata)
960 spin_lock_bh(&txdata->lock);
961 txdata->dot1x_open = 0;
964 txdata->last_idle = 0;
966 txdata->agg_wsize = 0;
967 txdata->agg_timeout = 0;
968 txdata->agg_amsdu = 0;
969 txdata->addba_in_progress = false;
970 txdata->mid = U8_MAX;
971 spin_unlock_bh(&txdata->lock);
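/* wil_vring_init_tx - allocate a Tx vring for the given cid/tid and
 * configure it in FW via WMI_VRING_CFG_CMDID; on success the ring is
 * enabled and a block-ack session may be requested
 */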
974 static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
977 struct wil6210_priv *wil = vif_to_wil(vif);
979 struct wmi_vring_cfg_cmd cmd = {
980 .action = cpu_to_le32(WMI_VRING_CMD_ADD),
984 cpu_to_le16(wil_mtu2macbuf(mtu_max)),
985 .ring_size = cpu_to_le16(size),
988 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
993 .priority = cpu_to_le16(0),
994 .timeslot_us = cpu_to_le16(0xfff),
999 struct wmi_cmd_hdr wmi;
1000 struct wmi_vring_cfg_done_event cmd;
1001 } __packed reply = {
1002 .cmd = {.status = WMI_FW_STATUS_FAILURE},
1004 struct wil_ring *vring = &wil->ring_tx[id];
1005 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
1007 if (cid >= WIL6210_RX_DESC_MAX_CID) {
1008 cmd.vring_cfg.cidxtid = CIDXTID_EXTENDED_CID_TID;
1009 cmd.vring_cfg.cid = cid;
1010 cmd.vring_cfg.tid = tid;
1012 cmd.vring_cfg.cidxtid = mk_cidxtid(cid, tid);
1015 wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
1016 cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
1017 lockdep_assert_held(&wil->mutex);
1020 wil_err(wil, "Tx ring [%d] already allocated\n", id);
1025 wil_tx_data_init(txdata);
1026 vring->is_rx = false;
1028 rc = wil_vring_alloc(wil, vring);
1032 wil->ring2cid_tid[id][0] = cid;
1033 wil->ring2cid_tid[id][1] = tid;
1035 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
1038 txdata->dot1x_open = true;
1039 rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
1040 WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
1041 WIL_WMI_CALL_GENERAL_TO_MS);
1045 if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
1046 wil_err(wil, "Tx config failed, status 0x%02x\n",
1052 spin_lock_bh(&txdata->lock);
1053 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
1054 txdata->mid = vif->mid;
1055 txdata->enabled = 1;
1056 spin_unlock_bh(&txdata->lock);
1058 if (txdata->dot1x_open && (agg_wsize >= 0))
1059 wil_addba_tx_request(wil, id, agg_wsize);
1063 spin_lock_bh(&txdata->lock);
1064 txdata->dot1x_open = false;
1065 txdata->enabled = 0;
1066 spin_unlock_bh(&txdata->lock);
1067 wil_vring_free(wil, vring);
1068 wil->ring2cid_tid[id][0] = wil->max_assoc_sta;
1069 wil->ring2cid_tid[id][1] = 0;
1076 static int wil_tx_vring_modify(struct wil6210_vif *vif, int ring_id, int cid,
1079 struct wil6210_priv *wil = vif_to_wil(vif);
1081 struct wmi_vring_cfg_cmd cmd = {
1082 .action = cpu_to_le32(WMI_VRING_CMD_MODIFY),
1086 cpu_to_le16(wil_mtu2macbuf(mtu_max)),
1090 .cidxtid = mk_cidxtid(cid, tid),
1091 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
1096 .priority = cpu_to_le16(0),
1097 .timeslot_us = cpu_to_le16(0xfff),
1102 struct wmi_cmd_hdr wmi;
1103 struct wmi_vring_cfg_done_event cmd;
1104 } __packed reply = {
1105 .cmd = {.status = WMI_FW_STATUS_FAILURE},
1107 struct wil_ring *vring = &wil->ring_tx[ring_id];
1108 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
1110 wil_dbg_misc(wil, "vring_modify: ring %d cid %d tid %d\n", ring_id,
1112 lockdep_assert_held(&wil->mutex);
1115 wil_err(wil, "Tx ring [%d] not allocated\n", ring_id);
1119 if (wil->ring2cid_tid[ring_id][0] != cid ||
1120 wil->ring2cid_tid[ring_id][1] != tid) {
1121 wil_err(wil, "ring info does not match cid=%u tid=%u\n",
1122 wil->ring2cid_tid[ring_id][0],
1123 wil->ring2cid_tid[ring_id][1]);
1126 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
1128 rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
1129 WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
1130 WIL_WMI_CALL_GENERAL_TO_MS);
1134 if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
1135 wil_err(wil, "Tx modify failed, status 0x%02x\n",
1141 /* set BA aggregation window size to 0 to force a new BA with the
1144 txdata->agg_wsize = 0;
1145 if (txdata->dot1x_open && agg_wsize >= 0)
1146 wil_addba_tx_request(wil, ring_id, agg_wsize);
1150 spin_lock_bh(&txdata->lock);
1151 txdata->dot1x_open = false;
1152 txdata->enabled = 0;
1153 spin_unlock_bh(&txdata->lock);
1154 wil->ring2cid_tid[ring_id][0] = wil->max_assoc_sta;
1155 wil->ring2cid_tid[ring_id][1] = 0;
1159 int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
1161 struct wil6210_priv *wil = vif_to_wil(vif);
1163 struct wmi_bcast_vring_cfg_cmd cmd = {
1164 .action = cpu_to_le32(WMI_VRING_CMD_ADD),
1168 cpu_to_le16(wil_mtu2macbuf(mtu_max)),
1169 .ring_size = cpu_to_le16(size),
1172 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
1176 struct wmi_cmd_hdr wmi;
1177 struct wmi_vring_cfg_done_event cmd;
1178 } __packed reply = {
1179 .cmd = {.status = WMI_FW_STATUS_FAILURE},
1181 struct wil_ring *vring = &wil->ring_tx[id];
1182 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
1184 wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
1185 cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
1186 lockdep_assert_held(&wil->mutex);
1189 wil_err(wil, "Tx ring [%d] already allocated\n", id);
1194 wil_tx_data_init(txdata);
1195 vring->is_rx = false;
1197 rc = wil_vring_alloc(wil, vring);
1201 wil->ring2cid_tid[id][0] = wil->max_assoc_sta; /* CID */
1202 wil->ring2cid_tid[id][1] = 0; /* TID */
1204 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
1207 txdata->dot1x_open = true;
1208 rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, vif->mid,
1210 WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
1211 WIL_WMI_CALL_GENERAL_TO_MS);
1215 if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
1216 wil_err(wil, "Tx config failed, status 0x%02x\n",
1222 spin_lock_bh(&txdata->lock);
1223 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
1224 txdata->mid = vif->mid;
1225 txdata->enabled = 1;
1226 spin_unlock_bh(&txdata->lock);
1230 spin_lock_bh(&txdata->lock);
1231 txdata->enabled = 0;
1232 txdata->dot1x_open = false;
1233 spin_unlock_bh(&txdata->lock);
1234 wil_vring_free(wil, vring);
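/* wil_find_tx_ucast - find the Tx ring mapped to the destination
 * station (by cid); rings whose 802.1x port is closed carry only EAPOL
 */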
1240 static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
1241 struct wil6210_vif *vif,
1242 struct sk_buff *skb)
1245 const u8 *da = wil_skb_get_da(skb);
1246 int min_ring_id = wil_get_min_tx_ring_id(wil);
1248 cid = wil_find_cid(wil, vif->mid, da);
1250 if (cid < 0 || cid >= wil->max_assoc_sta)
1253 /* TODO: fix for multiple TID */
1254 for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
1255 if (!wil->ring_tx_data[i].dot1x_open &&
1256 skb->protocol != cpu_to_be16(ETH_P_PAE))
1258 if (wil->ring2cid_tid[i][0] == cid) {
1259 struct wil_ring *v = &wil->ring_tx[i];
1260 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
1262 wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
1264 if (v->va && txdata->enabled) {
1268 "find_tx_ucast: vring[%d] not valid\n",
1278 static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1279 struct wil_ring *ring, struct sk_buff *skb);
1281 static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
1282 struct wil6210_vif *vif,
1283 struct sk_buff *skb)
1285 struct wil_ring *ring;
1288 struct wil_ring_tx_data *txdata;
1289 int min_ring_id = wil_get_min_tx_ring_id(wil);
1291 /* In STA mode, it is expected to have only 1 VRING
1292 * for the AP we are connected to.
1293 * Find the first vring eligible for this skb and use it.
1295 for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
1296 ring = &wil->ring_tx[i];
1297 txdata = &wil->ring_tx_data[i];
1298 if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
1301 cid = wil->ring2cid_tid[i][0];
1302 if (cid >= wil->max_assoc_sta) /* skip BCAST */
1305 if (!wil->ring_tx_data[i].dot1x_open &&
1306 skb->protocol != cpu_to_be16(ETH_P_PAE))
1309 wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
1314 wil_dbg_txrx(wil, "Tx while no rings active?\n");
1319 /* Use one of 2 strategies:
1321 * 1. New (real broadcast):
1322 * use dedicated broadcast vring
1323 * 2. Old (pseudo-DMS):
1324 * Find the first vring and return it;
1325 * duplicate skb and send it to other active vrings;
1326 * in all cases override dest address to unicast peer's address
1327 * Use old strategy when new is not supported yet:
1330 static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
1331 struct wil6210_vif *vif,
1332 struct sk_buff *skb)
1335 struct wil_ring_tx_data *txdata;
1336 int i = vif->bcast_ring;
1340 v = &wil->ring_tx[i];
1341 txdata = &wil->ring_tx_data[i];
1342 if (!v->va || !txdata->enabled)
1344 if (!wil->ring_tx_data[i].dot1x_open &&
1345 skb->protocol != cpu_to_be16(ETH_P_PAE))
1351 static void wil_set_da_for_vring(struct wil6210_priv *wil,
1352 struct sk_buff *skb, int vring_index)
1354 u8 *da = wil_skb_get_da(skb);
1355 int cid = wil->ring2cid_tid[vring_index][0];
1357 ether_addr_copy(da, wil->sta[cid].addr);
1360 static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
1361 struct wil6210_vif *vif,
1362 struct sk_buff *skb)
1364 struct wil_ring *v, *v2;
1365 struct sk_buff *skb2;
1368 const u8 *src = wil_skb_get_sa(skb);
1369 struct wil_ring_tx_data *txdata, *txdata2;
1370 int min_ring_id = wil_get_min_tx_ring_id(wil);
1372 /* find the first vring eligible for data */
1373 for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
1374 v = &wil->ring_tx[i];
1375 txdata = &wil->ring_tx_data[i];
1376 if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
1379 cid = wil->ring2cid_tid[i][0];
1380 if (cid >= wil->max_assoc_sta) /* skip BCAST */
1382 if (!wil->ring_tx_data[i].dot1x_open &&
1383 skb->protocol != cpu_to_be16(ETH_P_PAE))
1386 /* don't Tx back to source when re-routing Rx->Tx at the AP */
1387 if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
1393 wil_dbg_txrx(wil, "Tx while no vrings active?\n");
1398 wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
1399 wil_set_da_for_vring(wil, skb, i);
1401 /* find other active vrings and duplicate skb for each */
1402 for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
1403 v2 = &wil->ring_tx[i];
1404 txdata2 = &wil->ring_tx_data[i];
1405 if (!v2->va || txdata2->mid != vif->mid)
1407 cid = wil->ring2cid_tid[i][0];
1408 if (cid >= wil->max_assoc_sta) /* skip BCAST */
1410 if (!wil->ring_tx_data[i].dot1x_open &&
1411 skb->protocol != cpu_to_be16(ETH_P_PAE))
1414 if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
1417 skb2 = skb_copy(skb, GFP_ATOMIC);
1419 wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
1420 wil_set_da_for_vring(wil, skb2, i);
1421 wil_tx_ring(wil, vif, v2, skb2);
1422 /* successful call to wil_tx_ring takes skb2 ref */
1423 dev_kfree_skb_any(skb2);
1425 wil_err(wil, "skb_copy failed\n");
1433 void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
1435 d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
1439 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
1440 * @skb is used to obtain the protocol and header lengths.
1441 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
1442 * 2 - middle, 3 - last descriptor.
1445 static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
1446 struct sk_buff *skb,
1447 int tso_desc_type, bool is_ipv4,
1448 int tcp_hdr_len, int skb_net_hdr_len)
1450 d->dma.b11 = ETH_HLEN; /* MAC header length */
1451 d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
1453 d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
1454 /* L4 header len: TCP header length */
1455 d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
1457 /* Setup TSO: bit and desc type */
1458 d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
1459 (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
1460 d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
1462 d->dma.ip_length = skb_net_hdr_len;
1463 /* Enable TCP/UDP checksum */
1464 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
1465 /* Calculate pseudo-header */
1466 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
1470 * Sets the descriptor @d up for csum. The corresponding
1471 * @skb is used to obtain the protocol and header lengths.
1472 * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
1473 * Note, if d==NULL, the function only returns the protocol result.
1475 * It is very similar to previous wil_tx_desc_offload_setup_tso. This
1476 * is "if unrolling" to optimize the critical path.
1479 static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
1480 struct sk_buff *skb){
1483 if (skb->ip_summed != CHECKSUM_PARTIAL)
1486 d->dma.b11 = ETH_HLEN; /* MAC header length */
1488 switch (skb->protocol) {
1489 case cpu_to_be16(ETH_P_IP):
1490 protocol = ip_hdr(skb)->protocol;
1491 d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
1493 case cpu_to_be16(ETH_P_IPV6):
1494 protocol = ipv6_hdr(skb)->nexthdr;
1502 d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
1503 /* L4 header len: TCP header length */
1505 (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
1508 /* L4 header len: UDP header length */
1510 (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
1516 d->dma.ip_length = skb_network_header_len(skb);
1517 /* Enable TCP/UDP checksum */
1518 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
1519 /* Calculate pseudo-header */
1520 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
1525 static inline void wil_tx_last_desc(struct vring_tx_desc *d)
1527 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
1528 BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
1529 BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
1532 static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
1534 d->dma.d0 |= wil_tso_type_lst <<
1535 DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
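/* __wil_tx_vring_tso - map a GSO skb onto the Tx vring as a header
 * descriptor followed by per-MSS data segments, with TSO offload fields
 * set in every descriptor; called with the ring lock held
 */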
1538 static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
1539 struct wil_ring *vring, struct sk_buff *skb)
1541 struct device *dev = wil_to_dev(wil);
1543 /* point to descriptors in shared memory */
1544 volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
1545 *_first_desc = NULL;
1547 /* pointers to shadow descriptors */
1548 struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
1549 *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
1550 *first_desc = &first_desc_mem;
1552 /* pointer to shadow descriptors' context */
1553 struct wil_ctx *hdr_ctx, *first_ctx = NULL;
1555 int descs_used = 0; /* total number of used descriptors */
1556 int sg_desc_cnt = 0; /* number of descriptors for current mss*/
1558 u32 swhead = vring->swhead;
1559 int used, avail = wil_ring_avail_tx(vring);
1560 int nr_frags = skb_shinfo(skb)->nr_frags;
1561 int min_desc_required = nr_frags + 1;
1562 int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
1563 int f, len, hdrlen, headlen;
1564 int vring_index = vring - wil->ring_tx;
1565 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index];
1568 const skb_frag_t *frag = NULL;
1571 int hdr_compensation_need = true;
1572 int desc_tso_type = wil_tso_type_first;
1575 int skb_net_hdr_len;
1579 wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
1582 if (unlikely(!txdata->enabled))
1585 /* A typical 4K page holds 3-4 payloads; we assume each fragment
1586 * is a full payload, that's how min_desc_required has been
1587 * calculated. In reality we might need more or fewer descriptors;
1588 * this is only the initial check.
1590 if (unlikely(avail < min_desc_required)) {
1591 wil_err_ratelimited(wil,
1592 "TSO: Tx ring[%2d] full. No space for %d fragments\n",
1593 vring_index, min_desc_required);
1597 /* Header Length = MAC header len + IP header len + TCP header len*/
1599 (int)skb_network_header_len(skb) +
1602 gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
1605 /* TCP v4, zero out the IP length and IPv4 checksum fields
1606 * as required by the offloading doc
1608 ip_hdr(skb)->tot_len = 0;
1609 ip_hdr(skb)->check = 0;
1613 /* TCP v6, zero out the payload length */
1614 ipv6_hdr(skb)->payload_len = 0;
1618 /* Types other than TCPv4 or TCPv6 are not supported for TSO.
1619 * It is also illegal for both to be set simultaneously
1624 if (skb->ip_summed != CHECKSUM_PARTIAL)
1627 /* TCP header length and skb network header length are fixed for all
1628 * of the packet's descriptors - read them once here
1630 tcp_hdr_len = tcp_hdrlen(skb);
1631 skb_net_hdr_len = skb_network_header_len(skb);
1633 _hdr_desc = &vring->va[i].tx.legacy;
1635 pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
1636 if (unlikely(dma_mapping_error(dev, pa))) {
1637 wil_err(wil, "TSO: Skb head DMA map error\n");
1641 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa,
1642 hdrlen, vring_index);
1643 wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
1644 tcp_hdr_len, skb_net_hdr_len);
1645 wil_tx_last_desc(hdr_desc);
1647 vring->ctx[i].mapped_as = wil_mapped_as_single;
1648 hdr_ctx = &vring->ctx[i];
1651 headlen = skb_headlen(skb) - hdrlen;
1653 for (f = headlen ? -1 : 0; f < nr_frags; f++) {
1656 wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
1659 frag = &skb_shinfo(skb)->frags[f];
1661 wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
1666 "TSO: len %d, rem_data %d, descs_used %d\n",
1667 len, rem_data, descs_used);
1669 if (descs_used == avail) {
1670 wil_err_ratelimited(wil, "TSO: ring overflow\n");
1675 lenmss = min_t(int, rem_data, len);
1676 i = (swhead + descs_used) % vring->size;
1677 wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
1680 pa = skb_frag_dma_map(dev, frag,
1681 frag->size - len, lenmss,
1683 vring->ctx[i].mapped_as = wil_mapped_as_page;
1685 pa = dma_map_single(dev,
1687 skb_headlen(skb) - headlen,
1690 vring->ctx[i].mapped_as = wil_mapped_as_single;
1694 if (unlikely(dma_mapping_error(dev, pa))) {
1695 wil_err(wil, "TSO: DMA map page error\n");
1699 _desc = &vring->va[i].tx.legacy;
1702 _first_desc = _desc;
1703 first_ctx = &vring->ctx[i];
1709 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
1710 pa, lenmss, vring_index);
1711 wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
1712 is_ipv4, tcp_hdr_len,
1715 /* use tso_type_first only once */
1716 desc_tso_type = wil_tso_type_mid;
1718 descs_used++; /* desc used so far */
1719 sg_desc_cnt++; /* desc used for this segment */
1724 "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
1725 len, rem_data, descs_used, sg_desc_cnt);
1727 /* Close the segment if we reached the mss size or the last frag */
1728 if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
1729 if (hdr_compensation_need) {
1730 /* first segment includes hdr desc for
1733 hdr_ctx->nr_frags = sg_desc_cnt;
1734 wil_tx_desc_set_nr_frags(first_desc,
1737 hdr_compensation_need = false;
1739 wil_tx_desc_set_nr_frags(first_desc,
1742 first_ctx->nr_frags = sg_desc_cnt - 1;
1744 wil_tx_last_desc(d);
1746 /* first descriptor may also be the last
1747 * for this mss - make sure not to copy
1750 if (first_desc != d)
1751 *_first_desc = *first_desc;
1753 /* last descriptor will be copied at the end
1754 * of this TS processing
1756 if (f < nr_frags - 1 || len > 0)
1762 } else if (first_desc != d) /* update mid descriptor */
1770 /* first descriptor may also be the last.
1771 * in this case the d pointer is invalid
1773 if (_first_desc == _desc)
1776 /* Last data descriptor */
1777 wil_set_tx_desc_last_tso(d);
1780 /* Fill the total number of descriptors in first desc (hdr)*/
1781 wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
1782 *_hdr_desc = *hdr_desc;
1784 /* hold reference to skb
1785 * to prevent skb release before accounting
1786 * in case of immediate "tx done"
1788 vring->ctx[i].skb = skb_get(skb);
1790 /* performance monitoring */
1791 used = wil_ring_used_tx(vring);
1792 if (wil_val_in_range(wil->ring_idle_trsh,
1793 used, used + descs_used)) {
1794 txdata->idle += get_cycles() - txdata->last_idle;
1795 wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
1796 vring_index, used, used + descs_used);
1799 /* Make sure to advance the head only after descriptor update is done.
1800 * This will prevent a race condition where the completion thread
1801 * will see the DU bit set from a previous run and will handle the
1802 * skb before it is completed.
1806 /* advance swhead */
1807 wil_ring_advance_head(vring, descs_used);
1808 wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
1810 /* make sure all writes to descriptors (shared memory) are done before
1811 * committing them to HW
1815 if (wil->tx_latency)
1816 *(ktime_t *)&skb->cb = ktime_get();
1818 memset(skb->cb, 0, sizeof(ktime_t));
1820 wil_w(wil, vring->hwtail, vring->swhead);
1824 while (descs_used > 0) {
1825 struct wil_ctx *ctx;
1827 i = (swhead + descs_used - 1) % vring->size;
1828 d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
1829 _desc = &vring->va[i].tx.legacy;
1831 _desc->dma.status = TX_DMA_STATUS_DU;
1832 ctx = &vring->ctx[i];
1833 wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
1834 memset(ctx, 0, sizeof(*ctx));
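/* __wil_tx_ring - map an skb (head plus page fragments) onto consecutive
 * Tx descriptors and commit them to HW; unmaps everything on error
 */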
1841 static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1842 struct wil_ring *ring, struct sk_buff *skb)
1844 struct device *dev = wil_to_dev(wil);
1845 struct vring_tx_desc dd, *d = &dd;
1846 volatile struct vring_tx_desc *_d;
1847 u32 swhead = ring->swhead;
1848 int avail = wil_ring_avail_tx(ring);
1849 int nr_frags = skb_shinfo(skb)->nr_frags;
1851 int ring_index = ring - wil->ring_tx;
1852 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
1856 bool mcast = (ring_index == vif->bcast_ring);
1857 uint len = skb_headlen(skb);
1859 wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
1860 skb->len, ring_index, nr_frags);
1862 if (unlikely(!txdata->enabled))
1865 if (unlikely(avail < 1 + nr_frags)) {
1866 wil_err_ratelimited(wil,
1867 "Tx ring[%2d] full. No space for %d fragments\n",
1868 ring_index, 1 + nr_frags);
1871 _d = &ring->va[i].tx.legacy;
1873 pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
1875 wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index,
1876 skb_headlen(skb), skb->data, &pa);
1877 wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
1878 skb->data, skb_headlen(skb), false);
1880 if (unlikely(dma_mapping_error(dev, pa)))
1882 ring->ctx[i].mapped_as = wil_mapped_as_single;
1884 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
1886 if (unlikely(mcast)) {
1887 d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
1888 if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
1889 d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
1891 /* Process TCP/UDP checksum offloading */
1892 if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
1893 wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
1898 ring->ctx[i].nr_frags = nr_frags;
1899 wil_tx_desc_set_nr_frags(d, nr_frags + 1);
1901 /* middle segments */
1902 for (; f < nr_frags; f++) {
1903 const struct skb_frag_struct *frag =
1904 &skb_shinfo(skb)->frags[f];
1905 int len = skb_frag_size(frag);
1908 wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
1909 wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
1910 (const void *)d, sizeof(*d), false);
1911 i = (swhead + f + 1) % ring->size;
1912 _d = &ring->va[i].tx.legacy;
1913 pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
1915 if (unlikely(dma_mapping_error(dev, pa))) {
1916 wil_err(wil, "Tx[%2d] failed to map fragment\n",
1920 ring->ctx[i].mapped_as = wil_mapped_as_page;
1921 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
1922 pa, len, ring_index);
1923 /* no need to check return code -
1924 * if it succeeded for 1-st descriptor,
1925 * it will succeed here too
1927 wil_tx_desc_offload_setup(d, skb);
1929 /* for the last seg only */
1930 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
1931 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
1932 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
1934 wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
1935 wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
1936 (const void *)d, sizeof(*d), false);
1938 /* hold reference to skb
1939 * to prevent skb release before accounting
1940 * in case of immediate "tx done"
1942 ring->ctx[i].skb = skb_get(skb);
1944 /* performance monitoring */
1945 used = wil_ring_used_tx(ring);
1946 if (wil_val_in_range(wil->ring_idle_trsh,
1947 used, used + nr_frags + 1)) {
1948 txdata->idle += get_cycles() - txdata->last_idle;
1949 wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
1950 ring_index, used, used + nr_frags + 1);
1953 /* Make sure to advance the head only after descriptor update is done.
1954 * This will prevent a race condition where the completion thread
1955 * will see the DU bit set from a previous run and will handle the
1956 * skb before it is completed.
1960 /* advance swhead */
1961 wil_ring_advance_head(ring, nr_frags + 1);
1962 wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead,
1964 trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags);
1966 /* make sure all writes to descriptors (shared memory) are done before
1967 * committing them to HW
1971 if (wil->tx_latency)
1972 *(ktime_t *)&skb->cb = ktime_get();
1974 memset(skb->cb, 0, sizeof(ktime_t));
1976 wil_w(wil, ring->hwtail, ring->swhead);
1980 /* unmap what we have mapped */
1981 nr_frags = f + 1; /* frags mapped + one for skb head */
1982 for (f = 0; f < nr_frags; f++) {
1983 struct wil_ctx *ctx;
1985 i = (swhead + f) % ring->size;
1986 ctx = &ring->ctx[i];
1987 _d = &ring->va[i].tx.legacy;
1989 _d->dma.status = TX_DMA_STATUS_DU;
1990 wil->txrx_ops.tx_desc_unmap(dev,
1991 (union wil_tx_desc *)d,
1994 memset(ctx, 0, sizeof(*ctx));
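/* wil_tx_ring - serialize Tx on the ring lock and dispatch to the TSO
 * or regular Tx path; packets are dropped during suspend/resume
 */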
2000 static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
2001 struct wil_ring *ring, struct sk_buff *skb)
2003 int ring_index = ring - wil->ring_tx;
2004 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
2007 spin_lock(&txdata->lock);
2009 if (test_bit(wil_status_suspending, wil->status) ||
2010 test_bit(wil_status_suspended, wil->status) ||
2011 test_bit(wil_status_resuming, wil->status)) {
2013 "suspend/resume in progress. drop packet\n");
2014 spin_unlock(&txdata->lock);
2018 rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring)
2019 (wil, vif, ring, skb);
2021 spin_unlock(&txdata->lock);
2027 * Check status of tx vrings and stop/wake net queues if needed
2028 * It will start/stop net queues of a specific VIF net_device.
2030 * This function does one of two checks:
2031 * If check_stop is true, check whether the net queues need to be stopped. If
2032 * the conditions for stopping are met, netif_tx_stop_all_queues() is called.
2033 * If check_stop is false, check whether the net queues need to be woken. If
2034 * the conditions for waking are met, netif_tx_wake_all_queues() is called.
2035 * vring is the vring which is currently being modified by either adding
2036 * descriptors (tx) into it or removing descriptors (tx complete) from it. Can
2037 * be null when irrelevant (e.g. connect/disconnect events).
2039 * The implementation is to stop the net queues if the modified vring has low
2040 * descriptor availability. Wake them if no vring is in low descriptor
2041 * availability and the modified vring has high descriptor availability.
2043 static inline void __wil_update_net_queues(struct wil6210_priv *wil,
2044 struct wil6210_vif *vif,
2045 struct wil_ring *ring,
2049 int min_ring_id = wil_get_min_tx_ring_id(wil);
2055 wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
2056 (int)(ring - wil->ring_tx), vif->mid, check_stop,
2057 vif->net_queue_stopped);
2059 wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
2060 check_stop, vif->mid, vif->net_queue_stopped);
2062 if (ring && drop_if_ring_full)
2063 /* no need to stop/wake net queues */
2066 if (check_stop == vif->net_queue_stopped)
2067 /* net queues already in desired state */
2071 if (!ring || unlikely(wil_ring_avail_low(ring))) {
2072 /* not enough room in the vring */
2073 netif_tx_stop_all_queues(vif_to_ndev(vif));
2074 vif->net_queue_stopped = true;
2075 wil_dbg_txrx(wil, "netif_tx_stop called\n");
2080 /* Do not wake the queues in suspend flow */
2081 if (test_bit(wil_status_suspending, wil->status) ||
2082 test_bit(wil_status_suspended, wil->status))
2086 for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
2087 struct wil_ring *cur_ring = &wil->ring_tx[i];
2088 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
2090 if (txdata->mid != vif->mid || !cur_ring->va ||
2091 !txdata->enabled || cur_ring == ring)
2094 if (wil_ring_avail_low(cur_ring)) {
2095 wil_dbg_txrx(wil, "ring %d full, can't wake\n",
2096 (int)(cur_ring - wil->ring_tx));
2101 if (!ring || wil_ring_avail_high(ring)) {
2102 /* enough room in the ring */
2103 wil_dbg_txrx(wil, "calling netif_tx_wake\n");
2104 netif_tx_wake_all_queues(vif_to_ndev(vif));
2105 vif->net_queue_stopped = false;
2109 void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
2110 struct wil_ring *ring, bool check_stop)
2112 spin_lock(&wil->net_queue_lock);
2113 __wil_update_net_queues(wil, vif, ring, check_stop);
2114 spin_unlock(&wil->net_queue_lock);
2117 void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
2118 struct wil_ring *ring, bool check_stop)
2120 spin_lock_bh(&wil->net_queue_lock);
2121 __wil_update_net_queues(wil, vif, ring, check_stop);
2122 spin_unlock_bh(&wil->net_queue_lock);
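/* wil_start_xmit - ndo_start_xmit handler: choose a Tx ring based on the
 * interface type and destination address, then queue the skb on it
 */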
2125 netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2127 struct wil6210_vif *vif = ndev_to_vif(ndev);
2128 struct wil6210_priv *wil = vif_to_wil(vif);
2129 const u8 *da = wil_skb_get_da(skb);
2130 bool bcast = is_multicast_ether_addr(da);
2131 struct wil_ring *ring;
2132 static bool pr_once_fw;
2135 wil_dbg_txrx(wil, "start_xmit\n");
2136 if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
2138 wil_err(wil, "FW not ready\n");
2143 if (unlikely(!test_bit(wil_vif_fwconnected, vif->status))) {
2144 wil_dbg_ratelimited(wil,
2145 "VIF not connected, packet dropped\n");
2148 if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_MONITOR)) {
2149 wil_err(wil, "Xmit in monitor mode not supported\n");
2155 if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
2156 /* in STA mode (ESS), all to same VRING (to AP) */
2157 ring = wil_find_tx_ring_sta(wil, vif, skb);
2160 /* in pbss, no bcast VRING - duplicate skb in
2161 * all stations' VRINGs
2163 ring = wil_find_tx_bcast_2(wil, vif, skb);
2164 else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
2165 /* AP has a dedicated bcast VRING */
2166 ring = wil_find_tx_bcast_1(wil, vif, skb);
2168 /* unexpected combination, fall back to duplicating
2169 * the skb in all stations' VRINGs
2171 ring = wil_find_tx_bcast_2(wil, vif, skb);
2173 /* unicast, find specific VRING by dest. address */
2174 ring = wil_find_tx_ucast(wil, vif, skb);
2176 if (unlikely(!ring)) {
2177 wil_dbg_txrx(wil, "No Tx RING found for %pM\n", da);
2180 /* set up vring entry */
2181 rc = wil_tx_ring(wil, vif, ring, skb);
2185 /* shall we stop net queues? */
2186 wil_update_net_queues_bh(wil, vif, ring, true);
2187 /* statistics will be updated on the tx_complete */
2188 dev_kfree_skb_any(skb);
2189 return NETDEV_TX_OK;
2191 if (drop_if_ring_full)
2193 return NETDEV_TX_BUSY;
2195 break; /* goto drop; */
2198 ndev->stats.tx_dropped++;
2199 dev_kfree_skb_any(skb);
2201 return NET_XMIT_DROP;
2204 void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
2205 struct wil_sta_info *sta)
2210 if (!wil->tx_latency)
2213 if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0)
2216 skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb);
2217 bin = skb_time_us / wil->tx_latency_res;
2218 bin = min_t(int, bin, WIL_NUM_LATENCY_BINS - 1);
2220 wil_dbg_txrx(wil, "skb time %dus => bin %d\n", skb_time_us, bin);
2221 sta->tx_latency_bins[bin]++;
2222 sta->stats.tx_latency_total_us += skb_time_us;
2223 if (skb_time_us < sta->stats.tx_latency_min_us)
2224 sta->stats.tx_latency_min_us = skb_time_us;
2225 if (skb_time_us > sta->stats.tx_latency_max_us)
2226 sta->stats.tx_latency_max_us = skb_time_us;
2230 * Clean up transmitted skb's from the Tx VRING
2232 * Return number of descriptors cleared
2234 * Safe to call from IRQ
2236 int wil_tx_complete(struct wil6210_vif *vif, int ringid)
2238 struct wil6210_priv *wil = vif_to_wil(vif);
2239 struct net_device *ndev = vif_to_ndev(vif);
2240 struct device *dev = wil_to_dev(wil);
2241 struct wil_ring *vring = &wil->ring_tx[ringid];
2242 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
2244 int cid = wil->ring2cid_tid[ringid][0];
2245 struct wil_net_stats *stats = NULL;
2246 volatile struct vring_tx_desc *_d;
2247 int used_before_complete;
2250 if (unlikely(!vring->va)) {
2251 wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
2255 if (unlikely(!txdata->enabled)) {
2256 wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
2260 wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);
2262 used_before_complete = wil_ring_used_tx(vring);
2264 if (cid < wil->max_assoc_sta)
2265 stats = &wil->sta[cid].stats;
2267 while (!wil_ring_is_empty(vring)) {
2269 struct wil_ctx *ctx = &vring->ctx[vring->swtail];
2271 * For a fragmented skb, HW will set the DU bit only for the
2272 * last fragment. Look for it.
2273 * In TSO the first DU will include the hdr desc
2275 int lf = (vring->swtail + ctx->nr_frags) % vring->size;
2276 /* TODO: check we are not past head */
2278 _d = &vring->va[lf].tx.legacy;
2279 if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
2282 new_swtail = (lf + 1) % vring->size;
2283 while (vring->swtail != new_swtail) {
2284 struct vring_tx_desc dd, *d = &dd;
2286 struct sk_buff *skb;
2288 ctx = &vring->ctx[vring->swtail];
2290 _d = &vring->va[vring->swtail].tx.legacy;
2294 dmalen = le16_to_cpu(d->dma.length);
2295 trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
2298 "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
2299 ringid, vring->swtail, dmalen,
2300 d->dma.status, d->dma.error);
2301 wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
2302 (const void *)d, sizeof(*d), false);
2304 wil->txrx_ops.tx_desc_unmap(dev,
2305 (union wil_tx_desc *)d,
2309 if (likely(d->dma.error == 0)) {
2310 ndev->stats.tx_packets++;
2311 ndev->stats.tx_bytes += skb->len;
2313 stats->tx_packets++;
2314 stats->tx_bytes += skb->len;
2316 wil_tx_latency_calc(wil, skb,
2320 ndev->stats.tx_errors++;
2324 wil_consume_skb(skb, d->dma.error == 0);
2326 memset(ctx, 0, sizeof(*ctx));
2327 /* Make sure the ctx is zeroed before updating the tail
2328 * to prevent a case where wil_tx_ring will see
2329 * this descriptor as used and handle it before the ctx is zeroed
2333 /* There is no need to touch the HW descriptor:
2334 * - status bit TX_DMA_STATUS_DU is set by design,
2335 * so hardware will not try to process this desc.,
2336 * - the rest of the descriptor will be initialized on Tx.
2338 vring->swtail = wil_ring_next_tail(vring);
2343 /* performance monitoring */
2344 used_new = wil_ring_used_tx(vring);
2345 if (wil_val_in_range(wil->ring_idle_trsh,
2346 used_new, used_before_complete)) {
2347 wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
2348 ringid, used_before_complete, used_new);
2349 txdata->last_idle = get_cycles();
2352 /* shall we wake net queues? */
2354 wil_update_net_queues(wil, vif, vring, false);
2359 static inline int wil_tx_init(struct wil6210_priv *wil)
2364 static inline void wil_tx_fini(struct wil6210_priv *wil) {}
2366 static void wil_get_reorder_params(struct wil6210_priv *wil,
2367 struct sk_buff *skb, int *tid, int *cid,
2368 int *mid, u16 *seq, int *mcast, int *retry)
2370 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
2372 *tid = wil_rxdesc_tid(d);
2373 *cid = wil_skb_get_cid(skb);
2374 *mid = wil_rxdesc_mid(d);
2375 *seq = wil_rxdesc_seq(d);
2376 *mcast = wil_rxdesc_mcast(d);
2377 *retry = wil_rxdesc_retry(d);
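/* wil_init_txrx_ops_legacy_dma - populate txrx_ops with the handlers for
 * the legacy (non-EDMA) DMA rings
 */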
2380 void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
2382 wil->txrx_ops.configure_interrupt_moderation =
2383 wil_configure_interrupt_moderation;
2385 wil->txrx_ops.tx_desc_map = wil_tx_desc_map;
2386 wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap;
2387 wil->txrx_ops.tx_ring_tso = __wil_tx_vring_tso;
2388 wil->txrx_ops.ring_init_tx = wil_vring_init_tx;
2389 wil->txrx_ops.ring_fini_tx = wil_vring_free;
2390 wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
2391 wil->txrx_ops.tx_init = wil_tx_init;
2392 wil->txrx_ops.tx_fini = wil_tx_fini;
2393 wil->txrx_ops.tx_ring_modify = wil_tx_vring_modify;
2395 wil->txrx_ops.rx_init = wil_rx_init;
2396 wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
2397 wil->txrx_ops.get_reorder_params = wil_get_reorder_params;
2398 wil->txrx_ops.get_netif_rx_params =
2399 wil_get_netif_rx_params;
2400 wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check;
2401 wil->txrx_ops.rx_error_check = wil_rx_error_check;
2402 wil->txrx_ops.is_rx_idle = wil_is_rx_idle;
2403 wil->txrx_ops.rx_fini = wil_rx_fini;