/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)

/* when under memory pressure, rx ring refill may fail and need a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);

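/* Look up a posted rx buffer by its DMA address. With in-order rx the
 * firmware identifies completed buffers by physical address, so each
 * buffer posted to the ring is also kept in the paddr-keyed skb_table
 * hash (see __ath10k_htt_rx_ring_fill_n() below).
 */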
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
        struct ath10k_skb_rxcb *rxcb;

        hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
                if (rxcb->paddr == paddr)
                        return ATH10K_RXCB_SKB(rxcb);

        WARN_ON_ONCE(1);
        return NULL;
}

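/* Unmap and free every buffer currently posted to the rx ring: via the
 * paddr hash when in-order rx is in use, otherwise via the netbufs_ring
 * shadow array indexed by ring position.
 */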
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct ath10k_skb_rxcb *rxcb;
        struct hlist_node *n;
        int i;

        if (htt->rx_ring.in_ord_rx) {
                hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
                        skb = ATH10K_RXCB_SKB(rxcb);
                        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);
                        hash_del(&rxcb->hlist);
                        dev_kfree_skb_any(skb);
                }
        } else {
                for (i = 0; i < htt->rx_ring.size; i++) {
                        skb = htt->rx_ring.netbufs_ring[i];
                        if (!skb)
                                continue;

                        rxcb = ATH10K_SKB_RXCB(skb);
                        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                }
        }

        htt->rx_ring.fill_cnt = 0;
        hash_init(htt->rx_ring.skb_table);
        memset(htt->rx_ring.netbufs_ring, 0,
               htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

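/* Allocate, DMA-map and publish up to @num rx buffers. The shared
 * alloc_idx word tells the target how far the host has filled the ring.
 * Since the ring size is a power of 2 the index wraps with a mask rather
 * than a modulo; as a sketch, assuming the default size of 2048 entries,
 * size_mask is 0x7ff and (2047 + 1) & size_mask wraps back to 0.
 */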
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        struct htt_rx_desc *rx_desc;
        struct ath10k_skb_rxcb *rxcb;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret = 0, idx;

        /* The Full Rx Reorder firmware has no way of telling the host
         * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
         * To keep things simple make sure the ring is always half empty.
         * This guarantees there can be no replenishment overruns.
         */
        BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

        idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
        while (num > 0) {
                skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
                if (!skb) {
                        ret = -ENOMEM;
                        goto fail;
                }

                if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
                        skb_pull(skb,
                                 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
                                 skb->data);

                /* Clear rx_desc attention word before posting to Rx ring */
                rx_desc = (struct htt_rx_desc *)skb->data;
                rx_desc->attention.flags = __cpu_to_le32(0);

                paddr = dma_map_single(htt->ar->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
                        dev_kfree_skb_any(skb);
                        ret = -ENOMEM;
                        goto fail;
                }

                rxcb = ATH10K_SKB_RXCB(skb);
                rxcb->paddr = paddr;
                htt->rx_ring.netbufs_ring[idx] = skb;
                htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
                htt->rx_ring.fill_cnt++;

                if (htt->rx_ring.in_ord_rx) {
                        hash_add(htt->rx_ring.skb_table,
                                 &ATH10K_SKB_RXCB(skb)->hlist,
                                 (u32)paddr);
                }

                num--;
                idx++;
                idx &= htt->rx_ring.size_mask;
        }

fail:
        /*
         * Make sure the rx buffer is updated before the available-buffer
         * index, to avoid any potential rx ring corruption.
         */
        mb();
        *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
        return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        lockdep_assert_held(&htt->rx_ring.lock);
        return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
        int ret, num_deficit, num_to_fill;

        /* Refilling the whole RX ring buffer proves to be a bad idea. The
         * reason is RX may take up a significant amount of CPU cycles and
         * starve other tasks, e.g. TX on an ethernet device while acting as
         * a bridge with an ath10k wlan interface. This ended up with very
         * poor performance once the CPU on the host system was overwhelmed
         * with RX on ath10k.
         *
         * By limiting the number of refills the replenishing occurs
         * progressively. This in turn makes use of the fact that tasklets
         * are processed in FIFO order. This means actual RX processing can
         * starve out refilling. If there aren't enough buffers on the RX
         * ring the FW will not report RX until it is refilled with enough
         * buffers. This automatically balances load with respect to CPU
         * power.
         *
         * This probably comes at a cost of lower maximum throughput but
         * improves the average and stability.
         */
        spin_lock_bh(&htt->rx_ring.lock);
        num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
        num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
        num_deficit -= num_to_fill;
        ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
        if (ret == -ENOMEM) {
                /*
                 * Failed to fill it to the desired level -
                 * we'll start a timer and try again next time.
                 * As long as enough buffers are left in the ring for
                 * another A-MPDU rx, no special recovery is needed.
                 */
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
        } else if (num_deficit > 0) {
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
        }
        spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)arg;

        ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
        struct ath10k_htt *htt = &ar->htt;
        int ret;

        spin_lock_bh(&htt->rx_ring.lock);
        ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
                                              htt->rx_ring.fill_cnt));
        spin_unlock_bh(&htt->rx_ring.lock);

        if (ret)
                ath10k_htt_rx_ring_free(htt);

        return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
        del_timer_sync(&htt->rx_ring.refill_retry_timer);

        skb_queue_purge(&htt->rx_compl_q);
        skb_queue_purge(&htt->rx_in_ord_compl_q);
        skb_queue_purge(&htt->tx_fetch_ind_q);

        ath10k_htt_rx_ring_free(htt);

        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);

        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);

        kfree(htt->rx_ring.netbufs_ring);
}

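/* Pop the skb at the software read index, unmap it and hand it to the
 * caller. The hardware ring slot and the netbufs_ring shadow entry are
 * cleared; the ring is deliberately not refilled here (see the comment
 * at the end of ath10k_htt_rx_amsdu_pop()).
 */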
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int idx;
        struct sk_buff *msdu;

        lockdep_assert_held(&htt->rx_ring.lock);

        if (htt->rx_ring.fill_cnt == 0) {
                ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
                return NULL;
        }

        idx = htt->rx_ring.sw_rd_idx.msdu_payld;
        msdu = htt->rx_ring.netbufs_ring[idx];
        htt->rx_ring.netbufs_ring[idx] = NULL;
        htt->rx_ring.paddrs_ring[idx] = 0;

        idx++;
        idx &= htt->rx_ring.size_mask;
        htt->rx_ring.sw_rd_idx.msdu_payld = idx;
        htt->rx_ring.fill_cnt--;

        dma_unmap_single(htt->ar->dev,
                         ATH10K_SKB_RXCB(msdu)->paddr,
                         msdu->len + skb_tailroom(msdu),
                         DMA_FROM_DEVICE);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
                        msdu->data, msdu->len + skb_tailroom(msdu));

        return msdu;
}

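/* Pop one MPDU's worth of MSDUs off the rx ring. Buffers are popped
 * until a descriptor with LAST_MSDU set is seen. An MSDU larger than one
 * buffer continues into "chained" ring-2 buffers, which carry payload
 * only (no rx descriptor); the inner while loop below drains those.
 */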
/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                   struct sk_buff_head *amsdu)
{
        struct ath10k *ar = htt->ar;
        int msdu_len, msdu_chaining = 0;
        struct sk_buff *msdu;
        struct htt_rx_desc *rx_desc;

        lockdep_assert_held(&htt->rx_ring.lock);

        for (;;) {
                int last_msdu, msdu_len_invalid, msdu_chained;

                msdu = ath10k_htt_rx_netbuf_pop(htt);
                if (!msdu) {
                        __skb_queue_purge(amsdu);
                        return -ENOENT;
                }

                __skb_queue_tail(amsdu, msdu);

                rx_desc = (struct htt_rx_desc *)msdu->data;

                /* FIXME: we must report the msdu payload since this is
                 * what the caller expects now
                 */
                skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
                skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

                /*
                 * Sanity check - confirm the HW is finished filling in the
                 * rx data.
                 * If the HW and SW are working correctly, then it's guaranteed
                 * that the HW's MAC DMA is done before this point in the SW.
                 * To prevent the case that we handle a stale Rx descriptor,
                 * just assert for now until we have a way to recover.
                 */
                if (!(__le32_to_cpu(rx_desc->attention.flags)
                                & RX_ATTENTION_FLAGS_MSDU_DONE)) {
                        __skb_queue_purge(amsdu);
                        return -EIO;
                }

                msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
                                        & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
                                           RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
                msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
                              RX_MSDU_START_INFO0_MSDU_LENGTH);
                msdu_chained = rx_desc->frag_info.ring2_more_count;

                if (msdu_len_invalid)
                        msdu_len = 0;

                skb_trim(msdu, 0);
                skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
                msdu_len -= msdu->len;

                /* Note: Chained buffers do not contain an rx descriptor */
                while (msdu_chained--) {
                        msdu = ath10k_htt_rx_netbuf_pop(htt);
                        if (!msdu) {
                                __skb_queue_purge(amsdu);
                                return -ENOENT;
                        }

                        __skb_queue_tail(amsdu, msdu);
                        skb_trim(msdu, 0);
                        skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
                        msdu_len -= msdu->len;
                        msdu_chaining = 1;
                }

                last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
                                RX_MSDU_END_INFO0_LAST_MSDU;

                trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
                                         sizeof(*rx_desc) - sizeof(u32));

                if (last_msdu)
                        break;
        }

        if (skb_queue_empty(amsdu))
                msdu_chaining = -1;

        /*
         * Don't refill the ring yet.
         *
         * First, the elements popped here are still in use - it is not
         * safe to overwrite them until the matching call to
         * mpdu_desc_list_next. Second, for efficiency it is preferable to
         * refill the rx ring with 1 PPDU's worth of rx buffers (something
         * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
         * (something like 3 buffers). Consequently, we'll rely on the txrx
         * SW to tell us when it is done pulling all the PPDU's rx buffers
         * out of the rx ring, and then refill it just once.
         */

        return msdu_chaining;
}

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
                                               u32 paddr)
{
        struct ath10k *ar = htt->ar;
        struct ath10k_skb_rxcb *rxcb;
        struct sk_buff *msdu;

        lockdep_assert_held(&htt->rx_ring.lock);

        msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
        if (!msdu)
                return NULL;

        rxcb = ATH10K_SKB_RXCB(msdu);
        hash_del(&rxcb->hlist);
        htt->rx_ring.fill_cnt--;

        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                         msdu->len + skb_tailroom(msdu),
                         DMA_FROM_DEVICE);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
                        msdu->data, msdu->len + skb_tailroom(msdu));

        return msdu;
}

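/* Walk the MSDU descriptors of an in-order rx indication and collect the
 * matching buffers from the paddr hash. For offloaded frames the rx
 * descriptor fixups and the MSDU_DONE check are skipped.
 */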
static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
                                        struct htt_rx_in_ord_ind *ev,
                                        struct sk_buff_head *list)
{
        struct ath10k *ar = htt->ar;
        struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
        struct htt_rx_desc *rxd;
        struct sk_buff *msdu;
        int msdu_count;
        bool is_offload;
        u32 paddr;

        lockdep_assert_held(&htt->rx_ring.lock);

        msdu_count = __le16_to_cpu(ev->msdu_count);
        is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

        while (msdu_count--) {
                paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

                msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
                if (!msdu) {
                        __skb_queue_purge(list);
                        return -ENOENT;
                }

                __skb_queue_tail(list, msdu);

                if (!is_offload) {
                        rxd = (void *)msdu->data;

                        trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

                        skb_put(msdu, sizeof(*rxd));
                        skb_pull(msdu, sizeof(*rxd));
                        skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

                        if (!(__le32_to_cpu(rxd->attention.flags) &
                              RX_ATTENTION_FLAGS_MSDU_DONE)) {
                                ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
                                return -EIO;
                        }
                }

                msdu_desc++;
        }

        return 0;
}

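/* Allocate the rx ring state: the netbufs_ring shadow array, the
 * DMA-coherent ring of buffer addresses shared with the target, the
 * shared alloc_idx word and the refill retry timer.
 */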
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        dma_addr_t paddr;
        void *vaddr;
        size_t size;
        struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

        htt->rx_confused = false;

        /* XXX: The fill level could be changed during runtime in response to
         * the host processing latency. Is this really worth it?
         */
        htt->rx_ring.size = HTT_RX_RING_SIZE;
        htt->rx_ring.size_mask = htt->rx_ring.size - 1;
        htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;

        if (!is_power_of_2(htt->rx_ring.size)) {
                ath10k_warn(ar, "htt rx ring size is not power of 2\n");
                return -EINVAL;
        }

        htt->rx_ring.netbufs_ring =
                kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
                        GFP_KERNEL);
        if (!htt->rx_ring.netbufs_ring)
                goto err_netbuf;

        size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

        vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
        if (!vaddr)
                goto err_dma_ring;

        htt->rx_ring.paddrs_ring = vaddr;
        htt->rx_ring.base_paddr = paddr;

        vaddr = dma_alloc_coherent(htt->ar->dev,
                                   sizeof(*htt->rx_ring.alloc_idx.vaddr),
                                   &paddr, GFP_KERNEL);
        if (!vaddr)
                goto err_dma_idx;

        htt->rx_ring.alloc_idx.vaddr = vaddr;
        htt->rx_ring.alloc_idx.paddr = paddr;
        htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
        *htt->rx_ring.alloc_idx.vaddr = 0;

        /* Initialize the Rx refill retry timer */
        setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

        spin_lock_init(&htt->rx_ring.lock);

        htt->rx_ring.fill_cnt = 0;
        htt->rx_ring.sw_rd_idx.msdu_payld = 0;
        hash_init(htt->rx_ring.skb_table);

        skb_queue_head_init(&htt->rx_compl_q);
        skb_queue_head_init(&htt->rx_in_ord_compl_q);
        skb_queue_head_init(&htt->tx_fetch_ind_q);
        atomic_set(&htt->num_mpdus_ready, 0);

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
                   htt->rx_ring.size, htt->rx_ring.fill_level);
        return 0;

err_dma_idx:
        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);
err_dma_ring:
        kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
        return -ENOMEM;
}

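/* Per-frame crypto header length in bytes for each cipher: 4 for WEP,
 * 8 for TKIP and 8 for CCMP. The matching trailer (ICV/MIC) lengths are
 * returned by ath10k_htt_rx_crypto_tail_len() below.
 */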
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
                                          enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return IEEE80211_WEP_IV_LEN;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return IEEE80211_TKIP_IV_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return IEEE80211_CCMP_HDR_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
                                         enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return IEEE80211_WEP_ICV_LEN;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return IEEE80211_TKIP_ICV_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return IEEE80211_CCMP_MIC_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}

struct amsdu_subframe_hdr {
        u8 dst[ETH_ALEN];
        u8 src[ETH_ALEN];
        __be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

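/* Derive the rate info in ieee80211_rx_status from the ppdu_start words
 * of the rx descriptor. Note that for HT the raw MCS index (0..31)
 * already encodes the stream count, so the nss computed below is
 * informational only; for VHT, MCS and NSS are reported separately.
 */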
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
                                  struct ieee80211_rx_status *status,
                                  struct htt_rx_desc *rxd)
{
        struct ieee80211_supported_band *sband;
        u8 cck, rate, bw, sgi, mcs, nss;
        u8 preamble = 0;
        u8 group_id;
        u32 info1, info2, info3;

        info1 = __le32_to_cpu(rxd->ppdu_start.info1);
        info2 = __le32_to_cpu(rxd->ppdu_start.info2);
        info3 = __le32_to_cpu(rxd->ppdu_start.info3);

        preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

        switch (preamble) {
        case HTT_RX_LEGACY:
                /* To get the legacy rate index the band is required. Since
                 * the band can't be undefined, check if freq is non-zero.
                 */
                if (!status->freq)
                        return;

                cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
                rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
                rate &= ~RX_PPDU_START_RATE_FLAG;

                sband = &ar->mac.sbands[status->band];
                status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
                break;
        case HTT_RX_HT:
        case HTT_RX_HT_WITH_TXBF:
                /* HT-SIG - Table 20-11 in info2 and info3 */
                mcs = info2 & 0x1F;
                nss = mcs >> 3;
                bw = (info2 >> 7) & 1;
                sgi = (info3 >> 7) & 1;

                status->rate_idx = mcs;
                status->flag |= RX_FLAG_HT;
                if (sgi)
                        status->flag |= RX_FLAG_SHORT_GI;
                if (bw)
                        status->flag |= RX_FLAG_40MHZ;
                break;
        case HTT_RX_VHT:
        case HTT_RX_VHT_WITH_TXBF:
                /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
                 * TODO: check this
                 */
                bw = info2 & 3;
                sgi = info3 & 1;
                group_id = (info2 >> 4) & 0x3F;

                if (GROUP_ID_IS_SU_MIMO(group_id)) {
                        mcs = (info3 >> 4) & 0x0F;
                        nss = ((info2 >> 10) & 0x07) + 1;
                } else {
                        /* Hardware doesn't decode VHT-SIG-B into the Rx
                         * descriptor so it's impossible to decode the MCS.
                         * Also, since firmware consumes Group Id Management
                         * frames, the host has no knowledge of the
                         * group/user position mapping, so it's impossible
                         * to pick the correct Nsts from VHT-SIG-A1.
                         *
                         * Bandwidth and SGI are valid so report the rate
                         * info on a best-effort basis.
                         */
                        mcs = 0;
                        nss = 1;
                }

                if (mcs > 0x09) {
                        ath10k_warn(ar, "invalid MCS received %u\n", mcs);
                        ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
                                    __le32_to_cpu(rxd->attention.flags),
                                    __le32_to_cpu(rxd->mpdu_start.info0),
                                    __le32_to_cpu(rxd->mpdu_start.info1),
                                    __le32_to_cpu(rxd->msdu_start.common.info0),
                                    __le32_to_cpu(rxd->msdu_start.common.info1),
                                    __le32_to_cpu(rxd->ppdu_start.info0),
                                    __le32_to_cpu(rxd->ppdu_start.info1),
                                    __le32_to_cpu(rxd->ppdu_start.info2),
                                    __le32_to_cpu(rxd->ppdu_start.info3),
                                    __le32_to_cpu(rxd->ppdu_start.info4));

                        ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
                                    __le32_to_cpu(rxd->msdu_end.common.info0),
                                    __le32_to_cpu(rxd->mpdu_end.info0));

                        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
                                        "rx desc msdu payload: ",
                                        rxd->msdu_payload, 50);
                }

                status->rate_idx = mcs;
                status->vht_nss = nss;

                if (sgi)
                        status->flag |= RX_FLAG_SHORT_GI;

                switch (bw) {
                /* 20MHZ */
                case 0:
                        break;
                /* 40MHZ */
                case 1:
                        status->flag |= RX_FLAG_40MHZ;
                        break;
                /* 80MHZ */
                case 2:
                        status->vht_flag |= RX_VHT_FLAG_80MHZ;
                }

                status->flag |= RX_FLAG_VHT;
                break;
        default:
                break;
        }
}

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
        struct ath10k_peer *peer;
        struct ath10k_vif *arvif;
        struct cfg80211_chan_def def;
        u16 peer_id;

        lockdep_assert_held(&ar->data_lock);

        if (!rxd)
                return NULL;

        if (rxd->attention.flags &
            __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
                return NULL;

        if (!(rxd->msdu_end.common.info0 &
              __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
                return NULL;

        peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                     RX_MPDU_START_INFO0_PEER_IDX);

        peer = ath10k_peer_find_by_id(ar, peer_id);
        if (!peer)
                return NULL;

        arvif = ath10k_get_arvif(ar, peer->vdev_id);
        if (WARN_ON_ONCE(!arvif))
                return NULL;

        if (ath10k_mac_vif_chan(arvif->vif, &def))
                return NULL;

        return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
        struct ath10k_vif *arvif;
        struct cfg80211_chan_def def;

        lockdep_assert_held(&ar->data_lock);

        list_for_each_entry(arvif, &ar->arvifs, list) {
                if (arvif->vdev_id == vdev_id &&
                    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
                        return def.chan;
        }

        return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
                              struct ieee80211_chanctx_conf *conf,
                              void *data)
{
        struct cfg80211_chan_def *def = data;

        *def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
        struct cfg80211_chan_def def = {};

        ieee80211_iter_chan_contexts_atomic(ar->hw,
                                            ath10k_htt_rx_h_any_chan_iter,
                                            &def);

        return def.chan;
}

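/* Best-effort channel inference for rx frames. The sources are tried in
 * decreasing order of confidence: scan channel, current rx channel, the
 * peer's vif channel, the vdev's channel, any active channel context
 * and finally the target operating channel.
 */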
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
                                    struct ieee80211_rx_status *status,
                                    struct htt_rx_desc *rxd,
                                    u32 vdev_id)
{
        struct ieee80211_channel *ch;

        spin_lock_bh(&ar->data_lock);
        ch = ar->scan_channel;
        if (!ch)
                ch = ar->rx_channel;
        if (!ch)
                ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
        if (!ch)
                ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
        if (!ch)
                ch = ath10k_htt_rx_h_any_channel(ar);
        if (!ch)
                ch = ar->tgt_oper_chan;
        spin_unlock_bh(&ar->data_lock);

        if (!ch)
                return false;

        status->band = ch->band;
        status->freq = ch->center_freq;

        return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
                                   struct ieee80211_rx_status *status,
                                   struct htt_rx_desc *rxd)
{
        /* FIXME: Get real NF */
        status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
                         rxd->ppdu_start.rssi_comb;
        status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
                                    struct ieee80211_rx_status *status,
                                    struct htt_rx_desc *rxd)
{
        /* FIXME: TSF is known only at the end of a PPDU, in the last MPDU.
         * This means all prior MSDUs in a PPDU are reported to mac80211
         * without the TSF. Is it worth holding frames until the end of
         * the PPDU is known?
         *
         * FIXME: Can we get/compute a 64bit TSF?
         */
        status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
        status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
                                 struct sk_buff_head *amsdu,
                                 struct ieee80211_rx_status *status,
                                 u32 vdev_id)
{
        struct sk_buff *first;
        struct htt_rx_desc *rxd;
        bool is_first_ppdu;
        bool is_last_ppdu;

        if (skb_queue_empty(amsdu))
                return;

        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);

        is_first_ppdu = !!(rxd->attention.flags &
                           __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
        is_last_ppdu = !!(rxd->attention.flags &
                          __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

        if (is_first_ppdu) {
                /* New PPDU starts so clear out the old per-PPDU status. */
                status->freq = 0;
                status->rate_idx = 0;
                status->vht_nss = 0;
                status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
                status->flag &= ~(RX_FLAG_HT |
                                  RX_FLAG_VHT |
                                  RX_FLAG_SHORT_GI |
                                  RX_FLAG_40MHZ |
                                  RX_FLAG_MACTIME_END);
                status->flag |= RX_FLAG_NO_SIGNAL_VAL;

                ath10k_htt_rx_h_signal(ar, status, rxd);
                ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
                ath10k_htt_rx_h_rates(ar, status, rxd);
        }

        if (is_last_ppdu)
                ath10k_htt_rx_h_mactime(ar, status, rxd);
}

static const char * const tid_to_ac[] = {
        "BE",
        "BK",
        "BK",
        "BE",
        "VI",
        "VI",
        "VO",
        "VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
        u8 *qc;
        int tid;

        if (!ieee80211_is_data_qos(hdr->frame_control))
                return "";

        qc = ieee80211_get_qos_ctl(hdr);
        tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
        if (tid < 8)
                snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
        else
                snprintf(out, size, "tid %d", tid);

        return out;
}

static void ath10k_process_rx(struct ath10k *ar,
                              struct ieee80211_rx_status *rx_status,
                              struct sk_buff *skb)
{
        struct ieee80211_rx_status *status;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        char tid[32];

        status = IEEE80211_SKB_RXCB(skb);
        *status = *rx_status;

        ath10k_dbg(ar, ATH10K_DBG_DATA,
                   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
                   skb,
                   skb->len,
                   ieee80211_get_SA(hdr),
                   ath10k_get_tid(hdr, tid, sizeof(tid)),
                   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
                                                        "mcast" : "ucast",
                   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
                   (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ?
                                                        "legacy" : "",
                   status->flag & RX_FLAG_HT ? "ht" : "",
                   status->flag & RX_FLAG_VHT ? "vht" : "",
                   status->flag & RX_FLAG_40MHZ ? "40" : "",
                   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
                   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
                   status->rate_idx,
                   status->vht_nss,
                   status->freq,
                   status->band, status->flag,
                   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
                   !!(status->flag & RX_FLAG_MMIC_ERROR),
                   !!(status->flag & RX_FLAG_AMSDU_MORE));
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
                        skb->data, skb->len);
        trace_ath10k_rx_hdr(ar, skb->data, skb->len);
        trace_ath10k_rx_payload(ar, skb->data, skb->len);

        ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
                                      struct ieee80211_hdr *hdr)
{
        int len = ieee80211_hdrlen(hdr->frame_control);

        if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
                      ar->running_fw->fw_file.fw_features))
                len = round_up(len, 4);

        return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
                                        struct sk_buff *msdu,
                                        struct ieee80211_rx_status *status,
                                        enum htt_rx_mpdu_encrypt_type enctype,
                                        bool is_decrypted)
{
        struct ieee80211_hdr *hdr;
        struct htt_rx_desc *rxd;
        size_t hdr_len;
        size_t crypto_len;
        bool is_first;
        bool is_last;

        rxd = (void *)msdu->data - sizeof(*rxd);
        is_first = !!(rxd->msdu_end.common.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
        is_last = !!(rxd->msdu_end.common.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

        /* Delivered decapped frame:
         * [802.11 header]
         * [crypto param] <-- can be trimmed if !fcs_err &&
         *                    !decrypt_err && !peer_idx_invalid
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         * [payload]
         * [FCS] <-- at end, needs to be trimmed
         */

        /* This probably shouldn't happen but warn just in case */
        if (unlikely(WARN_ON_ONCE(!is_first)))
                return;

        /* This probably shouldn't happen but warn just in case */
        if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
                return;

        skb_trim(msdu, msdu->len - FCS_LEN);

        /* In most cases this will be true for sniffed frames. It makes sense
         * to deliver them as-is without stripping the crypto param. This is
         * necessary for software based decryption.
         *
         * If there's no error then the frame is decrypted. At least that is
         * the case for frames that come in via fragmented rx indication.
         */
        if (!is_decrypted)
                return;

        /* The payload is decrypted so strip crypto params. Start from the
         * tail since hdr is used to compute the header and crypto lengths.
         */

        hdr = (void *)msdu->data;

        /* Tail */
        if (status->flag & RX_FLAG_IV_STRIPPED)
                skb_trim(msdu, msdu->len -
                         ath10k_htt_rx_crypto_tail_len(ar, enctype));

        /* MMIC */
        if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
            !ieee80211_has_morefrags(hdr->frame_control) &&
            enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
                skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

        /* Head */
        if (status->flag & RX_FLAG_IV_STRIPPED) {
                hdr_len = ieee80211_hdrlen(hdr->frame_control);
                crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

                memmove((void *)msdu->data + crypto_len,
                        (void *)msdu->data, hdr_len);
                skb_pull(msdu, crypto_len);
        }
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
                                          struct sk_buff *msdu,
                                          struct ieee80211_rx_status *status,
                                          const u8 first_hdr[64])
{
        struct ieee80211_hdr *hdr;
        struct htt_rx_desc *rxd;
        size_t hdr_len;
        u8 da[ETH_ALEN];
        u8 sa[ETH_ALEN];
        int l3_pad_bytes;

        /* Delivered decapped frame:
         * [nwifi 802.11 header] <-- replaced with 802.11 hdr
         * [rfc1042/llc]
         *
         * Note: The nwifi header doesn't have QoS Control and is
         * (always?) a 3addr frame.
         *
         * Note2: There's no A-MSDU subframe header. Even if it's part
         * of an A-MSDU.
         */

        /* pull decapped header and copy SA & DA */
        rxd = (void *)msdu->data - sizeof(*rxd);

        l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
        skb_put(msdu, l3_pad_bytes);

        hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

        hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
        ether_addr_copy(da, ieee80211_get_DA(hdr));
        ether_addr_copy(sa, ieee80211_get_SA(hdr));
        skb_pull(msdu, hdr_len);

        /* push original 802.11 header */
        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

        /* original 802.11 header has a different DA and in
         * case of 4addr it may also have different SA
         */
        hdr = (struct ieee80211_hdr *)msdu->data;
        ether_addr_copy(ieee80211_get_DA(hdr), da);
        ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

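/* Locate the rfc1042/llc header inside the raw 802.11 header snapshot
 * kept in rx_hdr_status: for the first MSDU it follows the (aligned)
 * 802.11 header and crypto parameter; for A-MSDU subframes it
 * additionally follows the A-MSDU subframe header.
 */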
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
                                          struct sk_buff *msdu,
                                          enum htt_rx_mpdu_encrypt_type enctype)
{
        struct ieee80211_hdr *hdr;
        struct htt_rx_desc *rxd;
        size_t hdr_len, crypto_len;
        void *rfc1042;
        bool is_first, is_last, is_amsdu;
        int bytes_aligned = ar->hw_params.decap_align_bytes;

        rxd = (void *)msdu->data - sizeof(*rxd);
        hdr = (void *)rxd->rx_hdr_status;

        is_first = !!(rxd->msdu_end.common.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
        is_last = !!(rxd->msdu_end.common.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
        is_amsdu = !(is_first && is_last);

        rfc1042 = hdr;

        if (is_first) {
                hdr_len = ieee80211_hdrlen(hdr->frame_control);
                crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

                rfc1042 += round_up(hdr_len, bytes_aligned) +
                           round_up(crypto_len, bytes_aligned);
        }

        if (is_amsdu)
                rfc1042 += sizeof(struct amsdu_subframe_hdr);

        return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
                                        struct sk_buff *msdu,
                                        struct ieee80211_rx_status *status,
                                        const u8 first_hdr[64],
                                        enum htt_rx_mpdu_encrypt_type enctype)
{
        struct ieee80211_hdr *hdr;
        struct ethhdr *eth;
        size_t hdr_len;
        void *rfc1042;
        u8 da[ETH_ALEN];
        u8 sa[ETH_ALEN];
        int l3_pad_bytes;
        struct htt_rx_desc *rxd;

        /* Delivered decapped frame:
         * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
         * [payload]
         */

        rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
        if (WARN_ON_ONCE(!rfc1042))
                return;

        rxd = (void *)msdu->data - sizeof(*rxd);
        l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
        skb_put(msdu, l3_pad_bytes);
        skb_pull(msdu, l3_pad_bytes);

        /* pull decapped header and copy SA & DA */
        eth = (struct ethhdr *)msdu->data;
        ether_addr_copy(da, eth->h_dest);
        ether_addr_copy(sa, eth->h_source);
        skb_pull(msdu, sizeof(struct ethhdr));

        /* push rfc1042/llc/snap */
        memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
               sizeof(struct rfc1042_hdr));

        /* push original 802.11 header */
        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

        /* original 802.11 header has a different DA and in
         * case of 4addr it may also have different SA
         */
        hdr = (struct ieee80211_hdr *)msdu->data;
        ether_addr_copy(ieee80211_get_DA(hdr), da);
        ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
                                         struct sk_buff *msdu,
                                         struct ieee80211_rx_status *status,
                                         const u8 first_hdr[64])
{
        struct ieee80211_hdr *hdr;
        size_t hdr_len;
        int l3_pad_bytes;
        struct htt_rx_desc *rxd;

        /* Delivered decapped frame:
         * [amsdu header] <-- replaced with 802.11 hdr
         * [rfc1042/llc]
         * [payload]
         */

        rxd = (void *)msdu->data - sizeof(*rxd);
        l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

        skb_put(msdu, l3_pad_bytes);
        skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
                                    struct sk_buff *msdu,
                                    struct ieee80211_rx_status *status,
                                    u8 first_hdr[64],
                                    enum htt_rx_mpdu_encrypt_type enctype,
                                    bool is_decrypted)
{
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format decap;

        /* First msdu's decapped header:
         * [802.11 header] <-- padded to 4 bytes long
         * [crypto param] <-- padded to 4 bytes long
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         *
         * Other (2nd, 3rd, ..) msdu's decapped header:
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         */

        rxd = (void *)msdu->data - sizeof(*rxd);
        decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
                   RX_MSDU_START_INFO1_DECAP_FORMAT);

        switch (decap) {
        case RX_MSDU_DECAP_RAW:
                ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
                                            is_decrypted);
                break;
        case RX_MSDU_DECAP_NATIVE_WIFI:
                ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
                break;
        case RX_MSDU_DECAP_ETHERNET2_DIX:
                ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
                break;
        case RX_MSDU_DECAP_8023_SNAP_LLC:
                ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
                break;
        }
}

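/* Translate the hardware checksum verdict for the stack: return
 * CHECKSUM_UNNECESSARY only when the frame is IPv4/IPv6 TCP or UDP and
 * neither the IP nor the TCP/UDP checksum attention bits report a
 * failure; return CHECKSUM_NONE otherwise.
 */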
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        u32 flags, info;
        bool is_ip4, is_ip6;
        bool is_tcp, is_udp;
        bool ip_csum_ok, tcpudp_csum_ok;

        rxd = (void *)skb->data - sizeof(*rxd);
        flags = __le32_to_cpu(rxd->attention.flags);
        info = __le32_to_cpu(rxd->msdu_start.common.info1);

        is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
        is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
        is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
        is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
        ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
        tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

        if (!is_ip4 && !is_ip6)
                return CHECKSUM_NONE;
        if (!is_tcp && !is_udp)
                return CHECKSUM_NONE;
        if (!ip_csum_ok)
                return CHECKSUM_NONE;
        if (!tcpudp_csum_ok)
                return CHECKSUM_NONE;

        return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
        msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
                                 struct sk_buff_head *amsdu,
                                 struct ieee80211_rx_status *status)
{
        struct sk_buff *first;
        struct sk_buff *last;
        struct sk_buff *msdu;
        struct htt_rx_desc *rxd;
        struct ieee80211_hdr *hdr;
        enum htt_rx_mpdu_encrypt_type enctype;
        u8 first_hdr[64];
        u8 *qos;
        size_t hdr_len;
        bool has_fcs_err;
        bool has_crypto_err;
        bool has_tkip_err;
        bool has_peer_idx_invalid;
        bool is_decrypted;
        bool is_mgmt;
        u32 attention;

        if (skb_queue_empty(amsdu))
                return;

        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);

        is_mgmt = !!(rxd->attention.flags &
                     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

        /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
         * decapped header. It'll be used for undecapping of each MSDU.
         */
        hdr = (void *)rxd->rx_hdr_status;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(first_hdr, hdr, hdr_len);

        /* Each A-MSDU subframe will use the original header as the base and be
         * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
         */
        hdr = (void *)first_hdr;
        qos = ieee80211_get_qos_ctl(hdr);
        qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

        /* Some attention flags are valid only in the last MSDU. */
        last = skb_peek_tail(amsdu);
        rxd = (void *)last->data - sizeof(*rxd);
        attention = __le32_to_cpu(rxd->attention.flags);

        has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
        has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
        has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
        has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

        /* Note: If hardware captures an encrypted frame that it can't decrypt,
         * e.g. due to fcs error, missing peer or invalid key data, it will
         * report the frame as raw.
         */
        is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
                        !has_fcs_err &&
                        !has_crypto_err &&
                        !has_peer_idx_invalid);

        /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
        status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
                          RX_FLAG_MMIC_ERROR |
                          RX_FLAG_DECRYPTED |
                          RX_FLAG_IV_STRIPPED |
                          RX_FLAG_ONLY_MONITOR |
                          RX_FLAG_MMIC_STRIPPED);

        if (has_fcs_err)
                status->flag |= RX_FLAG_FAILED_FCS_CRC;

        if (has_tkip_err)
                status->flag |= RX_FLAG_MMIC_ERROR;

        /* Firmware reports all necessary management frames via WMI already.
         * They are not reported to monitor interfaces at all so pass the ones
         * coming via HTT to monitor interfaces instead. This simplifies
         * matters a lot.
         */
        if (is_mgmt)
                status->flag |= RX_FLAG_ONLY_MONITOR;

        if (is_decrypted) {
                status->flag |= RX_FLAG_DECRYPTED;

                if (likely(!is_mgmt))
                        status->flag |= RX_FLAG_IV_STRIPPED |
                                        RX_FLAG_MMIC_STRIPPED;
        }

        skb_queue_walk(amsdu, msdu) {
                ath10k_htt_rx_h_csum_offload(msdu);
                ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
                                        is_decrypted);

                /* Undecapping involves copying the original 802.11 header back
                 * to sk_buff. If frame is protected and hardware has decrypted
                 * it then remove the protected bit.
                 */
                if (!is_decrypted)
                        continue;
                if (is_mgmt)
                        continue;

                hdr = (void *)msdu->data;
                hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
        }
}
1404
1405 static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
1406                                     struct sk_buff_head *amsdu,
1407                                     struct ieee80211_rx_status *status)
1408 {
1409         struct sk_buff *msdu;
1410
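             /* RX_FLAG_AMSDU_MORE tells mac80211 that more subframes of the
              * same A-MSDU follow; it is cleared for the last MSDU dequeued.
              */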
1411         while ((msdu = __skb_dequeue(amsdu))) {
1412                 /* Setup per-MSDU flags */
1413                 if (skb_queue_empty(amsdu))
1414                         status->flag &= ~RX_FLAG_AMSDU_MORE;
1415                 else
1416                         status->flag |= RX_FLAG_AMSDU_MORE;
1417
1418                 ath10k_process_rx(ar, status, msdu);
1419         }
1420 }
1421
1422 static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
1423 {
1424         struct sk_buff *skb, *first;
1425         int space;
1426         int total_len = 0;
1427
1428         /* TODO: We might be able to optimize this by using
1429          * skb_try_coalesce or a similar method to
1430          * decrease copying, or maybe get mac80211 to
1431          * provide a way to just receive a list of
1432          * skbs?
1433          */
1434
1435         first = __skb_dequeue(amsdu);
1436
1437         /* Allocate total length all at once. */
1438         skb_queue_walk(amsdu, skb)
1439                 total_len += skb->len;
1440
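             /* Grow the first skb's tailroom only if the remaining fragments
              * don't already fit into it.
              */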
1441         space = total_len - skb_tailroom(first);
1442         if ((space > 0) &&
1443             (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
1444                 /* TODO:  bump some rx-oom error stat */
1445                 /* put it back together so we can free the
1446                  * whole list at once.
1447                  */
1448                 __skb_queue_head(amsdu, first);
1449                 return -1;
1450         }
1451
1452         /* Walk list again, copying contents into
1453          * msdu_head
1454          */
1455         while ((skb = __skb_dequeue(amsdu))) {
1456                 skb_copy_from_linear_data(skb, skb_put(first, skb->len),
1457                                           skb->len);
1458                 dev_kfree_skb_any(skb);
1459         }
1460
1461         __skb_queue_head(amsdu, first);
1462         return 0;
1463 }
1464
1465 static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
1466                                     struct sk_buff_head *amsdu,
1467                                     bool chained)
1468 {
1469         struct sk_buff *first;
1470         struct htt_rx_desc *rxd;
1471         enum rx_msdu_decap_format decap;
1472
1473         first = skb_peek(amsdu);
1474         rxd = (void *)first->data - sizeof(*rxd);
1475         decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
1476                    RX_MSDU_START_INFO1_DECAP_FORMAT);
1477
1478         if (!chained)
1479                 return;
1480
1481         /* FIXME: Current unchaining logic can only handle simple case of raw
1482          * msdu chaining. If decapping is other than raw the chaining may be
1483          * more complex and this isn't handled by the current code. Don't even
1484          * try re-constructing such frames - it'll be pretty much garbage.
1485          */
1486         if (decap != RX_MSDU_DECAP_RAW ||
1487             skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
1488                 __skb_queue_purge(amsdu);
1489                 return;
1490         }
1491
1492         ath10k_unchain_msdu(amsdu);
1493 }
1494
1495 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
1496                                         struct sk_buff_head *amsdu,
1497                                         struct ieee80211_rx_status *rx_status)
1498 {
1499         /* FIXME: It might be a good idea to do some fuzzy-testing to drop
1500          * invalid/dangerous frames.
1501          */
1502
1503         if (!rx_status->freq) {
1504                 ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
1505                 return false;
1506         }
1507
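             /* Drop all frames while the channel availability check (CAC) is
              * running.
              */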
1508         if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
1509                 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
1510                 return false;
1511         }
1512
1513         return true;
1514 }
1515
1516 static void ath10k_htt_rx_h_filter(struct ath10k *ar,
1517                                    struct sk_buff_head *amsdu,
1518                                    struct ieee80211_rx_status *rx_status)
1519 {
1520         if (skb_queue_empty(amsdu))
1521                 return;
1522
1523         if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
1524                 return;
1525
1526         __skb_queue_purge(amsdu);
1527 }
1528
1529 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
1530 {
1531         struct ath10k *ar = htt->ar;
1532         struct ieee80211_rx_status *rx_status = &htt->rx_status;
1533         struct sk_buff_head amsdu;
1534         int ret, num_msdus;
1535
1536         __skb_queue_head_init(&amsdu);
1537
1538         spin_lock_bh(&htt->rx_ring.lock);
1539         if (htt->rx_confused) {
1540                 spin_unlock_bh(&htt->rx_ring.lock);
1541                 return -EIO;
1542         }
1543         ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
1544         spin_unlock_bh(&htt->rx_ring.lock);
1545
1546         if (ret < 0) {
1547                 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
1548                 __skb_queue_purge(&amsdu);
1549                 /* FIXME: It's probably a good idea to reboot the
1550                  * device instead of leaving it inoperable.
1551                  */
1552                 htt->rx_confused = true;
1553                 return ret;
1554         }
1555
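             /* The rx processing pipeline: derive PPDU status, unchain
              * fragmented MSDUs, filter, apply per-MPDU fix-ups and finally
              * deliver the frames to mac80211.
              */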
1556         num_msdus = skb_queue_len(&amsdu);
1557         ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
1558         ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
1559         ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1560         ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
1561         ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
1562
1563         return num_msdus;
1564 }
1565
1566 static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
1567                                       struct htt_rx_indication *rx)
1568 {
1569         struct ath10k *ar = htt->ar;
1570         struct htt_rx_indication_mpdu_range *mpdu_ranges;
1571         int num_mpdu_ranges;
1572         int i, mpdu_count = 0;
1573
1574         num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
1575                              HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
1576         mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
1577
1578         ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
1579                         rx, sizeof(*rx) +
1580                         (sizeof(struct htt_rx_indication_mpdu_range) *
1581                                 num_mpdu_ranges));
1582
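             /* Only the MPDU count is taken here; the actual rx processing is
              * deferred to the NAPI poll via num_mpdus_ready.
              */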
1583         for (i = 0; i < num_mpdu_ranges; i++)
1584                 mpdu_count += mpdu_ranges[i].mpdu_count;
1585
1586         atomic_add(mpdu_count, &htt->num_mpdus_ready);
1587 }
1588
1589 static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
1590                                        struct sk_buff *skb)
1591 {
1592         struct ath10k_htt *htt = &ar->htt;
1593         struct htt_resp *resp = (struct htt_resp *)skb->data;
1594         struct htt_tx_done tx_done = {};
1595         int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
1596         __le16 msdu_id;
1597         int i;
1598
1599         switch (status) {
1600         case HTT_DATA_TX_STATUS_NO_ACK:
1601                 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
1602                 break;
1603         case HTT_DATA_TX_STATUS_OK:
1604                 tx_done.status = HTT_TX_COMPL_STATE_ACK;
1605                 break;
1606         case HTT_DATA_TX_STATUS_DISCARD:
1607         case HTT_DATA_TX_STATUS_POSTPONE:
1608         case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
1609                 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
1610                 break;
1611         default:
1612                 ath10k_warn(ar, "unhandled tx completion status %d\n", status);
1613                 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
1614                 break;
1615         }
1616
1617         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
1618                    resp->data_tx_completion.num_msdus);
1619
1620         for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
1621                 msdu_id = resp->data_tx_completion.msdus[i];
1622                 tx_done.msdu_id = __le16_to_cpu(msdu_id);
1623
1624                 /* kfifo_put: In practice firmware shouldn't fire off per-CE
1625                  * interrupt and main interrupt (MSI/-X range case) for the same
1626                  * HTC service so it should be safe to use kfifo_put w/o lock.
1627                  *
1628                  * From kfifo_put() documentation:
1629                  *  Note that with only one concurrent reader and one concurrent
1630                  *  writer, you don't need extra locking to use these macro.
1631                  */
1632                 if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
1633                         ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
1634                                     tx_done.msdu_id, tx_done.status);
1635                         ath10k_txrx_tx_unref(htt, &tx_done);
1636                 }
1637         }
1638 }
1639
1640 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
1641 {
1642         struct htt_rx_addba *ev = &resp->rx_addba;
1643         struct ath10k_peer *peer;
1644         struct ath10k_vif *arvif;
1645         u16 info0, tid, peer_id;
1646
1647         info0 = __le16_to_cpu(ev->info0);
1648         tid = MS(info0, HTT_RX_BA_INFO0_TID);
1649         peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1650
1651         ath10k_dbg(ar, ATH10K_DBG_HTT,
1652                    "htt rx addba tid %hu peer_id %hu size %hhu\n",
1653                    tid, peer_id, ev->window_size);
1654
1655         spin_lock_bh(&ar->data_lock);
1656         peer = ath10k_peer_find_by_id(ar, peer_id);
1657         if (!peer) {
1658                 ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
1659                             peer_id);
1660                 spin_unlock_bh(&ar->data_lock);
1661                 return;
1662         }
1663
1664         arvif = ath10k_get_arvif(ar, peer->vdev_id);
1665         if (!arvif) {
1666                 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
1667                             peer->vdev_id);
1668                 spin_unlock_bh(&ar->data_lock);
1669                 return;
1670         }
1671
1672         ath10k_dbg(ar, ATH10K_DBG_HTT,
1673                    "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
1674                    peer->addr, tid, ev->window_size);
1675
1676         ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1677         spin_unlock_bh(&ar->data_lock);
1678 }
1679
1680 static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
1681 {
1682         struct htt_rx_delba *ev = &resp->rx_delba;
1683         struct ath10k_peer *peer;
1684         struct ath10k_vif *arvif;
1685         u16 info0, tid, peer_id;
1686
1687         info0 = __le16_to_cpu(ev->info0);
1688         tid = MS(info0, HTT_RX_BA_INFO0_TID);
1689         peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1690
1691         ath10k_dbg(ar, ATH10K_DBG_HTT,
1692                    "htt rx delba tid %hu peer_id %hu\n",
1693                    tid, peer_id);
1694
1695         spin_lock_bh(&ar->data_lock);
1696         peer = ath10k_peer_find_by_id(ar, peer_id);
1697         if (!peer) {
1698                 ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
1699                             peer_id);
1700                 spin_unlock_bh(&ar->data_lock);
1701                 return;
1702         }
1703
1704         arvif = ath10k_get_arvif(ar, peer->vdev_id);
1705         if (!arvif) {
1706                 ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
1707                             peer->vdev_id);
1708                 spin_unlock_bh(&ar->data_lock);
1709                 return;
1710         }
1711
1712         ath10k_dbg(ar, ATH10K_DBG_HTT,
1713                    "htt rx stop rx ba session sta %pM tid %hu\n",
1714                    peer->addr, tid);
1715
1716         ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1717         spin_unlock_bh(&ar->data_lock);
1718 }
1719
1720 static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
1721                                        struct sk_buff_head *amsdu)
1722 {
1723         struct sk_buff *msdu;
1724         struct htt_rx_desc *rxd;
1725
1726         if (skb_queue_empty(list))
1727                 return -ENOBUFS;
1728
1729         if (WARN_ON(!skb_queue_empty(amsdu)))
1730                 return -EINVAL;
1731
1732         while ((msdu = __skb_dequeue(list))) {
1733                 __skb_queue_tail(amsdu, msdu);
1734
1735                 rxd = (void *)msdu->data - sizeof(*rxd);
1736                 if (rxd->msdu_end.common.info0 &
1737                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
1738                         break;
1739         }
1740
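             /* If the list ran out before an MSDU with the LAST_MSDU bit was
              * seen the A-MSDU is incomplete. Put everything back so the
              * caller can retry once more frames have arrived.
              */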
1741         msdu = skb_peek_tail(amsdu);
1742         rxd = (void *)msdu->data - sizeof(*rxd);
1743         if (!(rxd->msdu_end.common.info0 &
1744               __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
1745                 skb_queue_splice_init(amsdu, list);
1746                 return -EAGAIN;
1747         }
1748
1749         return 0;
1750 }
1751
1752 static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
1753                                             struct sk_buff *skb)
1754 {
1755         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1756
1757         if (!ieee80211_has_protected(hdr->frame_control))
1758                 return;
1759
1760         /* Offloaded frames are already decrypted but firmware insists they are
1761          * protected in the 802.11 header. Strip the flag.  Otherwise mac80211
1762          * will drop the frame.
1763          */
1764
1765         hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1766         status->flag |= RX_FLAG_DECRYPTED |
1767                         RX_FLAG_IV_STRIPPED |
1768                         RX_FLAG_MMIC_STRIPPED;
1769 }
1770
1771 static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
1772                                       struct sk_buff_head *list)
1773 {
1774         struct ath10k_htt *htt = &ar->htt;
1775         struct ieee80211_rx_status *status = &htt->rx_status;
1776         struct htt_rx_offload_msdu *rx;
1777         struct sk_buff *msdu;
1778         size_t offset;
1779         int num_msdu = 0;
1780
1781         while ((msdu = __skb_dequeue(list))) {
1782                 /* Offloaded frames don't have Rx descriptor. Instead they have
1783                  * a short meta information header.
1784                  */
1785
1786                 rx = (void *)msdu->data;
1787
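                     /* Account for the meta header and then advance past it
                      * so msdu->data points at the payload.
                      */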
1788                 skb_put(msdu, sizeof(*rx));
1789                 skb_pull(msdu, sizeof(*rx));
1790
1791                 if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
1792                         ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
1793                         dev_kfree_skb_any(msdu);
1794                         continue;
1795                 }
1796
1797                 skb_put(msdu, __le16_to_cpu(rx->msdu_len));
1798
1799                 /* The offloaded rx header length isn't a multiple of 2 or 4
1800                  * so the actual payload is unaligned. Align the frame.
1801                  * Otherwise mac80211 complains. This shouldn't reduce
1802                  * performance much because these offloaded frames are rare.
1803                  */
1804                 offset = 4 - ((unsigned long)msdu->data & 3);
1805                 skb_put(msdu, offset);
1806                 memmove(msdu->data + offset, msdu->data, msdu->len);
1807                 skb_pull(msdu, offset);
1808
1809                 /* FIXME: The frame is NWifi. Re-construct QoS Control
1810                  * if possible later.
1811                  */
1812
1813                 memset(status, 0, sizeof(*status));
1814                 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1815
1816                 ath10k_htt_rx_h_rx_offload_prot(status, msdu);
1817                 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
1818                 ath10k_process_rx(ar, status, msdu);
1819                 num_msdu++;
1820         }
1821         return num_msdu;
1822 }
1823
1824 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
1825 {
1826         struct ath10k_htt *htt = &ar->htt;
1827         struct htt_resp *resp = (void *)skb->data;
1828         struct ieee80211_rx_status *status = &htt->rx_status;
1829         struct sk_buff_head list;
1830         struct sk_buff_head amsdu;
1831         u16 peer_id;
1832         u16 msdu_count;
1833         u8 vdev_id;
1834         u8 tid;
1835         bool offload;
1836         bool frag;
1837         int ret, num_msdus = 0;
1838
1839         lockdep_assert_held(&htt->rx_ring.lock);
1840
1841         if (htt->rx_confused)
1842                 return -EIO;
1843
1844         skb_pull(skb, sizeof(resp->hdr));
1845         skb_pull(skb, sizeof(resp->rx_in_ord_ind));
1846
1847         peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
1848         msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
1849         vdev_id = resp->rx_in_ord_ind.vdev_id;
1850         tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
1851         offload = !!(resp->rx_in_ord_ind.info &
1852                         HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
1853         frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
1854
1855         ath10k_dbg(ar, ATH10K_DBG_HTT,
1856                    "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
1857                    vdev_id, peer_id, tid, offload, frag, msdu_count);
1858
1859         if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
1860                 ath10k_warn(ar, "dropping invalid in order rx indication\n");
1861                 return -EINVAL;
1862         }
1863
1864         /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
1865          * extracted and processed.
1866          */
1867         __skb_queue_head_init(&list);
1868         ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
1869         if (ret < 0) {
1870                 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
1871                 htt->rx_confused = true;
1872                 return -EIO;
1873         }
1874
1875         /* Offloaded frames are very different and need to be handled
1876          * separately.
1877          */
1878         if (offload)
1879                 num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
1880
1881         while (!skb_queue_empty(&list)) {
1882                 __skb_queue_head_init(&amsdu);
1883                 ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
1884                 switch (ret) {
1885                 case 0:
1886                         /* Note: The in-order indication may report interleaved
1887                          * frames from different PPDUs meaning reported rx rate
1888                          * to mac80211 isn't accurate/reliable. It's still
1889                          * better to report something than nothing though. This
1890                          * should still give an idea about rx rate to the user.
1891                          */
1892                         num_msdus += skb_queue_len(&amsdu);
1893                         ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
1894                         ath10k_htt_rx_h_filter(ar, &amsdu, status);
1895                         ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
1896                         ath10k_htt_rx_h_deliver(ar, &amsdu, status);
1897                         break;
1898                 case -EAGAIN:
1899                         /* fall through */
1900                 default:
1901                         /* Should not happen. */
1902                         ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
1903                         htt->rx_confused = true;
1904                         __skb_queue_purge(&list);
1905                         return -EIO;
1906                 }
1907         }
1908         return num_msdus;
1909 }
1910
1911 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
1912                                                    const __le32 *resp_ids,
1913                                                    int num_resp_ids)
1914 {
1915         int i;
1916         u32 resp_id;
1917
1918         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
1919                    num_resp_ids);
1920
1921         for (i = 0; i < num_resp_ids; i++) {
1922                 resp_id = le32_to_cpu(resp_ids[i]);
1923
1924                 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
1925                            resp_id);
1926
1927                 /* TODO: free resp_id */
1928         }
1929 }
1930
1931 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
1932 {
1933         struct ieee80211_hw *hw = ar->hw;
1934         struct ieee80211_txq *txq;
1935         struct htt_resp *resp = (struct htt_resp *)skb->data;
1936         struct htt_tx_fetch_record *record;
1937         size_t len;
1938         size_t max_num_bytes;
1939         size_t max_num_msdus;
1940         size_t num_bytes;
1941         size_t num_msdus;
1942         const __le32 *resp_ids;
1943         u16 num_records;
1944         u16 num_resp_ids;
1945         u16 peer_id;
1946         u8 tid;
1947         int ret;
1948         int i;
1949
1950         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
1951
1952         len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
1953         if (unlikely(skb->len < len)) {
1954                 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
1955                 return;
1956         }
1957
1958         num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
1959         num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
1960
1961         len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
1962         len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
1963
1964         if (unlikely(skb->len < len)) {
1965                 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
1966                 return;
1967         }
1968
1969         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
1970                    num_records, num_resp_ids,
1971                    le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
1972
1973         if (!ar->htt.tx_q_state.enabled) {
1974                 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
1975                 return;
1976         }
1977
1978         if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
1979                 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
1980                 return;
1981         }
1982
1983         rcu_read_lock();
1984
1985         for (i = 0; i < num_records; i++) {
1986                 record = &resp->tx_fetch_ind.records[i];
1987                 peer_id = MS(le16_to_cpu(record->info),
1988                              HTT_TX_FETCH_RECORD_INFO_PEER_ID);
1989                 tid = MS(le16_to_cpu(record->info),
1990                          HTT_TX_FETCH_RECORD_INFO_TID);
1991                 max_num_msdus = le16_to_cpu(record->num_msdus);
1992                 max_num_bytes = le32_to_cpu(record->num_bytes);
1993
1994                 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
1995                            i, peer_id, tid, max_num_msdus, max_num_bytes);
1996
1997                 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
1998                     unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
1999                         ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2000                                     peer_id, tid);
2001                         continue;
2002                 }
2003
2004                 spin_lock_bh(&ar->data_lock);
2005                 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2006                 spin_unlock_bh(&ar->data_lock);
2007
2008                 /* It is okay to release the lock and use txq because RCU read
2009                  * lock is held.
2010                  */
2011
2012                 if (unlikely(!txq)) {
2013                         ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2014                                     peer_id, tid);
2015                         continue;
2016                 }
2017
2018                 num_msdus = 0;
2019                 num_bytes = 0;
2020
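                     /* Push frames until either limit from the fetch record
                      * is reached or the txq can't deliver any more frames.
                      */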
2021                 while (num_msdus < max_num_msdus &&
2022                        num_bytes < max_num_bytes) {
2023                         ret = ath10k_mac_tx_push_txq(hw, txq);
2024                         if (ret < 0)
2025                                 break;
2026
2027                         num_msdus++;
2028                         num_bytes += ret;
2029                 }
2030
2031                 record->num_msdus = cpu_to_le16(num_msdus);
2032                 record->num_bytes = cpu_to_le32(num_bytes);
2033
2034                 ath10k_htt_tx_txq_recalc(hw, txq);
2035         }
2036
2037         rcu_read_unlock();
2038
2039         resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
2040         ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
2041
2042         ret = ath10k_htt_tx_fetch_resp(ar,
2043                                        resp->tx_fetch_ind.token,
2044                                        resp->tx_fetch_ind.fetch_seq_num,
2045                                        resp->tx_fetch_ind.records,
2046                                        num_records);
2047         if (unlikely(ret)) {
2048                 ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
2049                             le32_to_cpu(resp->tx_fetch_ind.token), ret);
2050                 /* FIXME: request fw restart */
2051         }
2052
2053         ath10k_htt_tx_txq_sync(ar);
2054 }
2055
2056 static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
2057                                            struct sk_buff *skb)
2058 {
2059         const struct htt_resp *resp = (void *)skb->data;
2060         size_t len;
2061         int num_resp_ids;
2062
2063         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
2064
2065         len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
2066         if (unlikely(skb->len < len)) {
2067                 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
2068                 return;
2069         }
2070
2071         num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
2072         len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
2073
2074         if (unlikely(skb->len < len)) {
2075                 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
2076                 return;
2077         }
2078
2079         ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
2080                                                resp->tx_fetch_confirm.resp_ids,
2081                                                num_resp_ids);
2082 }
2083
2084 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
2085                                              struct sk_buff *skb)
2086 {
2087         const struct htt_resp *resp = (void *)skb->data;
2088         const struct htt_tx_mode_switch_record *record;
2089         struct ieee80211_txq *txq;
2090         struct ath10k_txq *artxq;
2091         size_t len;
2092         size_t num_records;
2093         enum htt_tx_mode_switch_mode mode;
2094         bool enable;
2095         u16 info0;
2096         u16 info1;
2097         u16 threshold;
2098         u16 peer_id;
2099         u8 tid;
2100         int i;
2101
2102         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
2103
2104         len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
2105         if (unlikely(skb->len < len)) {
2106                 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
2107                 return;
2108         }
2109
2110         info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
2111         info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
2112
2113         enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
2114         num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
2115         mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
2116         threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
2117
2118         ath10k_dbg(ar, ATH10K_DBG_HTT,
2119                    "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
2120                    info0, info1, enable, num_records, mode, threshold);
2121
2122         len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
2123
2124         if (unlikely(skb->len < len)) {
2125                 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: too many records\n");
2126                 return;
2127         }
2128
2129         switch (mode) {
2130         case HTT_TX_MODE_SWITCH_PUSH:
2131         case HTT_TX_MODE_SWITCH_PUSH_PULL:
2132                 break;
2133         default:
2134                 ath10k_warn(ar, "received invalid tx_mode_switch_ind mode %d, ignoring\n",
2135                             mode);
2136                 return;
2137         }
2138
2139         if (!enable)
2140                 return;
2141
2142         ar->htt.tx_q_state.enabled = enable;
2143         ar->htt.tx_q_state.mode = mode;
2144         ar->htt.tx_q_state.num_push_allowed = threshold;
2145
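             /* Update the per-txq push limits from the records. The RCU read
              * lock keeps the txqs looked up below valid.
              */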
2146         rcu_read_lock();
2147
2148         for (i = 0; i < num_records; i++) {
2149                 record = &resp->tx_mode_switch_ind.records[i];
2150                 info0 = le16_to_cpu(record->info0);
2151                 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
2152                 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
2153
2154                 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2155                     unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2156                         ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2157                                     peer_id, tid);
2158                         continue;
2159                 }
2160
2161                 spin_lock_bh(&ar->data_lock);
2162                 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2163                 spin_unlock_bh(&ar->data_lock);
2164
2165                 /* It is okay to release the lock and use txq because RCU read
2166                  * lock is held.
2167                  */
2168
2169                 if (unlikely(!txq)) {
2170                         ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2171                                     peer_id, tid);
2172                         continue;
2173                 }
2174
2175                 spin_lock_bh(&ar->htt.tx_lock);
2176                 artxq = (void *)txq->drv_priv;
2177                 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
2178                 spin_unlock_bh(&ar->htt.tx_lock);
2179         }
2180
2181         rcu_read_unlock();
2182
2183         ath10k_mac_tx_push_pending(ar);
2184 }
2185
2186 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2187 {
2188         bool release;
2189
2190         release = ath10k_htt_t2h_msg_handler(ar, skb);
2191
2192         /* Free the indication buffer */
2193         if (release)
2194                 dev_kfree_skb_any(skb);
2195 }
2196
2197 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2198 {
2199         struct ath10k_htt *htt = &ar->htt;
2200         struct htt_resp *resp = (struct htt_resp *)skb->data;
2201         enum htt_t2h_msg_type type;
2202
2203         /* confirm alignment */
2204         if (!IS_ALIGNED((unsigned long)skb->data, 4))
2205                 ath10k_warn(ar, "unaligned htt message, expect trouble\n");
2206
2207         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
2208                    resp->hdr.msg_type);
2209
2210         if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
2211                 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
2212                            resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
2213                 return true;
2214         }
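             /* The wire value of msg_type is firmware specific so translate
              * it through the per-firmware mapping into the generic enum.
              */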
2215         type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
2216
2217         switch (type) {
2218         case HTT_T2H_MSG_TYPE_VERSION_CONF: {
2219                 htt->target_version_major = resp->ver_resp.major;
2220                 htt->target_version_minor = resp->ver_resp.minor;
2221                 complete(&htt->target_version_received);
2222                 break;
2223         }
2224         case HTT_T2H_MSG_TYPE_RX_IND:
2225                 ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
2226                 break;
2227         case HTT_T2H_MSG_TYPE_PEER_MAP: {
2228                 struct htt_peer_map_event ev = {
2229                         .vdev_id = resp->peer_map.vdev_id,
2230                         .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
2231                 };
2232                 memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
2233                 ath10k_peer_map_event(htt, &ev);
2234                 break;
2235         }
2236         case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
2237                 struct htt_peer_unmap_event ev = {
2238                         .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
2239                 };
2240                 ath10k_peer_unmap_event(htt, &ev);
2241                 break;
2242         }
2243         case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
2244                 struct htt_tx_done tx_done = {};
2245                 int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
2246
2247                 tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
2248
2249                 switch (status) {
2250                 case HTT_MGMT_TX_STATUS_OK:
2251                         tx_done.status = HTT_TX_COMPL_STATE_ACK;
2252                         break;
2253                 case HTT_MGMT_TX_STATUS_RETRY:
2254                         tx_done.status = HTT_TX_COMPL_STATE_NOACK;
2255                         break;
2256                 case HTT_MGMT_TX_STATUS_DROP:
2257                         tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2258                         break;
2259                 }
2260
2261                 status = ath10k_txrx_tx_unref(htt, &tx_done);
2262                 if (!status) {
2263                         spin_lock_bh(&htt->tx_lock);
2264                         ath10k_htt_tx_mgmt_dec_pending(htt);
2265                         spin_unlock_bh(&htt->tx_lock);
2266                 }
2267                 break;
2268         }
2269         case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
2270                 ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
2271                 break;
2272         case HTT_T2H_MSG_TYPE_SEC_IND: {
2273                 struct ath10k *ar = htt->ar;
2274                 struct htt_security_indication *ev = &resp->security_indication;
2275
2276                 ath10k_dbg(ar, ATH10K_DBG_HTT,
2277                            "sec ind peer_id %d unicast %d type %d\n",
2278                           __le16_to_cpu(ev->peer_id),
2279                           !!(ev->flags & HTT_SECURITY_IS_UNICAST),
2280                           MS(ev->flags, HTT_SECURITY_TYPE));
2281                 complete(&ar->install_key_done);
2282                 break;
2283         }
2284         case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
2285                 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2286                                 skb->data, skb->len);
2287                 atomic_inc(&htt->num_mpdus_ready);
2288                 break;
2289         }
2290         case HTT_T2H_MSG_TYPE_TEST:
2291                 break;
2292         case HTT_T2H_MSG_TYPE_STATS_CONF:
2293                 trace_ath10k_htt_stats(ar, skb->data, skb->len);
2294                 break;
2295         case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
2296                 /* Firmware can return tx frames if it's unable to fully
2297                  * process them and suspects host may be able to fix it. ath10k
2298                  * sends all tx frames as already inspected so this shouldn't
2299                  * happen unless fw has a bug.
2300                  */
2301                 ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
2302                 break;
2303         case HTT_T2H_MSG_TYPE_RX_ADDBA:
2304                 ath10k_htt_rx_addba(ar, resp);
2305                 break;
2306         case HTT_T2H_MSG_TYPE_RX_DELBA:
2307                 ath10k_htt_rx_delba(ar, resp);
2308                 break;
2309         case HTT_T2H_MSG_TYPE_PKTLOG: {
2310                 trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
2311                                         skb->len -
2312                                         offsetof(struct htt_resp,
2313                                                  pktlog_msg.payload));
2314                 break;
2315         }
2316         case HTT_T2H_MSG_TYPE_RX_FLUSH: {
2317                 /* Ignore this event because mac80211 takes care of Rx
2318                  * aggregation reordering.
2319                  */
2320                 break;
2321         }
2322         case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
2323                 __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
2324                 return false;
2325         }
2326         case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
2327                 break;
2328         case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
2329                 u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
2330                 u32 freq = __le32_to_cpu(resp->chan_change.freq);
2331
2332                 ar->tgt_oper_chan =
2333                         __ieee80211_get_channel(ar->hw->wiphy, freq);
2334                 ath10k_dbg(ar, ATH10K_DBG_HTT,
2335                            "htt chan change freq %u phymode %s\n",
2336                            freq, ath10k_wmi_phymode_str(phymode));
2337                 break;
2338         }
2339         case HTT_T2H_MSG_TYPE_AGGR_CONF:
2340                 break;
2341         case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
2342                 struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
2343
2344                 if (!tx_fetch_ind) {
2345                         ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
2346                         break;
2347                 }
2348                 skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
2349                 break;
2350         }
2351         case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
2352                 ath10k_htt_rx_tx_fetch_confirm(ar, skb);
2353                 break;
2354         case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
2355                 ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
2356                 break;
2357         case HTT_T2H_MSG_TYPE_EN_STATS:
2358         default:
2359                 ath10k_warn(ar, "htt event (%d) not handled\n",
2360                             resp->hdr.msg_type);
2361                 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2362                                 skb->data, skb->len);
2363                 break;
2364         }
2365         return true;
2366 }
2367 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
2368
2369 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
2370                                              struct sk_buff *skb)
2371 {
2372         trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
2373         dev_kfree_skb_any(skb);
2374 }
2375 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
2376
2377 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
2378 {
2379         struct ath10k_htt *htt = &ar->htt;
2380         struct htt_tx_done tx_done = {};
2381         struct sk_buff_head tx_ind_q;
2382         struct sk_buff *skb;
2383         unsigned long flags;
2384         int quota = 0, done, num_rx_msdus;
2385         bool resched_napi = false;
2386
2387         __skb_queue_head_init(&tx_ind_q);
2388
2389         /* Since an in-ord-ind can deliver more than one A-MSDU in a single
2390          * event, process it first to utilize the full available quota.
2391          */
2392         while (quota < budget) {
2393                 if (skb_queue_empty(&htt->rx_in_ord_compl_q))
2394                         break;
2395
2396                 skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
2397                 if (!skb) {
2398                         resched_napi = true;
2399                         goto exit;
2400                 }
2401
2402                 spin_lock_bh(&htt->rx_ring.lock);
2403                 num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
2404                 spin_unlock_bh(&htt->rx_ring.lock);
2405                 if (num_rx_msdus < 0) {
2406                         resched_napi = true;
2407                         goto exit;
2408                 }
2409
2410                 dev_kfree_skb_any(skb);
2411                 if (num_rx_msdus > 0)
2412                         quota += num_rx_msdus;
2413
2414                 if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
2415                     !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
2416                         resched_napi = true;
2417                         goto exit;
2418                 }
2419         }
2420
2421         while (quota < budget) {
2422                 /* no more data to receive */
2423                 if (!atomic_read(&htt->num_mpdus_ready))
2424                         break;
2425
2426                 num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
2427                 if (num_rx_msdus < 0) {
2428                         resched_napi = true;
2429                         goto exit;
2430                 }
2431
2432                 quota += num_rx_msdus;
2433                 atomic_dec(&htt->num_mpdus_ready);
2434                 if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
2435                     atomic_read(&htt->num_mpdus_ready)) {
2436                         resched_napi = true;
2437                         goto exit;
2438                 }
2439         }
2440
2441         /* From NAPI documentation:
2442          *  The napi poll() function may also process TX completions, in which
2443          *  case if it processes the entire TX ring then it should count that
2444          *  work as the rest of the budget.
2445          */
2446         if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
2447                 quota = budget;
2448
2449         /* kfifo_get: called only from the NAPI poll so it's neatly serialized.
2450          * From kfifo_get() documentation:
2451          *  Note that with only one concurrent reader and one concurrent writer,
2452          *  you don't need extra locking to use these macro.
2453          */
2454         while (kfifo_get(&htt->txdone_fifo, &tx_done))
2455                 ath10k_txrx_tx_unref(htt, &tx_done);
2456
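             /* Completed tx frames have freed up transmit resources so try to
              * push any pending frames now.
              */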
2457         ath10k_mac_tx_push_pending(ar);
2458
2459         spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
2460         skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
2461         spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
2462
2463         while ((skb = __skb_dequeue(&tx_ind_q))) {
2464                 ath10k_htt_rx_tx_fetch_ind(ar, skb);
2465                 dev_kfree_skb_any(skb);
2466         }
2467
2468 exit:
2469         ath10k_htt_rx_msdu_buff_replenish(htt);
2470         /* In case of an rx failure or more data to read, report the full
2471          * budget so that the NAPI poll gets rescheduled.
2472          */
2473         done = resched_napi ? budget : quota;
2474
2475         return done;
2476 }
2477 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);