Linux-libre 5.7.6-gnu
[librecmc/linux-libre.git] / drivers/net/wireless/ath/ath11k/dp_rx.c
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"

/* Note: the value is in jiffies despite the _MS suffix */
#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc)
{
        return desc->hdr_status;
}

static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc)
{
        if (!(__le32_to_cpu(desc->mpdu_start.info1) &
            RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID))
                return HAL_ENCRYPT_TYPE_OPEN;

        return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
                         __le32_to_cpu(desc->mpdu_start.info2));
}

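/* Descriptor field accessors: each ath11k_dp_rx_h_*() helper below reads
 * one little-endian 32-bit word of the RX descriptor TLVs and extracts a
 * bitfield with FIELD_GET(). Illustrative sketch only (it mirrors the
 * calls below, it is not additional driver code):
 *
 *      u32 info = __le32_to_cpu(desc->msdu_start.info3);
 *      u8 sgi = FIELD_GET(RX_MSDU_START_INFO3_SGI, info);
 */
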
static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct hal_rx_desc *desc)
{
        return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
                         __le32_to_cpu(desc->msdu_start.info2));
}

static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct hal_rx_desc *desc)
{
        return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
                         __le32_to_cpu(desc->msdu_start.info2));
}

static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct hal_rx_desc *desc)
{
        return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
                           __le32_to_cpu(desc->mpdu_start.info1));
}

static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct hal_rx_desc *desc)
{
        return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
                           __le32_to_cpu(desc->mpdu_start.info1));
}

static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;

        hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
        return ieee80211_has_morefrags(hdr->frame_control);
}

static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;

        hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
        return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct hal_rx_desc *desc)
{
        return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
                         __le32_to_cpu(desc->mpdu_start.info1));
}

static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
{
        return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
                           __le32_to_cpu(desc->attention.info2));
}

static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc)
{
        return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
                           __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc)
{
        return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
                           __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc)
{
        return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
                          __le32_to_cpu(desc->attention.info2)) ==
                RX_DESC_DECRYPT_STATUS_CODE_OK);
}

static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc)
{
        u32 info = __le32_to_cpu(desc->attention.info1);
        u32 errmap = 0;

        if (info & RX_ATTENTION_INFO1_FCS_ERR)
                errmap |= DP_RX_MPDU_ERR_FCS;

        if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
                errmap |= DP_RX_MPDU_ERR_DECRYPT;

        if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
                errmap |= DP_RX_MPDU_ERR_TKIP_MIC;

        if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
                errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;

        if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
                errmap |= DP_RX_MPDU_ERR_OVERFLOW;

        if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
                errmap |= DP_RX_MPDU_ERR_MSDU_LEN;

        if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
                errmap |= DP_RX_MPDU_ERR_MPDU_LEN;

        return errmap;
}

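/* Illustrative usage note: callers test the bitmap returned by
 * ath11k_dp_rx_h_attn_mpdu_err() with plain mask checks, e.g.
 *
 *      if (ath11k_dp_rx_h_attn_mpdu_err(desc) & DP_RX_MPDU_ERR_FCS)
 *              the MPDU failed its frame check sequence;
 */
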
static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc)
{
        return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
                         __le32_to_cpu(desc->msdu_start.info1));
}

static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc)
{
        return FIELD_GET(RX_MSDU_START_INFO3_SGI,
                         __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc)
{
        return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
                         __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc)
{
        return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
                         __le32_to_cpu(desc->msdu_start.info3));
}

static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc)
{
        return __le32_to_cpu(desc->msdu_start.phy_meta_data);
}

static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc)
{
        return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
                         __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc)
{
        u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
                                      __le32_to_cpu(desc->msdu_start.info3));

        return hweight8(mimo_ss_bitmap);
}

static u8 ath11k_dp_rx_h_mpdu_start_tid(struct hal_rx_desc *desc)
{
        return FIELD_GET(RX_MPDU_START_INFO2_TID,
                         __le32_to_cpu(desc->mpdu_start.info2));
}

static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct hal_rx_desc *desc)
{
        return __le16_to_cpu(desc->mpdu_start.sw_peer_id);
}

static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc)
{
        return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
                         __le32_to_cpu(desc->msdu_end.info2));
}

static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc)
{
        return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
                           __le32_to_cpu(desc->msdu_end.info2));
}

static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc)
{
        return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
                           __le32_to_cpu(desc->msdu_end.info2));
}

static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc,
                                           struct hal_rx_desc *ldesc)
{
        memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end,
               sizeof(struct rx_msdu_end));
        memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention,
               sizeof(struct rx_attention));
        memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end,
               sizeof(struct rx_mpdu_end));
}

static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc)
{
        struct rx_attention *rx_attn;

        rx_attn = &rx_desc->attention;

        return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
                         __le32_to_cpu(rx_attn->info1));
}

static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc)
{
        struct rx_msdu_start *rx_msdu_start;

        rx_msdu_start = &rx_desc->msdu_start;

        return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
                         __le32_to_cpu(rx_msdu_start->info2));
}

static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc)
{
        u8 *rx_pkt_hdr;

        rx_pkt_hdr = &rx_desc->msdu_payload[0];

        return rx_pkt_hdr;
}

static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
{
        u32 tlv_tag;

        tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
                            __le32_to_cpu(rx_desc->mpdu_start_tag));

        return tlv_tag == HAL_RX_MPDU_START;
}

static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
{
        return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id);
}

/* Returns number of Rx buffers replenished */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
                               struct dp_rxdma_ring *rx_ring,
                               int req_entries,
                               enum hal_rx_buf_return_buf_manager mgr,
                               gfp_t gfp)
{
        struct hal_srng *srng;
        u32 *desc;
        struct sk_buff *skb;
        int num_free;
        int num_remain;
        int buf_id;
        u32 cookie;
        dma_addr_t paddr;

        req_entries = min(req_entries, rx_ring->bufs_max);

        srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
        if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
                req_entries = num_free;

        req_entries = min(num_free, req_entries);
        num_remain = req_entries;

        while (num_remain > 0) {
                skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
                                    DP_RX_BUFFER_ALIGN_SIZE);
                if (!skb)
                        break;

                if (!IS_ALIGNED((unsigned long)skb->data,
                                DP_RX_BUFFER_ALIGN_SIZE)) {
                        skb_pull(skb,
                                 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
                                 skb->data);
                }

                paddr = dma_map_single(ab->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);
                if (dma_mapping_error(ab->dev, paddr))
                        goto fail_free_skb;

                spin_lock_bh(&rx_ring->idr_lock);
                buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
                                   rx_ring->bufs_max * 3, gfp);
                spin_unlock_bh(&rx_ring->idr_lock);
                if (buf_id < 0)
                        goto fail_dma_unmap;

                desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
                if (!desc)
                        goto fail_idr_remove;

                ATH11K_SKB_RXCB(skb)->paddr = paddr;

                cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
                         FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

                num_remain--;

                ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
        }

        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return req_entries - num_remain;

fail_idr_remove:
        spin_lock_bh(&rx_ring->idr_lock);
        idr_remove(&rx_ring->bufs_idr, buf_id);
        spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
        dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
                         DMA_FROM_DEVICE);
fail_free_skb:
        dev_kfree_skb_any(skb);

        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return req_entries - num_remain;
}

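/* Illustrative sketch (not driver code): the cookie written above packs
 * the pdev id and the IDR buffer id; on completion the same FIELD_GET()
 * masks recover them, which is how later RX paths find the skb again:
 *
 *      u8 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
 *      int buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
 *      skb = idr_find(&rx_ring->bufs_idr, buf_id);
 */
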
static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
                                         struct dp_rxdma_ring *rx_ring)
{
        struct ath11k_pdev_dp *dp = &ar->dp;
        struct sk_buff *skb;
        int buf_id;

        spin_lock_bh(&rx_ring->idr_lock);
        idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
                idr_remove(&rx_ring->bufs_idr, buf_id);
                /* TODO: Understand where the internal driver does this
                 * dma_unmap of the rxdma_buffer.
                 */
                dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
                                 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }

        idr_destroy(&rx_ring->bufs_idr);
        spin_unlock_bh(&rx_ring->idr_lock);

        rx_ring = &dp->rx_mon_status_refill_ring;

        spin_lock_bh(&rx_ring->idr_lock);
        idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
                idr_remove(&rx_ring->bufs_idr, buf_id);
                /* XXX: Understand where the internal driver does this
                 * dma_unmap of the rxdma_buffer.
                 */
                dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
                                 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
                dev_kfree_skb_any(skb);
        }

        idr_destroy(&rx_ring->bufs_idr);
        spin_unlock_bh(&rx_ring->idr_lock);
        return 0;
}

static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
        struct ath11k_pdev_dp *dp = &ar->dp;
        struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

        ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

        rx_ring = &dp->rxdma_mon_buf_ring;
        ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

        rx_ring = &dp->rx_mon_status_refill_ring;
        ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
        return 0;
}

static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
                                          struct dp_rxdma_ring *rx_ring,
                                          u32 ringtype)
{
        struct ath11k_pdev_dp *dp = &ar->dp;
        int num_entries;

        num_entries = rx_ring->refill_buf_ring.size /
                      ath11k_hal_srng_get_entrysize(ringtype);

        rx_ring->bufs_max = num_entries;
        ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
                                   HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL);
        return 0;
}

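/* Illustrative arithmetic: with a refill ring of S bytes and an entry
 * size E from ath11k_hal_srng_get_entrysize(), the ring is fully primed
 * with S / E buffers; e.g. S = 4096 and E = 8 gives bufs_max = 512.
 * (Example values only; the real sizes come from the ring config.)
 */
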
static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
        struct ath11k_pdev_dp *dp = &ar->dp;
        struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

        ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);

        rx_ring = &dp->rxdma_mon_buf_ring;
        ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);

        rx_ring = &dp->rx_mon_status_refill_ring;
        ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);

        return 0;
}

static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
        struct ath11k_pdev_dp *dp = &ar->dp;

        ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring);
        ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring);
        ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring);
        ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}

void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        int i;

        for (i = 0; i < DP_REO_DST_RING_MAX; i++)
                ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        int ret;
        int i;

        for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
                ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
                                           HAL_REO_DST, i, 0,
                                           DP_REO_DST_RING_SIZE);
                if (ret) {
                        ath11k_warn(ab, "failed to setup reo_dst_ring\n");
                        goto err_reo_cleanup;
                }
        }

        return 0;

err_reo_cleanup:
        ath11k_dp_pdev_reo_cleanup(ab);

        return ret;
}

static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
        struct ath11k_pdev_dp *dp = &ar->dp;
        struct dp_srng *srng = NULL;
        int ret;

        ret = ath11k_dp_srng_setup(ar->ab,
                                   &dp->rx_refill_buf_ring.refill_buf_ring,
                                   HAL_RXDMA_BUF, 0,
                                   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
        if (ret) {
                ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
                return ret;
        }

        ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring,
                                   HAL_RXDMA_DST, 0, dp->mac_id,
                                   DP_RXDMA_ERR_DST_RING_SIZE);
        if (ret) {
                ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n");
                return ret;
        }

        srng = &dp->rx_mon_status_refill_ring.refill_buf_ring;
        ret = ath11k_dp_srng_setup(ar->ab,
                                   srng,
                                   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id,
                                   DP_RXDMA_MON_STATUS_RING_SIZE);
        if (ret) {
                ath11k_warn(ar->ab,
                            "failed to setup rx_mon_status_refill_ring\n");
                return ret;
        }

        ret = ath11k_dp_srng_setup(ar->ab,
                                   &dp->rxdma_mon_buf_ring.refill_buf_ring,
                                   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
                                   DP_RXDMA_MONITOR_BUF_RING_SIZE);
        if (ret) {
                ath11k_warn(ar->ab,
                            "failed to setup HAL_RXDMA_MONITOR_BUF\n");
                return ret;
        }

        ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
                                   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
                                   DP_RXDMA_MONITOR_DST_RING_SIZE);
        if (ret) {
                ath11k_warn(ar->ab,
                            "failed to setup HAL_RXDMA_MONITOR_DST\n");
                return ret;
        }

        ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
                                   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
                                   DP_RXDMA_MONITOR_DESC_RING_SIZE);
        if (ret) {
                ath11k_warn(ar->ab,
                            "failed to setup HAL_RXDMA_MONITOR_DESC\n");
                return ret;
        }

        return 0;
}

void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        struct dp_reo_cmd *cmd, *tmp;
        struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;

        spin_lock_bh(&dp->reo_cmd_lock);
        list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
                list_del(&cmd->list);
                dma_unmap_single(ab->dev, cmd->data.paddr,
                                 cmd->data.size, DMA_BIDIRECTIONAL);
                kfree(cmd->data.vaddr);
                kfree(cmd);
        }

        list_for_each_entry_safe(cmd_cache, tmp_cache,
                                 &dp->reo_cmd_cache_flush_list, list) {
                list_del(&cmd_cache->list);
                dma_unmap_single(ab->dev, cmd_cache->data.paddr,
                                 cmd_cache->data.size, DMA_BIDIRECTIONAL);
                kfree(cmd_cache->data.vaddr);
                kfree(cmd_cache);
        }
        spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
                                   enum hal_reo_cmd_status status)
{
        struct dp_rx_tid *rx_tid = ctx;

        if (status != HAL_REO_CMD_SUCCESS)
                ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
                            rx_tid->tid, status);

        dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
                         DMA_BIDIRECTIONAL);
        kfree(rx_tid->vaddr);
}

static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
                                      struct dp_rx_tid *rx_tid)
{
        struct ath11k_hal_reo_cmd cmd = {0};
        unsigned long tot_desc_sz, desc_sz;
        int ret;

        tot_desc_sz = rx_tid->size;
        desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

        while (tot_desc_sz > desc_sz) {
                tot_desc_sz -= desc_sz;
                cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
                cmd.addr_hi = upper_32_bits(rx_tid->paddr);
                ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
                                                HAL_REO_CMD_FLUSH_CACHE, &cmd,
                                                NULL);
                if (ret)
                        ath11k_warn(ab,
                                    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
                                    rx_tid->tid, ret);
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.addr_lo = lower_32_bits(rx_tid->paddr);
        cmd.addr_hi = upper_32_bits(rx_tid->paddr);
        cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
        ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
                                        HAL_REO_CMD_FLUSH_CACHE,
                                        &cmd, ath11k_dp_reo_cmd_free);
        if (ret) {
                ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
                           rx_tid->tid, ret);
                dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
                                 DMA_BIDIRECTIONAL);
                kfree(rx_tid->vaddr);
        }
}

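/* Illustrative note on the flush loop above: the queue descriptor is
 * flushed back to front in desc_sz-sized blocks. For example, if
 * rx_tid->size were 3 * desc_sz, the loop issues flushes at offsets
 * 2 * desc_sz and desc_sz, and the final command (with NEED_STATUS set)
 * covers the base address. Worked example only; the actual size depends
 * on the BA window.
 */
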
static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
                                      enum hal_reo_cmd_status status)
{
        struct ath11k_base *ab = dp->ab;
        struct dp_rx_tid *rx_tid = ctx;
        struct dp_reo_cache_flush_elem *elem, *tmp;

        if (status == HAL_REO_CMD_DRAIN) {
                goto free_desc;
        } else if (status != HAL_REO_CMD_SUCCESS) {
                /* Shouldn't happen! Cleanup in case of other failure? */
                ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
                            rx_tid->tid, status);
                return;
        }

        elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
        if (!elem)
                goto free_desc;

        elem->ts = jiffies;
        memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

        spin_lock_bh(&dp->reo_cmd_lock);
        list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
        spin_unlock_bh(&dp->reo_cmd_lock);

        /* Flush and invalidate aged REO desc from HW cache */
        spin_lock_bh(&dp->reo_cmd_lock);
        list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
                                 list) {
                if (time_after(jiffies, elem->ts +
                               msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
                        list_del(&elem->list);
                        spin_unlock_bh(&dp->reo_cmd_lock);

                        ath11k_dp_reo_cache_flush(ab, &elem->data);
                        kfree(elem);
                        spin_lock_bh(&dp->reo_cmd_lock);
                }
        }
        spin_unlock_bh(&dp->reo_cmd_lock);

        return;
free_desc:
        dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
                         DMA_BIDIRECTIONAL);
        kfree(rx_tid->vaddr);
}

void ath11k_peer_rx_tid_delete(struct ath11k *ar,
                               struct ath11k_peer *peer, u8 tid)
{
        struct ath11k_hal_reo_cmd cmd = {0};
        struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
        int ret;

        if (!rx_tid->active)
                return;

        cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
        cmd.addr_lo = lower_32_bits(rx_tid->paddr);
        cmd.addr_hi = upper_32_bits(rx_tid->paddr);
        cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
        ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
                                        HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
                                        ath11k_dp_rx_tid_del_func);
        if (ret) {
                ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
                           tid, ret);
                dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
                                 DMA_BIDIRECTIONAL);
                kfree(rx_tid->vaddr);
        }

        rx_tid->active = false;
}

static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
                                         u32 *link_desc,
                                         enum hal_wbm_rel_bm_act action)
{
        struct ath11k_dp *dp = &ab->dp;
        struct hal_srng *srng;
        u32 *desc;
        int ret = 0;

        srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
        if (!desc) {
                ret = -ENOBUFS;
                goto exit;
        }

        ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
                                         action);

exit:
        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return ret;
}

static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
{
        struct ath11k_base *ab = rx_tid->ab;

        lockdep_assert_held(&ab->base_lock);

        if (rx_tid->dst_ring_desc) {
                if (rel_link_desc)
                        ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
                                                      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
                kfree(rx_tid->dst_ring_desc);
                rx_tid->dst_ring_desc = NULL;
        }

        rx_tid->cur_sn = 0;
        rx_tid->last_frag_no = 0;
        rx_tid->rx_frag_bitmap = 0;
        __skb_queue_purge(&rx_tid->rx_frags);
}

void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
        struct dp_rx_tid *rx_tid;
        int i;

        lockdep_assert_held(&ar->ab->base_lock);

        for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
                rx_tid = &peer->rx_tid[i];

                ath11k_peer_rx_tid_delete(ar, peer, i);
                ath11k_dp_rx_frags_cleanup(rx_tid, true);

                spin_unlock_bh(&ar->ab->base_lock);
                del_timer_sync(&rx_tid->frag_timer);
                spin_lock_bh(&ar->ab->base_lock);
        }
}

static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
                                         struct ath11k_peer *peer,
                                         struct dp_rx_tid *rx_tid,
                                         u32 ba_win_sz, u16 ssn,
                                         bool update_ssn)
{
        struct ath11k_hal_reo_cmd cmd = {0};
        int ret;

        cmd.addr_lo = lower_32_bits(rx_tid->paddr);
        cmd.addr_hi = upper_32_bits(rx_tid->paddr);
        cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
        cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
        cmd.ba_window_size = ba_win_sz;

        if (update_ssn) {
                cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
                cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
        }

        ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
                                        HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
                                        NULL);
        if (ret) {
                ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
                            rx_tid->tid, ret);
                return ret;
        }

        rx_tid->ba_win_sz = ba_win_sz;

        return 0;
}

static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
                                      const u8 *peer_mac, int vdev_id, u8 tid)
{
        struct ath11k_peer *peer;
        struct dp_rx_tid *rx_tid;

        spin_lock_bh(&ab->base_lock);

        peer = ath11k_peer_find(ab, vdev_id, peer_mac);
        if (!peer) {
                ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
                goto unlock_exit;
        }

        rx_tid = &peer->rx_tid[tid];
        if (!rx_tid->active)
                goto unlock_exit;

        dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
                         DMA_BIDIRECTIONAL);
        kfree(rx_tid->vaddr);

        rx_tid->active = false;

unlock_exit:
        spin_unlock_bh(&ab->base_lock);
}

int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
                             u8 tid, u32 ba_win_sz, u16 ssn,
                             enum hal_pn_type pn_type)
{
        struct ath11k_base *ab = ar->ab;
        struct ath11k_peer *peer;
        struct dp_rx_tid *rx_tid;
        u32 hw_desc_sz;
        u32 *addr_aligned;
        void *vaddr;
        dma_addr_t paddr;
        int ret;

        spin_lock_bh(&ab->base_lock);

        peer = ath11k_peer_find(ab, vdev_id, peer_mac);
        if (!peer) {
                ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
                spin_unlock_bh(&ab->base_lock);
                return -ENOENT;
        }

        rx_tid = &peer->rx_tid[tid];
        /* Update the tid queue if it is already setup */
        if (rx_tid->active) {
                paddr = rx_tid->paddr;
                ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
                                                    ba_win_sz, ssn, true);
                spin_unlock_bh(&ab->base_lock);
                if (ret) {
                        ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
                        return ret;
                }

                ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
                                                             peer_mac, paddr,
                                                             tid, 1, ba_win_sz);
                if (ret)
                        ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid %d (%d)\n",
                                    tid, ret);
                return ret;
        }

        rx_tid->tid = tid;

        rx_tid->ba_win_sz = ba_win_sz;

        /* TODO: Optimize the memory allocation for qos tid based on
         * the actual BA window size in the REO tid update path.
         */
        if (tid == HAL_DESC_REO_NON_QOS_TID)
                hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
        else
                hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

        vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
        if (!vaddr) {
                spin_unlock_bh(&ab->base_lock);
                return -ENOMEM;
        }

        addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

        ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
                                   ssn, pn_type);

        paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
                               DMA_BIDIRECTIONAL);

        ret = dma_mapping_error(ab->dev, paddr);
        if (ret) {
                spin_unlock_bh(&ab->base_lock);
                goto err_mem_free;
        }

        rx_tid->vaddr = vaddr;
        rx_tid->paddr = paddr;
        rx_tid->size = hw_desc_sz;
        rx_tid->active = true;

        spin_unlock_bh(&ab->base_lock);

        ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
                                                     paddr, tid, 1, ba_win_sz);
        if (ret) {
                ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid %d (%d)\n",
                            tid, ret);
                ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
        }

        return ret;

err_mem_free:
        kfree(vaddr);

        return ret;
}

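/* Illustrative note: kzalloc() above over-allocates by
 * HAL_LINK_DESC_ALIGN - 1 bytes so PTR_ALIGN() can round the descriptor
 * start up to a HAL_LINK_DESC_ALIGN boundary; e.g. with an alignment of
 * 128, an allocation returning ...0x08 yields addr_aligned == ...0x80.
 * (Example alignment value only.)
 */
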
int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
                             struct ieee80211_ampdu_params *params)
{
        struct ath11k_base *ab = ar->ab;
        struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
        int vdev_id = arsta->arvif->vdev_id;
        int ret;

        ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
                                       params->tid, params->buf_size,
                                       params->ssn, arsta->pn_type);
        if (ret)
                ath11k_warn(ab, "failed to setup rx tid %d: %d\n",
                            params->tid, ret);

        return ret;
}

int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
                            struct ieee80211_ampdu_params *params)
{
        struct ath11k_base *ab = ar->ab;
        struct ath11k_peer *peer;
        struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
        int vdev_id = arsta->arvif->vdev_id;
        dma_addr_t paddr;
        bool active;
        int ret;

        spin_lock_bh(&ab->base_lock);

        peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
        if (!peer) {
                ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
                spin_unlock_bh(&ab->base_lock);
                return -ENOENT;
        }

        paddr = peer->rx_tid[params->tid].paddr;
        active = peer->rx_tid[params->tid].active;

        if (!active) {
                spin_unlock_bh(&ab->base_lock);
                return 0;
        }

        ret = ath11k_peer_rx_tid_reo_update(ar, peer,
                                            &peer->rx_tid[params->tid],
                                            1, 0, false);
        spin_unlock_bh(&ab->base_lock);
        if (ret) {
                ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
                            params->tid, ret);
                return ret;
        }

        ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
                                                     params->sta->addr, paddr,
                                                     params->tid, 1, 1);
        if (ret)
                ath11k_warn(ab, "failed to send wmi to delete rx tid %d: %d\n",
                            params->tid, ret);

        return ret;
}

int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
                                       const u8 *peer_addr,
                                       enum set_key_cmd key_cmd,
                                       struct ieee80211_key_conf *key)
{
        struct ath11k *ar = arvif->ar;
        struct ath11k_base *ab = ar->ab;
        struct ath11k_hal_reo_cmd cmd = {0};
        struct ath11k_peer *peer;
        struct dp_rx_tid *rx_tid;
        u8 tid;
        int ret = 0;

        /* NOTE: Enable PN/TSC replay check offload only for unicast frames.
         * We use mac80211 PN/TSC replay check functionality for bcast/mcast
         * for now.
         */
        if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
                return 0;

        cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
        cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
                    HAL_REO_CMD_UPD0_PN_SIZE |
                    HAL_REO_CMD_UPD0_PN_VALID |
                    HAL_REO_CMD_UPD0_PN_CHECK |
                    HAL_REO_CMD_UPD0_SVLD;

        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
        case WLAN_CIPHER_SUITE_CCMP:
        case WLAN_CIPHER_SUITE_CCMP_256:
        case WLAN_CIPHER_SUITE_GCMP:
        case WLAN_CIPHER_SUITE_GCMP_256:
                if (key_cmd == SET_KEY) {
                        cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
                        cmd.pn_size = 48;
                }
                break;
        default:
                break;
        }

        spin_lock_bh(&ab->base_lock);

        peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
        if (!peer) {
                ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
                spin_unlock_bh(&ab->base_lock);
                return -ENOENT;
        }

        for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
                rx_tid = &peer->rx_tid[tid];
                if (!rx_tid->active)
                        continue;
                cmd.addr_lo = lower_32_bits(rx_tid->paddr);
                cmd.addr_hi = upper_32_bits(rx_tid->paddr);
                ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
                                                HAL_REO_CMD_UPDATE_RX_QUEUE,
                                                &cmd, NULL);
                if (ret) {
                        ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
                                    tid, ret);
                        break;
                }
        }

        spin_unlock_bh(&ab->base_lock);

        return ret;
}

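/* Illustrative note: cmd.pn_size above is the packet-number width in
 * bits; 48 matches the 6-byte PN carried in CCMP/GCMP headers (and the
 * TKIP TSC), which is what the REO hardware compares per TID.
 */
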
static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
                                             u16 peer_id)
{
        int i;

        for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
                if (ppdu_stats->user_stats[i].is_valid_peer_id) {
                        if (peer_id == ppdu_stats->user_stats[i].peer_id)
                                return i;
                } else {
                        return i;
                }
        }

        return -EINVAL;
}

static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
                                           u16 tag, u16 len, const void *ptr,
                                           void *data)
{
        struct htt_ppdu_stats_info *ppdu_info;
        struct htt_ppdu_user_stats *user_stats;
        int cur_user;
        u16 peer_id;

        ppdu_info = (struct htt_ppdu_stats_info *)data;

        switch (tag) {
        case HTT_PPDU_STATS_TAG_COMMON:
                if (len < sizeof(struct htt_ppdu_stats_common)) {
                        ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
                                    len, tag);
                        return -EINVAL;
                }
                memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
                       sizeof(struct htt_ppdu_stats_common));
                break;
        case HTT_PPDU_STATS_TAG_USR_RATE:
                if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
                        ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
                                    len, tag);
                        return -EINVAL;
                }

                peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
                cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
                                                      peer_id);
                if (cur_user < 0)
                        return -EINVAL;
                user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
                user_stats->peer_id = peer_id;
                user_stats->is_valid_peer_id = true;
                memcpy((void *)&user_stats->rate, ptr,
                       sizeof(struct htt_ppdu_stats_user_rate));
                user_stats->tlv_flags |= BIT(tag);
                break;
        case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
                if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
                        ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
                                    len, tag);
                        return -EINVAL;
                }

                peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
                cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
                                                      peer_id);
                if (cur_user < 0)
                        return -EINVAL;
                user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
                user_stats->peer_id = peer_id;
                user_stats->is_valid_peer_id = true;
                memcpy((void *)&user_stats->cmpltn_cmn, ptr,
                       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
                user_stats->tlv_flags |= BIT(tag);
                break;
        case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
                if (len <
                    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
                        ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
                                    len, tag);
                        return -EINVAL;
                }

                peer_id =
                ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
                cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
                                                      peer_id);
                if (cur_user < 0)
                        return -EINVAL;
                user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
                user_stats->peer_id = peer_id;
                user_stats->is_valid_peer_id = true;
                memcpy((void *)&user_stats->ack_ba, ptr,
                       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
                user_stats->tlv_flags |= BIT(tag);
                break;
        }
        return 0;
}

int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
                           int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
                                       const void *ptr, void *data),
                           void *data)
{
        const struct htt_tlv *tlv;
        const void *begin = ptr;
        u16 tlv_tag, tlv_len;
        int ret = -EINVAL;

        while (len > 0) {
                if (len < sizeof(*tlv)) {
                        ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
                                   ptr - begin, len, sizeof(*tlv));
                        return -EINVAL;
                }
                tlv = (struct htt_tlv *)ptr;
                tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
                tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
                ptr += sizeof(*tlv);
                len -= sizeof(*tlv);

                if (tlv_len > len) {
                        ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
                                   tlv_tag, ptr - begin, len, tlv_len);
                        return -EINVAL;
                }
                ret = iter(ab, tlv_tag, tlv_len, ptr, data);
                if (ret == -ENOMEM)
                        return ret;

                ptr += tlv_len;
                len -= tlv_len;
        }
        return 0;
}

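/* HTT TLV stream layout handled above (illustrative):
 *
 *      | 32-bit header (HTT_TLV_TAG | HTT_TLV_LEN) | payload of len bytes | ...
 *
 * A minimal caller sketch (my_parse_cb and my_ctx are hypothetical):
 *
 *      ret = ath11k_dp_htt_tlv_iter(ab, skb->data, skb->len,
 *                                   my_parse_cb, &my_ctx);
 */
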
static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi)
{
        u32 ret = 0;

        switch (sgi) {
        case RX_MSDU_START_SGI_0_8_US:
                ret = NL80211_RATE_INFO_HE_GI_0_8;
                break;
        case RX_MSDU_START_SGI_1_6_US:
                ret = NL80211_RATE_INFO_HE_GI_1_6;
                break;
        case RX_MSDU_START_SGI_3_2_US:
                ret = NL80211_RATE_INFO_HE_GI_3_2;
                break;
        }

        return ret;
}

static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
                                struct htt_ppdu_stats *ppdu_stats, u8 user)
{
        struct ath11k_base *ab = ar->ab;
        struct ath11k_peer *peer;
        struct ieee80211_sta *sta;
        struct ath11k_sta *arsta;
        struct htt_ppdu_stats_user_rate *user_rate;
        struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
        struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
        struct htt_ppdu_stats_common *common = &ppdu_stats->common;
        int ret;
        u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
        u32 succ_bytes = 0;
        u16 rate = 0, succ_pkts = 0;
        u32 tx_duration = 0;
        u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
        bool is_ampdu = false;

        if (!usr_stats)
                return;

        if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
                return;

        if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
                is_ampdu =
                        HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

        if (usr_stats->tlv_flags &
            BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
                succ_bytes = usr_stats->ack_ba.success_bytes;
                succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
                                      usr_stats->ack_ba.info);
                tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
                                usr_stats->ack_ba.info);
        }

        if (common->fes_duration_us)
                tx_duration = common->fes_duration_us;

        user_rate = &usr_stats->rate;
        flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
        bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
        nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
        mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
        sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
        dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

        /* Note: if the host configured fixed rates, and in some other
         * special cases, broadcast/management frames are sent at different
         * rates. Should firmware rate control be skipped for these?
         */

        if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
                ath11k_warn(ab, "Invalid HE mcs %hhd peer stats",  mcs);
                return;
        }

        if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
                ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats",  mcs);
                return;
        }

        if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
                ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
                            mcs, nss);
                return;
        }

        if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
                ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
                                                            flags,
                                                            &rate_idx,
                                                            &rate);
                if (ret < 0)
                        return;
        }

        rcu_read_lock();
        spin_lock_bh(&ab->base_lock);
        peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

        if (!peer || !peer->sta) {
                spin_unlock_bh(&ab->base_lock);
                rcu_read_unlock();
                return;
        }

        sta = peer->sta;
        arsta = (struct ath11k_sta *)sta->drv_priv;

        memset(&arsta->txrate, 0, sizeof(arsta->txrate));

        switch (flags) {
        case WMI_RATE_PREAMBLE_OFDM:
                arsta->txrate.legacy = rate;
                break;
        case WMI_RATE_PREAMBLE_CCK:
                arsta->txrate.legacy = rate;
                break;
        case WMI_RATE_PREAMBLE_HT:
                arsta->txrate.mcs = mcs + 8 * (nss - 1);
                arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
                if (sgi)
                        arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
                break;
        case WMI_RATE_PREAMBLE_VHT:
                arsta->txrate.mcs = mcs;
                arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
                if (sgi)
                        arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
                break;
        case WMI_RATE_PREAMBLE_HE:
                arsta->txrate.mcs = mcs;
                arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
                arsta->txrate.he_dcm = dcm;
                arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
                arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
                                                (user_rate->ru_end -
                                                 user_rate->ru_start) + 1);
                break;
        }

        arsta->txrate.nss = nss;
        arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
        arsta->tx_duration += tx_duration;
        memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

        /* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
         * So skip peer stats update for mgmt packets.
         */
        if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
                memset(peer_stats, 0, sizeof(*peer_stats));
                peer_stats->succ_pkts = succ_pkts;
                peer_stats->succ_bytes = succ_bytes;
                peer_stats->is_ampdu = is_ampdu;
                peer_stats->duration = tx_duration;
                peer_stats->ba_fails =
                        HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
                        HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);

                if (ath11k_debug_is_extd_tx_stats_enabled(ar))
                        ath11k_accumulate_per_peer_tx_stats(arsta,
                                                            peer_stats, rate_idx);
        }

        spin_unlock_bh(&ab->base_lock);
        rcu_read_unlock();
}

static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
                                         struct htt_ppdu_stats *ppdu_stats)
{
        u8 user;

        for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
                ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

static
struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
                                                        u32 ppdu_id)
{
        struct htt_ppdu_stats_info *ppdu_info;

        spin_lock_bh(&ar->data_lock);
        if (!list_empty(&ar->ppdu_stats_info)) {
                list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
                        if (ppdu_info->ppdu_id == ppdu_id) {
                                spin_unlock_bh(&ar->data_lock);
                                return ppdu_info;
                        }
                }

                if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
                        ppdu_info = list_first_entry(&ar->ppdu_stats_info,
                                                     typeof(*ppdu_info), list);
                        list_del(&ppdu_info->list);
                        ar->ppdu_stat_list_depth--;
                        ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
                        kfree(ppdu_info);
                }
        }
        spin_unlock_bh(&ar->data_lock);

        ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
        if (!ppdu_info)
                return NULL;

        spin_lock_bh(&ar->data_lock);
        list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
        ar->ppdu_stat_list_depth++;
        spin_unlock_bh(&ar->data_lock);

        return ppdu_info;
}

static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
                                      struct sk_buff *skb)
{
        struct ath11k_htt_ppdu_stats_msg *msg;
        struct htt_ppdu_stats_info *ppdu_info;
        struct ath11k *ar;
        int ret;
        u8 pdev_id;
        u32 ppdu_id, len;

        msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
        len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
        pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
        ppdu_id = msg->ppdu_id;

        rcu_read_lock();
        ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
        if (!ar) {
                ret = -EINVAL;
                goto exit;
        }

        if (ath11k_debug_is_pktlog_lite_mode_enabled(ar))
                trace_ath11k_htt_ppdu_stats(ar, skb->data, len);

        ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
        if (!ppdu_info) {
                ret = -EINVAL;
                goto exit;
        }

        ppdu_info->ppdu_id = ppdu_id;
        ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
                                     ath11k_htt_tlv_ppdu_stats_parse,
                                     (void *)ppdu_info);
        if (ret) {
                ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
                goto exit;
        }

exit:
        rcu_read_unlock();

        return ret;
}

1480 static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
1481 {
1482         struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
1483         struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
1484         struct ath11k *ar;
1485         u8 pdev_id;
1486
1487         pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
1488         ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1489         if (!ar) {
1490                 ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
1491                 return;
1492         }
1493
1494         trace_ath11k_htt_pktlog(ar, data->payload, hdr->size);
1495 }
1496
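/* The backpressure event payload is three consecutive u32 words, decoded
 * below as:
 *   word0: pdev_id | ring_type | ring_id
 *   word1: hp (head pointer) | tp (tail pointer)
 *   word2: backpressure_time
 * The event is only logged for debugging; no recovery is attempted here.
 */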
1497 static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
1498                                                   struct sk_buff *skb)
1499 {
1500         u32 *data = (u32 *)skb->data;
1501         u8 pdev_id, ring_type, ring_id;
1502         u16 hp, tp;
1503         u32 backpressure_time;
1504
1505         pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
1506         ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
1507         ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
1508         ++data;
1509
1510         hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
1511         tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
1512         ++data;
1513
1514         backpressure_time = *data;
1515
1516         ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d, ring id %d, hp %d tp %d, backpressure time %d\n",
1517                    pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
1518 }
1519
1520 void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
1521                                        struct sk_buff *skb)
1522 {
1523         struct ath11k_dp *dp = &ab->dp;
1524         struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1525         enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
1526         u16 peer_id;
1527         u8 vdev_id;
1528         u8 mac_addr[ETH_ALEN];
1529         u16 peer_mac_h16;
1530         u16 ast_hash;
1531
1532         ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type: 0x%x\n", type);
1533
1534         switch (type) {
1535         case HTT_T2H_MSG_TYPE_VERSION_CONF:
1536                 dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
1537                                                   resp->version_msg.version);
1538                 dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
1539                                                   resp->version_msg.version);
1540                 complete(&dp->htt_tgt_version_received);
1541                 break;
1542         case HTT_T2H_MSG_TYPE_PEER_MAP:
1543                 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1544                                     resp->peer_map_ev.info);
1545                 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1546                                     resp->peer_map_ev.info);
1547                 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1548                                          resp->peer_map_ev.info1);
1549                 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1550                                        peer_mac_h16, mac_addr);
1551                 ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
1552                                      resp->peer_map_ev.info2);
1553                 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash);
1554                 break;
1555         case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1556                 peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
1557                                     resp->peer_unmap_ev.info);
1558                 ath11k_peer_unmap_event(ab, peer_id);
1559                 break;
1560         case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1561                 ath11k_htt_pull_ppdu_stats(ab, skb);
1562                 break;
1563         case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1564                 ath11k_dbg_htt_ext_stats_handler(ab, skb);
1565                 break;
1566         case HTT_T2H_MSG_TYPE_PKTLOG:
1567                 ath11k_htt_pktlog(ab, skb);
1568                 break;
1569         case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
1570                 ath11k_htt_backpressure_event_handler(ab, skb);
1571                 break;
1572         default:
1573                 ath11k_warn(ab, "htt event %d not handled\n", type);
1574                 break;
1575         }
1576
1577         dev_kfree_skb_any(skb);
1578 }
1579
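/* Coalesce an MSDU scattered across several rx buffers into @first.
 * The first buffer contributes DP_RX_BUFFER_SIZE - (HAL_RX_DESC_SIZE +
 * l3pad_bytes) bytes of payload; each continuation buffer contributes up
 * to DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE bytes until msdu_len is
 * satisfied. The head of @first is grown with pskb_expand_head() when its
 * tailroom cannot hold the remainder. If the MSDU unexpectedly fits in
 * the first buffer, the function warns once and simply trims it.
 */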
1580 static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
1581                                       struct sk_buff_head *msdu_list,
1582                                       struct sk_buff *first, struct sk_buff *last,
1583                                       u8 l3pad_bytes, int msdu_len)
1584 {
1585         struct sk_buff *skb;
1586         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1587         int buf_first_hdr_len, buf_first_len;
1588         struct hal_rx_desc *ldesc;
1589         int space_extra;
1590         int rem_len;
1591         int buf_len;
1592
1593         /* As the msdu is spread across multiple rx buffers,
1594          * find the offset to the start of msdu for computing
1595          * the length of the msdu in the first buffer.
1596          */
1597         buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes;
1598         buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1599
1600         if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1601                 skb_put(first, buf_first_hdr_len + msdu_len);
1602                 skb_pull(first, buf_first_hdr_len);
1603                 return 0;
1604         }
1605
1606         ldesc = (struct hal_rx_desc *)last->data;
1607         rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc);
1608         rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc);
1609
1610         /* MSDU spans over multiple buffers because the length of the MSDU
1611          * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
1612          * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
1613          */
1614         skb_put(first, DP_RX_BUFFER_SIZE);
1615         skb_pull(first, buf_first_hdr_len);
1616
1617         /* When an MSDU spans multiple buffers, the attention, MSDU_END and
1618          * MPDU_END TLVs are valid only in the last buffer. Copy those TLVs.
1619          */
1620         ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc);
1621
1622         space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1623         if (space_extra > 0 &&
1624             (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1625                 /* Free up all buffers of the MSDU */
1626                 while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1627                         rxcb = ATH11K_SKB_RXCB(skb);
1628                         if (!rxcb->is_continuation) {
1629                                 dev_kfree_skb_any(skb);
1630                                 break;
1631                         }
1632                         dev_kfree_skb_any(skb);
1633                 }
1634                 return -ENOMEM;
1635         }
1636
1637         rem_len = msdu_len - buf_first_len;
1638         while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1639                 rxcb = ATH11K_SKB_RXCB(skb);
1640                 if (rxcb->is_continuation)
1641                         buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE;
1642                 else
1643                         buf_len = rem_len;
1644
1645                 if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) {
1646                         WARN_ON_ONCE(1);
1647                         dev_kfree_skb_any(skb);
1648                         return -EINVAL;
1649                 }
1650
1651                 skb_put(skb, buf_len + HAL_RX_DESC_SIZE);
1652                 skb_pull(skb, HAL_RX_DESC_SIZE);
1653                 skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1654                                           buf_len);
1655                 dev_kfree_skb_any(skb);
1656
1657                 rem_len -= buf_len;
1658                 if (!rxcb->is_continuation)
1659                         break;
1660         }
1661
1662         return 0;
1663 }
1664
1665 static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1666                                                       struct sk_buff *first)
1667 {
1668         struct sk_buff *skb;
1669         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1670
1671         if (!rxcb->is_continuation)
1672                 return first;
1673
1674         skb_queue_walk(msdu_list, skb) {
1675                 rxcb = ATH11K_SKB_RXCB(skb);
1676                 if (!rxcb->is_continuation)
1677                         return skb;
1678         }
1679
1680         return NULL;
1681 }
1682
1683 static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu)
1684 {
1685         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1686         bool ip_csum_fail, l4_csum_fail;
1687
1688         ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc);
1689         l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc);
1690
1691         msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1692                           CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1693 }
1694
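/* Per-cipher lengths of the trailing MIC, the per-frame IV/PN header and
 * the trailing ICV. These drive how much the undecap helpers below strip
 * from, or re-insert into, a decrypted frame; ciphers not handled here
 * (WEP, WAPI) fall through to a warning and a length of 0.
 */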
1695 static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
1696                                        enum hal_encrypt_type enctype)
1697 {
1698         switch (enctype) {
1699         case HAL_ENCRYPT_TYPE_OPEN:
1700         case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1701         case HAL_ENCRYPT_TYPE_TKIP_MIC:
1702                 return 0;
1703         case HAL_ENCRYPT_TYPE_CCMP_128:
1704                 return IEEE80211_CCMP_MIC_LEN;
1705         case HAL_ENCRYPT_TYPE_CCMP_256:
1706                 return IEEE80211_CCMP_256_MIC_LEN;
1707         case HAL_ENCRYPT_TYPE_GCMP_128:
1708         case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1709                 return IEEE80211_GCMP_MIC_LEN;
1710         case HAL_ENCRYPT_TYPE_WEP_40:
1711         case HAL_ENCRYPT_TYPE_WEP_104:
1712         case HAL_ENCRYPT_TYPE_WEP_128:
1713         case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1714         case HAL_ENCRYPT_TYPE_WAPI:
1715                 break;
1716         }
1717
1718         ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1719         return 0;
1720 }
1721
1722 static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1723                                          enum hal_encrypt_type enctype)
1724 {
1725         switch (enctype) {
1726         case HAL_ENCRYPT_TYPE_OPEN:
1727                 return 0;
1728         case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1729         case HAL_ENCRYPT_TYPE_TKIP_MIC:
1730                 return IEEE80211_TKIP_IV_LEN;
1731         case HAL_ENCRYPT_TYPE_CCMP_128:
1732                 return IEEE80211_CCMP_HDR_LEN;
1733         case HAL_ENCRYPT_TYPE_CCMP_256:
1734                 return IEEE80211_CCMP_256_HDR_LEN;
1735         case HAL_ENCRYPT_TYPE_GCMP_128:
1736         case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1737                 return IEEE80211_GCMP_HDR_LEN;
1738         case HAL_ENCRYPT_TYPE_WEP_40:
1739         case HAL_ENCRYPT_TYPE_WEP_104:
1740         case HAL_ENCRYPT_TYPE_WEP_128:
1741         case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1742         case HAL_ENCRYPT_TYPE_WAPI:
1743                 break;
1744         }
1745
1746         ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1747         return 0;
1748 }
1749
1750 static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1751                                        enum hal_encrypt_type enctype)
1752 {
1753         switch (enctype) {
1754         case HAL_ENCRYPT_TYPE_OPEN:
1755         case HAL_ENCRYPT_TYPE_CCMP_128:
1756         case HAL_ENCRYPT_TYPE_CCMP_256:
1757         case HAL_ENCRYPT_TYPE_GCMP_128:
1758         case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1759                 return 0;
1760         case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1761         case HAL_ENCRYPT_TYPE_TKIP_MIC:
1762                 return IEEE80211_TKIP_ICV_LEN;
1763         case HAL_ENCRYPT_TYPE_WEP_40:
1764         case HAL_ENCRYPT_TYPE_WEP_104:
1765         case HAL_ENCRYPT_TYPE_WEP_128:
1766         case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1767         case HAL_ENCRYPT_TYPE_WAPI:
1768                 break;
1769         }
1770
1771         ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1772         return 0;
1773 }
1774
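/* Rebuild a full 802.11 QoS data frame from a native wifi decapped MSDU.
 * For the first MSDU the original 802.11 header from the rx descriptor is
 * reused with the A-MSDU-present bit cleared (each subframe is reported
 * as a separate MSDU); for middle/last MSDUs a QoS control field is
 * reconstructed from rxcb->tid (plus the mesh control bit if present) and
 * the saved decap header is pushed back in front of the payload. When the
 * hardware did not strip the IV, the crypto parameters are re-inserted.
 */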
1775 static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
1776                                          struct sk_buff *msdu,
1777                                          u8 *first_hdr,
1778                                          enum hal_encrypt_type enctype,
1779                                          struct ieee80211_rx_status *status)
1780 {
1781         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1782         u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
1783         struct ieee80211_hdr *hdr;
1784         size_t hdr_len;
1785         u8 da[ETH_ALEN];
1786         u8 sa[ETH_ALEN];
1787         u16 qos_ctl = 0;
1788         u8 *qos;
1789
1790         /* copy SA & DA and pull decapped header */
1791         hdr = (struct ieee80211_hdr *)msdu->data;
1792         hdr_len = ieee80211_hdrlen(hdr->frame_control);
1793         ether_addr_copy(da, ieee80211_get_DA(hdr));
1794         ether_addr_copy(sa, ieee80211_get_SA(hdr));
1795         skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
1796
1797         if (rxcb->is_first_msdu) {
1798                 /* The original 802.11 header is valid for the first msdu,
1799                  * hence we can reuse the same header.
1800                  */
1801                 hdr = (struct ieee80211_hdr *)first_hdr;
1802                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1803
1804                 /* Each A-MSDU subframe will be reported as a separate MSDU,
1805                  * so strip the A-MSDU bit from QoS Ctl.
1806                  */
1807                 if (ieee80211_is_data_qos(hdr->frame_control)) {
1808                         qos = ieee80211_get_qos_ctl(hdr);
1809                         qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1810                 }
1811         } else {
1812                 /* Rebuild the QoS header if this is a middle/last msdu */
1813                 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1814
1815                 /* Reset the order bit as the HT_Control header is stripped */
1816                 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
1817
1818                 qos_ctl = rxcb->tid;
1819
1820                 if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(rxcb->rx_desc))
1821                         qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
1822
1823                 /* TODO Add other QoS ctl fields when required */
1824
1825                 /* copy decap header before overwriting for reuse below */
1826                 memcpy(decap_hdr, hdr, hdr_len);
1827         }
1828
1829         if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1830                 memcpy(skb_push(msdu,
1831                                 ath11k_dp_rx_crypto_param_len(ar, enctype)),
1832                        (void *)hdr + hdr_len,
1833                        ath11k_dp_rx_crypto_param_len(ar, enctype));
1834         }
1835
1836         if (!rxcb->is_first_msdu) {
1837                 memcpy(skb_push(msdu,
1838                                 IEEE80211_QOS_CTL_LEN), &qos_ctl,
1839                                 IEEE80211_QOS_CTL_LEN);
1840                 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
1841                 return;
1842         }
1843
1844         memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1845
1846         /* original 802.11 header has a different DA and in
1847          * case of 4addr it may also have different SA
1848          */
1849         hdr = (struct ieee80211_hdr *)msdu->data;
1850         ether_addr_copy(ieee80211_get_DA(hdr), da);
1851         ether_addr_copy(ieee80211_get_SA(hdr), sa);
1852 }
1853
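/* Trim crypto trailers from a raw (fully 802.11) decapped frame. Only
 * non-A-MSDU frames (first and last MSDU in one) are expected here. The
 * FCS is always trimmed; MIC and ICV are trimmed according to the
 * RX_FLAG_*_STRIPPED flags reported to mac80211. The 8-byte TKIP Michael
 * MIC is trimmed using IEEE80211_CCMP_MIC_LEN, which happens to have the
 * same value. With RX_FLAG_IV_STRIPPED the IV is removed from the head by
 * moving the 802.11 header forward over it.
 */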
1854 static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
1855                                        enum hal_encrypt_type enctype,
1856                                        struct ieee80211_rx_status *status,
1857                                        bool decrypted)
1858 {
1859         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1860         struct ieee80211_hdr *hdr;
1861         size_t hdr_len;
1862         size_t crypto_len;
1863
1864         if (!(rxcb->is_first_msdu &&
1865               rxcb->is_last_msdu)) {
1866                 WARN_ON_ONCE(1);
1867                 return;
1868         }
1869
1870         skb_trim(msdu, msdu->len - FCS_LEN);
1871
1872         if (!decrypted)
1873                 return;
1874
1875         hdr = (void *)msdu->data;
1876
1877         /* Tail */
1878         if (status->flag & RX_FLAG_IV_STRIPPED) {
1879                 skb_trim(msdu, msdu->len -
1880                          ath11k_dp_rx_crypto_mic_len(ar, enctype));
1881
1882                 skb_trim(msdu, msdu->len -
1883                          ath11k_dp_rx_crypto_icv_len(ar, enctype));
1884         } else {
1885                 /* MIC */
1886                 if (status->flag & RX_FLAG_MIC_STRIPPED)
1887                         skb_trim(msdu, msdu->len -
1888                                  ath11k_dp_rx_crypto_mic_len(ar, enctype));
1889
1890                 /* ICV */
1891                 if (status->flag & RX_FLAG_ICV_STRIPPED)
1892                         skb_trim(msdu, msdu->len -
1893                                  ath11k_dp_rx_crypto_icv_len(ar, enctype));
1894         }
1895
1896         /* MMIC */
1897         if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
1898             !ieee80211_has_morefrags(hdr->frame_control) &&
1899             enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
1900                 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
1901
1902         /* Head */
1903         if (status->flag & RX_FLAG_IV_STRIPPED) {
1904                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1905                 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
1906
1907                 memmove((void *)msdu->data + crypto_len,
1908                         (void *)msdu->data, hdr_len);
1909                 skb_pull(msdu, crypto_len);
1910         }
1911 }
1912
1913 static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
1914                                          struct sk_buff *msdu,
1915                                          enum hal_encrypt_type enctype)
1916 {
1917         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1918         struct ieee80211_hdr *hdr;
1919         size_t hdr_len, crypto_len;
1920         void *rfc1042;
1921         bool is_amsdu;
1922
1923         is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
1924         hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc);
1925         rfc1042 = hdr;
1926
1927         if (rxcb->is_first_msdu) {
1928                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1929                 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
1930
1931                 rfc1042 += hdr_len + crypto_len;
1932         }
1933
1934         if (is_amsdu)
1935                 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
1936
1937         return rfc1042;
1938 }
1939
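/* Convert an Ethernet II decapped MSDU back into an 802.11 frame: the
 * ethhdr is replaced by the RFC 1042 / LLC-SNAP header recovered from the
 * rx descriptor, the original 802.11 header (plus crypto parameters when
 * the IV was not stripped) is pushed back, and the DA/SA taken from the
 * Ethernet header are written into the rebuilt 802.11 addresses.
 */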
1940 static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
1941                                        struct sk_buff *msdu,
1942                                        u8 *first_hdr,
1943                                        enum hal_encrypt_type enctype,
1944                                        struct ieee80211_rx_status *status)
1945 {
1946         struct ieee80211_hdr *hdr;
1947         struct ethhdr *eth;
1948         size_t hdr_len;
1949         u8 da[ETH_ALEN];
1950         u8 sa[ETH_ALEN];
1951         void *rfc1042;
1952
1953         rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
1954         if (WARN_ON_ONCE(!rfc1042))
1955                 return;
1956
1957         /* pull decapped header and copy SA & DA */
1958         eth = (struct ethhdr *)msdu->data;
1959         ether_addr_copy(da, eth->h_dest);
1960         ether_addr_copy(sa, eth->h_source);
1961         skb_pull(msdu, sizeof(struct ethhdr));
1962
1963         /* push rfc1042/llc/snap */
1964         memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
1965                sizeof(struct ath11k_dp_rfc1042_hdr));
1966
1967         /* push original 802.11 header */
1968         hdr = (struct ieee80211_hdr *)first_hdr;
1969         hdr_len = ieee80211_hdrlen(hdr->frame_control);
1970
1971         if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1972                 memcpy(skb_push(msdu,
1973                                 ath11k_dp_rx_crypto_param_len(ar, enctype)),
1974                        (void *)hdr + hdr_len,
1975                        ath11k_dp_rx_crypto_param_len(ar, enctype));
1976         }
1977
1978         memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1979
1980         /* original 802.11 header has a different DA and in
1981          * case of 4addr it may also have different SA
1982          */
1983         hdr = (struct ieee80211_hdr *)msdu->data;
1984         ether_addr_copy(ieee80211_get_DA(hdr), da);
1985         ether_addr_copy(ieee80211_get_SA(hdr), sa);
1986 }
1987
1988 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
1989                                    struct hal_rx_desc *rx_desc,
1990                                    enum hal_encrypt_type enctype,
1991                                    struct ieee80211_rx_status *status,
1992                                    bool decrypted)
1993 {
1994         u8 *first_hdr;
1995         u8 decap;
1996
1997         first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc);
1998         decap = ath11k_dp_rx_h_msdu_start_decap_type(rx_desc);
1999
2000         switch (decap) {
2001         case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2002                 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
2003                                              enctype, status);
2004                 break;
2005         case DP_RX_DECAP_TYPE_RAW:
2006                 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2007                                            decrypted);
2008                 break;
2009         case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2010                 /* TODO undecap support for middle/last msdu's of amsdu */
2011                 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2012                                            enctype, status);
2013                 break;
2014         case DP_RX_DECAP_TYPE_8023:
2015                 /* TODO: Handle undecap for these formats */
2016                 break;
2017         }
2018 }
2019
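/* Fill the mac80211 rx status crypto flags for one MPDU and undecap the
 * MSDU. The cipher is looked up from the peer entry matching addr2 (group
 * cipher for multicast). MPDU error bits from the attention TLV map to
 * RX_FLAG_FAILED_FCS_CRC and RX_FLAG_MMIC_ERROR. For unicast frames the
 * hardware-validated PN is trusted (RX_FLAG_PN_VALIDATED), the IV is
 * stripped and the protected bit cleared; multicast frames keep their
 * crypto header so mac80211 can perform the PN check itself.
 */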
2020 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
2021                                 struct sk_buff *msdu,
2022                                 struct hal_rx_desc *rx_desc,
2023                                 struct ieee80211_rx_status *rx_status)
2024 {
2025         bool fill_crypto_hdr, mcast;
2026         enum hal_encrypt_type enctype;
2027         bool is_decrypted = false;
2028         struct ieee80211_hdr *hdr;
2029         struct ath11k_peer *peer;
2030         u32 err_bitmap;
2031
2032         hdr = (struct ieee80211_hdr *)msdu->data;
2033
2034         /* PN for multicast packets will be checked in mac80211 */
2035
2036         mcast = is_multicast_ether_addr(hdr->addr1);
2037         fill_crypto_hdr = mcast;
2038
2039         is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
2040
2041         spin_lock_bh(&ar->ab->base_lock);
2042         peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2);
2043         if (peer) {
2044                 if (mcast)
2045                         enctype = peer->sec_type_grp;
2046                 else
2047                         enctype = peer->sec_type;
2048         } else {
2049                 enctype = HAL_ENCRYPT_TYPE_OPEN;
2050         }
2051         spin_unlock_bh(&ar->ab->base_lock);
2052
2053         err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);
2054
2055         /* Clear per-MPDU flags while leaving per-PPDU flags intact */
2056         rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2057                              RX_FLAG_MMIC_ERROR |
2058                              RX_FLAG_DECRYPTED |
2059                              RX_FLAG_IV_STRIPPED |
2060                              RX_FLAG_MMIC_STRIPPED);
2061
2062         if (err_bitmap & DP_RX_MPDU_ERR_FCS)
2063                 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2064         if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
2065                 rx_status->flag |= RX_FLAG_MMIC_ERROR;
2066
2067         if (is_decrypted) {
2068                 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2069
2070                 if (fill_crypto_hdr)
2071                         rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2072                                         RX_FLAG_ICV_STRIPPED;
2073                 else
2074                         rx_status->flag |= RX_FLAG_IV_STRIPPED |
2075                                            RX_FLAG_PN_VALIDATED;
2076         }
2077
2078         ath11k_dp_rx_h_csum_offload(msdu);
2079         ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
2080                                enctype, rx_status, is_decrypted);
2081
2082         if (!is_decrypted || fill_crypto_hdr)
2083                 return;
2084
2085         hdr = (void *)msdu->data;
2086         hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2087 }
2088
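/* Translate the hardware rate fields (pkt type, MCS, NSS, bandwidth,
 * short GI) into the mac80211 rx status encoding. Legacy 11a/11b rates
 * map through ath11k_mac_hw_rate_to_idx() (with the CCK flag for 11b);
 * for HT the rate index folds NSS in as rate_mcs + 8 * (nss - 1), so e.g.
 * MCS 5 at NSS 2 yields rate_idx 13.
 */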
2089 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2090                                 struct ieee80211_rx_status *rx_status)
2091 {
2092         struct ieee80211_supported_band *sband;
2093         enum rx_msdu_start_pkt_type pkt_type;
2094         u8 bw;
2095         u8 rate_mcs, nss;
2096         u8 sgi;
2097         bool is_cck;
2098
2099         pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc);
2100         bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc);
2101         rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc);
2102         nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc);
2103         sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc);
2104
2105         switch (pkt_type) {
2106         case RX_MSDU_START_PKT_TYPE_11A:
2107         case RX_MSDU_START_PKT_TYPE_11B:
2108                 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2109                 sband = &ar->mac.sbands[rx_status->band];
2110                 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
2111                                                                 is_cck);
2112                 break;
2113         case RX_MSDU_START_PKT_TYPE_11N:
2114                 rx_status->encoding = RX_ENC_HT;
2115                 if (rate_mcs > ATH11K_HT_MCS_MAX) {
2116                         ath11k_warn(ar->ab,
2117                                     "Received with invalid mcs in HT mode %d\n",
2118                                      rate_mcs);
2119                         break;
2120                 }
2121                 rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2122                 if (sgi)
2123                         rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2124                 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2125                 break;
2126         case RX_MSDU_START_PKT_TYPE_11AC:
2127                 rx_status->encoding = RX_ENC_VHT;
2128                 rx_status->rate_idx = rate_mcs;
2129                 if (rate_mcs > ATH11K_VHT_MCS_MAX) {
2130                         ath11k_warn(ar->ab,
2131                                     "Received with invalid mcs in VHT mode %d\n",
2132                                      rate_mcs);
2133                         break;
2134                 }
2135                 rx_status->nss = nss;
2136                 if (sgi)
2137                         rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2138                 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2139                 break;
2140         case RX_MSDU_START_PKT_TYPE_11AX:
2141                 rx_status->rate_idx = rate_mcs;
2142                 if (rate_mcs > ATH11K_HE_MCS_MAX) {
2143                         ath11k_warn(ar->ab,
2144                                     "Received with invalid mcs in HE mode %d\n",
2145                                     rate_mcs);
2146                         break;
2147                 }
2148                 rx_status->encoding = RX_ENC_HE;
2149                 rx_status->nss = nss;
2150                 rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
2151                 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2152                 break;
2153         }
2154 }
2155
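/* Derive band, frequency and rate information for the PPDU from the rx
 * descriptor. The band is inferred from the channel number (1-14 means
 * 2 GHz, 36-173 means 5 GHz); anything else falls back to the current rx
 * channel and dumps the descriptor for debugging. No signal strength is
 * available on this path, hence RX_FLAG_NO_SIGNAL_VAL.
 */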
2156 static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2157                                 struct ieee80211_rx_status *rx_status)
2158 {
2159         u8 channel_num;
2160
2161         rx_status->freq = 0;
2162         rx_status->rate_idx = 0;
2163         rx_status->nss = 0;
2164         rx_status->encoding = RX_ENC_LEGACY;
2165         rx_status->bw = RATE_INFO_BW_20;
2166
2167         rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2168
2169         channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
2170
2171         if (channel_num >= 1 && channel_num <= 14) {
2172                 rx_status->band = NL80211_BAND_2GHZ;
2173         } else if (channel_num >= 36 && channel_num <= 173) {
2174                 rx_status->band = NL80211_BAND_5GHZ;
2175         } else {
2176                 spin_lock_bh(&ar->data_lock);
2177                 rx_status->band = ar->rx_channel->band;
2178                 channel_num =
2179                         ieee80211_frequency_to_channel(ar->rx_channel->center_freq);
2180                 spin_unlock_bh(&ar->data_lock);
2181                 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
2182                                 rx_desc, sizeof(struct hal_rx_desc));
2183         }
2184
2185         rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2186                                                          rx_status->band);
2187
2188         ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
2189 }
2190
2191 static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
2192                                   size_t size)
2193 {
2194         u8 *qc;
2195         int tid;
2196
2197         if (!ieee80211_is_data_qos(hdr->frame_control))
2198                 return "";
2199
2200         qc = ieee80211_get_qos_ctl(hdr);
2201         tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
2202         snprintf(out, size, "tid %d", tid);
2203
2204         return out;
2205 }
2206
2207 static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
2208                                       struct sk_buff *msdu)
2209 {
2210         static const struct ieee80211_radiotap_he known = {
2211                 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2212                                      IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2213                 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2214         };
2215         struct ieee80211_rx_status *status;
2216         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
2217         struct ieee80211_radiotap_he *he = NULL;
2218         char tid[32];
2219
2220         status = IEEE80211_SKB_RXCB(msdu);
2221         if (status->encoding == RX_ENC_HE) {
2222                 he = skb_push(msdu, sizeof(known));
2223                 memcpy(he, &known, sizeof(known));
2224                 status->flag |= RX_FLAG_RADIOTAP_HE;
2225         }
2226
2227         ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
2228                    "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2229                    msdu,
2230                    msdu->len,
2231                    ieee80211_get_SA(hdr),
2232                    ath11k_print_get_tid(hdr, tid, sizeof(tid)),
2233                    is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
2234                                                         "mcast" : "ucast",
2235                    (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
2236                    (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2237                    (status->encoding == RX_ENC_HT) ? "ht" : "",
2238                    (status->encoding == RX_ENC_VHT) ? "vht" : "",
2239                    (status->encoding == RX_ENC_HE) ? "he" : "",
2240                    (status->bw == RATE_INFO_BW_40) ? "40" : "",
2241                    (status->bw == RATE_INFO_BW_80) ? "80" : "",
2242                    (status->bw == RATE_INFO_BW_160) ? "160" : "",
2243                    status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2244                    status->rate_idx,
2245                    status->nss,
2246                    status->freq,
2247                    status->band, status->flag,
2248                    !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2249                    !!(status->flag & RX_FLAG_MMIC_ERROR),
2250                    !!(status->flag & RX_FLAG_AMSDU_MORE));
2251
2252         /* TODO: trace rx packet */
2253
2254         ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
2255 }
2256
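/* Validate and prepare a single reaped MSDU for delivery: locate the last
 * buffer of the chain (only it holds valid attention/MSDU_END/MPDU_END
 * TLVs), check the msdu_done bit, then either pull the rx descriptor off
 * a fragment, trim a single-buffer MSDU to its reported length, or
 * coalesce a multi-buffer MSDU. Only data frames proceed to the rx status
 * population below.
 */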
2257 static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
2258                                      struct sk_buff *msdu,
2259                                      struct sk_buff_head *msdu_list)
2260 {
2261         struct hal_rx_desc *rx_desc, *lrx_desc;
2262         struct ieee80211_rx_status rx_status = {0};
2263         struct ieee80211_rx_status *status;
2264         struct ath11k_skb_rxcb *rxcb;
2265         struct ieee80211_hdr *hdr;
2266         struct sk_buff *last_buf;
2267         u8 l3_pad_bytes;
2268         u8 *hdr_status;
2269         u16 msdu_len;
2270         int ret;
2271
2272         last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2273         if (!last_buf) {
2274                 ath11k_warn(ar->ab,
2275                             "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
2276                 ret = -EIO;
2277                 goto free_out;
2278         }
2279
2280         rx_desc = (struct hal_rx_desc *)msdu->data;
2281         lrx_desc = (struct hal_rx_desc *)last_buf->data;
2282         if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) {
2283                 ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n");
2284                 ret = -EIO;
2285                 goto free_out;
2286         }
2287
2288         rxcb = ATH11K_SKB_RXCB(msdu);
2289         rxcb->rx_desc = rx_desc;
2290         msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
2291         l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc);
2292
2293         if (rxcb->is_frag) {
2294                 skb_pull(msdu, HAL_RX_DESC_SIZE);
2295         } else if (!rxcb->is_continuation) {
2296                 if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
2297                         hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
2298                         ret = -EINVAL;
2299                         ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
2300                         ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
2301                                         sizeof(struct ieee80211_hdr));
2302                         ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
2303                                         sizeof(struct hal_rx_desc));
2304                         goto free_out;
2305                 }
2306                 skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
2307                 skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes);
2308         } else {
2309                 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
2310                                                  msdu, last_buf,
2311                                                  l3_pad_bytes, msdu_len);
2312                 if (ret) {
2313                         ath11k_warn(ar->ab,
2314                                     "failed to coalesce msdu rx buffer%d\n", ret);
2315                         goto free_out;
2316                 }
2317         }
2318
2319         hdr = (struct ieee80211_hdr *)msdu->data;
2320
2321         /* Process only data frames */
2322         if (!ieee80211_is_data(hdr->frame_control))
2323                 return -EINVAL;
2324
2325         ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status);
2326         ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status);
2327
2328         rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2329
2330         status = IEEE80211_SKB_RXCB(msdu);
2331         *status = rx_status;
2332         return 0;
2333
2334 free_out:
2335         return ret;
2336 }
2337
2338 static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
2339                                                   struct napi_struct *napi,
2340                                                   struct sk_buff_head *msdu_list,
2341                                                   int *quota, int ring_id)
2342 {
2343         struct ath11k_skb_rxcb *rxcb;
2344         struct sk_buff *msdu;
2345         struct ath11k *ar;
2346         u8 mac_id;
2347         int ret;
2348
2349         if (skb_queue_empty(msdu_list))
2350                 return;
2351
2352         rcu_read_lock();
2353
2354         while (*quota && (msdu = __skb_dequeue(msdu_list))) {
2355                 rxcb = ATH11K_SKB_RXCB(msdu);
2356                 mac_id = rxcb->mac_id;
2357                 ar = ab->pdevs[mac_id].ar;
2358                 if (!rcu_dereference(ab->pdevs_active[mac_id])) {
2359                         dev_kfree_skb_any(msdu);
2360                         continue;
2361                 }
2362
2363                 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
2364                         dev_kfree_skb_any(msdu);
2365                         continue;
2366                 }
2367
2368                 ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list);
2369                 if (ret) {
2370                         ath11k_dbg(ab, ATH11K_DBG_DATA,
2371                                    "Unable to process msdu %d", ret);
2372                         dev_kfree_skb_any(msdu);
2373                         continue;
2374                 }
2375
2376                 ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
2377                 (*quota)--;
2378         }
2379
2380         rcu_read_unlock();
2381 }
2382
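/* NAPI poll for one REO destination ring. Each ring entry carries a sw
 * cookie encoding the pdev id and rx buffer id; the matching skb is
 * pulled out of the refill ring's IDR, DMA-unmapped and queued locally
 * until the ring is empty or the budget is consumed, stopping only on an
 * MSDU boundary (never inside a continuation chain). If the cached head
 * pointer went stale, the ring is re-read (try_again) so a complete MPDU
 * can be reaped in one pass. Reaped buffers are replenished per radio
 * before the queued MSDUs are processed and delivered.
 */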
2383 int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
2384                          struct napi_struct *napi, int budget)
2385 {
2386         struct ath11k_dp *dp = &ab->dp;
2387         struct dp_rxdma_ring *rx_ring;
2388         int num_buffs_reaped[MAX_RADIOS] = {0};
2389         struct sk_buff_head msdu_list;
2390         struct ath11k_skb_rxcb *rxcb;
2391         int total_msdu_reaped = 0;
2392         struct hal_srng *srng;
2393         struct sk_buff *msdu;
2394         int quota = budget;
2395         bool done = false;
2396         int buf_id, mac_id;
2397         struct ath11k *ar;
2398         u32 *rx_desc;
2399         int i;
2400
2401         __skb_queue_head_init(&msdu_list);
2402
2403         srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2404
2405         spin_lock_bh(&srng->lock);
2406
2407         ath11k_hal_srng_access_begin(ab, srng);
2408
2409 try_again:
2410         while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
2411                 struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
2412                 enum hal_reo_dest_ring_push_reason push_reason;
2413                 u32 cookie;
2414
2415                 cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
2416                                    desc->buf_addr_info.info1);
2417                 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
2418                                    cookie);
2419                 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
2420
2421                 ar = ab->pdevs[mac_id].ar;
2422                 rx_ring = &ar->dp.rx_refill_buf_ring;
2423                 spin_lock_bh(&rx_ring->idr_lock);
2424                 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
2425                 if (!msdu) {
2426                         ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
2427                                     buf_id);
2428                         spin_unlock_bh(&rx_ring->idr_lock);
2429                         continue;
2430                 }
2431
2432                 idr_remove(&rx_ring->bufs_idr, buf_id);
2433                 spin_unlock_bh(&rx_ring->idr_lock);
2434
2435                 rxcb = ATH11K_SKB_RXCB(msdu);
2436                 dma_unmap_single(ab->dev, rxcb->paddr,
2437                                  msdu->len + skb_tailroom(msdu),
2438                                  DMA_FROM_DEVICE);
2439
2440                 num_buffs_reaped[mac_id]++;
2441                 total_msdu_reaped++;
2442
2443                 push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
2444                                         desc->info0);
2445                 if (push_reason !=
2446                     HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
2447                         dev_kfree_skb_any(msdu);
2448                         ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
2449                         continue;
2450                 }
2451
2452                 rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
2453                                          RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2454                 rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
2455                                         RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2456                 rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
2457                                            RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2458                 rxcb->mac_id = mac_id;
2459                 rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
2460                                       desc->info0);
2461
2462                 __skb_queue_tail(&msdu_list, msdu);
2463
2464                 if (total_msdu_reaped >= quota && !rxcb->is_continuation) {
2465                         done = true;
2466                         break;
2467                 }
2468         }
2469
2470         /* Hw might have updated the head pointer after we cached it.
2471          * In this case, even though there are entries in the ring we'll
2472          * get rx_desc NULL. Give the read another try with updated cached
2473          * head pointer so that we can reap complete MPDU in the current
2474          * rx processing.
2475          */
2476         if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
2477                 ath11k_hal_srng_access_end(ab, srng);
2478                 goto try_again;
2479         }
2480
2481         ath11k_hal_srng_access_end(ab, srng);
2482
2483         spin_unlock_bh(&srng->lock);
2484
2485         if (!total_msdu_reaped)
2486                 goto exit;
2487
2488         for (i = 0; i < ab->num_radios; i++) {
2489                 if (!num_buffs_reaped[i])
2490                         continue;
2491
2492                 ar = ab->pdevs[i].ar;
2493                 rx_ring = &ar->dp.rx_refill_buf_ring;
2494
2495                 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
2496                                            HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
2497         }
2498
2499         ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
2500                                               &quota, ring_id);
2501
2502 exit:
2503         return budget - quota;
2504 }
2505
2506 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
2507                                            struct hal_rx_mon_ppdu_info *ppdu_info)
2508 {
2509         struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
2510         u32 num_msdu;
2511
2512         if (!rx_stats)
2513                 return;
2514
2515         num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
2516                    ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
2517
2518         rx_stats->num_msdu += num_msdu;
2519         rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
2520                                     ppdu_info->tcp_ack_msdu_count;
2521         rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
2522         rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
2523
2524         if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
2525             ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
2526                 ppdu_info->nss = 1;
2527                 ppdu_info->mcs = HAL_RX_MAX_MCS;
2528                 ppdu_info->tid = IEEE80211_NUM_TIDS;
2529         }
2530
2531         if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
2532                 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
2533
2534         if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
2535                 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
2536
2537         if (ppdu_info->gi < HAL_RX_GI_MAX)
2538                 rx_stats->gi_count[ppdu_info->gi] += num_msdu;
2539
2540         if (ppdu_info->bw < HAL_RX_BW_MAX)
2541                 rx_stats->bw_count[ppdu_info->bw] += num_msdu;
2542
2543         if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
2544                 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
2545
2546         if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
2547                 rx_stats->tid_count[ppdu_info->tid] += num_msdu;
2548
2549         if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
2550                 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
2551
2552         if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
2553                 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
2554
2555         if (ppdu_info->is_stbc)
2556                 rx_stats->stbc_count += num_msdu;
2557
2558         if (ppdu_info->beamformed)
2559                 rx_stats->beamformed_count += num_msdu;
2560
2561         if (ppdu_info->num_mpdu_fcs_ok > 1)
2562                 rx_stats->ampdu_msdu_count += num_msdu;
2563         else
2564                 rx_stats->non_ampdu_msdu_count += num_msdu;
2565
2566         rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
2567         rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
2568         rx_stats->dcm_count += ppdu_info->dcm;
2569         rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
2570
2571         arsta->rssi_comb = ppdu_info->rssi_comb;
2572         rx_stats->rx_duration += ppdu_info->rx_duration;
2573         arsta->rx_duration = rx_stats->rx_duration;
2574 }
2575
2576 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
2577                                                          struct dp_rxdma_ring *rx_ring,
2578                                                          int *buf_id, gfp_t gfp)
2579 {
2580         struct sk_buff *skb;
2581         dma_addr_t paddr;
2582
2583         skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
2584                             DP_RX_BUFFER_ALIGN_SIZE);
2585
2586         if (!skb)
2587                 goto fail_alloc_skb;
2588
2589         if (!IS_ALIGNED((unsigned long)skb->data,
2590                         DP_RX_BUFFER_ALIGN_SIZE)) {
2591                 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
2592                          skb->data);
2593         }
2594
2595         paddr = dma_map_single(ab->dev, skb->data,
2596                                skb->len + skb_tailroom(skb),
2597                                DMA_BIDIRECTIONAL);
2598         if (unlikely(dma_mapping_error(ab->dev, paddr)))
2599                 goto fail_free_skb;
2600
2601         spin_lock_bh(&rx_ring->idr_lock);
2602         *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
2603                             rx_ring->bufs_max, gfp);
2604         spin_unlock_bh(&rx_ring->idr_lock);
2605         if (*buf_id < 0)
2606                 goto fail_dma_unmap;
2607
2608         ATH11K_SKB_RXCB(skb)->paddr = paddr;
2609         return skb;
2610
2611 fail_dma_unmap:
2612         dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2613                          DMA_BIDIRECTIONAL);
2614 fail_free_skb:
2615         dev_kfree_skb_any(skb);
2616 fail_alloc_skb:
2617         return NULL;
2618 }
2619
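/* Prime the monitor status refill ring with up to @req_entries freshly
 * allocated, DMA-mapped status buffers, bounded by both rx_ring->bufs_max
 * and the free space currently available in the SRNG. Returns the number
 * of buffers actually placed on the ring.
 */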
2620 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
2621                                            struct dp_rxdma_ring *rx_ring,
2622                                            int req_entries,
2623                                            enum hal_rx_buf_return_buf_manager mgr,
2624                                            gfp_t gfp)
2625 {
2626         struct hal_srng *srng;
2627         u32 *desc;
2628         struct sk_buff *skb;
2629         int num_free;
2630         int num_remain;
2631         int buf_id;
2632         u32 cookie;
2633         dma_addr_t paddr;
2634
2635         req_entries = min(req_entries, rx_ring->bufs_max);
2636
2637         srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2638
2639         spin_lock_bh(&srng->lock);
2640
2641         ath11k_hal_srng_access_begin(ab, srng);
2642
2643         num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
2644
2645         req_entries = min(num_free, req_entries);
2646         num_remain = req_entries;
2647
2648         while (num_remain > 0) {
2649                 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2650                                                         &buf_id, gfp);
2651                 if (!skb)
2652                         break;
2653                 paddr = ATH11K_SKB_RXCB(skb)->paddr;
2654
2655                 desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
2656                 if (!desc)
2657                         goto fail_desc_get;
2658
2659                 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2660                          FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2661
2662                 num_remain--;
2663
2664                 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
2665         }
2666
2667         ath11k_hal_srng_access_end(ab, srng);
2668
2669         spin_unlock_bh(&srng->lock);
2670
2671         return req_entries - num_remain;
2672
2673 fail_desc_get:
2674         spin_lock_bh(&rx_ring->idr_lock);
2675         idr_remove(&rx_ring->bufs_idr, buf_id);
2676         spin_unlock_bh(&rx_ring->idr_lock);
2677         dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2678                          DMA_BIDIRECTIONAL);
2679         dev_kfree_skb_any(skb);
2680         ath11k_hal_srng_access_end(ab, srng);
2681         spin_unlock_bh(&srng->lock);
2682
2683         return req_entries - num_remain;
2684 }
2685
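/* Reap completed monitor status buffers from the rxdma status refill
 * ring. A buffer is handed to @skb_list only once its first TLV is
 * HAL_RX_STATUS_BUFFER_DONE, i.e. the hardware has finished writing it;
 * each slot that yields a finished buffer is re-armed with a freshly
 * allocated replacement before the ring pointer advances.
 */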
2686 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
2687                                              int *budget, struct sk_buff_head *skb_list)
2688 {
2689         struct ath11k *ar = ab->pdevs[mac_id].ar;
2690         struct ath11k_pdev_dp *dp = &ar->dp;
2691         struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring;
2692         struct hal_srng *srng;
2693         void *rx_mon_status_desc;
2694         struct sk_buff *skb;
2695         struct ath11k_skb_rxcb *rxcb;
2696         struct hal_tlv_hdr *tlv;
2697         u32 cookie;
2698         int buf_id;
2699         dma_addr_t paddr;
2700         u8 rbm;
2701         int num_buffs_reaped = 0;
2702
2703         srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2704
2705         spin_lock_bh(&srng->lock);
2706
2707         ath11k_hal_srng_access_begin(ab, srng);
2708         while (*budget) {
2709                 *budget -= 1;
2710                 rx_mon_status_desc =
2711                         ath11k_hal_srng_src_peek(ab, srng);
2712                 if (!rx_mon_status_desc)
2713                         break;
2714
2715                 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
2716                                                 &cookie, &rbm);
2717                 if (paddr) {
2718                         buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
2719
2720                         spin_lock_bh(&rx_ring->idr_lock);
2721                         skb = idr_find(&rx_ring->bufs_idr, buf_id);
2722                         if (!skb) {
2723                                 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
2724                                             buf_id);
2725                                 spin_unlock_bh(&rx_ring->idr_lock);
2726                                 continue;
2727                         }
2728
2729                         idr_remove(&rx_ring->bufs_idr, buf_id);
2730                         spin_unlock_bh(&rx_ring->idr_lock);
2731
2732                         rxcb = ATH11K_SKB_RXCB(skb);
2733
2734                         dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
2735                                                 skb->len + skb_tailroom(skb),
2736                                                 DMA_FROM_DEVICE);
2737
2738                         dma_unmap_single(ab->dev, rxcb->paddr,
2739                                          skb->len + skb_tailroom(skb),
2740                                          DMA_BIDIRECTIONAL);
2741
2742                         tlv = (struct hal_tlv_hdr *)skb->data;
2743                         if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
2744                                         HAL_RX_STATUS_BUFFER_DONE) {
2745                                 ath11k_hal_srng_src_get_next_entry(ab, srng);
2746                                 continue;
2747                         }
2748
2749                         __skb_queue_tail(skb_list, skb);
2750                 }
2751
2752                 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2753                                                         &buf_id, GFP_ATOMIC);
2754
2755                 if (!skb) {
2756                         ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
2757                                                         HAL_RX_BUF_RBM_SW3_BM);
2758                         num_buffs_reaped++;
2759                         break;
2760                 }
2761                 rxcb = ATH11K_SKB_RXCB(skb);
2762
2763                 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2764                          FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2765
2766                 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
2767                                                 cookie, HAL_RX_BUF_RBM_SW3_BM);
2768                 ath11k_hal_srng_src_get_next_entry(ab, srng);
2769                 num_buffs_reaped++;
2770         }
2771         ath11k_hal_srng_access_end(ab, srng);
2772         spin_unlock_bh(&srng->lock);
2773
2774         return num_buffs_reaped;
2775 }
2776
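/* NAPI handler for the monitor status ring: reap status buffers, parse
 * each into a hal_rx_mon_ppdu_info and, for a completed PPDU with a known
 * peer, update that station's rx statistics. Status skbs are consumed here.
 */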
2777 int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
2778                                     struct napi_struct *napi, int budget)
2779 {
2780         struct ath11k *ar = ab->pdevs[mac_id].ar;
2781         enum hal_rx_mon_status hal_status;
2782         struct sk_buff *skb;
2783         struct sk_buff_head skb_list;
2784         struct hal_rx_mon_ppdu_info ppdu_info;
2785         struct ath11k_peer *peer;
2786         struct ath11k_sta *arsta;
2787         int num_buffs_reaped = 0;
2788
2789         __skb_queue_head_init(&skb_list);
2790
2791         num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
2792                                                              &skb_list);
2793         if (!num_buffs_reaped)
2794                 goto exit;
2795
2796         while ((skb = __skb_dequeue(&skb_list))) {
2797                 memset(&ppdu_info, 0, sizeof(ppdu_info));
2798                 ppdu_info.peer_id = HAL_INVALID_PEERID;
2799
2800                 if (ath11k_debug_is_pktlog_rx_stats_enabled(ar))
2801                         trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
2802
2803                 hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
2804
2805                 if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
2806                     hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
2807                         dev_kfree_skb_any(skb);
2808                         continue;
2809                 }
2810
2811                 rcu_read_lock();
2812                 spin_lock_bh(&ab->base_lock);
2813                 peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);
2814
2815                 if (!peer || !peer->sta) {
2816                         ath11k_dbg(ab, ATH11K_DBG_DATA,
2817                                    "failed to find the peer with peer_id %d\n",
2818                                    ppdu_info.peer_id);
2819                         spin_unlock_bh(&ab->base_lock);
2820                         rcu_read_unlock();
2821                         dev_kfree_skb_any(skb);
2822                         continue;
2823                 }
2824
2825                 arsta = (struct ath11k_sta *)peer->sta->drv_priv;
2826                 ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
2827
2828                 if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr))
2829                         trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
2830
2831                 spin_unlock_bh(&ab->base_lock);
2832                 rcu_read_unlock();
2833
2834                 dev_kfree_skb_any(skb);
2835         }
2836 exit:
2837         return num_buffs_reaped;
2838 }
2839
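/* Reassembly timeout handler: if the fragment sequence for this TID is
 * still incomplete when the timer fires, flush all queued fragments.
 */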
2840 static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
2841 {
2842         struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
2843
2844         spin_lock_bh(&rx_tid->ab->base_lock);
2845         if (rx_tid->last_frag_no &&
2846             rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
2847                 spin_unlock_bh(&rx_tid->ab->base_lock);
2848                 return;
2849         }
2850         ath11k_dp_rx_frags_cleanup(rx_tid, true);
2851         spin_unlock_bh(&rx_tid->ab->base_lock);
2852 }
2853
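/* Prepare a peer for rx defragmentation: allocate a Michael MIC transform
 * for TKIP verification and initialize the per-TID fragment queues and
 * reassembly timers.
 */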
2854 int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
2855 {
2856         struct ath11k_base *ab = ar->ab;
2857         struct crypto_shash *tfm;
2858         struct ath11k_peer *peer;
2859         struct dp_rx_tid *rx_tid;
2860         int i;
2861
2862         tfm = crypto_alloc_shash("michael_mic", 0, 0);
2863         if (IS_ERR(tfm))
2864                 return PTR_ERR(tfm);
2865
2866         spin_lock_bh(&ab->base_lock);
2867
2868         peer = ath11k_peer_find(ab, vdev_id, peer_mac);
2869         if (!peer) {
2870                 ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
2871                 spin_unlock_bh(&ab->base_lock);
2872                 return -ENOENT;
2873         }
2874
2875         for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
2876                 rx_tid = &peer->rx_tid[i];
2877                 rx_tid->ab = ab;
2878                 timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
2879                 skb_queue_head_init(&rx_tid->rx_frags);
2880         }
2881
2882         peer->tfm_mmic = tfm;
2883         spin_unlock_bh(&ab->base_lock);
2884
2885         return 0;
2886 }
2887
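/* Compute the TKIP Michael MIC over the MSDU payload with the given 8 byte
 * MIC key. The 16 byte pseudo header fed in first is DA (6), SA (6),
 * priority/TID (1) and three zero padding bytes, as defined for the TKIP
 * Michael MIC.
 */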
2888 static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
2889                                       struct ieee80211_hdr *hdr, u8 *data,
2890                                       size_t data_len, u8 *mic)
2891 {
2892         SHASH_DESC_ON_STACK(desc, tfm);
2893         u8 mic_hdr[16] = {0};
2894         u8 tid = 0;
2895         int ret;
2896
2897         if (!tfm)
2898                 return -EINVAL;
2899
2900         desc->tfm = tfm;
2901
2902         ret = crypto_shash_setkey(tfm, key, 8);
2903         if (ret)
2904                 goto out;
2905
2906         ret = crypto_shash_init(desc);
2907         if (ret)
2908                 goto out;
2909
2910         /* TKIP MIC header */
2911         memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
2912         memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
2913         if (ieee80211_is_data_qos(hdr->frame_control))
2914                 tid = ieee80211_get_tid(hdr);
2915         mic_hdr[12] = tid;
2916
2917         ret = crypto_shash_update(desc, mic_hdr, 16);
2918         if (ret)
2919                 goto out;
2920         ret = crypto_shash_update(desc, data, data_len);
2921         if (ret)
2922                 goto out;
2923         ret = crypto_shash_final(desc, mic);
2924 out:
2925         shash_desc_zero(desc);
2926         return ret;
2927 }
2928
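/* Verify the Michael MIC of a reassembled TKIP MPDU. On failure the frame
 * is still passed up to mac80211 with RX_FLAG_MMIC_ERROR set so TKIP
 * countermeasures can run, and -EINVAL is returned to the caller.
 */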
2929 static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
2930                                           struct sk_buff *msdu)
2931 {
2932         struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
2933         struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
2934         struct ieee80211_key_conf *key_conf;
2935         struct ieee80211_hdr *hdr;
2936         u8 mic[IEEE80211_CCMP_MIC_LEN];
2937         int head_len, tail_len, ret;
2938         size_t data_len;
2939         u32 hdr_len;
2940         u8 *key, *data;
2941         u8 key_idx;
2942
2943         if (ath11k_dp_rx_h_mpdu_start_enctype(rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
2944                 return 0;
2945
2946         hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);
2947         hdr_len = ieee80211_hdrlen(hdr->frame_control);
2948         head_len = hdr_len + HAL_RX_DESC_SIZE + IEEE80211_TKIP_IV_LEN;
2949         tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
2950
2951         if (!is_multicast_ether_addr(hdr->addr1))
2952                 key_idx = peer->ucast_keyidx;
2953         else
2954                 key_idx = peer->mcast_keyidx;
2955
2956         key_conf = peer->keys[key_idx];
2957
2958         data = msdu->data + head_len;
2959         data_len = msdu->len - head_len - tail_len;
2960         key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
2961
2962         ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
2963         if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
2964                 goto mic_fail;
2965
2966         return 0;
2967
2968 mic_fail:
2969         (ATH11K_SKB_RXCB(msdu))->is_first_msdu = 1;
2970         (ATH11K_SKB_RXCB(msdu))->is_last_msdu = 1;
2971
2972         rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
2973                     RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
2974         skb_pull(msdu, HAL_RX_DESC_SIZE);
2975
2976         ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
2977         ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
2978                                HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
2979         ieee80211_rx(ar->hw, msdu);
2980         return -EINVAL;
2981 }
2982
2983 static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
2984                                         enum hal_encrypt_type enctype, u32 flags)
2985 {
2986         struct ieee80211_hdr *hdr;
2987         size_t hdr_len;
2988         size_t crypto_len;
2989
2990         if (!flags)
2991                 return;
2992
2993         hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);
2994
2995         if (flags & RX_FLAG_MIC_STRIPPED)
2996                 skb_trim(msdu, msdu->len -
2997                          ath11k_dp_rx_crypto_mic_len(ar, enctype));
2998
2999         if (flags & RX_FLAG_ICV_STRIPPED)
3000                 skb_trim(msdu, msdu->len -
3001                          ath11k_dp_rx_crypto_icv_len(ar, enctype));
3002
3003         if (flags & RX_FLAG_IV_STRIPPED) {
3004                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
3005                 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
3006
3007                 memmove((void *)msdu->data + HAL_RX_DESC_SIZE + crypto_len,
3008                         (void *)msdu->data + HAL_RX_DESC_SIZE, hdr_len);
3009                 skb_pull(msdu, crypto_len);
3010         }
3011 }
3012
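/* Merge the sorted fragment queue into a single MPDU: trim per-fragment
 * FCS and stripped crypto trailers, drop the duplicate 802.11 headers of
 * non-first fragments, grow the first fragment's buffer if the total MSDU
 * does not fit, then append the remaining fragments to it. The morefrags
 * bit is cleared and the TKIP MIC verified before *defrag_skb is set.
 */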
3013 static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
3014                                  struct ath11k_peer *peer,
3015                                  struct dp_rx_tid *rx_tid,
3016                                  struct sk_buff **defrag_skb)
3017 {
3018         struct hal_rx_desc *rx_desc;
3019         struct sk_buff *skb, *first_frag, *last_frag;
3020         struct ieee80211_hdr *hdr;
3021         enum hal_encrypt_type enctype;
3022         bool is_decrypted = false;
3023         int msdu_len = 0;
3024         int extra_space;
3025         u32 flags;
3026
3027         first_frag = skb_peek(&rx_tid->rx_frags);
3028         last_frag = skb_peek_tail(&rx_tid->rx_frags);
3029
3030         skb_queue_walk(&rx_tid->rx_frags, skb) {
3031                 flags = 0;
3032                 rx_desc = (struct hal_rx_desc *)skb->data;
3033                 hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
3034
3035                 enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
3036                 if (enctype != HAL_ENCRYPT_TYPE_OPEN)
3037                         is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
3038
3039                 if (is_decrypted) {
3040                         if (skb != first_frag)
3041                                 flags |= RX_FLAG_IV_STRIPPED;
3042                         if (skb != last_frag)
3043                                 flags |= RX_FLAG_ICV_STRIPPED |
3044                                          RX_FLAG_MIC_STRIPPED;
3045                 }
3046
3047                 /* RX fragments are always raw packets */
3048                 if (skb != last_frag)
3049                         skb_trim(skb, skb->len - FCS_LEN);
3050                 ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
3051
3052                 if (skb != first_frag)
3053                         skb_pull(skb, HAL_RX_DESC_SIZE +
3054                                       ieee80211_hdrlen(hdr->frame_control));
3055                 msdu_len += skb->len;
3056         }
3057
3058         extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
3059         if (extra_space > 0 &&
3060             (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
3061                 return -ENOMEM;
3062
3063         __skb_unlink(first_frag, &rx_tid->rx_frags);
3064         while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
3065                 skb_put_data(first_frag, skb->data, skb->len);
3066                 dev_kfree_skb_any(skb);
3067         }
3068
3069         hdr = (struct ieee80211_hdr *)(first_frag->data + HAL_RX_DESC_SIZE);
3070         hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
3071         ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
3072
3073         if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
3074                 first_frag = NULL;
3075
3076         *defrag_skb = first_frag;
3077         return 0;
3078 }
3079
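/* Reinject a defragmented MPDU back to hardware so it goes through normal
 * REO classification: rewrite the saved MSDU link descriptor to describe
 * the single rebuilt MSDU, DMA-map the skb into the rx refill idr and
 * queue a matching MPDU entry on the REO entrance (reinject) ring.
 */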
3080 static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
3081                                               struct sk_buff *defrag_skb)
3082 {
3083         struct ath11k_base *ab = ar->ab;
3084         struct ath11k_pdev_dp *dp = &ar->dp;
3085         struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
3086         struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
3087         struct hal_reo_entrance_ring *reo_ent_ring;
3088         struct hal_reo_dest_ring *reo_dest_ring;
3089         struct dp_link_desc_bank *link_desc_banks;
3090         struct hal_rx_msdu_link *msdu_link;
3091         struct hal_rx_msdu_details *msdu0;
3092         struct hal_srng *srng;
3093         dma_addr_t paddr;
3094         u32 desc_bank, msdu_info, mpdu_info;
3095         u32 dst_idx, cookie;
3096         u32 *msdu_len_offset;
3097         int ret, buf_id;
3098
3099         link_desc_banks = ab->dp.link_desc_banks;
3100         reo_dest_ring = rx_tid->dst_ring_desc;
3101
3102         ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3103         msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
3104                         (paddr - link_desc_banks[desc_bank].paddr));
3105         msdu0 = &msdu_link->msdu_link[0];
3106         dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
3107         memset(msdu0, 0, sizeof(*msdu0));
3108
3109         msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
3110                     FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
3111                     FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
3112                     FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
3113                                defrag_skb->len - HAL_RX_DESC_SIZE) |
3114                     FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
3115                     FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
3116                     FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
3117         msdu0->rx_msdu_info.info0 = msdu_info;
3118
3119         /* change msdu len in hal rx desc */
3120         msdu_len_offset = (u32 *)&rx_desc->msdu_start;
3121         *msdu_len_offset &= ~(RX_MSDU_START_INFO1_MSDU_LENGTH);
3122         *msdu_len_offset |= defrag_skb->len - HAL_RX_DESC_SIZE;
3123
3124         paddr = dma_map_single(ab->dev, defrag_skb->data,
3125                                defrag_skb->len + skb_tailroom(defrag_skb),
3126                                DMA_FROM_DEVICE);
3127         if (dma_mapping_error(ab->dev, paddr))
3128                 return -ENOMEM;
3129
3130         spin_lock_bh(&rx_refill_ring->idr_lock);
3131         buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
3132                            rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
3133         spin_unlock_bh(&rx_refill_ring->idr_lock);
3134         if (buf_id < 0) {
3135                 ret = -ENOMEM;
3136                 goto err_unmap_dma;
3137         }
3138
3139         ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
3140         cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
3141                  FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3142
3143         ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM);
3144
3145         /* Fill mpdu details into reo entrance ring */
3146         srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
3147
3148         spin_lock_bh(&srng->lock);
3149         ath11k_hal_srng_access_begin(ab, srng);
3150
3151         reo_ent_ring = (struct hal_reo_entrance_ring *)
3152                         ath11k_hal_srng_src_get_next_entry(ab, srng);
3153         if (!reo_ent_ring) {
3154                 ath11k_hal_srng_access_end(ab, srng);
3155                 spin_unlock_bh(&srng->lock);
3156                 ret = -ENOSPC;
3157                 goto err_free_idr;
3158         }
3159         memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3160
3161         ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3162         ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
3163                                         HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
3164
3165         mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
3166                     FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
3167                     FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
3168                     FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
3169                     FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
3170                     FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
3171                     FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);
3172
3173         reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
3174         reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
3175         reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
3176         reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
3177                                          FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
3178                                                    reo_dest_ring->info0)) |
3179                               FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
3180         ath11k_hal_srng_access_end(ab, srng);
3181         spin_unlock_bh(&srng->lock);
3182
3183         return 0;
3184
3185 err_free_idr:
3186         spin_lock_bh(&rx_refill_ring->idr_lock);
3187         idr_remove(&rx_refill_ring->bufs_idr, buf_id);
3188         spin_unlock_bh(&rx_refill_ring->idr_lock);
3189 err_unmap_dma:
3190         dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3191                          DMA_FROM_DEVICE);
3192         return ret;
3193 }
3194
3195 static int ath11k_dp_rx_h_cmp_frags(struct sk_buff *a, struct sk_buff *b)
3196 {
3197         int frag1, frag2;
3198
3199         frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(a);
3200         frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(b);
3201
3202         return frag1 - frag2;
3203 }
3204
3205 static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list,
3206                                       struct sk_buff *cur_frag)
3207 {
3208         struct sk_buff *skb;
3209         int cmp;
3210
3211         skb_queue_walk(frag_list, skb) {
3212                 cmp = ath11k_dp_rx_h_cmp_frags(skb, cur_frag);
3213                 if (cmp < 0)
3214                         continue;
3215                 __skb_queue_before(frag_list, skb, cur_frag);
3216                 return;
3217         }
3218         __skb_queue_tail(frag_list, cur_frag);
3219 }
3220
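/* Extract the 48-bit CCMP/GCMP packet number from the security header that
 * follows the 802.11 header: bytes 0-1 hold PN0/PN1, bytes 2-3 are the
 * reserved and key id octets (skipped), and bytes 4-7 hold PN2-PN5.
 */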
3221 static u64 ath11k_dp_rx_h_get_pn(struct sk_buff *skb)
3222 {
3223         struct ieee80211_hdr *hdr;
3224         u64 pn = 0;
3225         u8 *ehdr;
3226
3227         hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
3228         ehdr = skb->data + HAL_RX_DESC_SIZE + ieee80211_hdrlen(hdr->frame_control);
3229
3230         pn = ehdr[0];
3231         pn |= (u64)ehdr[1] << 8;
3232         pn |= (u64)ehdr[4] << 16;
3233         pn |= (u64)ehdr[5] << 24;
3234         pn |= (u64)ehdr[6] << 32;
3235         pn |= (u64)ehdr[7] << 40;
3236
3237         return pn;
3238 }
3239
3240 static bool
3241 ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
3242 {
3243         enum hal_encrypt_type encrypt_type;
3244         struct sk_buff *first_frag, *skb;
3245         struct hal_rx_desc *desc;
3246         u64 last_pn;
3247         u64 cur_pn;
3248
3249         first_frag = skb_peek(&rx_tid->rx_frags);
3250         desc = (struct hal_rx_desc *)first_frag->data;
3251
3252         encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(desc);
3253         if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3254             encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3255             encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3256             encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3257                 return true;
3258
3259         last_pn = ath11k_dp_rx_h_get_pn(first_frag);
3260         skb_queue_walk(&rx_tid->rx_frags, skb) {
3261                 if (skb == first_frag)
3262                         continue;
3263
3264                 cur_pn = ath11k_dp_rx_h_get_pn(skb);
3265                 if (cur_pn != last_pn + 1)
3266                         return false;
3267                 last_pn = cur_pn;
3268         }
3269         return true;
3270 }
3271
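/* Handle one rx fragment from the REO exception path: validate its header
 * fields, insert it in fragment-number order on the TID reassembly queue
 * and (re)arm the reassembly timer. Once rx_frag_bitmap covers the whole
 * sequence, the PNs are checked for being consecutive and the MPDU is
 * defragmented and reinjected via the REO entrance ring.
 */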
3272 static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
3273                                     struct sk_buff *msdu,
3274                                     u32 *ring_desc)
3275 {
3276         struct ath11k_base *ab = ar->ab;
3277         struct hal_rx_desc *rx_desc;
3278         struct ath11k_peer *peer;
3279         struct dp_rx_tid *rx_tid;
3280         struct sk_buff *defrag_skb = NULL;
3281         u32 peer_id;
3282         u16 seqno, frag_no;
3283         u8 tid;
3284         int ret = 0;
3285         bool more_frags;
3286
3287         rx_desc = (struct hal_rx_desc *)msdu->data;
3288         peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(rx_desc);
3289         tid = ath11k_dp_rx_h_mpdu_start_tid(rx_desc);
3290         seqno = ath11k_dp_rx_h_mpdu_start_seq_no(rx_desc);
3291         frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(msdu);
3292         more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(msdu);
3293
3294         if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(rx_desc) ||
3295             !ath11k_dp_rx_h_mpdu_start_fc_valid(rx_desc) ||
3296             tid > IEEE80211_NUM_TIDS)
3297                 return -EINVAL;
3298
3299         /* Received an unfragmented packet in the REO
3300          * exception ring; this shouldn't happen, as
3301          * such packets typically come from the
3302          * reo2sw srngs.
3303          */
3304         if (WARN_ON_ONCE(!frag_no && !more_frags))
3305                 return -EINVAL;
3306
3307         spin_lock_bh(&ab->base_lock);
3308         peer = ath11k_peer_find_by_id(ab, peer_id);
3309         if (!peer) {
3310                 ath11k_warn(ab, "failed to find the peer to defragment the received fragment, peer_id %d\n",
3311                             peer_id);
3312                 ret = -ENOENT;
3313                 goto out_unlock;
3314         }
3315         rx_tid = &peer->rx_tid[tid];
3316
3317         if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3318             skb_queue_empty(&rx_tid->rx_frags)) {
3319                 /* Flush stored fragments and start a new sequence */
3320                 ath11k_dp_rx_frags_cleanup(rx_tid, true);
3321                 rx_tid->cur_sn = seqno;
3322         }
3323
3324         if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3325                 /* Fragment already present */
3326                 ret = -EINVAL;
3327                 goto out_unlock;
3328         }
3329
3330         if (!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))
3331                 __skb_queue_tail(&rx_tid->rx_frags, msdu);
3332         else
3333                 ath11k_dp_rx_h_sort_frags(&rx_tid->rx_frags, msdu);
3334
3335         rx_tid->rx_frag_bitmap |= BIT(frag_no);
3336         if (!more_frags)
3337                 rx_tid->last_frag_no = frag_no;
3338
3339         if (frag_no == 0) {
3340                 rx_tid->dst_ring_desc = kmemdup(ring_desc,
3341                                                 sizeof(*rx_tid->dst_ring_desc),
3342                                                 GFP_ATOMIC);
3343                 if (!rx_tid->dst_ring_desc) {
3344                         ret = -ENOMEM;
3345                         goto out_unlock;
3346                 }
3347         } else {
3348                 ath11k_dp_rx_link_desc_return(ab, ring_desc,
3349                                               HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3350         }
3351
3352         if (!rx_tid->last_frag_no ||
3353             rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3354                 mod_timer(&rx_tid->frag_timer, jiffies +
3355                                                ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
3356                 goto out_unlock;
3357         }
3358
3359         spin_unlock_bh(&ab->base_lock);
3360         del_timer_sync(&rx_tid->frag_timer);
3361         spin_lock_bh(&ab->base_lock);
3362
3363         peer = ath11k_peer_find_by_id(ab, peer_id);
3364         if (!peer)
3365                 goto err_frags_cleanup;
3366
3367         if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3368                 goto err_frags_cleanup;
3369
3370         if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3371                 goto err_frags_cleanup;
3372
3373         if (!defrag_skb)
3374                 goto err_frags_cleanup;
3375
3376         if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3377                 goto err_frags_cleanup;
3378
3379         ath11k_dp_rx_frags_cleanup(rx_tid, false);
3380         goto out_unlock;
3381
3382 err_frags_cleanup:
3383         dev_kfree_skb_any(defrag_skb);
3384         ath11k_dp_rx_frags_cleanup(rx_tid, true);
3385 out_unlock:
3386         spin_unlock_bh(&ab->base_lock);
3387         return ret;
3388 }
3389
3390 static int
3391 ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
3392 {
3393         struct ath11k_pdev_dp *dp = &ar->dp;
3394         struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
3395         struct sk_buff *msdu;
3396         struct ath11k_skb_rxcb *rxcb;
3397         struct hal_rx_desc *rx_desc;
3398         u8 *hdr_status;
3399         u16 msdu_len;
3400
3401         spin_lock_bh(&rx_ring->idr_lock);
3402         msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3403         if (!msdu) {
3404                 ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
3405                             buf_id);
3406                 spin_unlock_bh(&rx_ring->idr_lock);
3407                 return -EINVAL;
3408         }
3409
3410         idr_remove(&rx_ring->bufs_idr, buf_id);
3411         spin_unlock_bh(&rx_ring->idr_lock);
3412
3413         rxcb = ATH11K_SKB_RXCB(msdu);
3414         dma_unmap_single(ar->ab->dev, rxcb->paddr,
3415                          msdu->len + skb_tailroom(msdu),
3416                          DMA_FROM_DEVICE);
3417
3418         if (drop) {
3419                 dev_kfree_skb_any(msdu);
3420                 return 0;
3421         }
3422
3423         rcu_read_lock();
3424         if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3425                 dev_kfree_skb_any(msdu);
3426                 goto exit;
3427         }
3428
3429         if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3430                 dev_kfree_skb_any(msdu);
3431                 goto exit;
3432         }
3433
3434         rx_desc = (struct hal_rx_desc *)msdu->data;
3435         msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
3436         if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
3437                 hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
3438                 ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
3439                 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
3440                                 sizeof(struct ieee80211_hdr));
3441                 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
3442                                 sizeof(struct hal_rx_desc));
3443                 dev_kfree_skb_any(msdu);
3444                 goto exit;
3445         }
3446
3447         skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);
3448
3449         if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
3450                 dev_kfree_skb_any(msdu);
3451                 ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
3452                                               HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3453         }
3454 exit:
3455         rcu_read_unlock();
3456         return 0;
3457 }
3458
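/* NAPI handler for the REO exception ring. Only rx fragments carried as a
 * single MSDU per link descriptor are sent to the defrag path; all other
 * MSDUs are dropped and their link descriptors returned to the WBM idle
 * list. Reaped buffers are replenished per pdev before returning.
 */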
3459 int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
3460                              int budget)
3461 {
3462         u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3463         struct dp_link_desc_bank *link_desc_banks;
3464         enum hal_rx_buf_return_buf_manager rbm;
3465         int tot_n_bufs_reaped, quota, ret, i;
3466         int n_bufs_reaped[MAX_RADIOS] = {0};
3467         struct dp_rxdma_ring *rx_ring;
3468         struct dp_srng *reo_except;
3469         u32 desc_bank, num_msdus;
3470         struct hal_srng *srng;
3471         struct ath11k_dp *dp;
3472         void *link_desc_va;
3473         int buf_id, mac_id;
3474         struct ath11k *ar;
3475         dma_addr_t paddr;
3476         u32 *desc;
3477         bool is_frag;
3478         u8 drop = 0;
3479
3480         tot_n_bufs_reaped = 0;
3481         quota = budget;
3482
3483         dp = &ab->dp;
3484         reo_except = &dp->reo_except_ring;
3485         link_desc_banks = dp->link_desc_banks;
3486
3487         srng = &ab->hal.srng_list[reo_except->ring_id];
3488
3489         spin_lock_bh(&srng->lock);
3490
3491         ath11k_hal_srng_access_begin(ab, srng);
3492
3493         while (budget &&
3494                (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3495                 struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
3496
3497                 ab->soc_stats.err_ring_pkts++;
3498                 ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
3499                                                     &desc_bank);
3500                 if (ret) {
3501                         ath11k_warn(ab, "failed to parse error reo desc %d\n",
3502                                     ret);
3503                         continue;
3504                 }
3505                 link_desc_va = link_desc_banks[desc_bank].vaddr +
3506                                (paddr - link_desc_banks[desc_bank].paddr);
3507                 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3508                                                  &rbm);
3509                 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
3510                     rbm != HAL_RX_BUF_RBM_SW3_BM) {
3511                         ab->soc_stats.invalid_rbm++;
3512                         ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
3513                         ath11k_dp_rx_link_desc_return(ab, desc,
3514                                                       HAL_WBM_REL_BM_ACT_REL_MSDU);
3515                         continue;
3516                 }
3517
3518                 is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
3519
3520                 /* Process only rx fragments with one msdu per link desc below, and drop
3521                  * msdus indicated due to error reasons.
3522                  */
3523                 if (!is_frag || num_msdus > 1) {
3524                         drop = 1;
3525                         /* Return the link desc back to wbm idle list */
3526                         ath11k_dp_rx_link_desc_return(ab, desc,
3527                                                       HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3528                 }
3529
3530                 for (i = 0; i < num_msdus; i++) {
3531                         buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3532                                            msdu_cookies[i]);
3533
3534                         mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
3535                                            msdu_cookies[i]);
3536
3537                         ar = ab->pdevs[mac_id].ar;
3538
3539                         if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
3540                                 n_bufs_reaped[mac_id]++;
3541                                 tot_n_bufs_reaped++;
3542                         }
3543                 }
3544
3545                 if (tot_n_bufs_reaped >= quota) {
3546                         tot_n_bufs_reaped = quota;
3547                         goto exit;
3548                 }
3549
3550                 budget = quota - tot_n_bufs_reaped;
3551         }
3552
3553 exit:
3554         ath11k_hal_srng_access_end(ab, srng);
3555
3556         spin_unlock_bh(&srng->lock);
3557
3558         for (i = 0; i < ab->num_radios; i++) {
3559                 if (!n_bufs_reaped[i])
3560                         continue;
3561
3562                 ar = ab->pdevs[i].ar;
3563                 rx_ring = &ar->dp.rx_refill_buf_ring;
3564
3565                 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
3566                                            HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
3567         }
3568
3569         return tot_n_bufs_reaped;
3570 }
3571
3572 static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
3573                                              int msdu_len,
3574                                              struct sk_buff_head *msdu_list)
3575 {
3576         struct sk_buff *skb, *tmp;
3577         struct ath11k_skb_rxcb *rxcb;
3578         int n_buffs;
3579
3580         n_buffs = DIV_ROUND_UP(msdu_len,
3581                                (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE));
3582
3583         skb_queue_walk_safe(msdu_list, skb, tmp) {
3584                 rxcb = ATH11K_SKB_RXCB(skb);
3585                 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3586                     rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3587                         if (!n_buffs)
3588                                 break;
3589                         __skb_unlink(skb, msdu_list);
3590                         dev_kfree_skb_any(skb);
3591                         n_buffs--;
3592                 }
3593         }
3594 }
3595
3596 static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
3597                                       struct ieee80211_rx_status *status,
3598                                       struct sk_buff_head *msdu_list)
3599 {
3600         u16 msdu_len;
3601         struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3602         u8 l3pad_bytes;
3603         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3604
3605         msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
3606
3607         if (!rxcb->is_frag && ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE)) {
3608                 /* First buffer will be freed by the caller, so deduct its length */
3609                 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
3610                 ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3611                 return -EINVAL;
3612         }
3613
3614         if (!ath11k_dp_rx_h_attn_msdu_done(desc)) {
3615                 ath11k_warn(ar->ab,
3616                             "msdu_done bit not set in null_q_des processing\n");
3617                 __skb_queue_purge(msdu_list);
3618                 return -EIO;
3619         }
3620
3621         /* Handle NULL queue descriptor violations arising out of a missing
3622          * REO queue for a given peer or a given TID. This typically
3623          * happens if a packet is received on a QoS enabled TID before the
3624          * ADDBA negotiation for that TID has set up the TID queue. It may
3625          * also happen for MC/BC frames if they are not routed to the
3626          * non-QoS TID queue, in the absence of any other default TID queue.
3627          * This error can show up in both REO destination and WBM release rings.
3628          */
3629
3630         rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
3631         rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
3632
3633         if (rxcb->is_frag) {
3634                 skb_pull(msdu, HAL_RX_DESC_SIZE);
3635         } else {
3636                 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
3637
3638                 if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3639                         return -EINVAL;
3640
3641                 skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
3642                 skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
3643         }
3644         ath11k_dp_rx_h_ppdu(ar, desc, status);
3645
3646         ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
3647
3648         rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(desc);
3649
3650         /* Note that the caller still has access to the msdu and will complete
3651          * the rx with mac80211. There is no need to clean up the amsdu_list.
3652          */
3653
3654         return 0;
3655 }
3656
3657 static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
3658                                    struct ieee80211_rx_status *status,
3659                                    struct sk_buff_head *msdu_list)
3660 {
3661         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3662         bool drop = false;
3663
3664         ar->ab->soc_stats.reo_error[rxcb->err_code]++;
3665
3666         switch (rxcb->err_code) {
3667         case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3668                 if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
3669                         drop = true;
3670                 break;
3671         case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
3672                 /* TODO: Do not drop PN failed packets in the driver;
3673                  * instead, it is good to drop such packets in mac80211
3674                  * after incrementing the replay counters.
3675                  */
3676
3677                 /* fall through */
3678         default:
3679                 /* TODO: Review other errors and process them to mac80211
3680                  * as appropriate.
3681                  */
3682                 drop = true;
3683                 break;
3684         }
3685
3686         return drop;
3687 }
3688
3689 static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
3690                                         struct ieee80211_rx_status *status)
3691 {
3692         u16 msdu_len;
3693         struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3694         u8 l3pad_bytes;
3695         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3696
3697         rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
3698         rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
3699
3700         l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
3701         msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
3702         skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
3703         skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
3704
3705         ath11k_dp_rx_h_ppdu(ar, desc, status);
3706
3707         status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
3708                          RX_FLAG_DECRYPTED);
3709
3710         ath11k_dp_rx_h_undecap(ar, msdu, desc,
3711                                HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
3712 }
3713
3714 static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
3715                                      struct ieee80211_rx_status *status)
3716 {
3717         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3718         bool drop = false;
3719
3720         ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
3721
3722         switch (rxcb->err_code) {
3723         case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
3724                 ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
3725                 break;
3726         default:
3727                 /* TODO: Review other rxdma error codes to check if anything is
3728                  * worth reporting to mac80211
3729                  */
3730                 drop = true;
3731                 break;
3732         }
3733
3734         return drop;
3735 }
3736
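/* Dispatch one WBM error-release frame according to its release source
 * (REO or RXDMA). Frames the per-source handlers deem deliverable are
 * handed to mac80211 through ath11k_dp_rx_deliver_msdu(); all others are
 * freed here.
 */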
3737 static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
3738                                  struct napi_struct *napi,
3739                                  struct sk_buff *msdu,
3740                                  struct sk_buff_head *msdu_list)
3741 {
3742         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3743         struct ieee80211_rx_status rxs = {0};
3744         struct ieee80211_rx_status *status;
3745         bool drop = true;
3746
3747         switch (rxcb->err_rel_src) {
3748         case HAL_WBM_REL_SRC_MODULE_REO:
3749                 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
3750                 break;
3751         case HAL_WBM_REL_SRC_MODULE_RXDMA:
3752                 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
3753                 break;
3754         default:
3755                 /* msdu will get freed */
3756                 break;
3757         }
3758
3759         if (drop) {
3760                 dev_kfree_skb_any(msdu);
3761                 return;
3762         }
3763
3764         status = IEEE80211_SKB_RXCB(msdu);
3765         *status = rxs;
3766
3767         ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
3768 }
3769
3770 int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
3771                                  struct napi_struct *napi, int budget)
3772 {
3773         struct ath11k *ar;
3774         struct ath11k_dp *dp = &ab->dp;
3775         struct dp_rxdma_ring *rx_ring;
3776         struct hal_rx_wbm_rel_info err_info;
3777         struct hal_srng *srng;
3778         struct sk_buff *msdu;
3779         struct sk_buff_head msdu_list[MAX_RADIOS];
3780         struct ath11k_skb_rxcb *rxcb;
3781         u32 *rx_desc;
3782         int buf_id, mac_id;
3783         int num_buffs_reaped[MAX_RADIOS] = {0};
3784         int total_num_buffs_reaped = 0;
3785         int ret, i;
3786
3787         for (i = 0; i < MAX_RADIOS; i++)
3788                 __skb_queue_head_init(&msdu_list[i]);
3789
3790         srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
3791
3792         spin_lock_bh(&srng->lock);
3793
3794         ath11k_hal_srng_access_begin(ab, srng);
3795
3796         while (budget) {
3797                 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
3798                 if (!rx_desc)
3799                         break;
3800
3801                 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
3802                 if (ret) {
3803                         ath11k_warn(ab,
3804                                     "failed to parse rx error in wbm_rel ring desc %d\n",
3805                                     ret);
3806                         continue;
3807                 }
3808
3809                 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
3810                 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
3811
3812                 ar = ab->pdevs[mac_id].ar;
3813                 rx_ring = &ar->dp.rx_refill_buf_ring;
3814
3815                 spin_lock_bh(&rx_ring->idr_lock);
3816                 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3817                 if (!msdu) {
3818                         ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
3819                                     buf_id, mac_id);
3820                         spin_unlock_bh(&rx_ring->idr_lock);
3821                         continue;
3822                 }
3823
3824                 idr_remove(&rx_ring->bufs_idr, buf_id);
3825                 spin_unlock_bh(&rx_ring->idr_lock);
3826
3827                 rxcb = ATH11K_SKB_RXCB(msdu);
3828                 dma_unmap_single(ab->dev, rxcb->paddr,
3829                                  msdu->len + skb_tailroom(msdu),
3830                                  DMA_FROM_DEVICE);
3831
3832                 num_buffs_reaped[mac_id]++;
3833                 total_num_buffs_reaped++;
3834                 budget--;
3835
3836                 if (err_info.push_reason !=
3837                     HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
3838                         dev_kfree_skb_any(msdu);
3839                         continue;
3840                 }
3841
3842                 rxcb->err_rel_src = err_info.err_rel_src;
3843                 rxcb->err_code = err_info.err_code;
3844                 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
3845                 __skb_queue_tail(&msdu_list[mac_id], msdu);
3846         }
3847
3848         ath11k_hal_srng_access_end(ab, srng);
3849
3850         spin_unlock_bh(&srng->lock);
3851
3852         if (!total_num_buffs_reaped)
3853                 goto done;
3854
3855         for (i = 0; i < ab->num_radios; i++) {
3856                 if (!num_buffs_reaped[i])
3857                         continue;
3858
3859                 ar = ab->pdevs[i].ar;
3860                 rx_ring = &ar->dp.rx_refill_buf_ring;
3861
3862                 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
3863                                            HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
3864         }
3865
3866         rcu_read_lock();
3867         for (i = 0; i < ab->num_radios; i++) {
3868                 if (!rcu_dereference(ab->pdevs_active[i])) {
3869                         __skb_queue_purge(&msdu_list[i]);
3870                         continue;
3871                 }
3872
3873                 ar = ab->pdevs[i].ar;
3874
3875                 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3876                         __skb_queue_purge(&msdu_list[i]);
3877                         continue;
3878                 }
3879
3880                 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
3881                         ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
3882         }
3883         rcu_read_unlock();
3884 done:
3885         return total_num_buffs_reaped;
3886 }
3887
3888 int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
3889 {
3890         struct ath11k *ar = ab->pdevs[mac_id].ar;
3891         struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring;
3892         struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring;
3893         struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
3894         struct hal_srng *srng;
3895         u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3896         enum hal_rx_buf_return_buf_manager rbm;
3897         enum hal_reo_entr_rxdma_ecode rxdma_err_code;
3898         struct ath11k_skb_rxcb *rxcb;
3899         struct sk_buff *skb;
3900         struct hal_reo_entrance_ring *entr_ring;
3901         void *desc;
3902         int num_buf_freed = 0;
3903         int quota = budget;
3904         dma_addr_t paddr;
3905         u32 desc_bank;
3906         void *link_desc_va;
3907         int num_msdus;
3908         int i;
3909         int buf_id;
3910
3911         srng = &ab->hal.srng_list[err_ring->ring_id];
3912
3913         spin_lock_bh(&srng->lock);
3914
3915         ath11k_hal_srng_access_begin(ab, srng);
3916
3917         while (quota-- &&
3918                (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3919                 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
3920
3921                 entr_ring = (struct hal_reo_entrance_ring *)desc;
3922                 rxdma_err_code =
3923                         FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
3924                                   entr_ring->info1);
3925                 ab->soc_stats.rxdma_error[rxdma_err_code]++;
3926
3927                 link_desc_va = link_desc_banks[desc_bank].vaddr +
3928                                (paddr - link_desc_banks[desc_bank].paddr);
3929                 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
3930                                                  msdu_cookies, &rbm);
3931
3932                 for (i = 0; i < num_msdus; i++) {
3933                         buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3934                                            msdu_cookies[i]);
3935
3936                         spin_lock_bh(&rx_ring->idr_lock);
3937                         skb = idr_find(&rx_ring->bufs_idr, buf_id);
3938                         if (!skb) {
3939                                 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
3940                                             buf_id);
3941                                 spin_unlock_bh(&rx_ring->idr_lock);
3942                                 continue;
3943                         }
3944
3945                         idr_remove(&rx_ring->bufs_idr, buf_id);
3946                         spin_unlock_bh(&rx_ring->idr_lock);
3947
3948                         rxcb = ATH11K_SKB_RXCB(skb);
3949                         dma_unmap_single(ab->dev, rxcb->paddr,
3950                                          skb->len + skb_tailroom(skb),
3951                                          DMA_FROM_DEVICE);
3952                         dev_kfree_skb_any(skb);
3953
3954                         num_buf_freed++;
3955                 }
3956
3957                 ath11k_dp_rx_link_desc_return(ab, desc,
3958                                               HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3959         }
3960
3961         ath11k_hal_srng_access_end(ab, srng);
3962
3963         spin_unlock_bh(&srng->lock);
3964
3965         if (num_buf_freed)
3966                 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
3967                                            HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
3968
3969         return budget - quota;
3970 }
3971
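/* Drain the REO status ring: parse each status TLV into a hal_reo_status
 * and, when its command number matches a pending entry on
 * dp->reo_cmd_list, detach that entry and invoke its completion handler.
 */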
3972 void ath11k_dp_process_reo_status(struct ath11k_base *ab)
3973 {
3974         struct ath11k_dp *dp = &ab->dp;
3975         struct hal_srng *srng;
3976         struct dp_reo_cmd *cmd, *tmp;
3977         bool found = false;
3978         u32 *reo_desc;
3979         u16 tag;
3980         struct hal_reo_status reo_status;
3981
3982         srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
3983
3984         memset(&reo_status, 0, sizeof(reo_status));
3985
3986         spin_lock_bh(&srng->lock);
3987
3988         ath11k_hal_srng_access_begin(ab, srng);
3989
3990         while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3991                 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
3992
3993                 switch (tag) {
3994                 case HAL_REO_GET_QUEUE_STATS_STATUS:
3995                         ath11k_hal_reo_status_queue_stats(ab, reo_desc,
3996                                                           &reo_status);
3997                         break;
3998                 case HAL_REO_FLUSH_QUEUE_STATUS:
3999                         ath11k_hal_reo_flush_queue_status(ab, reo_desc,
4000                                                           &reo_status);
4001                         break;
4002                 case HAL_REO_FLUSH_CACHE_STATUS:
4003                         ath11k_hal_reo_flush_cache_status(ab, reo_desc,
4004                                                           &reo_status);
4005                         break;
4006                 case HAL_REO_UNBLOCK_CACHE_STATUS:
4007                         ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
4008                                                           &reo_status);
4009                         break;
4010                 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
4011                         ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
4012                                                                  &reo_status);
4013                         break;
4014                 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
4015                         ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
4016                                                                   &reo_status);
4017                         break;
4018                 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
4019                         ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
4020                                                                   &reo_status);
4021                         break;
4022                 default:
4023                         ath11k_warn(ab, "Unknown reo status type %d\n", tag);
4024                         continue;
4025                 }
4026
4027                 spin_lock_bh(&dp->reo_cmd_lock);
4028                 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
4029                         if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
4030                                 found = true;
4031                                 list_del(&cmd->list);
4032                                 break;
4033                         }
4034                 }
4035                 spin_unlock_bh(&dp->reo_cmd_lock);
4036
4037                 if (found) {
4038                         cmd->handler(dp, (void *)&cmd->data,
4039                                      reo_status.uniform_hdr.cmd_status);
4040                         kfree(cmd);
4041                 }
4042
4043                 found = false;
4044         }
4045
4046         ath11k_hal_srng_access_end(ab, srng);
4047
4048         spin_unlock_bh(&srng->lock);
4049 }
4050
4051 void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
4052 {
4053         struct ath11k *ar = ab->pdevs[mac_id].ar;
4054
4055         ath11k_dp_rx_pdev_srng_free(ar);
4056         ath11k_dp_rxdma_pdev_buf_free(ar);
4057 }
4058
4059 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
4060 {
4061         struct ath11k *ar = ab->pdevs[mac_id].ar;
4062         struct ath11k_pdev_dp *dp = &ar->dp;
4063         u32 ring_id;
4064         int ret;
4065
4066         ret = ath11k_dp_rx_pdev_srng_alloc(ar);
4067         if (ret) {
4068                 ath11k_warn(ab, "failed to setup rx srngs\n");
4069                 return ret;
4070         }
4071
4072         ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
4073         if (ret) {
4074                 ath11k_warn(ab, "failed to setup rxdma ring\n");
4075                 return ret;
4076         }
4077
4078         ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4079         ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
4080         if (ret) {
4081                 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4082                             ret);
4083                 return ret;
4084         }
4085
4086         ring_id = dp->rxdma_err_dst_ring.ring_id;
4087         ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST);
4088         if (ret) {
4089                 ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d\n",
4090                             ret);
4091                 return ret;
4092         }
4093
4094         ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4095         ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4096                                           mac_id, HAL_RXDMA_MONITOR_BUF);
4097         if (ret) {
4098                 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4099                             ret);
4100                 return ret;
4101         }
4102         ret = ath11k_dp_tx_htt_srng_setup(ab,
4103                                           dp->rxdma_mon_dst_ring.ring_id,
4104                                           mac_id, HAL_RXDMA_MONITOR_DST);
4105         if (ret) {
4106                 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4107                             ret);
4108                 return ret;
4109         }
4110         ret = ath11k_dp_tx_htt_srng_setup(ab,
4111                                           dp->rxdma_mon_desc_ring.ring_id,
4112                                           mac_id, HAL_RXDMA_MONITOR_DESC);
4113         if (ret) {
4114                 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4115                             ret);
4116                 return ret;
4117         }
4118         ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
4119         ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id,
4120                                           HAL_RXDMA_MONITOR_STATUS);
4121         if (ret) {
4122                 ath11k_warn(ab,
4123                             "failed to configure mon_status_refill_ring %d\n",
4124                             ret);
4125                 return ret;
4126         }
4127         return 0;
4128 }
4129
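/*
 * Chop the remaining MSDU length into the chunk carried by the current
 * monitor buffer. Each RX buffer holds at most DP_RX_BUFFER_SIZE minus
 * the HAL RX descriptor that prefixes it; larger MSDUs spill into
 * continuation buffers. Illustrative numbers: a 5000 byte MSDU with
 * 1664 usable bytes per buffer is consumed as 1664 + 1664 + 1664 + 8.
 */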
4130 static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
4131 {
4132         if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
4133                 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
4134                 *total_len -= *frag_len;
4135         } else {
4136                 *frag_len = *total_len;
4137                 *total_len = 0;
4138         }
4139 }
4140
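/*
 * Hand a consumed MSDU link descriptor back to the monitor descriptor
 * ring by copying its buffer address info into the next free source
 * entry. Returns -ENOMEM when the ring is full and the descriptor
 * could not be returned.
 */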
4141 static
4142 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
4143                                           void *p_last_buf_addr_info,
4144                                           u8 mac_id)
4145 {
4146         struct ath11k_pdev_dp *dp = &ar->dp;
4147         struct dp_srng *dp_srng;
4148         void *hal_srng;
4149         void *src_srng_desc;
4150         int ret = 0;
4151
4152         dp_srng = &dp->rxdma_mon_desc_ring;
4153         hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4154
4155         ath11k_hal_srng_access_begin(ar->ab, hal_srng);
4156
4157         src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
4158
4159         if (src_srng_desc) {
4160                 struct ath11k_buffer_addr *src_desc =
4161                                 (struct ath11k_buffer_addr *)src_srng_desc;
4162
4163                 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
4164         } else {
4165                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4166                            "Monitor Link Desc Ring %d Full", mac_id);
4167                 ret = -ENOMEM;
4168         }
4169
4170         ath11k_hal_srng_access_end(ar->ab, hal_srng);
4171         return ret;
4172 }
4173
4174 static
4175 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
4176                                          dma_addr_t *paddr, u32 *sw_cookie,
4177                                          void **pp_buf_addr_info)
4178 {
4179         struct hal_rx_msdu_link *msdu_link =
4180                         (struct hal_rx_msdu_link *)rx_msdu_link_desc;
4181         struct ath11k_buffer_addr *buf_addr_info;
4182         u8 rbm = 0;
4183
4184         buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
4185
4186         ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm);
4187
4188         *pp_buf_addr_info = (void *)buf_addr_info;
4189 }
4190
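/*
 * Force skb->len to exactly @len: trim if the skb is longer, otherwise
 * grow the tail, reallocating the head when tailroom is short. On
 * allocation failure the skb is freed and -ENOMEM returned, so callers
 * must not touch the skb after an error.
 */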
4191 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
4192 {
4193         if (skb->len > len) {
4194                 skb_trim(skb, len);
4195         } else {
4196                 if (skb_tailroom(skb) < len - skb->len) {
4197                         if (pskb_expand_head(skb, 0,
4198                                              len - skb->len - skb_tailroom(skb),
4199                                              GFP_ATOMIC)) {
4200                                 dev_kfree_skb_any(skb);
4201                                 return -ENOMEM;
4202                         }
4203                 }
4204                 skb_put(skb, len - skb->len);
4205         }
4206         return 0;
4207 }
4208
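/*
 * Flatten the MSDU details of one link descriptor into @msdu_list:
 * flags, length, SW cookie and return-buffer-manager per MSDU. A zero
 * buffer address terminates the walk early, and the previous entry is
 * then flagged as the last MSDU of the MPDU (the descriptor is assumed
 * to contain at least one valid entry).
 */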
4209 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
4210                                         void *msdu_link_desc,
4211                                         struct hal_rx_msdu_list *msdu_list,
4212                                         u16 *num_msdus)
4213 {
4214         struct hal_rx_msdu_details *msdu_details = NULL;
4215         struct rx_msdu_desc *msdu_desc_info = NULL;
4216         struct hal_rx_msdu_link *msdu_link = NULL;
4217         int i;
4218         u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
4219         u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
4220         u8 tmp = 0;
4221
4222         msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
4223         msdu_details = &msdu_link->msdu_link[0];
4224
4225         for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
4226                 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
4227                               msdu_details[i].buf_addr_info.info0) == 0) {
4228                         msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
4229                         msdu_desc_info->info0 |= last;
4231                         break;
4232                 }
4233                 msdu_desc_info = &msdu_details[i].rx_msdu_info;
4234
4235                 if (!i)
4236                         msdu_desc_info->info0 |= first;
4237                 else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
4238                         msdu_desc_info->info0 |= last;
4239                 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
4240                 msdu_list->msdu_info[i].msdu_len =
4241                          HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
4242                 msdu_list->sw_cookie[i] =
4243                         FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
4244                                   msdu_details[i].buf_addr_info.info1);
4245                 tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
4246                                 msdu_details[i].buf_addr_info.info1);
4247                 msdu_list->rbm[i] = tmp;
4248         }
4249         *num_msdus = i;
4250 }
4251
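/*
 * Compare the PPDU ID found in the destination ring against the one
 * the status ring is working on. PPDU IDs increase monotonically with
 * wraparound, so "dest is ahead" shows up either as a small forward
 * delta or as a huge backward delta (> DP_NOT_PPDU_ID_WRAP_AROUND). A
 * non-zero return means *ppdu_id was advanced to msdu_ppdu_id.
 */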
4252 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
4253                                         u32 *rx_bufs_used)
4254 {
4255         u32 ret = 0;
4256
4257         if ((*ppdu_id < msdu_ppdu_id) &&
4258             ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
4259                 *ppdu_id = msdu_ppdu_id;
4260                 ret = msdu_ppdu_id;
4261         } else if ((*ppdu_id > msdu_ppdu_id) &&
4262                    ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
4263                 /* mon_dst is behind mon_status,
4264                  * so skip this dst_ring entry and free it
4265                  */
4266                 *rx_bufs_used += 1;
4267                 *ppdu_id = msdu_ppdu_id;
4268                 ret = msdu_ppdu_id;
4269         }
4270         return ret;
4271 }
4272
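/*
 * Work out how many payload bytes the current buffer contributes. An
 * MSDU_CONTINUATION flag starts or extends a multi-buffer MSDU whose
 * total length is chopped up by ath11k_dp_mon_set_frag_len(); a buffer
 * without the flag either finishes such an MSDU or carries a whole one,
 * and in both cases completes an MSDU (msdu_cnt is decremented).
 */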
4273 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
4274                                       bool *is_frag, u32 *total_len,
4275                                       u32 *frag_len, u32 *msdu_cnt)
4276 {
4277         if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
4278                 if (!*is_frag) {
4279                         *total_len = info->msdu_len;
4280                         *is_frag = true;
4281                 }
4282                 ath11k_dp_mon_set_frag_len(total_len,
4283                                            frag_len);
4284         } else {
4285                 if (*is_frag) {
4286                         ath11k_dp_mon_set_frag_len(total_len,
4287                                                    frag_len);
4288                 } else {
4289                         *frag_len = info->msdu_len;
4290                 }
4291                 *is_frag = false;
4292                 *msdu_cnt -= 1;
4293         }
4294 }
4295
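/*
 * Pop one MPDU from the monitor destination ring: walk the chain of
 * MSDU link descriptors referenced by the REO entrance descriptor,
 * look each buffer up in the refill IDR, unmap it and stitch the
 * buffers into a head->tail skb list. Duplicate link descriptors and
 * buffer cookies are skipped, MPDUs flagged with RXDMA errors are
 * dropped, and consumed link descriptors are handed back through
 * ath11k_dp_rx_monitor_link_desc_return(). Returns the number of RX
 * buffers used so the caller can replenish them.
 */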
4296 static u32
4297 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
4298                           void *ring_entry, struct sk_buff **head_msdu,
4299                           struct sk_buff **tail_msdu, u32 *npackets,
4300                           u32 *ppdu_id)
4301 {
4302         struct ath11k_pdev_dp *dp = &ar->dp;
4303         struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4304         struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
4305         struct sk_buff *msdu = NULL, *last = NULL;
4306         struct hal_rx_msdu_list msdu_list;
4307         void *p_buf_addr_info, *p_last_buf_addr_info;
4308         struct hal_rx_desc *rx_desc;
4309         void *rx_msdu_link_desc;
4310         dma_addr_t paddr;
4311         u16 num_msdus = 0;
4312         u32 rx_buf_size, rx_pkt_offset, sw_cookie;
4313         u32 rx_bufs_used = 0, i = 0;
4314         u32 msdu_ppdu_id = 0, msdu_cnt = 0;
4315         u32 total_len = 0, frag_len = 0;
4316         bool is_frag, is_first_msdu;
4317         bool drop_mpdu = false;
4318         struct ath11k_skb_rxcb *rxcb;
4319         struct hal_reo_entrance_ring *ent_desc =
4320                         (struct hal_reo_entrance_ring *)ring_entry;
4321         int buf_id;
4322
4323         ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
4324                                             &sw_cookie, &p_last_buf_addr_info,
4325                                             &msdu_cnt);
4326
4327         if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
4328                       ent_desc->info1) ==
4329                       HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4330                 u8 rxdma_err =
4331                         FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4332                                   ent_desc->info1);
4333                 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
4334                     rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
4335                     rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
4336                         drop_mpdu = true;
4337                         pmon->rx_mon_stats.dest_mpdu_drop++;
4338                 }
4339         }
4340
4341         is_frag = false;
4342         is_first_msdu = true;
4343
4344         do {
4345                 if (pmon->mon_last_linkdesc_paddr == paddr) {
4346                         pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
4347                         return rx_bufs_used;
4348                 }
4349
4350                 rx_msdu_link_desc =
4351                         (void *)pmon->link_desc_banks[sw_cookie].vaddr +
4352                         (paddr - pmon->link_desc_banks[sw_cookie].paddr);
4353
4354                 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
4355                                             &num_msdus);
4356
4357                 for (i = 0; i < num_msdus; i++) {
4358                         u32 l2_hdr_offset;
4359
4360                         if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
4361                                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4362                                            "i %d last_cookie %d is same\n",
4363                                            i, pmon->mon_last_buf_cookie);
4364                                 drop_mpdu = true;
4365                                 pmon->rx_mon_stats.dup_mon_buf_cnt++;
4366                                 continue;
4367                         }
4368                         buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4369                                            msdu_list.sw_cookie[i]);
4370
4371                         spin_lock_bh(&rx_ring->idr_lock);
4372                         msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4373                         spin_unlock_bh(&rx_ring->idr_lock);
4374                         if (!msdu) {
4375                                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4376                                            "msdu_pop: invalid buf_id %d\n", buf_id);
4377                                 break;
4378                         }
4379                         rxcb = ATH11K_SKB_RXCB(msdu);
4380                         if (!rxcb->unmapped) {
4381                                 dma_unmap_single(ar->ab->dev, rxcb->paddr,
4382                                                  msdu->len +
4383                                                  skb_tailroom(msdu),
4384                                                  DMA_FROM_DEVICE);
4385                                 rxcb->unmapped = 1;
4386                         }
4387                         if (drop_mpdu) {
4388                                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4389                                            "i %d drop msdu %p *ppdu_id %x\n",
4390                                            i, msdu, *ppdu_id);
4391                                 dev_kfree_skb_any(msdu);
4392                                 msdu = NULL;
4393                                 goto next_msdu;
4394                         }
4395
4396                         rx_desc = (struct hal_rx_desc *)msdu->data;
4397
4398                         rx_pkt_offset = sizeof(struct hal_rx_desc);
4399                         l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc);
4400
4401                         if (is_first_msdu) {
4402                                 if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) {
4403                                         drop_mpdu = true;
4404                                         dev_kfree_skb_any(msdu);
4405                                         msdu = NULL;
4406                                         pmon->mon_last_linkdesc_paddr = paddr;
4407                                         goto next_msdu;
4408                                 }
4409
4410                                 msdu_ppdu_id =
4411                                         ath11k_dp_rxdesc_get_ppduid(rx_desc);
4412
4413                                 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
4414                                                                  ppdu_id,
4415                                                                  &rx_bufs_used)) {
4416                                         if (rx_bufs_used) {
4417                                                 drop_mpdu = true;
4418                                                 dev_kfree_skb_any(msdu);
4419                                                 msdu = NULL;
4420                                                 goto next_msdu;
4421                                         }
4422                                         return rx_bufs_used;
4423                                 }
4424                                 pmon->mon_last_linkdesc_paddr = paddr;
4425                                 is_first_msdu = false;
4426                         }
4427                         ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
4428                                                   &is_frag, &total_len,
4429                                                   &frag_len, &msdu_cnt);
4430                         rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
4431
4432                         if (ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size)) {
                                        msdu = NULL; /* skb freed on -ENOMEM */
                                        goto next_msdu;
                                }
4433
4434                         if (!(*head_msdu))
4435                                 *head_msdu = msdu;
4436                         else if (last)
4437                                 last->next = msdu;
4438
4439                         last = msdu;
4440 next_msdu:
4441                         pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
4442                         rx_bufs_used++;
4443                         spin_lock_bh(&rx_ring->idr_lock);
4444                         idr_remove(&rx_ring->bufs_idr, buf_id);
4445                         spin_unlock_bh(&rx_ring->idr_lock);
4446                 }
4447
4448                 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
4449                                                     &sw_cookie,
4450                                                     &p_buf_addr_info);
4451
4452                 if (ath11k_dp_rx_monitor_link_desc_return(ar,
4453                                                           p_last_buf_addr_info,
4454                                                           dp->mac_id))
4455                         ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4456                                    "dp_rx_monitor_link_desc_return failed");
4457
4458                 p_last_buf_addr_info = p_buf_addr_info;
4459
4460         } while (paddr && msdu_cnt);
4461
4462         if (last)
4463                 last->next = NULL;
4464
4465         *tail_msdu = msdu;
4466
4467         if (msdu_cnt == 0)
4468                 *npackets = 1;
4469
4470         return rx_bufs_used;
4471 }
4472
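/*
 * Advance skb->data past the HAL RX descriptor and the L3 header
 * padding so the skb starts at the actual frame payload.
 */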
4473 static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu)
4474 {
4475         u32 rx_pkt_offset, l2_hdr_offset;
4476
4477         rx_pkt_offset = sizeof(struct hal_rx_desc);
4478         l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data);
4479         skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
4480 }
4481
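/*
 * Merge the MSDU skb chain of one MPDU into a frame mac80211 can
 * accept. RAW decap strips the HAL descriptor and L3 padding from
 * every buffer and trims the trailing FCS; native-wifi decap also
 * re-inserts the 802.11 header with QoS control for QoS data frames,
 * taken from the copy the hardware leaves in the RX descriptor. Other
 * decap formats are not supported on the monitor path.
 */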
4482 static struct sk_buff *
4483 ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
4484                             u32 mac_id, struct sk_buff *head_msdu,
4485                             struct sk_buff *last_msdu,
4486                             struct ieee80211_rx_status *rxs)
4487 {
4488         struct sk_buff *msdu, *mpdu_buf, *prev_buf;
4489         u32 decap_format, wifi_hdr_len;
4490         struct hal_rx_desc *rx_desc;
4491         char *hdr_desc;
4492         u8 *dest;
4493         struct ieee80211_hdr_3addr *wh;
4494
4495         mpdu_buf = NULL;
4496
4497         if (!head_msdu)
4498                 goto err_merge_fail;
4499
4500         rx_desc = (struct hal_rx_desc *)head_msdu->data;
4501
4502         if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc))
4503                 return NULL;
4504
4505         decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc);
4506
4507         ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
4508
4509         if (decap_format == DP_RX_DECAP_TYPE_RAW) {
4510                 ath11k_dp_rx_msdus_set_payload(head_msdu);
4511
4512                 prev_buf = head_msdu;
4513                 msdu = head_msdu->next;
4514
4515                 while (msdu) {
4516                         ath11k_dp_rx_msdus_set_payload(msdu);
4517
4518                         prev_buf = msdu;
4519                         msdu = msdu->next;
4520                 }
4521
4522                 prev_buf->next = NULL;
4523
4524                 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
4525         } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
4526                 __le16 qos_field;
4527                 u8 qos_pkt = 0;
4528
4529                 rx_desc = (struct hal_rx_desc *)head_msdu->data;
4530                 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
4531
4532                 /* Base size */
4533                 wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
4534                 wh = (struct ieee80211_hdr_3addr *)hdr_desc;
4535
4536                 if (ieee80211_is_data_qos(wh->frame_control)) {
4537                         struct ieee80211_qos_hdr *qwh =
4538                                         (struct ieee80211_qos_hdr *)hdr_desc;
4539
4540                         qos_field = qwh->qos_ctrl;
4541                         qos_pkt = 1;
4542                 }
4543                 msdu = head_msdu;
4544
4545                 while (msdu) {
4546                         rx_desc = (struct hal_rx_desc *)msdu->data;
4547                         hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
4548
4549                         if (qos_pkt) {
4550                                 dest = skb_push(msdu, sizeof(__le16));
4551                                 if (!dest)
4552                                         goto err_merge_fail;
4553                                 memcpy(dest, hdr_desc, wifi_hdr_len);
4554                                 memcpy(dest + wifi_hdr_len,
4555                                        (u8 *)&qos_field, sizeof(__le16));
4556                         }
4557                         ath11k_dp_rx_msdus_set_payload(msdu);
4558                         prev_buf = msdu;
4559                         msdu = msdu->next;
4560                 }
4561                 dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
4562                 if (!dest)
4563                         goto err_merge_fail;
4564
4565                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4566                            "mpdu_buf %pK mpdu_buf->len %u",
4567                            prev_buf, prev_buf->len);
4568         } else {
4569                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4570                            "decap format %d is not supported!\n",
4571                            decap_format);
4572                 goto err_merge_fail;
4573         }
4574
4575         return head_msdu;
4576
4577 err_merge_fail:
4578         if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
4579                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4580                            "err_merge_fail mpdu_buf %pK", mpdu_buf);
4581                 /* Free the head buffer */
4582                 dev_kfree_skb_any(mpdu_buf);
4583         }
4584         return NULL;
4585 }
4586
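/*
 * Deliver a merged MPDU to mac80211, one skb per MSDU. Each skb gets
 * the same RX status; RX_FLAG_AMSDU_MORE is set on all but the last
 * MSDU and RX_FLAG_ALLOW_SAME_PN on all but the first, so the chain is
 * treated as parts of one A-MSDU. On merge failure the whole chain is
 * freed and -EINVAL returned.
 */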
4587 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
4588                                     struct sk_buff *head_msdu,
4589                                     struct sk_buff *tail_msdu,
4590                                     struct napi_struct *napi)
4591 {
4592         struct ath11k_pdev_dp *dp = &ar->dp;
4593         struct sk_buff *mon_skb, *skb_next, *header;
4594         struct ieee80211_rx_status *rxs = &dp->rx_status, *status;
4595
4596         mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
4597                                               tail_msdu, rxs);
4598
4599         if (!mon_skb)
4600                 goto mon_deliver_fail;
4601
4602         header = mon_skb;
4603
4604         rxs->flag = 0;
4605         do {
4606                 skb_next = mon_skb->next;
4607                 if (!skb_next)
4608                         rxs->flag &= ~RX_FLAG_AMSDU_MORE;
4609                 else
4610                         rxs->flag |= RX_FLAG_AMSDU_MORE;
4611
4612                 if (mon_skb == header) {
4613                         header = NULL;
4614                         rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
4615                 } else {
4616                         rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
4617                 }
4618                 rxs->flag |= RX_FLAG_ONLY_MONITOR;
4619
4620                 status = IEEE80211_SKB_RXCB(mon_skb);
4621                 *status = *rxs;
4622
4623                 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
4624                 mon_skb = skb_next;
4625         } while (mon_skb);
4626         rxs->flag = 0;
4627
4628         return 0;
4629
4630 mon_deliver_fail:
4631         mon_skb = head_msdu;
4632         while (mon_skb) {
4633                 skb_next = mon_skb->next;
4634                 dev_kfree_skb_any(mon_skb);
4635                 mon_skb = skb_next;
4636         }
4637         return -EINVAL;
4638 }
4639
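/*
 * Reap the monitor destination ring for the PPDU the status ring just
 * completed. Each ring entry pops one MPDU; a PPDU ID mismatch means
 * the destination ring has moved on to a new PPDU, so processing stops
 * and the status state machine is rewound to DP_PPDU_STATUS_START.
 * Consumed buffers are replenished in one batch at the end.
 */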
4640 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
4641                                           struct napi_struct *napi)
4642 {
4643         struct ath11k_pdev_dp *dp = &ar->dp;
4644         struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4645         void *ring_entry;
4646         void *mon_dst_srng;
4647         u32 ppdu_id;
4648         u32 rx_bufs_used;
4649         struct ath11k_pdev_mon_stats *rx_mon_stats;
4650         u32 npackets = 0;
4651
4652         mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
4653
4654         if (!mon_dst_srng) {
4655                 ath11k_warn(ar->ab,
4656                             "HAL Monitor Destination Ring Init Failed -- %pK",
4657                             mon_dst_srng);
4658                 return;
4659         }
4660
4661         spin_lock_bh(&pmon->mon_lock);
4662
4663         ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
4664
4665         ppdu_id = pmon->mon_ppdu_info.ppdu_id;
4666         rx_bufs_used = 0;
4667         rx_mon_stats = &pmon->rx_mon_stats;
4668
4669         while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
4670                 struct sk_buff *head_msdu, *tail_msdu;
4671
4672                 head_msdu = NULL;
4673                 tail_msdu = NULL;
4674
4675                 rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
4676                                                           &head_msdu,
4677                                                           &tail_msdu,
4678                                                           &npackets, &ppdu_id);
4679
4680                 if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
4681                         pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4682                         ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4683                                    "dest_rx: new ppdu_id %x != status ppdu_id %x",
4684                                    ppdu_id, pmon->mon_ppdu_info.ppdu_id);
4685                         break;
4686                 }
4687                 if (head_msdu && tail_msdu) {
4688                         ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
4689                                                  tail_msdu, napi);
4690                         rx_mon_stats->dest_mpdu_done++;
4691                 }
4692
4693                 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
4694                                                                 mon_dst_srng);
4695         }
4696         ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
4697
4698         spin_unlock_bh(&pmon->mon_lock);
4699
4700         if (rx_bufs_used) {
4701                 rx_mon_stats->dest_ppdu_done++;
4702                 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
4703                                            &dp->rxdma_mon_buf_ring,
4704                                            rx_bufs_used,
4705                                            HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
4706         }
4707 }
4708
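/*
 * Drain the queued status skbs through the HAL TLV parser. Whenever a
 * complete PPDU worth of status TLVs has been seen
 * (HAL_TLV_STATUS_PPDU_DONE), the matching data is reaped from the
 * destination ring before parsing continues with the next status
 * buffer.
 */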
4709 static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
4710                                                 u32 quota,
4711                                                 struct napi_struct *napi)
4712 {
4713         struct ath11k_pdev_dp *dp = &ar->dp;
4714         struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4715         struct hal_rx_mon_ppdu_info *ppdu_info;
4716         struct sk_buff *status_skb;
4717         u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
4718         struct ath11k_pdev_mon_stats *rx_mon_stats;
4719
4720         ppdu_info = &pmon->mon_ppdu_info;
4721         rx_mon_stats = &pmon->rx_mon_stats;
4722
4723         if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
4724                 return;
4725
4726         while (!skb_queue_empty(&pmon->rx_status_q)) {
4727                 status_skb = skb_dequeue(&pmon->rx_status_q);
4728
4729                 tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
4730                                                             status_skb);
4731                 if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
4732                         rx_mon_stats->status_ppdu_done++;
4733                         pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
4734                         ath11k_dp_rx_mon_dest_process(ar, quota, napi);
4735                         pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4736                 }
4737                 dev_kfree_skb_any(status_skb);
4738         }
4739 }
4740
4741 static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
4742                                     struct napi_struct *napi, int budget)
4743 {
4744         struct ath11k *ar = ab->pdevs[mac_id].ar;
4745         struct ath11k_pdev_dp *dp = &ar->dp;
4746         struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4747         int num_buffs_reaped = 0;
4748
4749         num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
4750                                                              &pmon->rx_status_q);
4751         if (num_buffs_reaped)
4752                 ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);
4753
4754         return num_buffs_reaped;
4755 }
4756
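/*
 * NAPI handler for the monitor rings. With monitor mode enabled the
 * status ring is reaped and complete PPDUs are delivered to mac80211;
 * otherwise only monitor status processing runs (used e.g. for PPDU
 * statistics).
 */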
4757 int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
4758                                    struct napi_struct *napi, int budget)
4759 {
4760         struct ath11k *ar = ab->pdevs[mac_id].ar;
4761         int ret = 0;
4762
4763         if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
4764                 ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
4765         else
4766                 ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
4767         return ret;
4768 }
4769
4770 static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
4771 {
4772         struct ath11k_pdev_dp *dp = &ar->dp;
4773         struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4774
4775         skb_queue_head_init(&pmon->rx_status_q);
4776
4777         pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4778
4779         memset(&pmon->rx_mon_stats, 0,
4780                sizeof(pmon->rx_mon_stats));
4781         return 0;
4782 }
4783
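/*
 * One-time monitor setup for a pdev: initialize the status queue and
 * the PPDU state machine, then carve the monitor descriptor ring into
 * MSDU link descriptor banks, sizing n_link_desc as ring size divided
 * by entry size. mon_last_buf_cookie is seeded past
 * DP_RX_DESC_COOKIE_MAX so it can never match a real cookie on the
 * first pass.
 */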
4784 int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
4785 {
4786         struct ath11k_pdev_dp *dp = &ar->dp;
4787         struct ath11k_mon_data *pmon = &dp->mon_data;
4788         struct hal_srng *mon_desc_srng = NULL;
4789         struct dp_srng *dp_srng;
4790         int ret = 0;
4791         u32 n_link_desc = 0;
4792
4793         ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
4794         if (ret) {
4795                 ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
4796                 return ret;
4797         }
4798
4799         dp_srng = &dp->rxdma_mon_desc_ring;
4800         n_link_desc = dp_srng->size /
4801                 ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC);
4802         mon_desc_srng =
4803                 &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
4804
4805         ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
4806                                         HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
4807                                         n_link_desc);
4808         if (ret) {
4809                 ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
4810                 return ret;
4811         }
4812         pmon->mon_last_linkdesc_paddr = 0;
4813         pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
4814         spin_lock_init(&pmon->mon_lock);
4815         return 0;
4816 }
4817
4818 static int ath11k_dp_mon_link_free(struct ath11k *ar)
4819 {
4820         struct ath11k_pdev_dp *dp = &ar->dp;
4821         struct ath11k_mon_data *pmon = &dp->mon_data;
4822
4823         ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
4824                                     HAL_RXDMA_MONITOR_DESC,
4825                                     &dp->rxdma_mon_desc_ring);
4826         return 0;
4827 }
4828
4829 int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
4830 {
4831         ath11k_dp_mon_link_free(ar);
4832         return 0;
4833 }