2 * Copyright (c) 2013 Qualcomm Atheros, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted (subject to the limitations in the
7 * disclaimer below) provided that the following conditions are met:
9 * * Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the
17 * * Neither the name of Qualcomm Atheros nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
22 * GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
23 * HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
32 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
33 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include <adf_os_types.h>
37 #include <adf_os_dma.h>
38 #include <adf_os_timer.h>
39 #include <adf_os_lock.h>
40 #include <adf_os_io.h>
41 #include <adf_os_mem.h>
42 #include <adf_os_util.h>
43 #include <adf_os_stdtypes.h>
44 #include <adf_os_defer.h>
45 #include <adf_os_atomic.h>
48 #include <adf_net_wcmd.h>
50 #include "if_ethersubr.h"
53 #ifdef USE_HEADERLEN_RESV
57 #include <ieee80211_var.h>
58 #include "if_athrate.h"
59 #include "if_athvar.h"
61 #include "if_ath_pci.h"
63 #define ath_tgt_free_skb adf_nbuf_free
65 #define OFDM_PLCP_BITS 22
66 #define HT_RC_2_MCS(_rc) ((_rc) & 0x0f)
67 #define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
73 #define HT_LTF(_ns) (4 * (_ns))
74 #define SYMBOL_TIME(_ns) ((_ns) << 2) // ns * 4 us
75 #define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) // ns * 3.6 us
/* Data bits carried per OFDM symbol for HT MCS 0..15:
 * [mcs][0] = 20 MHz channel, [mcs][1] = 40 MHz channel.
 * Indexed via HT_RC_2_MCS() in ath_pkt_duration().
 * NOTE(review): the initializer's closing brace is elided in this excerpt. */
77 static a_uint16_t bits_per_symbol[][2] = {
79 { 26, 54 }, // 0: BPSK
80 { 52, 108 }, // 1: QPSK 1/2
81 { 78, 162 }, // 2: QPSK 3/4
82 { 104, 216 }, // 3: 16-QAM 1/2
83 { 156, 324 }, // 4: 16-QAM 3/4
84 { 208, 432 }, // 5: 64-QAM 2/3
85 { 234, 486 }, // 6: 64-QAM 3/4
86 { 260, 540 }, // 7: 64-QAM 5/6
87 { 52, 108 }, // 8: BPSK
88 { 104, 216 }, // 9: QPSK 1/2
89 { 156, 324 }, // 10: QPSK 3/4
90 { 208, 432 }, // 11: 16-QAM 1/2
91 { 312, 648 }, // 12: 16-QAM 3/4
92 { 416, 864 }, // 13: 64-QAM 2/3
93 { 468, 972 }, // 14: 64-QAM 3/4
94 { 520, 1080 }, // 15: 64-QAM 5/6
97 void owltgt_tx_processq(struct ath_softc_tgt *sc, struct ath_txq *txq,
98 owl_txq_state_t txqstate);
99 static void ath_tgt_txqaddbuf(struct ath_softc_tgt *sc, struct ath_txq *txq,
100 struct ath_buf *bf, struct ath_desc *lastds);
101 void ath_rate_findrate_11n_Hardcoded(struct ath_softc_tgt *sc,
102 struct ath_rc_series series[]);
103 void ath_buf_set_rate_Hardcoded(struct ath_softc_tgt *sc,
104 struct ath_tx_buf *bf) ;
105 static a_int32_t ath_tgt_txbuf_setup(struct ath_softc_tgt *sc,
106 struct ath_tx_buf *bf, ath_data_hdr_t *dh);
107 static void ath_tx_freebuf(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
108 static void ath_tx_uc_comp(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
109 static void ath_update_stats(struct ath_softc_tgt *sc, struct ath_buf *bf);
110 void adf_print_buf(adf_nbuf_t buf);
111 static void ath_tgt_tx_enqueue(struct ath_txq *txq, struct ath_atx_tid *tid);
113 struct ath_buf * ath_tgt_tx_prepare(struct ath_softc_tgt *sc,
114 adf_nbuf_t skb, ath_data_hdr_t *dh);
115 void ath_tgt_tx_comp_aggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
116 struct ieee80211_frame *ATH_SKB_2_WH(adf_nbuf_t skb);
118 void ath_tgt_tx_send_normal(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
120 static void ath_tgt_tx_sched_normal(struct ath_softc_tgt *sc, ath_atx_tid_t *tid);
121 static void ath_tgt_tx_sched_aggr(struct ath_softc_tgt *sc, ath_atx_tid_t *tid);
123 static struct ath_node_target * owltarget_findnode(struct tx_frame_heade *dh,
124 struct ath_softc_tgt *sc,
125 struct adf_nbuf_t *skb);
126 extern a_int32_t ath_chainmask_sel_logic(void *);
127 static a_int32_t ath_get_pktlen(struct ath_buf *bf, a_int32_t hdrlen);
128 static void ath_tgt_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
130 typedef void (*ath_ft_set_atype_t)(struct ath_softc_tgt *sc, struct ath_buf *bf);
133 ath_tx_set_retry(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
136 ath_bar_tx(struct ath_softc_tgt *sc, ath_atx_tid_t *tid, struct ath_tx_buf *bf);
138 ath_tx_update_baw(ath_atx_tid_t *tid, int seqno);
140 ath_tx_retry_subframe(struct ath_softc_tgt *sc, struct ath_tx_buf *bf,
141 ath_bufhead *bf_q, struct ath_tx_buf **bar);
144 ath_tx_comp_aggr_error(struct ath_softc_tgt *sc, struct ath_tx_buf *bf, ath_atx_tid_t *tid);
146 void ath_tx_addto_baw(ath_atx_tid_t *tid, struct ath_tx_buf *bf);
147 static inline void ath_tx_retry_unaggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
148 static void ath_tx_comp_unaggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
149 static void ath_update_aggr_stats(struct ath_softc_tgt *sc, struct ath_tx_desc *ds,
150 int nframes, int nbad);
151 static inline void ath_aggr_resume_tid(struct ath_softc_tgt *sc, ath_atx_tid_t *tid);
152 static void ath_tx_comp_cleanup(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
154 int ath_tgt_tx_add_to_aggr(struct ath_softc_tgt *sc,
155 struct ath_buf *bf,int datatype,
156 ath_atx_tid_t *tid, int is_burst);
/* Return a pointer to the 802.11 frame header at the head of @skb's data.
 * NOTE(review): local declarations (anbdata/anblen) and braces are elided
 * in this excerpt. */
158 struct ieee80211_frame *ATH_SKB_2_WH(adf_nbuf_t skb)
163 adf_nbuf_peek_header(skb, &anbdata, &anblen);
165 return((struct ieee80211_frame *)anbdata);
/* Replace the adf_os_cpu_to_le16 macro with an unconditional 16-bit byte
 * swap (presumably because the target CPU is big-endian — verify). */
168 #undef adf_os_cpu_to_le16
170 static a_uint16_t adf_os_cpu_to_le16(a_uint16_t x)
172 return ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8));
/* Resume a paused TID: if it has buffered frames, re-enqueue it on its
 * AC's txq and kick the scheduler.  (Lines elided in this excerpt.) */
176 ath_aggr_resume_tid(struct ath_softc_tgt *sc, ath_atx_tid_t *tid)
180 txq = TID_TO_ACTXQ(tid->tidno);
183 if (asf_tailq_empty(&tid->buf_q))
186 ath_tgt_tx_enqueue(txq, tid);
187 ath_tgt_txq_schedule(sc, txq);
/* Pause aggregation on @tid.  NOTE(review): the whole body is elided in
 * this excerpt; presumably it sets tid->paused — confirm in full source. */
191 ath_aggr_pause_tid(struct ath_softc_tgt *sc, ath_atx_tid_t *tid)
/* Compute the airtime (PktDuration) of a frame at rate index @rix.
 * Legacy rates defer to ath_hal_computetxtime(); HT rates count OFDM
 * symbols from bits_per_symbol[] and add the HT preamble fields.
 * @width:   nonzero for a 40 MHz transmission.
 * @half_gi: nonzero for short guard interval (3.6 us symbols).
 * (Several lines, including the legacy-rate condition, are elided.) */
196 static a_uint32_t ath_pkt_duration(struct ath_softc_tgt *sc,
197 a_uint8_t rix, struct ath_tx_buf *bf,
198 a_int32_t width, a_int32_t half_gi)
200 const HAL_RATE_TABLE *rt = sc->sc_currates;
201 a_uint32_t nbits, nsymbits, duration, nsymbols;
/* Aggregates use the accumulated aggregate length, not the frame length. */
206 pktlen = bf->bf_isaggr ? bf->bf_al : bf->bf_pktlen;
207 rc = rt->info[rix].rateCode;
210 return ath_hal_computetxtime(sc->sc_ah, rt, pktlen, rix,
/* OFDM_PLCP_BITS accounts for SERVICE + tail bits in the PLCP. */
213 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
214 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
215 nsymbols = (nbits + nsymbits - 1) / nsymbits;   /* round up */
218 duration = SYMBOL_TIME(nsymbols);
220 duration = SYMBOL_TIME_HALFGI(nsymbols);
/* Add HT preamble: L-STF/L-LTF/L-SIG + HT-SIG/HT-STF + per-stream HT-LTFs. */
222 streams = HT_RC_2_STREAMS(rc);
223 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
/* DMA-map the first skb of @bf's skb queue for device-bound transfer. */
228 static void ath_dma_map(struct ath_softc_tgt *sc, struct ath_buf *bf)
230 adf_nbuf_t skb = bf->bf_skb;
232 skb = adf_nbuf_queue_first(&bf->bf_skbhead);
233 adf_nbuf_map(sc->sc_dev, bf->bf_dmamap, skb, ADF_OS_DMA_TO_DEVICE);
/* Undo ath_dma_map(): release @bf's DMA mapping.  The skb lookup mirrors
 * ath_dma_map() even though adf_nbuf_unmap() does not take the skb. */
236 static void ath_dma_unmap(struct ath_softc_tgt *sc, struct ath_buf *bf)
238 adf_nbuf_t skb = bf->bf_skb;
240 skb = adf_nbuf_queue_first(&bf->bf_skbhead);
241 adf_nbuf_unmap( sc->sc_dev, bf->bf_dmamap, ADF_OS_DMA_TO_DEVICE);
/* Populate one hardware TX descriptor per DMA segment of @bf, chaining
 * them via ds_link; the last segment terminates the chain.
 * (Loop-body braces and the ds_link=0 terminator line are elided here.) */
244 static void ath_filltxdesc(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
246 struct ath_desc *ds0, *ds = bf->bf_desc;
250 adf_nbuf_dmamap_info(bf->bf_dmamap, &bf->bf_dmamap_info);
252 for (i = 0; i < bf->bf_dmamap_info.nsegs; i++, ds++) {
254 ds->ds_data = bf->bf_dmamap_info.dma_segs[i].paddr;
256 if (i == (bf->bf_dmamap_info.nsegs - 1)) {
260 ds->ds_link = ATH_BUF_GET_DESC_PHY_ADDR_WITH_IDX(bf, i+1);
262 ath_hal_filltxdesc(sc->sc_ah, ds
263 , bf->bf_dmamap_info.dma_segs[i].len
265 , i == (bf->bf_dmamap_info.nsegs - 1)
/* Prepare @bf's descriptor chain: fold the protection mode into the HAL
 * flags, write the 11n TX descriptor header, then fill the per-segment
 * descriptors.  (The switch's break/default lines and most
 * ath_hal_set11n_txdesc() arguments are elided in this excerpt.) */
270 static void ath_tx_tgt_setds(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
272 struct ath_desc *ds0, *ds = bf->bf_desc;
274 adf_nbuf_queue_t skbhead;
275 a_int32_t i, dscnt = 0;
277 switch (bf->bf_protmode) {
278 case IEEE80211_PROT_RTSCTS:
279 bf->bf_flags |= HAL_TXDESC_RTSENA;
281 case IEEE80211_PROT_CTSONLY:
282 bf->bf_flags |= HAL_TXDESC_CTSENA;
288 ath_hal_set11n_txdesc(sc->sc_ah, ds
294 , bf->bf_flags | HAL_TXDESC_INTREQ);
296 ath_filltxdesc(sc, bf);
/* Swap @bf with the held spare buffer (sc_txbuf_held): move the skb and
 * all per-frame state into the spare, re-map/re-set its descriptors, and
 * stash the now-empty @bf as the new held spare.  Returns the spare
 * (via elided lines) so the caller continues with a fresh descriptor set.
 * (The third parameter's name and several lines are elided here.) */
299 static struct ath_buf *ath_buf_toggle(struct ath_softc_tgt *sc,
300 struct ath_tx_buf *bf,
303 struct ath_tx_buf *tmp = NULL;
304 adf_nbuf_t buf = NULL;
306 adf_os_assert(sc->sc_txbuf_held != NULL);
308 tmp = sc->sc_txbuf_held;
311 ath_dma_unmap(sc, bf);
312 adf_nbuf_queue_init(&tmp->bf_skbhead);
313 buf = adf_nbuf_queue_remove(&bf->bf_skbhead);
315 adf_nbuf_queue_add(&tmp->bf_skbhead, buf);
/* bf must have handed over its only skb. */
317 adf_os_assert(adf_nbuf_queue_len(&bf->bf_skbhead) == 0);
/* Copy every piece of per-frame TX state onto the spare buffer. */
319 tmp->bf_next = bf->bf_next;
320 tmp->bf_endpt = bf->bf_endpt;
321 tmp->bf_tidno = bf->bf_tidno;
322 tmp->bf_skb = bf->bf_skb;
323 tmp->bf_node = bf->bf_node;
324 tmp->bf_isaggr = bf->bf_isaggr;
325 tmp->bf_flags = bf->bf_flags;
326 tmp->bf_state = bf->bf_state;
327 tmp->bf_retries = bf->bf_retries;
328 tmp->bf_comp = bf->bf_comp;
329 tmp->bf_nframes = bf->bf_nframes;
330 tmp->bf_cookie = bf->bf_cookie;
342 ath_dma_map(sc, tmp);
343 ath_tx_tgt_setds(sc, tmp);
/* The drained original becomes the new held spare. */
346 sc->sc_txbuf_held = bf;
/* Drain @head, returning each skb to the HTC layer for endpoint @endpt. */
351 static void ath_tgt_skb_free(struct ath_softc_tgt *sc,
352 adf_nbuf_queue_t *head,
353 HTC_ENDPOINT_ID endpt)
357 while (adf_nbuf_queue_len(head) != 0) {
358 tskb = adf_nbuf_queue_remove(head);
359 ath_free_tx_skb(sc->tgt_htc_handle,endpt,tskb);
/* Completion path for a TX buffer: unmap DMA, return skbs to HTC, swap in
 * the held spare via ath_buf_toggle(), and put the buffer back on the
 * free list.  (Some lines are elided in this excerpt.) */
363 static void ath_buf_comp(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
365 ath_dma_unmap(sc, bf);
366 ath_tgt_skb_free(sc, &bf->bf_skbhead,bf->bf_endpt);
369 bf = ath_buf_toggle(sc, bf, 0);
371 asf_tailq_insert_tail(&sc->sc_txbuf, bf, bf_list);
/* Build the 4-entry HAL_11N_RATE_SERIES for @bf from its rate-control
 * series (bf_rcs), decide RTS/CTS protection, compute per-series
 * PktDuration, and program the rate scenario into the descriptor.
 * (Loop headers, if/else lines and several statements are elided in this
 * excerpt, so the exact branch structure cannot be confirmed from here.) */
375 static void ath_buf_set_rate(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
377 struct ath_hal *ah = sc->sc_ah;
378 const HAL_RATE_TABLE *rt;
379 struct ath_desc *ds = bf->bf_desc;
380 HAL_11N_RATE_SERIES series[4];
382 a_uint8_t rix, cix, rtsctsrate;
383 a_uint32_t aggr_limit_with_rts;
384 a_uint32_t ctsduration = 0;
385 a_int32_t prot_mode = AH_FALSE;
387 rt = sc->sc_currates;
388 rix = bf->bf_rcs[0].rix;
389 flags = (bf->bf_flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA));
390 cix = rt->info[sc->sc_protrix].controlRate;
/* Enable protection for OFDM/HT unicast frames that expect an ACK. */
392 if (bf->bf_protmode != IEEE80211_PROT_NONE &&
393 (rt->info[rix].phy == IEEE80211_T_OFDM ||
394 rt->info[rix].phy == IEEE80211_T_HT) &&
395 (bf->bf_flags & HAL_TXDESC_NOACK) == 0) {
396 cix = rt->info[sc->sc_protrix].controlRate;
399 if (ath_hal_htsupported(ah) && (!bf->bf_ismcast))
400 flags = HAL_TXDESC_RTSENA;
403 if (bf->bf_rcs[i].tries) {
404 cix = rt->info[bf->bf_rcs[i].rix].controlRate;
/* RTS cannot cover aggregates longer than the hardware limit. */
411 ath_hal_getrtsaggrlimit(sc->sc_ah, &aggr_limit_with_rts);
413 if (bf->bf_isaggr && aggr_limit_with_rts &&
414 bf->bf_al > aggr_limit_with_rts) {
415 flags &= ~(HAL_TXDESC_RTSENA);
418 adf_os_mem_set(series, 0, sizeof(HAL_11N_RATE_SERIES) * 4);
420 for (i = 0; i < 4; i++) {
421 if (!bf->bf_rcs[i].tries)
424 rix = bf->bf_rcs[i].rix;
426 series[i].Rate = rt->info[rix].rateCode |
427 (bf->bf_shpream ? rt->info[rix].shortPreamble : 0);
429 series[i].Tries = bf->bf_rcs[i].tries;
/* Translate rate-control flags to HAL rate-series flags (STBC variant). */
431 series[i].RateFlags = ((bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ?
432 HAL_RATESERIES_RTS_CTS : 0 ) |
433 ((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ?
434 HAL_RATESERIES_2040 : 0 ) |
435 ((bf->bf_rcs[i].flags & ATH_RC_HT40_SGI_FLAG) ?
436 HAL_RATESERIES_HALFGI : 0 ) |
437 ((bf->bf_rcs[i].flags & ATH_RC_TX_STBC_FLAG) ?
438 HAL_RATESERIES_STBC: 0);
/* Alternate translation without STBC (selecting branch is elided). */
440 series[i].RateFlags = ((bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ?
441 HAL_RATESERIES_RTS_CTS : 0 ) |
442 ((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ?
443 HAL_RATESERIES_2040 : 0 ) |
444 ((bf->bf_rcs[i].flags & ATH_RC_HT40_SGI_FLAG) ?
445 HAL_RATESERIES_HALFGI : 0 );
447 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
448 (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
449 (bf->bf_rcs[i].flags & ATH_RC_HT40_SGI_FLAG));
451 series[i].ChSel = sc->sc_ic.ic_tx_chainmask;
454 series[i].RateFlags |= HAL_RATESERIES_RTS_CTS;
456 if (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG)
457 series[i].RateFlags |= HAL_RATESERIES_RTS_CTS;
460 rtsctsrate = rt->info[cix].rateCode |
461 (bf->bf_shpream ? rt->info[cix].shortPreamble : 0);
463 ath_hal_set11n_ratescenario(ah, ds, 1,
464 rtsctsrate, ctsduration,
/* Thin wrapper over ath_rate_findrate() with fixed policy arguments
 * (probing allowed).  (Several parameters are elided in this excerpt.) */
469 static void ath_tgt_rate_findrate(struct ath_softc_tgt *sc,
470 struct ath_node_target *an,
471 a_int32_t shortPreamble,
477 struct ath_rc_series series[],
480 ath_rate_findrate(sc, an, 1, frameLen, 10, 4, 1,
481 ATH_RC_PROBE_ALLOWED, series, isProbe);
/* Reset a TID to a pristine state: sequence window, BAW bookkeeping,
 * scheduling flag, buffer queue and the TX-buffer bitmap. */
484 static void owl_tgt_tid_init(struct ath_atx_tid *tid)
488 tid->seq_start = tid->seq_next = 0;
489 tid->baw_size = WME_MAX_BA;
490 tid->baw_head = tid->baw_tail = 0;
493 tid->sched = AH_FALSE;
495 asf_tailq_init(&tid->buf_q);
497 for (i = 0; i < ATH_TID_MAX_BUFS; i++) {
498 TX_BUF_BITMAP_CLR(tid->tx_buf_bitmap, i);
/* Finish a TID cleanup cycle: clear the in-progress flag, re-initialize
 * the TID if a re-init was requested while cleanup ran, then resume it.
 * (The guard checking outstanding frames is elided in this excerpt.) */
502 static void owl_tgt_tid_cleanup(struct ath_softc_tgt *sc,
503 struct ath_atx_tid *tid)
510 tid->flag &= ~TID_CLEANUP_INPROGRES;
512 if (tid->flag & TID_REINITIALIZE) {
513 adf_os_print("TID REINIT DONE for tid %p\n", tid);
514 tid->flag &= ~TID_REINITIALIZE;
515 owl_tgt_tid_init(tid);
517 ath_aggr_resume_tid(sc, tid);
/* Initialize all WME TIDs of a node.  A TID still mid-cleanup is only
 * flagged for re-initialization; otherwise it is reset immediately.
 * (Per-TID field assignments and the printed argument are elided.) */
521 void owl_tgt_node_init(struct ath_node_target * an)
523 struct ath_atx_tid *tid;
526 for (tidno = 0, tid = &an->tid[tidno]; tidno < WME_NUM_TID;tidno++, tid++) {
530 if ( tid->flag & TID_CLEANUP_INPROGRES ) {
531 tid->flag |= TID_REINITIALIZE;
532 adf_os_print("tid[%p]->incomp is not 0: %d\n",
535 owl_tgt_tid_init(tid);
/* Reset both WMI TX-status accumulation buffers to empty. */
540 void ath_tx_status_clear(struct ath_softc_tgt *sc)
544 for (i = 0; i < 2; i++) {
545 sc->tx_status[i].cnt = 0;
/* Return the first TX-status event buffer with room for another entry,
 * or NULL if both are full.  (break/return lines are elided here.) */
549 struct WMI_TXSTATUS_EVENT* ath_tx_status_get(struct ath_softc_tgt *sc)
551 WMI_TXSTATUS_EVENT *txs = NULL;
554 for (i = 0; i < 2; i++) {
555 if (sc->tx_status[i].cnt < HTC_MAX_TX_STATUS) {
556 txs = &sc->tx_status[i];
/* Append a per-frame TX status entry (cookie, endpoint, ACK/FILT flags,
 * rate info) to the pending WMI status event.  Skipped entirely while the
 * TX path is draining.  (NULL-check on txs and cnt++ lines are elided.) */
564 void ath_tx_status_update(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
566 struct ath_tx_desc *ds = bf->bf_lastds;
567 WMI_TXSTATUS_EVENT *txs;
569 if (sc->sc_tx_draining)
572 txs = ath_tx_status_get(sc);
576 txs->txstatus[txs->cnt].cookie = bf->bf_cookie;
/* ts_rate carries the endpoint id in the EPID field for the host. */
577 txs->txstatus[txs->cnt].ts_rate = SM(bf->bf_endpt, ATH9K_HTC_TXSTAT_EPID);
579 if (ds->ds_txstat.ts_status & HAL_TXERR_FILT)
580 txs->txstatus[txs->cnt].ts_flags |= ATH9K_HTC_TXSTAT_FILT;
/* No error bits set at all -> the frame was ACKed. */
582 if (!(ds->ds_txstat.ts_status & HAL_TXERR_XRETRY) &&
583 !(ds->ds_txstat.ts_status & HAL_TXERR_FIFO) &&
584 !(ds->ds_txstat.ts_status & HAL_TXERR_TIMER_EXPIRED) &&
585 !(ds->ds_txstat.ts_status & HAL_TXERR_FILT))
586 txs->txstatus[txs->cnt].ts_flags |= ATH9K_HTC_TXSTAT_ACK;
588 ath_tx_status_update_rate(sc, bf->bf_rcs, ds->ds_txstat.ts_rate, txs);
/* Aggregate-path variant of ath_tx_status_update(): record cookie,
 * endpoint and (conditionally — the condition line is elided) the ACK
 * flag for one subframe of an aggregate. */
593 void ath_tx_status_update_aggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf,
594 struct ath_tx_desc *ds, struct ath_rc_series rcs[],
597 WMI_TXSTATUS_EVENT *txs;
599 if (sc->sc_tx_draining)
602 txs = ath_tx_status_get(sc);
606 txs->txstatus[txs->cnt].cookie = bf->bf_cookie;
607 txs->txstatus[txs->cnt].ts_rate = SM(bf->bf_endpt, ATH9K_HTC_TXSTAT_EPID);
610 txs->txstatus[txs->cnt].ts_flags |= ATH9K_HTC_TXSTAT_ACK;
613 ath_tx_status_update_rate(sc, rcs, ds->ds_txstat.ts_rate, txs);
/* Flush any accumulated TX-status entries to the host as WMI events and
 * reset the buffers.  No-op while draining. */
618 void ath_tx_status_send(struct ath_softc_tgt *sc)
622 if (sc->sc_tx_draining)
625 for (i = 0; i < 2; i++) {
626 if (sc->tx_status[i].cnt) {
627 wmi_event(sc->tgt_wmi_handle, WMI_TXSTATUS_EVENTID,
628 &sc->tx_status[i], sizeof(WMI_TXSTATUS_EVENT));
629 /* FIXME: Handle failures. */
630 sc->tx_status[i].cnt = 0;
/* Process the CAB (content-after-beacon) queue with SWBA interrupts
 * masked so beacon handling cannot race the queue walk. */
635 static void owltgt_tx_process_cabq(struct ath_softc_tgt *sc, struct ath_txq *txq)
637 ath_hal_intrset(sc->sc_ah, sc->sc_imask & ~HAL_INT_SWBA);
638 owltgt_tx_processq(sc, txq, OWL_TXQ_ACTIVE);
639 ath_hal_intrset(sc->sc_ah, sc->sc_imask);
/* TX-completion tasklet: walk the data queues (the last 6 hardware
 * queues are excluded — presumably reserved; confirm in full source),
 * process completions, then push accumulated status to the host. */
642 void owl_tgt_tx_tasklet(TQUEUE_ARG data)
644 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)data;
646 a_uint32_t qcumask = ((1 << HAL_NUM_TX_QUEUES) - 1);
650 ath_tx_status_clear(sc);
652 for (i = 0; i < (HAL_NUM_TX_QUEUES - 6); i++) {
653 txq = ATH_TXQ(sc, i);
655 if (ATH_TXQ_SETUP(sc, i)) {
656 if (txq == sc->sc_cabq)
657 owltgt_tx_process_cabq(sc, txq);
659 owltgt_tx_processq(sc, txq, OWL_TXQ_ACTIVE);
663 ath_tx_status_send(sc);
/* Reap completed frames from @txq.  For each head buffer whose descriptor
 * the HAL reports done (or force-completed when the queue is stopped),
 * remove it, report status, and recycle it; finally reschedule the queue.
 * (Loop construct, ds assignment and completion-callback dispatch lines
 * are elided in this excerpt.) */
666 void owltgt_tx_processq(struct ath_softc_tgt *sc, struct ath_txq *txq,
667 owl_txq_state_t txqstate)
669 struct ath_tx_buf *bf;
670 struct ath_tx_desc *ds;
674 if (asf_tailq_empty(&txq->axq_q)) {
675 txq->axq_link = NULL;
676 txq->axq_linkbuf = NULL;
680 bf = asf_tailq_first(&txq->axq_q);
683 status = ath_hal_txprocdesc(sc->sc_ah, ds);
685 if (status == HAL_EINPROGRESS) {
686 if (txqstate == OWL_TXQ_ACTIVE)
/* Stopped queue: fake a filtered completion so the frame is reaped. */
688 else if (txqstate == OWL_TXQ_STOPPED) {
689 __stats(sc, tx_stopfiltered);
690 ds->ds_txstat.ts_flags = 0;
691 ds->ds_txstat.ts_status = HAL_OK;
693 ds->ds_txstat.ts_flags = HAL_TX_SW_FILTERED;
697 ATH_TXQ_REMOVE_HEAD(txq, bf, bf_list);
698 if ((asf_tailq_empty(&txq->axq_q))) {
699 __stats(sc, tx_qnull);
700 txq->axq_link = NULL;
701 txq->axq_linkbuf = NULL;
707 ath_tx_status_update(sc, bf);
708 ath_buf_comp(sc, bf);
711 if (txqstate == OWL_TXQ_ACTIVE) {
712 ath_tgt_txq_schedule(sc, txq);
/* Static duplicate of ATH_SKB_2_WH(): peek the 802.11 header of @skb.
 * (Local declarations are elided in this excerpt.) */
717 static struct ieee80211_frame* ATH_SKB2_WH(adf_nbuf_t skb)
722 adf_nbuf_peek_header(skb, &anbdata, &anblen);
723 return((struct ieee80211_frame *)anbdata);
/* Free every buffered frame on @tid and rewind its sequence/BAW state so
 * the window is empty and consistent. */
727 ath_tgt_tid_drain(struct ath_softc_tgt *sc, struct ath_atx_tid *tid)
729 struct ath_tx_buf *bf;
731 while (!asf_tailq_empty(&tid->buf_q)) {
732 TAILQ_DEQ(&tid->buf_q, bf, bf_list);
733 ath_tx_freebuf(sc, bf);
736 tid->seq_next = tid->seq_start;
737 tid->baw_tail = tid->baw_head;
/* Completion handler for non-aggregate frames: advance TID cleanup if one
 * is pending, report unicast completion, then free the buffer.
 * (The multicast/unicast branch lines are elided in this excerpt.) */
740 static void ath_tgt_tx_comp_normal(struct ath_softc_tgt *sc,
741 struct ath_tx_buf *bf)
743 struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
744 struct ath_desc *ds = bf->bf_lastds;
745 ath_atx_tid_t *tid = ATH_AN_2_TID(an, bf->bf_tidno);
747 if (tid->flag & TID_CLEANUP_INPROGRES) {
748 owl_tgt_tid_cleanup(sc, tid);
752 ath_tx_uc_comp(sc, bf);
755 ath_tx_freebuf(sc, bf);
/* Look up the node table entry for @node_index, returning its
 * ieee80211_node_target, or (via elided lines) NULL for an out-of-range
 * index or a node with no VAP attached. */
758 static struct ieee80211_node_target * ath_tgt_find_node(struct ath_softc_tgt *sc,
759 a_int32_t node_index)
761 struct ath_node_target *an;
763 struct ieee80211_node_target *ni;
765 if (node_index > TARGET_NODE_MAX)
768 an = &sc->sc_sta[node_index];
772 if (ni->ni_vap == NULL) {
/* Pop a TX buffer from the free list, zeroing its per-frame state.
 * (NULL-check and return statements are elided in this excerpt.) */
781 static struct ath_buf* ath_buf_alloc(struct ath_softc_tgt *sc)
783 struct ath_tx_buf *bf = NULL;
785 bf = asf_tailq_first(&sc->sc_txbuf);
787 adf_os_mem_set(&bf->bf_state, 0, sizeof(struct ath_buf_state));
788 asf_tailq_remove(&sc->sc_txbuf, bf, bf_list);
/* Convert a host-delivered data frame (@skb + ath_data_hdr @dh) into a
 * ready-to-queue ath_buf: resolve node and TID, drop frames for TIDs
 * pending re-init, copy key/protection state from the header, attach the
 * skb, and build the descriptors.  Error/early-return paths and the
 * endpoint/cookie assignments are elided in this excerpt. */
796 struct ath_buf* ath_tgt_tx_prepare(struct ath_softc_tgt *sc,
797 adf_nbuf_t skb, ath_data_hdr_t *dh)
799 struct ath_tx_buf *bf;
801 struct ieee80211_node_target *ni;
802 a_uint32_t flags = adf_os_ntohl(dh->flags);
803 struct ath_atx_tid *tid;
805 ni = ath_tgt_find_node(sc, dh->ni_index);
809 tid = ATH_AN_2_TID(ATH_NODE_TARGET(ni), dh->tidno);
810 if (tid->flag & TID_REINITIALIZE) {
811 adf_os_print("drop frame due to TID reinit\n");
815 bf = ath_buf_alloc(sc);
817 __stats(sc, tx_nobufs);
821 bf->bf_tidno = dh->tidno;
822 bf->bf_txq = TID_TO_ACTXQ(bf->bf_tidno);
823 bf->bf_keytype = dh->keytype;
824 bf->bf_keyix = dh->keyix;
825 bf->bf_protmode = dh->flags & (IEEE80211_PROT_RTSCTS | IEEE80211_PROT_CTSONLY);
826 bf->bf_node = (struct ath_node_target *)ni;
828 adf_nbuf_queue_add(&bf->bf_skbhead, skb);
829 skb = adf_nbuf_queue_first(&(bf->bf_skbhead));
831 if (adf_nbuf_queue_len(&(bf->bf_skbhead)) == 0) {
832 __stats(sc, tx_noskbs);
840 ath_tgt_txbuf_setup(sc, bf, dh);
842 ath_tx_tgt_setds(sc, bf);
/* Stamp the next TID sequence number into the frame's 802.11 sequence
 * field, preserving the fragment number, and advance seq_next unless more
 * fragments follow.  (QoS/management branch lines are elided here.) */
847 static void ath_tgt_tx_seqno_normal(struct ath_tx_buf *bf)
849 struct ieee80211_node_target *ni = bf->bf_node;
850 struct ath_node_target *an = ATH_NODE_TARGET(ni);
851 struct ieee80211_frame *wh = ATH_SKB_2_WH(bf->bf_skb);
852 struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno);
/* Low nibble of i_seq[0] is the fragment number — keep it intact. */
854 u_int8_t fragno = (wh->i_seq[0] & 0xf);
856 INCR(ni->ni_txseqmgmt, IEEE80211_SEQ_MAX);
858 bf->bf_seqno = (tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
860 *(u_int16_t *)wh->i_seq = adf_os_cpu_to_le16(bf->bf_seqno);
861 wh->i_seq[0] |= fragno;
863 if (!(wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG))
864 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
/* Adjust @bf for encryption: clear-key for unprotected frames, otherwise
 * grow bf_pktlen by the cipher's ICV/MIC overhead and, for AES/TKIP,
 * perform target-side crypto encapsulation.  (break/default lines of the
 * switch and the return statements are elided in this excerpt; note the
 * visible cases appear to cascade without breaks from here — confirm.) */
867 static a_int32_t ath_key_setup(struct ieee80211_node_target *ni,
868 struct ath_tx_buf *bf)
870 struct ieee80211_frame *wh = ATH_SKB_2_WH(bf->bf_skb);
871 const struct ieee80211_cipher *cip;
872 struct ieee80211_key *k;
874 if (!(wh->i_fc[1] & IEEE80211_FC1_WEP)) {
875 bf->bf_keytype = HAL_KEY_TYPE_CLEAR;
876 bf->bf_keyix = HAL_TXKEYIX_INVALID;
880 switch (bf->bf_keytype) {
881 case HAL_KEY_TYPE_WEP:
882 bf->bf_pktlen += IEEE80211_WEP_ICVLEN;
884 case HAL_KEY_TYPE_AES:
885 bf->bf_pktlen += IEEE80211_WEP_MICLEN;
887 case HAL_KEY_TYPE_TKIP:
888 bf->bf_pktlen += IEEE80211_WEP_ICVLEN;
894 if (bf->bf_keytype == HAL_KEY_TYPE_AES ||
895 bf->bf_keytype == HAL_KEY_TYPE_TKIP)
896 ieee80211_tgt_crypto_encap(wh, ni, bf->bf_keytype);
/* Queue a unicast frame on its hardware queue.  If the queue was empty,
 * hand the descriptor to the hardware directly; otherwise link it to the
 * previous tail and, if the TXE bit (register 0x840) shows the DMA engine
 * stopped, re-arm it.  Finally update the link pointer and start TX. */
901 static void ath_tgt_txq_add_ucast(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
903 struct ath_hal *ah = sc->sc_ah;
905 struct ath_node_target *an;
907 static a_int32_t count = 0,i;
908 volatile a_int32_t txe_val;
914 status = ath_hal_txprocdesc(sc->sc_ah, bf->bf_lastds);
916 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
918 if (txq->axq_link == NULL) {
919 ath_hal_puttxbuf(ah, txq->axq_qnum, ATH_BUF_GET_DESC_PHY_ADDR(bf));
921 *txq->axq_link = ATH_BUF_GET_DESC_PHY_ADDR(bf);
/* NOTE(review): 0x840 is presumably the MAC's Q_TXE register — verify. */
923 txe_val = OS_REG_READ(ah, 0x840);
924 if (!(txe_val & (1<< txq->axq_qnum)))
925 ath_hal_puttxbuf(ah, txq->axq_qnum, ATH_BUF_GET_DESC_PHY_ADDR(bf));
928 txq->axq_link = &bf->bf_lastds->ds_link;
929 ath_hal_txstart(ah, txq->axq_qnum);
/* Fill in the software TX state of @bf from the frame header and host
 * flags: sequence number, queue-add hook, header/packet lengths,
 * multicast flag, key setup, preamble choice, and base HAL flags.
 * (Return statements and RTS/NOACK flag lines are elided here.) */
932 static a_int32_t ath_tgt_txbuf_setup(struct ath_softc_tgt *sc,
933 struct ath_tx_buf *bf,
937 struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
938 struct ieee80211_frame *wh = ATH_SKB2_WH(bf->bf_skb);
939 struct ieee80211_node_target *ni = (struct ieee80211_node_target *)an;
940 struct ieee80211vap_target *vap = ni->ni_vap;
941 struct ieee80211com_target *ic = &sc->sc_ic;
942 a_int32_t retval, fragno = 0;
943 a_uint32_t flags = adf_os_ntohl(dh->flags);
945 ath_tgt_tx_seqno_normal(bf);
947 bf->bf_txq_add = ath_tgt_txq_add_ucast;
948 bf->bf_hdrlen = ieee80211_anyhdrsize(wh);
949 bf->bf_pktlen = ath_get_pktlen(bf, bf->bf_hdrlen);
950 bf->bf_ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
952 if ((retval = ath_key_setup(bf->bf_node, bf)) < 0)
955 if (flags & ATH_SHORT_PREAMBLE)
956 bf->bf_shpream = AH_TRUE;
958 bf->bf_shpream = AH_FALSE;
960 bf->bf_flags = HAL_TXDESC_CLRDMASK;
961 bf->bf_atype = HAL_PKT_TYPE_NORMAL;
/* Compute the over-the-air length of @bf's frame: skb length minus the
 * header's alignment padding (hdrlen & 3), plus the FCS. */
967 ath_get_pktlen(struct ath_buf *bf, a_int32_t hdrlen)
969 adf_nbuf_t skb = bf->bf_skb;
972 skb = adf_nbuf_queue_first(&bf->bf_skbhead);
973 pktlen = adf_nbuf_len(skb);
975 pktlen -= (hdrlen & 3);
976 pktlen += IEEE80211_CRC_LEN;
/* Transmit a non-aggregated frame: pick rates via rate control for
 * unicast, or a fixed minimum-rate series for multicast (the multicast
 * branch's rix/tries setup lines are elided), program the rate scenario,
 * and hand the buffer to its queue-add hook. */
982 ath_tgt_tx_send_normal(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
984 struct ath_node_target *an = bf->bf_node;
985 struct ath_rc_series rcs[4];
986 struct ath_rc_series mrcs[4];
987 a_int32_t shortPreamble = 0;
988 a_int32_t isProbe = 0;
990 adf_os_mem_set(rcs, 0, sizeof(struct ath_rc_series)*4 );
991 adf_os_mem_set(mrcs, 0, sizeof(struct ath_rc_series)*4 );
993 if (!bf->bf_ismcast) {
994 ath_tgt_rate_findrate(sc, an, shortPreamble,
997 memcpy(bf->bf_rcs, rcs, sizeof(rcs));
/* Multicast: single fixed rate, no fallback series. */
999 mrcs[1].tries = mrcs[2].tries = mrcs[3].tries = 0;
1000 mrcs[1].rix = mrcs[2].rix = mrcs[3].rix = 0;
1004 memcpy(bf->bf_rcs, mrcs, sizeof(mrcs));
1007 ath_buf_set_rate(sc, bf);
1008 bf->bf_txq_add(sc, bf);
/* Fully release a TX buffer: scrub aggregation state from every
 * descriptor, unmap DMA, return skbs to HTC, swap through the held spare,
 * clear retry state and put the buffer back on the free list. */
1012 ath_tx_freebuf(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
1015 struct ath_desc *bfd = NULL;
1017 for (bfd = bf->bf_desc, i = 0; i < bf->bf_dmamap_info.nsegs; bfd++, i++) {
1018 ath_hal_clr11n_aggr(sc->sc_ah, bfd);
1019 ath_hal_set11n_burstduration(sc->sc_ah, bfd, 0);
1020 ath_hal_set11n_virtualmorefrag(sc->sc_ah, bfd, 0);
1023 ath_dma_unmap(sc, bf);
1025 ath_tgt_skb_free(sc, &bf->bf_skbhead,bf->bf_endpt);
1031 bf = ath_buf_toggle(sc, bf, 0);
1033 bf->bf_isretried = 0;
1036 asf_tailq_insert_tail(&sc->sc_txbuf, bf, bf_list);
/* Unicast completion: push status to the host, bump driver stats, and
 * feed the outcome back to the rate-control module. */
1040 ath_tx_uc_comp(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
1042 ath_tx_status_update(sc, bf);
1043 ath_update_stats(sc, bf);
1044 ath_rate_tx_complete(sc, ATH_NODE_TARGET(bf->bf_node),
1045 bf->bf_lastds, bf->bf_rcs, 1, 0);
/* Accumulate per-frame TX statistics from the descriptor status: alt-rate
 * usage on success, error counters on failure, plus short/long retry
 * counts.  (The else line between branches is elided in this excerpt.) */
1049 ath_update_stats(struct ath_softc_tgt *sc, struct ath_buf *bf)
1051 struct ieee80211_node_target *ni = bf->bf_node;
1052 struct ath_tx_desc *ds = bf->bf_desc;
1053 struct ath_node_target *an = ATH_NODE_TARGET(ni);
1055 struct ieee80211_cb *cb;
1057 if (ds->ds_txstat.ts_status == 0) {
1058 if (ds->ds_txstat.ts_rate & HAL_TXSTAT_ALTRATE)
1059 sc->sc_tx_stats.ast_tx_altrate++;
1061 if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY)
1062 sc->sc_tx_stats.ast_tx_xretries++;
1063 if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO)
1064 sc->sc_tx_stats.ast_tx_fifoerr++;
1065 if (ds->ds_txstat.ts_status & HAL_TXERR_FILT)
1066 sc->sc_tx_stats.ast_tx_filtered++;
1067 if (ds->ds_txstat.ts_status & HAL_TXERR_TIMER_EXPIRED)
1068 sc->sc_tx_stats.ast_tx_timer_exp++;
1070 sr = ds->ds_txstat.ts_shortretry;
1071 lr = ds->ds_txstat.ts_longretry;
1072 sc->sc_tx_stats.ast_tx_shortretry += sr;
1073 sc->sc_tx_stats.ast_tx_longretry += lr;
/* Transmit a management frame delivered by the host (@hdr_buf carries the
 * ath_mgt_hdr, @skb the frame).  Allocates a TX buffer, resolves the
 * destination node, selects the minimum VAP rate, computes RTS/CTS
 * protection and durations, builds the descriptor chain and queues it on
 * sc_txq[1].  On the (elided) error paths the skb is returned to HTC.
 * Many statements — NULL checks, txrate/flag branches, descriptor setup
 * arguments — are elided in this excerpt, so the exact control flow must
 * be confirmed against the full source. */
1077 ath_tgt_send_mgt(struct ath_softc_tgt *sc,adf_nbuf_t hdr_buf, adf_nbuf_t skb,
1078 HTC_ENDPOINT_ID endpt)
1080 struct ieee80211_node_target *ni;
1081 struct ieee80211vap_target *vap;
1082 struct ath_vap_target *avp;
1083 struct ath_hal *ah = sc->sc_ah;
1084 a_uint8_t rix, txrate, ctsrate, cix = 0xff, *data;
1085 a_uint32_t ivlen = 0, icvlen = 0, subtype, flags, ctsduration, fval;
1086 a_int32_t i, iswep, ismcast, hdrlen, pktlen, try0, len;
1087 struct ath_desc *ds=NULL, *ds0=NULL;
1088 struct ath_txq *txq=NULL;
1089 struct ath_tx_buf *bf;
1091 const HAL_RATE_TABLE *rt;
1092 HAL_BOOL shortPreamble;
1093 struct ieee80211_frame *wh;
1094 struct ath_rc_series rcs[4];
1095 HAL_11N_RATE_SERIES series[4];
1097 struct ieee80211com_target *ic = &sc->sc_ic;
/* Strip the management header; it may live in skb or a separate hdr_buf. */
1101 adf_nbuf_peek_header(skb, &data, &len);
1102 adf_nbuf_pull_head(skb, sizeof(ath_mgt_hdr_t));
1104 adf_nbuf_peek_header(hdr_buf, &data, &len);
1107 adf_os_assert(len >= sizeof(ath_mgt_hdr_t));
1109 mh = (ath_mgt_hdr_t *)data;
1110 adf_nbuf_peek_header(skb, &data, &len);
1111 wh = (struct ieee80211_frame *)data;
1113 adf_os_mem_set(rcs, 0, sizeof(struct ath_rc_series)*4);
1114 adf_os_mem_set(series, 0, sizeof(HAL_11N_RATE_SERIES)*4);
1116 bf = asf_tailq_first(&sc->sc_txbuf);
1120 asf_tailq_remove(&sc->sc_txbuf, bf, bf_list);
1122 ni = ath_tgt_find_node(sc, mh->ni_index);
1126 bf->bf_endpt = endpt;
1127 bf->bf_cookie = mh->cookie;
1128 bf->bf_protmode = mh->flags & (IEEE80211_PROT_RTSCTS | IEEE80211_PROT_CTSONLY);
/* Management frames always go out on hardware queue 1. */
1129 txq = &sc->sc_txq[1];
1130 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
1131 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1132 hdrlen = ieee80211_anyhdrsize(wh);
1134 keyix = HAL_TXKEYIX_INVALID;
1135 pktlen -= (hdrlen & 3);
1136 pktlen += IEEE80211_CRC_LEN;
1141 adf_nbuf_map(sc->sc_dev, bf->bf_dmamap, skb, ADF_OS_DMA_TO_DEVICE);
1144 adf_nbuf_queue_add(&bf->bf_skbhead, skb);
1147 rt = sc->sc_currates;
1148 adf_os_assert(rt != NULL);
/* NOTE(review): '==' compares the whole flags word to ATH_SHORT_PREAMBLE
 * rather than testing the bit — looks suspicious; confirm in full source. */
1150 if (mh->flags == ATH_SHORT_PREAMBLE)
1151 shortPreamble = AH_TRUE;
1153 shortPreamble = AH_FALSE;
1155 flags = HAL_TXDESC_CLRDMASK;
1157 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
1158 case IEEE80211_FC0_TYPE_MGT:
1159 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1161 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
1162 atype = HAL_PKT_TYPE_PROBE_RESP;
1163 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
1164 atype = HAL_PKT_TYPE_ATIM;
1166 atype = HAL_PKT_TYPE_NORMAL;
1170 atype = HAL_PKT_TYPE_NORMAL;
1174 avp = &sc->sc_vap[mh->vap_index];
/* Management frames use the VAP's minimum rate at full retry count. */
1176 rcs[0].rix = ath_get_minrateidx(sc, avp);
1177 rcs[0].tries = ATH_TXMAXTRY;
1180 adf_os_mem_copy(bf->bf_rcs, rcs, sizeof(rcs));
1182 try0 = rcs[0].tries;
1183 txrate = rt->info[rix].rateCode;
1186 txrate |= rt->info[rix].shortPreamble;
1193 flags |= HAL_TXDESC_NOACK;
1195 } else if (pktlen > vap->iv_rtsthreshold) {
1196 flags |= HAL_TXDESC_RTSENA;
1197 cix = rt->info[rix].controlRate;
1200 if ((bf->bf_protmode != IEEE80211_PROT_NONE) &&
1201 rt->info[rix].phy == IEEE80211_T_OFDM &&
1202 (flags & HAL_TXDESC_NOACK) == 0) {
1203 cix = rt->info[sc->sc_protrix].controlRate;
1204 sc->sc_tx_stats.ast_tx_protect++;
/* Management frames use the node's management sequence counter. */
1207 *(a_uint16_t *)&wh->i_seq[0] = adf_os_cpu_to_le16(ni->ni_txseqmgmt <<
1208 IEEE80211_SEQ_SEQ_SHIFT);
1209 INCR(ni->ni_txseqmgmt, IEEE80211_SEQ_MAX);
/* Compute CTS duration when any protection is enabled. */
1212 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
1213 adf_os_assert(cix != 0xff);
1214 ctsrate = rt->info[cix].rateCode;
1215 if (shortPreamble) {
1216 ctsrate |= rt->info[cix].shortPreamble;
1217 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
1218 ctsduration += rt->info[cix].spAckDuration;
1219 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
1220 ctsduration += rt->info[cix].spAckDuration;
1222 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
1223 ctsduration += rt->info[cix].lpAckDuration;
1224 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
1225 ctsduration += rt->info[cix].lpAckDuration;
1227 ctsduration += ath_hal_computetxtime(ah,
1228 rt, pktlen, rix, shortPreamble);
1233 flags |= HAL_TXDESC_INTREQ;
1235 ath_hal_setuptxdesc(ah, ds
1248 , ATH_COMP_PROC_NO_COMP_NO_CCS);
1250 bf->bf_flags = flags;
1253 * Set key type in tx desc while sending the encrypted challenge to AP
1254 * in Auth frame 3 of Shared Authentication, owl needs this.
1256 if (iswep && (keyix != HAL_TXKEYIX_INVALID) &&
1257 (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_AUTH)
1258 ath_hal_fillkeytxdesc(ah, ds, mh->keytype);
1260 ath_filltxdesc(sc, bf);
/* All four series carry the same fixed rate, two tries each. */
1262 for (i=0; i<4; i++) {
1263 series[i].Tries = 2;
1264 series[i].Rate = txrate;
1265 series[i].ChSel = sc->sc_ic.ic_tx_chainmask;
1266 series[i].RateFlags = 0;
1268 ath_hal_set11n_ratescenario(ah, ds, 0, ctsrate, ctsduration, series, 4, 0);
1269 ath_tgt_txqaddbuf(sc, txq, bf, bf->bf_lastds);
/* (Elided error path) return the skb to HTC on failure. */
1273 HTC_ReturnBuffers(sc->tgt_htc_handle, endpt, skb);
/* Append @bf to @txq: first buffer goes straight to the hardware via
 * ath_hal_puttxbuf(); otherwise link from the previous tail.  Update
 * axq_link to @lastds and start (or restart) the queue's DMA. */
1278 ath_tgt_txqaddbuf(struct ath_softc_tgt *sc,
1279 struct ath_txq *txq, struct ath_buf *bf,
1280 struct ath_desc *lastds)
1282 struct ath_hal *ah = sc->sc_ah;
1284 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
1286 if (txq->axq_link == NULL) {
1287 ath_hal_puttxbuf(ah, txq->axq_qnum, ATH_BUF_GET_DESC_PHY_ADDR(bf));
1289 *txq->axq_link = ATH_BUF_GET_DESC_PHY_ADDR(bf);
1292 txq->axq_link = &lastds->ds_link;
1293 ath_hal_txstart(ah, txq->axq_qnum);
/* Entry point for a non-aggregate data frame: install the normal
 * completion handler, advance the TID window start, and send directly.
 * (The queue_frame decision and its branch are elided in this excerpt.) */
1296 void ath_tgt_handle_normal(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
1299 struct ath_node_target *an;
1300 struct ath_desc *ds;
1301 struct ath_txq *txq = bf->bf_txq;
1302 a_bool_t queue_frame;
1304 an = (struct ath_node_target *)bf->bf_node;
1307 tid = &an->tid[bf->bf_tidno];
1310 bf->bf_comp = ath_tgt_tx_comp_normal;
1311 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1312 ath_tgt_tx_send_normal(sc, bf);
/* Mark @tid scheduled and append it to @txq's TID scheduling list.
 * (Guard conditions, e.g. already-scheduled/paused, are elided here.) */
1316 ath_tgt_tx_enqueue(struct ath_txq *txq, struct ath_atx_tid *tid)
1324 tid->sched = AH_TRUE;
1325 asf_tailq_insert_tail(&txq->axq_tidq, tid, tid_qelem);
/* Round-robin TID scheduler for @txq: pop each scheduled TID, dispatch
 * its frames via the normal or aggregate path, and re-enqueue TIDs that
 * still have buffered frames.  Loops until the TID queue empties or a
 * (elided) done condition sets bdone. */
1329 ath_tgt_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1331 struct ath_atx_tid *tid;
1335 struct ieee80211_node *ieee_node;
1336 u_int32_t aggr_limit_with_rts;
1341 TAILQ_DEQ(&txq->axq_tidq, tid, tid_qelem);
1346 tid->sched = AH_FALSE;
1351 if (!(tid->flag & TID_AGGR_ENABLED))
1352 ath_tgt_tx_sched_normal(sc,tid);
1354 ath_tgt_tx_sched_aggr(sc,tid);
1358 if (!asf_tailq_empty(&tid->buf_q)) {
1359 ath_tgt_tx_enqueue(txq, tid);
1362 } while (!asf_tailq_empty(&txq->axq_tidq) && !bdone);
/* Entry point for a frame on an aggregation-enabled TID.  The frame is
 * buffered for later scheduling if the hardware queue is deep enough,
 * frames are already pending, the TID is paused, or the sequence number
 * falls outside the block-ack window; otherwise it is added to the BAW
 * and sent immediately. */
1366 ath_tgt_handle_aggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
1369 struct ath_node_target *an;
1370 struct ath_desc *ds;
1371 struct ath_txq *txq = bf->bf_txq;
1372 a_bool_t queue_frame, within_baw;
1374 an = (struct ath_node_target *)bf->bf_node;
1377 tid = &an->tid[bf->bf_tidno];
1380 bf->bf_comp = ath_tgt_tx_comp_aggr;
1382 within_baw = BAW_WITHIN(tid->seq_start, tid->baw_size,
1383 SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
1385 queue_frame = ( (txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) ||
1386 (!asf_tailq_empty(&tid->buf_q)) ||
1387 (tid->paused) || (!within_baw) );
1390 asf_tailq_insert_tail(&tid->buf_q, bf, bf_list);
1391 ath_tgt_tx_enqueue(txq, tid);
1393 ath_tx_addto_baw(tid, bf);
1394 __stats(sc, txaggr_nframes);
1395 ath_tgt_tx_send_normal(sc, bf);
/*
 * ath_tgt_tx_sched_normal - drain a non-aggregating TID's software queue,
 * sending frames one at a time until the queue is empty or the hardware
 * queue depth reaches ATH_AGGR_MIN_QDEPTH.
 */
/* NOTE(review): stray double semicolon below; harmless but worth cleaning. */
1403 struct ath_txq *txq =TID_TO_ACTXQ(tid->tidno);;
1406 if (asf_tailq_empty(&tid->buf_q))
/* Pop the head frame and transmit it as a normal (non-aggregate) frame. */
1409 bf = asf_tailq_first(&tid->buf_q);
1410 asf_tailq_remove(&tid->buf_q, bf, bf_list);
1411 ath_tgt_tx_send_normal(sc, bf);
1413 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH);
/*
 * ath_tgt_tx_sched_aggr - build and launch A-MPDU aggregates from a TID's
 * software queue.  Repeatedly forms an aggregate (ath_tgt_tx_form_aggr),
 * then either sends a lone frame as non-aggregate (clearing the aggr bits
 * on its descriptors) or stitches the subframes together with first/middle/
 * last 11n aggregate descriptor markings, until the hardware queue is deep
 * enough or the BAW closes.
 * NOTE(review): sampled extract; loop head, some conditionals, and the
 * 8K-limited handling body are not fully visible.
 */
1417 ath_tgt_tx_sched_aggr(struct ath_softc_tgt *sc, ath_atx_tid_t *tid)
1419 struct ath_tx_buf *bf, *bf_last;
1420 ATH_AGGR_STATUS status;
1422 struct ath_txq *txq = TID_TO_ACTXQ(tid->tidno);
1423 struct ath_desc *ds = NULL;
1427 if (asf_tailq_empty(&tid->buf_q))
1431 if (asf_tailq_empty(&tid->buf_q))
1434 asf_tailq_init(&bf_q);
/* Pull frames off the TID queue into bf_q, forming one aggregate. */
1436 status = ath_tgt_tx_form_aggr(sc, tid, &bf_q);
1438 if (asf_tailq_empty(&bf_q))
1441 bf = asf_tailq_first(&bf_q);
1442 bf_last = asf_tailq_last(&bf_q, ath_bufhead_s);
/* Single-frame "aggregate": send it as a plain frame instead. */
1444 if (bf->bf_nframes == 1) {
1446 if(bf->bf_retries == 0)
1447 __stats(sc, txaggr_single);
1449 bf->bf_lastds = &(bf->bf_descarr[bf->bf_dmamap_info.nsegs -1]);
1450 bf->bf_lastds->ds_link = 0;
/* Strip any stale aggregate markings from every descriptor segment. */
1453 for(ds = bf->bf_desc; ds <= bf->bf_lastds; ds++)
1454 ath_hal_clr11n_aggr(sc->sc_ah, ds);
1456 ath_buf_set_rate(sc, bf);
1457 bf->bf_txq_add(sc, bf);
/* Multi-frame aggregate: terminate the subframe chain. */
1462 bf_last->bf_next = NULL;
1463 bf_last->bf_lastds->ds_link = 0;
1464 bf_last->bf_ndelim = 0;
1467 ath_buf_set_rate(sc, bf);
/* Mark the first subframe's descriptor with the aggregate length. */
1468 ath_hal_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al,
/* The aggregate's last descriptor is the last subframe's last segment. */
1470 bf->bf_lastds = bf_last->bf_lastds;
/* Mark every segment of the final subframe as aggregate-last. */
1472 for (i = 0; i < bf_last->bf_dmamap_info.nsegs; i++)
1473 ath_hal_set11n_aggr_last(sc->sc_ah, &bf_last->bf_descarr[i]);
1475 if (status == ATH_AGGR_8K_LIMITED) {
1480 bf->bf_txq_add(sc, bf);
1481 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
1482 status != ATH_TGT_AGGR_BAW_CLOSED);
/*
 * ath_lookup_rate - choose the rate series for a buffer and derive the
 * maximum aggregate size (in bytes) it permits.  Multicast frames get a
 * fixed single-series rate; otherwise the rate-control module fills the
 * series.  The limit is the minimum 4ms frame length over all tried
 * series, further clamped by the coex 3/8 derating, the configured
 * ampdu limit, and the peer's advertised max A-MPDU.
 * NOTE(review): sampled extract; declarations of i/prate, the legacy-rate
 * branch body, and the return statement are not visible.
 */
1485 static u_int32_t ath_lookup_rate(struct ath_softc_tgt *sc,
1486 struct ath_node_target *an,
1487 struct ath_tx_buf *bf)
1490 u_int32_t max4msframelen, frame_length;
1491 u_int16_t aggr_limit, legacy=0;
1492 const HAL_RATE_TABLE *rt = sc->sc_currates;
1493 struct ieee80211_node_target *ieee_node = (struct ieee80211_node_target *)an;
/* Multicast: force a single fixed-rate series, no retries on the rest. */
1495 if (bf->bf_ismcast) {
1496 bf->bf_rcs[1].tries = bf->bf_rcs[2].tries = bf->bf_rcs[3].tries = 0;
1497 bf->bf_rcs[0].rix = 0xb;
1498 bf->bf_rcs[0].tries = ATH_TXMAXTRY - 1;
1499 bf->bf_rcs[0].flags = 0;
/* Unicast: ask rate control to populate all four series. */
1501 ath_tgt_rate_findrate(sc, an, AH_TRUE, 0, ATH_TXMAXTRY-1, 4, 1,
1502 ATH_RC_PROBE_ALLOWED, bf->bf_rcs, &prate);
1505 max4msframelen = IEEE80211_AMPDU_LIMIT_MAX;
1507 for (i = 0; i < 4; i++) {
1508 if (bf->bf_rcs[i].tries) {
1509 frame_length = bf->bf_rcs[i].max4msframelen;
/* A non-HT (legacy) series in the ladder disables aggregation. */
1511 if (rt->info[bf->bf_rcs[i].rix].phy != IEEE80211_T_HT) {
1516 max4msframelen = ATH_MIN(max4msframelen, frame_length);
1520 if (prate || legacy)
/* BT coexistence active: derate the aggregate limit to 3/8. */
1523 if (sc->sc_ic.ic_enable_coex)
1524 aggr_limit = ATH_MIN((max4msframelen*3)/8, sc->sc_ic.ic_ampdu_limit);
1526 aggr_limit = ATH_MIN(max4msframelen, sc->sc_ic.ic_ampdu_limit);
/* Respect the peer's advertised maximum A-MPDU size, if any. */
1528 if (ieee_node->ni_maxampdu)
1529 aggr_limit = ATH_MIN(aggr_limit, ieee_node->ni_maxampdu);
/*
 * ath_tgt_tx_form_aggr - pull frames from a TID's software queue into bf_q,
 * forming one aggregate.  Stops (returning the reason) when the BAW closes,
 * the byte limit from ath_lookup_rate would be exceeded, or the subframe
 * count hits the half-BAW cap.  Records the aggregate's total length (bf_al)
 * and subframe count (bf_nframes) on the first buffer, links subframes via
 * bf_next/ds_link, and marks descriptors as aggregate-middle.
 * NOTE(review): sampled extract; loop head, some break/continue structure,
 * and default switch handling are not visible.
 */
1534 int ath_tgt_tx_form_aggr(struct ath_softc_tgt *sc, ath_atx_tid_t *tid,
/* NOTE(review): stray double semicolon on the rl declaration below. */
1537 struct ath_tx_buf *bf_first ,*bf_prev = NULL;
1538 int nframes = 0, rl = 0;;
1539 struct ath_desc *ds = NULL;
1540 struct ath_tx_buf *bf;
1541 u_int16_t aggr_limit = (64*1024 -1), al = 0, bpad = 0, al_delta;
1542 u_int16_t h_baw = tid->baw_size/2, prev_al = 0, prev_frames = 0;
1544 bf_first = asf_tailq_first(&tid->buf_q);
1547 bf = asf_tailq_first(&tid->buf_q);
/* Frame falls outside the block-ack window: close the aggregate here. */
1550 if (!BAW_WITHIN(tid->seq_start, tid->baw_size,
1551 SEQNO_FROM_BF_SEQNO(bf->bf_seqno))) {
1553 bf_first->bf_al= al;
1554 bf_first->bf_nframes = nframes;
1555 return ATH_TGT_AGGR_BAW_CLOSED;
/* Look up the rate-derived byte limit once (rl guards re-lookup). */
1559 aggr_limit = ath_lookup_rate(sc, tid->an, bf);
/* Bytes this subframe adds: delimiter overhead plus payload. */
1563 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_pktlen;
/* Adding this subframe would exceed the byte limit: stop. */
1565 if (nframes && (aggr_limit < (al + bpad + al_delta + prev_al))) {
1566 bf_first->bf_al= al;
1567 bf_first->bf_nframes = nframes;
1568 return ATH_TGT_AGGR_LIMITED;
/* Subframe-count caps (relative to half the BAW). */
1572 if ((nframes + prev_frames) >= ATH_MIN((h_baw), 17)) {
1574 if ((nframes + prev_frames) >= ATH_MIN((h_baw), 22)) {
1576 bf_first->bf_al= al;
1577 bf_first->bf_nframes = nframes;
1578 return ATH_TGT_AGGR_LIMITED;
/* Commit the frame: track it in the BAW and move it onto bf_q. */
1581 ath_tx_addto_baw(tid, bf);
1582 asf_tailq_remove(&tid->buf_q, bf, bf_list);
1583 asf_tailq_insert_tail(bf_q, bf, bf_list);
1588 adf_os_assert(bf->bf_comp == ath_tgt_tx_comp_aggr);
1590 al += bpad + al_delta;
/* MPDU delimiter count for minimum-spacing enforcement. */
1591 bf->bf_ndelim = ATH_AGGR_GET_NDELIM(bf->bf_pktlen);
/* Extra delimiters per cipher to give the crypto engine headroom. */
1593 switch (bf->bf_keytype) {
1594 case HAL_KEY_TYPE_AES:
1595 bf->bf_ndelim += ATH_AGGR_ENCRYPTDELIM;
1597 case HAL_KEY_TYPE_WEP:
1598 case HAL_KEY_TYPE_TKIP:
1599 bf->bf_ndelim += 64;
1601 case HAL_KEY_TYPE_WAPI:
1602 bf->bf_ndelim += 12;
/* Padding the NEXT subframe must absorb (4-byte align + delimiters). */
1608 bpad = PADBYTES(al_delta) + (bf->bf_ndelim << 2);
/* Link this subframe after the previous one in descriptor space. */
1611 bf_prev->bf_next = bf;
1612 bf_prev->bf_lastds->ds_link = ATH_BUF_GET_DESC_PHY_ADDR(bf);
/* Mark all this subframe's descriptors as aggregate-middle. */
1616 for(ds = bf->bf_desc; ds <= bf->bf_lastds; ds++)
1617 ath_hal_set11n_aggr_middle(sc->sc_ah, ds, bf->bf_ndelim);
1619 } while (!asf_tailq_empty(&tid->buf_q));
1621 bf_first->bf_al= al;
1622 bf_first->bf_nframes = nframes;
1624 return ATH_TGT_AGGR_DONE;
/*
 * ath_tx_addto_baw - record a frame in the TID's block-ack window bitmap.
 * Retried frames are already tracked and are skipped (branch body not
 * visible here).  The tail is advanced when the new index lies at or past
 * the current head..tail span.
 */
1627 void ath_tx_addto_baw(ath_atx_tid_t *tid, struct ath_tx_buf *bf)
1631 if (bf->bf_isretried) {
/* Circular-buffer slot for this sequence number. */
1635 index = ATH_BA_INDEX(tid->seq_start, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
1636 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
1638 TX_BUF_BITMAP_SET(tid->tx_buf_bitmap, cindex);
1640 if (index >= ((tid->baw_tail - tid->baw_head) & (ATH_TID_MAX_BUFS - 1))) {
1641 tid->baw_tail = cindex;
1642 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
/*
 * ath_tgt_tx_comp_aggr - completion handler for an aggregate.  Walks the
 * subframe chain against the block-ack bitmap from the last descriptor's
 * tx status: acked subframes are freed and removed from the BAW, missed
 * ones are queued for retry (possibly producing a BAR frame).  Finally
 * updates rate control / stats and prepends any retries back onto the
 * TID queue.
 * NOTE(review): sampled extract; the subframe-walk loop head, nbad
 * accounting, and txq assignment are not visible.
 */
1646 void ath_tgt_tx_comp_aggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
1648 struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
1649 ath_atx_tid_t *tid = ATH_AN_2_TID(an, bf->bf_tidno);
1650 struct ath_tx_desc lastds;
1651 struct ath_tx_desc *ds = &lastds;
1652 struct ath_rc_series rcs[4];
1657 int nframes = bf->bf_nframes;
1658 struct ath_buf *bf_next;
1661 struct ath_buf *bar = NULL;
1662 struct ath_txq *txq;
/* TID is tearing down: divert to the cleanup completion path. */
1666 if (tid->flag & TID_CLEANUP_INPROGRES) {
1667 ath_tx_comp_cleanup(sc, bf);
/* Snapshot status and rate series before subframes are freed. */
1671 adf_os_mem_copy(ds, bf->bf_lastds, sizeof (struct ath_tx_desc));
1672 adf_os_mem_copy(rcs, bf->bf_rcs, sizeof(rcs));
1674 if (ds->ds_txstat.ts_flags == HAL_TX_SW_FILTERED) {
/* Not actually an aggregate: use the unaggregated completion path. */
1679 if (!bf->bf_isaggr) {
1680 ath_tx_comp_unaggr(sc, bf);
1684 __stats(sc, tx_compaggr);
1686 asf_tailq_init(&bf_q);
/* Block-ack starting sequence and bitmap reported by hardware. */
1688 seq_st = ATH_DS_BA_SEQ(ds);
1689 ba = ATH_DS_BA_BITMAP(ds);
1690 tx_ok = (ATH_DS_TX_STATUS(ds) == HAL_OK);
/* Whole aggregate exhausted its retries: error path retries everything. */
1692 if (ATH_DS_TX_STATUS(ds) & HAL_TXERR_XRETRY) {
1693 ath_tx_comp_aggr_error(sc, bf, tid);
/* "OK" status but no BA received: known hardware quirk; treat as error. */
1697 if (tx_ok && !ATH_DS_TX_BA(ds)) {
1698 __stats(sc, txaggr_babug);
1699 adf_os_print("BA Bug?\n");
1700 ath_tx_comp_aggr_error(sc, bf, tid);
/* Per-subframe: test this frame's bit in the BA bitmap. */
1705 ba_index = ATH_BA_INDEX(seq_st, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
1706 bf_next = bf->bf_next;
1708 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
/* Acked: release the BAW slot and free the buffer. */
1709 __stats(sc, txaggr_compgood);
1710 ath_tx_update_baw(tid, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
1711 ath_tx_status_update_aggr(sc, bf, ds, rcs, 1);
1712 ath_tx_freebuf(sc, bf);
/* Missed: schedule a retry (may set bar on retry exhaustion). */
1714 ath_tx_retry_subframe(sc, bf, &bf_q, &bar);
1720 ath_update_aggr_stats(sc, ds, nframes, nbad);
1721 ath_rate_tx_complete(sc, an, ds, rcs, nframes, nbad);
1724 ath_bar_tx(sc, tid, bar);
/* Put retried subframes back at the head of the TID queue. */
1727 if (!asf_tailq_empty(&bf_q)) {
1728 __stats(sc, txaggr_prepends);
1729 TAILQ_INSERTQ_HEAD(&tid->buf_q, &bf_q, bf_list);
1730 ath_tgt_tx_enqueue(txq, tid);
/*
 * ath_tx_comp_aggr_error - handle an aggregate whose transmission failed
 * outright (e.g. excessive retries): every subframe is marked bad and
 * queued for retry, rate control is told all nframes failed, and any
 * resulting BAR is sent.  Retries are prepended back onto the TID queue.
 * NOTE(review): sampled extract; the subframe-walk loop head and txq
 * assignment are not visible.
 */
1735 ath_tx_comp_aggr_error(struct ath_softc_tgt *sc, struct ath_tx_buf *bf,
1740 struct ath_tx_desc lastds;
1741 struct ath_desc *ds = &lastds;
1742 struct ath_rc_series rcs[4];
1743 struct ath_buf *bar = NULL;
1744 struct ath_buf *bf_next;
1745 int nframes = bf->bf_nframes;
1747 struct ath_txq *txq;
1749 asf_tailq_init(&bf_q);
/* Snapshot status and rate series before subframes are recycled. */
1752 adf_os_mem_copy(ds, bf->bf_lastds, sizeof (struct ath_tx_desc));
1753 adf_os_mem_copy(rcs, bf->bf_rcs, sizeof(rcs));
/* Every subframe failed; retry each one. */
1756 bf_next = bf->bf_next;
1757 ath_tx_retry_subframe(sc, bf, &bf_q, &bar);
/* nbad == nframes: report a total loss to stats and rate control. */
1761 ath_update_aggr_stats(sc, ds, nframes, nframes);
1762 ath_rate_tx_complete(sc, tid->an, ds, rcs, nframes, nframes);
1765 ath_bar_tx(sc, tid, bar);
/* Put retried subframes back at the head of the TID queue. */
1768 if (!asf_tailq_empty(&bf_q)) {
1769 __stats(sc, txaggr_prepends);
1770 TAILQ_INSERTQ_HEAD(&tid->buf_q, &bf_q, bf_list);
1771 ath_tgt_tx_enqueue(txq, tid);
/*
 * ath_tx_comp_cleanup - completion handler used while a TID is being torn
 * down (TID_CLEANUP_INPROGRES).  Frees each completed buffer without
 * re-queuing retries; when the BAW drains, clears the cleanup flag and
 * resumes the TID.  Also feeds final stats / rate-control updates.
 * NOTE(review): sampled extract; the aggregate subframe-walk loop head
 * and the BAW-empty check surrounding lines 1826-1827 are not visible.
 */
1776 ath_tx_comp_cleanup(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
1779 struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
1780 ath_atx_tid_t *tid = ATH_AN_2_TID(an, bf->bf_tidno);
1781 struct ath_tx_desc lastds;
1782 struct ath_tx_desc *ds = &lastds;
1783 struct ath_rc_series rcs[4];
1788 int nframes = bf->bf_nframes;
1789 struct ath_buf *bf_next;
/* Snapshot status and rate series before buffers are freed. */
1792 adf_os_mem_copy(ds, bf->bf_lastds, sizeof (struct ath_tx_desc));
1793 adf_os_mem_copy(rcs, bf->bf_rcs, sizeof(rcs));
1795 seq_st = ATH_DS_BA_SEQ(ds);
1796 ba = ATH_DS_BA_BITMAP(ds);
1797 tx_ok = (ATH_DS_TX_STATUS(ds) == HAL_OK);
/* Non-aggregate case: account, update status, free, drive teardown. */
1799 if (!bf->bf_isaggr) {
1800 ath_update_stats(sc, bf);
1802 __stats(sc, tx_compunaggr);
1804 ath_tx_status_update(sc, bf);
1806 ath_tx_freebuf(sc, bf);
1808 if (tid->flag & TID_CLEANUP_INPROGRES) {
1809 owl_tgt_tid_cleanup(sc, tid);
/* Aggregate case: free each subframe with no retry. */
1817 ba_index = ATH_BA_INDEX(seq_st, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
1818 bf_next = bf->bf_next;
1820 ath_tx_status_update_aggr(sc, bf, ds, rcs, 0);
1822 ath_tx_freebuf(sc, bf);
/* Teardown complete: clear the flag and let traffic flow again. */
1826 tid->flag &= ~TID_CLEANUP_INPROGRES;
1827 ath_aggr_resume_tid(sc, tid);
1834 ath_update_aggr_stats(sc, ds, nframes, nbad);
1835 ath_rate_tx_complete(sc, an, ds, rcs, nframes, nbad);
/*
 * ath_tx_retry_subframe - prepare a failed aggregate subframe for
 * retransmission.  Clears aggregate/burst descriptor state so the frame
 * can be resent standalone or re-aggregated.  If retries are exhausted
 * the frame is dropped (freed) and, per the visible status update, the
 * *bar out-parameter path handles BAR generation elsewhere; otherwise
 * the frame is flagged retried and appended to bf_q.
 * NOTE(review): sampled extract; the retry-exhausted branch tail and the
 * condition guarding the buffer-toggle path are not visible.
 */
1839 ath_tx_retry_subframe(struct ath_softc_tgt *sc, struct ath_tx_buf *bf,
1840 ath_bufhead *bf_q, struct ath_tx_buf **bar)
1843 struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
1844 ath_atx_tid_t *tid = ATH_AN_2_TID(an, bf->bf_tidno);
1845 struct ath_desc *ds = NULL;
1848 __stats(sc, txaggr_compretries);
/* Scrub per-descriptor aggregate/burst state on every segment. */
1850 for(ds = bf->bf_desc, i = 0; i < bf->bf_dmamap_info.nsegs; ds++, i++) {
1851 ath_hal_clr11n_aggr(sc->sc_ah, ds);
1852 ath_hal_set11n_burstduration(sc->sc_ah, ds, 0);
1853 ath_hal_set11n_virtualmorefrag(sc->sc_ah, ds, 0);
/* Retry budget exhausted: give up on this subframe. */
1856 if (bf->bf_retries >= OWLMAX_RETRIES) {
1857 __stats(sc, txaggr_xretries);
1858 ath_tx_update_baw(tid, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
1859 ath_tx_status_update_aggr(sc, bf, bf->bf_lastds, NULL, 0);
1864 ath_tx_freebuf(sc, bf);
/* Descriptor was the chain tail: switch to the alternate buffer copy. */
1869 __stats(sc, txaggr_errlast);
1870 bf = ath_buf_toggle(sc, bf, 1);
1872 bf->bf_lastds = &(bf->bf_descarr[bf->bf_dmamap_info.nsegs - 1]);
/* Mark retried (sets the 802.11 Retry bit) and queue for resend. */
1874 ath_tx_set_retry(sc, bf);
1875 asf_tailq_insert_tail(bf_q, bf, bf_list);
/*
 * ath_update_aggr_stats - bump aggregate-TX debug counters from a final
 * descriptor's status: retry counts, underrun/config-error flags, and
 * the individual HAL_TXERR_* error bits.
 */
1879 ath_update_aggr_stats(struct ath_softc_tgt *sc,
1880 struct ath_tx_desc *ds, int nframes,
1884 u_int8_t status = ATH_DS_TX_STATUS(ds);
1885 u_int8_t txflags = ATH_DS_TX_FLAGS(ds);
/* Cumulative long/short retry counters reported by hardware. */
1887 __statsn(sc, txaggr_longretries, ds->ds_txstat.ts_longretry);
1888 __statsn(sc, txaggr_shortretries, ds->ds_txstat.ts_shortretry);
1890 if (txflags & HAL_TX_DESC_CFG_ERR)
1891 __stats(sc, txaggr_desc_cfgerr);
1893 if (txflags & HAL_TX_DATA_UNDERRUN)
1894 __stats(sc, txaggr_data_urun);
1896 if (txflags & HAL_TX_DELIM_UNDERRUN)
1897 __stats(sc, txaggr_delim_urun);
/* Decode individual error bits from the status byte. */
1903 if (status & HAL_TXERR_XRETRY)
1904 __stats(sc, txaggr_compxretry);
1906 if (status & HAL_TXERR_FILT)
1907 __stats(sc, txaggr_filtered);
1909 if (status & HAL_TXERR_FIFO)
1910 __stats(sc, txaggr_fifo);
1912 if (status & HAL_TXERR_XTXOP)
1913 __stats(sc, txaggr_xtxop);
1915 if (status & HAL_TXERR_TIMER_EXPIRED)
1916 __stats(sc, txaggr_timer_exp);
/*
 * ath_tx_comp_unaggr - completion handler for a single (non-aggregate)
 * frame on an aggregation TID.  On excessive-retry failure the frame is
 * resubmitted via ath_tx_retry_unaggr; on success the BAW slot is
 * released and the buffer freed.
 */
1920 ath_tx_comp_unaggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
1922 struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
1923 ath_atx_tid_t *tid = ATH_AN_2_TID(an, bf->bf_tidno);
1924 struct ath_desc *ds = bf->bf_lastds;
1926 ath_update_stats(sc, bf);
/* One frame completed, zero bad, as far as rate control is concerned. */
1927 ath_rate_tx_complete(sc, an, ds, bf->bf_rcs, 1, 0);
1929 if (ATH_DS_TX_STATUS(ds) & HAL_TXERR_XRETRY) {
1930 ath_tx_retry_unaggr(sc, bf);
1933 __stats(sc, tx_compunaggr);
/* Success: free the BAW slot and the buffer. */
1935 ath_tx_update_baw(tid, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
1936 ath_tx_status_update(sc, bf);
1937 ath_tx_freebuf(sc, bf);
/*
 * ath_tx_retry_unaggr - retry a failed non-aggregate frame.  If retries
 * are exhausted, the BAW slot is released and a BAR is issued for the
 * frame; otherwise the frame is flagged retried and put back at the head
 * of the TID queue for rescheduling.
 * NOTE(review): sampled extract; the txq assignment and parts of the
 * toggle branch are not visible.
 */
1941 ath_tx_retry_unaggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
1943 struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
1944 ath_atx_tid_t *tid = ATH_AN_2_TID(an, bf->bf_tidno);
1945 struct ath_txq *txq;
/* Out of retries: drop from the BAW and signal the peer with a BAR. */
1949 if (bf->bf_retries >= OWLMAX_RETRIES) {
1950 __stats(sc, txunaggr_xretry);
1951 ath_tx_update_baw(tid, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
1952 ath_tx_status_update(sc, bf);
1953 ath_bar_tx(sc, tid, bf);
1957 __stats(sc, txunaggr_compretries);
/* Chain tail (ds_link == 0): switch to the alternate buffer copy. */
1958 if (!bf->bf_lastds->ds_link) {
1959 __stats(sc, txunaggr_errlast);
1960 bf = ath_buf_toggle(sc, bf, 1);
/* Flag retried and requeue at the head so ordering is preserved. */
1963 ath_tx_set_retry(sc, bf);
1964 asf_tailq_insert_head(&tid->buf_q, bf, bf_list);
1965 ath_tgt_tx_enqueue(txq, tid);
/*
 * ath_tx_update_baw - release a sequence number's slot in the block-ack
 * window, then slide the window forward past any leading slots that are
 * now clear (advancing both baw_head and seq_start in lockstep).
 */
1969 ath_tx_update_baw(ath_atx_tid_t *tid, int seqno)
1974 index = ATH_BA_INDEX(tid->seq_start, seqno);
1975 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
1977 TX_BUF_BITMAP_CLR(tid->tx_buf_bitmap, cindex);
/* Advance the window over every contiguous completed slot at the head. */
1979 while (tid->baw_head != tid->baw_tail &&
1980 (!TX_BUF_BITMAP_IS_SET(tid->tx_buf_bitmap, tid->baw_head))) {
1981 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1982 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
/*
 * ath_tx_set_retry - mark a buffer as a retransmission: record the state
 * on the buffer and set the 802.11 Retry bit in the frame header so the
 * receiver can detect the duplicate.
 */
1986 static void ath_tx_set_retry(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
1988 struct ieee80211_frame *wh;
1990 __stats(sc, txaggr_retries);
1992 bf->bf_isretried = 1;
/* Set FC1 Retry in the over-the-air header. */
1994 wh = ATH_SKB_2_WH(bf->bf_skb);
1995 wh->i_fc[1] |= IEEE80211_FC1_RETRY;
/*
 * ath_tgt_tx_cleanup - tear down aggregation state for a TID.  Walks the
 * software queue freeing retried frames (or all frames when discard_all),
 * re-points survivors at the normal completion handler, pauses the TID,
 * and scans the BAW: any still-outstanding slot flags the TID as
 * TID_CLEANUP_INPROGRES, otherwise the TID is resumed immediately.
 * NOTE(review): sampled extract; the queue-walk loop head and a branch
 * around line 2028 are not visible.
 */
1998 void ath_tgt_tx_cleanup(struct ath_softc_tgt *sc, struct ath_node_target *an,
1999 ath_atx_tid_t *tid, a_uint8_t discard_all)
2001 struct ath_tx_buf *bf;
2002 struct ath_tx_buf *bf_next;
2003 struct ath_txq *txq;
2005 txq = TID_TO_ACTXQ(tid->tidno);
2007 bf = asf_tailq_first(&tid->buf_q);
/* Drop retried frames (or everything when discard_all is set). */
2010 if (discard_all || bf->bf_isretried) {
2011 bf_next = asf_tailq_next(bf, bf_list);
2012 TAILQ_DEQ(&tid->buf_q, bf, bf_list);
2013 if (bf->bf_isretried)
2014 ath_tx_update_baw(tid, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
2015 ath_tx_freebuf(sc, bf);
/* Kept frames complete through the normal (non-aggregate) path. */
2019 bf->bf_comp = ath_tgt_tx_comp_normal;
2020 bf = asf_tailq_next(bf, bf_list);
2023 ath_aggr_pause_tid(sc, tid);
/* Drain the BAW; any set bit means frames are still in flight. */
2025 while (tid->baw_head != tid->baw_tail) {
2026 if (TX_BUF_BITMAP_IS_SET(tid->tx_buf_bitmap, tid->baw_head)) {
2028 tid->flag |= TID_CLEANUP_INPROGRES;
2029 TX_BUF_BITMAP_CLR(tid->tx_buf_bitmap, tid->baw_head);
2031 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2032 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
/* Nothing outstanding: cleanup finished synchronously, resume the TID. */
2035 if (!(tid->flag & TID_CLEANUP_INPROGRES)) {
2036 ath_aggr_resume_tid(sc, tid);
2040 /******************/
2041 /* BAR Management */
2042 /******************/
/*
 * ath_tgt_delba_send - disable aggregation on a TID, discard its queued
 * frames, and notify the host (via WMI) to send a DELBA to the peer.
 * NOTE(review): the initiator/reasoncode parameters are visibly ignored;
 * the WMI message is hard-coded to initiator=1 / REASON_UNSPECIFIED.
 * NOTE(review): the txbar_xretry counter is bumped here, presumably
 * because BAR retry exhaustion is the visible caller — confirm.
 */
2044 static void ath_tgt_delba_send(struct ath_softc_tgt *sc,
2045 struct ieee80211_node_target *ni,
2046 a_uint8_t tidno, a_uint8_t initiator,
2047 a_uint16_t reasoncode)
2049 struct ath_node_target *an = ATH_NODE_TARGET(ni);
2050 ath_atx_tid_t *tid = ATH_AN_2_TID(an, tidno);
2051 struct wmi_data_delba wmi_delba;
2053 tid->flag &= ~TID_AGGR_ENABLED;
/* Flush everything queued on the TID (discard_all = 1). */
2055 ath_tgt_tx_cleanup(sc, an, tid, 1);
2057 wmi_delba.ni_nodeindex = ni->ni_nodeindex;
2058 wmi_delba.tidno = tid->tidno;
2059 wmi_delba.initiator = 1;
2060 wmi_delba.reasoncode = IEEE80211_REASON_UNSPECIFIED;
2062 __stats(sc, txbar_xretry);
2063 wmi_event(sc->tgt_wmi_handle,
/*
 * ath_bar_retry - retry a failed BAR frame.  After OWLMAX_BAR_RETRIES the
 * session is torn down with a DELBA and the TID drained; otherwise the
 * BAR is flagged retried and re-queued as a unicast frame.
 */
2069 static void ath_bar_retry(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
2071 struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
2072 ath_atx_tid_t *tid = ATH_AN_2_TID(an, bf->bf_tidno);
/* BAR retries exhausted: give up on the BA session entirely. */
2074 if (bf->bf_retries >= OWLMAX_BAR_RETRIES) {
2075 ath_tgt_delba_send(sc, bf->bf_node, tid->tidno, 1,
2076 IEEE80211_REASON_UNSPECIFIED);
2077 ath_tgt_tid_drain(sc, tid);
2080 ath_buf_comp(sc, bf);
2084 __stats(sc, txbar_compretries);
/* Chain tail (ds_link == 0): switch to the alternate buffer copy. */
2086 if (!bf->bf_lastds->ds_link) {
2087 __stats(sc, txbar_errlast);
2088 bf = ath_buf_toggle(sc, bf, 1);
2091 bf->bf_lastds->ds_link = 0;
/* Flag retried and resubmit as a unicast frame. */
2093 ath_tx_set_retry(sc, bf);
2094 ath_tgt_txq_add_ucast(sc, bf);
/*
 * ath_bar_tx_comp - completion handler for a transmitted BAR frame.
 * On excessive retries the BAR is retried (or the session torn down);
 * on success the TID — paused when the BAR was built — is resumed and
 * the buffer completed.
 */
2097 static void ath_bar_tx_comp(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
2099 struct ath_desc *ds = bf->bf_lastds;
2100 struct ath_node_target *an;
2102 struct ath_txq *txq;
2104 an = (struct ath_node_target *)bf->bf_node;
2105 tid = &an->tid[bf->bf_tidno];
2106 txq = TID_TO_ACTXQ(tid->tidno);
2108 if (ATH_DS_TX_STATUS(ds) & HAL_TXERR_XRETRY) {
2109 ath_bar_retry(sc, bf);
/* BAR delivered: un-pause the TID (paused in ath_bar_tx). */
2113 ath_aggr_resume_tid(sc, tid);
2116 ath_buf_comp(sc, bf);
/*
 * ath_bar_tx - build and transmit a Block Ack Request for a TID.  Pauses
 * the TID, rewrites the supplied buffer's payload in place as an
 * ieee80211_frame_bar carrying the TID's current seq_start, re-maps it
 * for DMA, sets up descriptors with aggregation cleared, fixed-rate
 * series, and the BAR completion handler, then queues it as unicast.
 * NOTE(review): sampled extract; min_rate initialisation, the toggle of
 * bf via ath_buf_toggle, and the closing of this function are not
 * visible here.
 */
2119 static void ath_bar_tx(struct ath_softc_tgt *sc,
2120 ath_atx_tid_t *tid, struct ath_tx_buf *bf)
2123 struct ieee80211_frame_bar *bar;
2125 struct ath_desc *ds, *ds0;
2126 HAL_11N_RATE_SERIES series[4];
2128 adf_nbuf_queue_t skbhead;
2132 __stats(sc, tx_bars);
2134 memset(&series, 0, sizeof(series));
/* Stop new aggregates on this TID until the BAR completes. */
2136 ath_aggr_pause_tid(sc, tid);
/* Reclaim the old frame's buffer and reuse it for the BAR payload. */
2138 skb = adf_nbuf_queue_remove(&bf->bf_skbhead);
2139 adf_nbuf_peek_header(skb, &anbdata, &anblen);
2140 adf_nbuf_trim_tail(skb, anblen);
2141 bar = (struct ieee80211_frame_bar *) anbdata;
2145 ath_dma_unmap(sc, bf);
2146 adf_nbuf_queue_add(&bf->bf_skbhead, skb);
/* Compose the BAR control frame header. */
2148 bar->i_fc[1] = IEEE80211_FC1_DIR_NODS;
2149 bar->i_fc[0] = IEEE80211_FC0_VERSION_0 |
2150 IEEE80211_FC0_TYPE_CTL |
2151 IEEE80211_FC0_SUBTYPE_BAR;
2152 bar->i_ctl = tid->tidno << IEEE80211_BAR_CTL_TID_S |
2153 IEEE80211_BAR_CTL_COMBA;
/* Request acknowledgement from the window start onwards. */
2154 bar->i_seq = adf_os_cpu_to_le16(tid->seq_start << IEEE80211_SEQ_SEQ_SHIFT);
2156 bf->bf_seqno = tid->seq_start << IEEE80211_SEQ_SEQ_SHIFT;
2158 adf_nbuf_put_tail(skb, sizeof(struct ieee80211_frame_bar));
2160 bf->bf_comp = ath_bar_tx_comp;
2161 bf->bf_tidno = tid->tidno;
2162 bf->bf_node = &tid->an->ni;
2163 ath_dma_map(sc, bf);
2164 adf_nbuf_dmamap_info(bf->bf_dmamap, &bf->bf_dmamap_info);
2167 ath_hal_setuptxdesc(sc->sc_ah, ds
2168 , adf_nbuf_len(skb) + IEEE80211_CRC_LEN
2170 , HAL_PKT_TYPE_NORMAL
2177 | HAL_TXDESC_CLRDMASK
2179 , ATH_COMP_PROC_NO_COMP_NO_CCS);
2181 skbhead = bf->bf_skbhead;
/* BARs are never aggregated: clear aggr bits on every segment. */
2185 for (ds0 = ds, i=0; i < bf->bf_dmamap_info.nsegs; ds0++, i++) {
2186 ath_hal_clr11n_aggr(sc->sc_ah, ds0);
2189 ath_filltxdesc(sc, bf);
/* All four series: max tries at min_rate for robustness. */
2191 for (i = 0 ; i < 4; i++) {
2192 series[i].Tries = ATH_TXMAXTRY;
2193 series[i].Rate = min_rate;
2194 series[i].ChSel = sc->sc_ic.ic_tx_chainmask;
2197 ath_hal_set11n_ratescenario(sc->sc_ah, bf->bf_desc, 0, 0, 0, series, 4, 4);
2198 ath_tgt_txq_add_ucast(sc, bf);