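package/madwifi/patches/300-napi_polling.patch

Convert the madwifi receive path from a tasklet to NAPI polling: replace
ath_rx_tasklet() with an ath_rx_poll() handler registered through
dev->poll/dev->weight, deliver frames with netif_receive_skb() instead of
netif_rx(), and track the unmasked interrupt status in sc->sc_isr so that
RX/TX interrupts can stay masked while the poll loop and the tx tasklets
drain the hardware queues. Several IRQ-style and plain spinlocks become
BH-style locks guarded by irqs_disabled(), and beacon/TIM/WME updates go
through new IEEE80211_BEACON_LOCK()/IEEE80211_BEACON_UNLOCK() wrappers
around the com lock.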
diff -ur madwifi.old/ath/if_ath.c madwifi.dev/ath/if_ath.c
--- madwifi.old/ath/if_ath.c    2007-05-21 07:49:54.571131744 +0200
+++ madwifi.dev/ath/if_ath.c    2007-05-21 07:51:40.208072488 +0200
@@ -167,7 +167,7 @@
        int, u_int32_t);
 static void ath_setdefantenna(struct ath_softc *, u_int);
 static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int);
-static void ath_rx_tasklet(TQUEUE_ARG);
+static int ath_rx_poll(struct net_device *dev, int *budget);
 static int ath_hardstart(struct sk_buff *, struct net_device *);
 static int ath_mgtstart(struct ieee80211com *, struct sk_buff *);
 #ifdef ATH_SUPERG_COMP
@@ -417,7 +417,6 @@
        ATH_TXBUF_LOCK_INIT(sc);
        ATH_RXBUF_LOCK_INIT(sc);
 
-       ATH_INIT_TQUEUE(&sc->sc_rxtq,     ath_rx_tasklet,       dev);
        ATH_INIT_TQUEUE(&sc->sc_txtq,     ath_tx_tasklet,       dev);
        ATH_INIT_TQUEUE(&sc->sc_bmisstq,  ath_bmiss_tasklet,    dev);
        ATH_INIT_TQUEUE(&sc->sc_bstucktq, ath_bstuck_tasklet,   dev);
@@ -665,6 +664,8 @@
        dev->set_mac_address = ath_set_mac_address;
        dev->change_mtu = ath_change_mtu;
        dev->tx_queue_len = ATH_TXBUF - 1;              /* 1 for mgmt frame */
+       dev->poll = ath_rx_poll;
+       dev->weight = 64;
 #ifdef USE_HEADERLEN_RESV
        dev->hard_header_len += sizeof(struct ieee80211_qosframe) +
                                sizeof(struct llc) +
@@ -1635,6 +1636,7 @@
         */
        ath_hal_getisr(ah, &status);            /* NB: clears ISR too */
        DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
+       sc->sc_isr = status;
        status &= sc->sc_imask;                 /* discard unasked for bits */
        if (status & HAL_INT_FATAL) {
                sc->sc_stats.ast_hardware++;
@@ -1674,7 +1676,12 @@
                         * might take too long to fire */
                        ath_hal_process_noisefloor(ah);
                        sc->sc_channoise = ath_hal_get_channel_noise(ah, &(sc->sc_curchan));
-                       ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, &needmark);
+                       sc->sc_isr &= ~HAL_INT_RX;
+                       if (netif_rx_schedule_prep(dev)) {
+                               sc->sc_imask &= ~HAL_INT_RX;
+                               ath_hal_intrset(ah, sc->sc_imask);
+                               __netif_rx_schedule(dev);
+                       }
                }
                if (status & HAL_INT_TX) {
 #ifdef ATH_SUPERG_DYNTURBO
@@ -1700,6 +1707,11 @@
                                }
                        } 
 #endif
+                       /* disable transmit interrupt */
+                       sc->sc_isr &= ~HAL_INT_TX;
+                       ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_TX);
+                       sc->sc_imask &= ~HAL_INT_TX;
+
                        ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
                }
                if (status & HAL_INT_BMISS) {
@@ -2162,12 +2174,13 @@
         * Insert the frame on the outbound list and
         * pass it on to the hardware.
         */
-       ATH_TXQ_LOCK(txq);
+       ATH_TXQ_LOCK_IRQ(txq);
        if (ni && ni->ni_vap && txq == &ATH_VAP(ni->ni_vap)->av_mcastq) {
                /*
                 * The CAB queue is started from the SWBA handler since
                 * frames only go out on DTIM and to avoid possible races.
                 */
+               sc->sc_imask &= ~HAL_INT_SWBA;
                ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_SWBA);
                ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
                DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: txq depth = %d\n", __func__, txq->axq_depth);
@@ -2183,6 +2196,7 @@
                                ito64(bf->bf_daddr), bf->bf_desc);
                }
                txq->axq_link = &lastds->ds_link;
+               sc->sc_imask |= HAL_INT_SWBA;
                ath_hal_intrset(ah, sc->sc_imask);
        } else {
                ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
@@ -2218,7 +2232,7 @@
                        }
                }
        }
-       ATH_TXQ_UNLOCK(txq);
+       ATH_TXQ_UNLOCK_IRQ(txq);
 
        sc->sc_devstats.tx_packets++;
        sc->sc_devstats.tx_bytes += framelen;
@@ -2369,8 +2383,14 @@
        unsigned int pktlen;
        int framecnt;
 
+       /*
+        * NB: using _BH style locking even though this function may be called
+        *     at interrupt time (within tasklet or bh). This should be harmless
+        *     and this function calls others (i.e., ath_tx_start()) which do
+        *     the same.
+        */
        for (;;) {
-               ATH_TXQ_LOCK(txq);
+               ATH_TXQ_LOCK_BH(txq);
 
                bf_ff = TAILQ_LAST(&txq->axq_stageq, axq_headtype);
                if ((!bf_ff) || ath_ff_flushdonetest(txq, bf_ff)) {
@@ -2384,7 +2404,7 @@
                ATH_NODE(ni)->an_tx_ffbuf[bf_ff->bf_skb->priority] = NULL;
                TAILQ_REMOVE(&txq->axq_stageq, bf_ff, bf_stagelist);
 
-               ATH_TXQ_UNLOCK(txq);
+               ATH_TXQ_UNLOCK_BH(txq);
 
                /* encap and xmit */
                bf_ff->bf_skb = ieee80211_encap(ni, bf_ff->bf_skb, &framecnt);
@@ -2405,15 +2425,16 @@
                }
                bf_ff->bf_node = NULL;
 
-               ATH_TXBUF_LOCK_IRQ(sc);
+               ATH_TXBUF_LOCK_BH(sc);
                STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf_ff, bf_list);
-               ATH_TXBUF_UNLOCK_IRQ(sc);
+               ATH_TXBUF_UNLOCK_BH(sc);
        }
+       ATH_TXQ_UNLOCK_BH(txq);
 }
 #endif
 
 #define ATH_HARDSTART_GET_TX_BUF_WITH_LOCK                             \
-       ATH_TXBUF_LOCK_IRQ(sc);                                         \
+       ATH_TXBUF_LOCK_BH(sc);                                          \
        bf = STAILQ_FIRST(&sc->sc_txbuf);                               \
        if (bf != NULL) {                                               \
                STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);             \
@@ -2428,11 +2449,23 @@
                sc->sc_devstopped = 1;                                  \
                ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL);                \
        }                                                               \
-       ATH_TXBUF_UNLOCK_IRQ(sc);                                       \
+
+#define ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF                      \
+       ATH_TXBUF_UNLOCK_BH(sc);                                        \
+       if (bf == NULL) {               /* NB: should not happen */     \
+           DPRINTF(sc,ATH_DEBUG_XMIT,"%s: discard, no xmit buf\n", __func__);                                                                          \
+           sc->sc_stats.ast_tx_nobuf++;                                \
+           goto hardstart_fail;                                        \
+       }
+
+#define ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_ON                       \
+       ATH_TXBUF_UNLOCK_BH(sc);                                        \
        if (bf == NULL) {               /* NB: should not happen */     \
                DPRINTF(sc,ATH_DEBUG_XMIT,                              \
                        "%s: discard, no xmit buf\n", __func__);        \
+           ATH_TXQ_UNLOCK_BH(txq);                                     \
                sc->sc_stats.ast_tx_nobuf++;                            \
+               goto hardstart_fail;                                    \
        }
 
 /*
@@ -2494,6 +2527,7 @@
        if (M_FLAG_GET(skb, M_UAPSD)) {
                /* bypass FF handling */
                ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+               ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF;
                if (bf == NULL)
                        goto hardstart_fail;
                goto ff_bypass;
@@ -2515,7 +2549,7 @@
        /* NB: use this lock to protect an->an_ff_txbuf in athff_can_aggregate()
         *     call too.
         */
-       ATH_TXQ_LOCK(txq);
+       ATH_TXQ_LOCK_BH(txq);
        if (athff_can_aggregate(sc, eh, an, skb, vap->iv_fragthreshold, &ff_flush)) {
 
                if (an->an_tx_ffbuf[skb->priority]) { /* i.e., frame on the staging queue */
@@ -2525,7 +2559,7 @@
                        TAILQ_REMOVE(&txq->axq_stageq, bf, bf_stagelist);
                        an->an_tx_ffbuf[skb->priority] = NULL;
 
-                       ATH_TXQ_UNLOCK(txq);
+                       ATH_TXQ_UNLOCK_BH(txq);
 
                        /*
                         * chain skbs and add FF magic
@@ -2552,6 +2586,7 @@
                         *     to give the buffer back.
                         */
                        ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+                       ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_ON;
                        if (bf == NULL) {
                                ATH_TXQ_UNLOCK(txq);
                                goto hardstart_fail;
@@ -2566,7 +2601,7 @@
 
                        TAILQ_INSERT_HEAD(&txq->axq_stageq, bf, bf_stagelist);
 
-                       ATH_TXQ_UNLOCK(txq);
+                       ATH_TXQ_UNLOCK_BH(txq);
 
                        return 0;
                }
@@ -2577,7 +2612,7 @@
                        TAILQ_REMOVE(&txq->axq_stageq, bf_ff, bf_stagelist);
                        an->an_tx_ffbuf[skb->priority] = NULL;
 
-                       ATH_TXQ_UNLOCK(txq);
+                       ATH_TXQ_UNLOCK_BH(txq);
 
                        /* encap and xmit */
                        bf_ff->bf_skb = ieee80211_encap(ni, bf_ff->bf_skb, &framecnt);
@@ -2607,9 +2642,9 @@
                        }
                        bf_ff->bf_node = NULL;
 
-                       ATH_TXBUF_LOCK(sc);
+                       ATH_TXBUF_LOCK_BH(sc);
                        STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf_ff, bf_list);
-                       ATH_TXBUF_UNLOCK(sc);
+                       ATH_TXBUF_UNLOCK_BH(sc);
                        goto ff_flushdone;
                }
                /*
@@ -2619,14 +2654,13 @@
                else if (an->an_tx_ffbuf[skb->priority]) {
                        DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
                                "%s: Out-Of-Order fast-frame\n", __func__);
-                       ATH_TXQ_UNLOCK(txq);
+                       ATH_TXQ_UNLOCK_BH(txq);
                } else
-                       ATH_TXQ_UNLOCK(txq);
+                       ATH_TXQ_UNLOCK_BH(txq);
 
        ff_flushdone:
                ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
-               if (bf == NULL)
-                       goto hardstart_fail;
+               ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF;
        }
 
 ff_bypass:
@@ -2634,6 +2668,7 @@
 #else /* ATH_SUPERG_FF */
 
        ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+       ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF;
 
 #endif /* ATH_SUPERG_FF */
 
@@ -2655,7 +2690,7 @@
                 *  Allocate 1 ath_buf for each frame given 1 was 
                 *  already alloc'd
                 */
-               ATH_TXBUF_LOCK(sc);
+               ATH_TXBUF_LOCK_BH(sc);
                for (bfcnt = 1; bfcnt < framecnt; ++bfcnt) {
                        if ((tbf = STAILQ_FIRST(&sc->sc_txbuf)) != NULL) {
                                STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
@@ -2676,11 +2711,11 @@
                                        STAILQ_INSERT_TAIL(&sc->sc_txbuf, tbf, bf_list);
                                }
                        }
-                       ATH_TXBUF_UNLOCK(sc);
+                       ATH_TXBUF_UNLOCK_BH(sc);
                        STAILQ_INIT(&bf_head);
                        goto hardstart_fail;
                }
-               ATH_TXBUF_UNLOCK(sc);
+               ATH_TXBUF_UNLOCK_BH(sc);
 
                while ((bf = STAILQ_FIRST(&bf_head)) != NULL && skb != NULL) {
                        unsigned int nextfraglen = 0;
@@ -2716,7 +2751,7 @@
 
 hardstart_fail:
        if (!STAILQ_EMPTY(&bf_head)) {
-               ATH_TXBUF_LOCK(sc);
+               ATH_TXBUF_LOCK_BH(sc);
                STAILQ_FOREACH_SAFE(tbf, &bf_head, bf_list, tempbf) {
                        tbf->bf_skb = NULL;
                        tbf->bf_node = NULL;
@@ -2726,7 +2761,7 @@
 
                        STAILQ_INSERT_TAIL(&sc->sc_txbuf, tbf, bf_list);
                }
-               ATH_TXBUF_UNLOCK(sc);
+               ATH_TXBUF_UNLOCK_BH(sc);
        }
 
        /* free sk_buffs */
@@ -2769,7 +2804,7 @@
        /*
         * Grab a TX buffer and associated resources.
         */
-       ATH_TXBUF_LOCK_IRQ(sc);
+       ATH_TXBUF_LOCK_BH(sc);
        bf = STAILQ_FIRST(&sc->sc_txbuf);
        if (bf != NULL)
                STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
@@ -2780,7 +2815,7 @@
                sc->sc_devstopped=1;
                ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL);
        }
-       ATH_TXBUF_UNLOCK_IRQ(sc);
+       ATH_TXBUF_UNLOCK_BH(sc);
        if (bf == NULL) {
                printk("ath_mgtstart: discard, no xmit buf\n");
                sc->sc_stats.ast_tx_nobufmgt++;
@@ -2809,9 +2844,9 @@
                bf->bf_skb = NULL;
                bf->bf_node = NULL;
 
-               ATH_TXBUF_LOCK_IRQ(sc);
+               ATH_TXBUF_LOCK_BH(sc);
                STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
-               ATH_TXBUF_UNLOCK_IRQ(sc);
+               ATH_TXBUF_UNLOCK_BH(sc);
        }
        dev_kfree_skb_any(skb);
        skb = NULL;
@@ -3279,10 +3314,10 @@
         *
         * XXX Using in_softirq is not right since we might
         * be called from other soft irq contexts than
-        * ath_rx_tasklet.
+        * ath_rx_poll
         */
        if (!in_softirq())
-               tasklet_disable(&sc->sc_rxtq);
+               netif_poll_disable(dev);
        netif_stop_queue(dev);
 }
 
@@ -3295,7 +3330,7 @@
        DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
        netif_start_queue(dev);
        if (!in_softirq())              /* NB: see above */
-               tasklet_enable(&sc->sc_rxtq);
+               netif_poll_enable(dev);
 }
 
 /*
@@ -4861,9 +4896,9 @@
                bf->bf_node = NULL;
                bf->bf_desc->ds_link = 0;
                
-               ATH_TXBUF_LOCK_IRQ(sc);
+               ATH_TXBUF_LOCK_BH(sc);
                STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
-               ATH_TXBUF_UNLOCK_IRQ(sc);
+               ATH_TXBUF_UNLOCK_BH(sc);
 
                an->an_uapsd_overflowqdepth--;
        }
@@ -5542,13 +5577,12 @@
        sc->sc_rxotherant = 0;
 }
 
-static void
-ath_rx_tasklet(TQUEUE_ARG data)
+static int
+ath_rx_poll(struct net_device *dev, int *budget)
 {
 #define        PA2DESC(_sc, _pa) \
        ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
                ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
-       struct net_device *dev = (struct net_device *)data;
        struct ath_buf *bf;
        struct ath_softc *sc = dev->priv;
        struct ieee80211com *ic = &sc->sc_ic;
@@ -5560,11 +5594,15 @@
        unsigned int len;
        int type;
        u_int phyerr;
+       int processed = 0, early_stop = 0;
+       int rx_limit = dev->quota;
 
        /* Let the 802.11 layer know about the new noise floor */
        ic->ic_channoise = sc->sc_channoise;
        
        DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s\n", __func__);
+
+process_rx_again:
        do {
                bf = STAILQ_FIRST(&sc->sc_rxbuf);
                if (bf == NULL) {               /* XXX ??? can this happen */
@@ -5588,6 +5626,13 @@
                        /* NB: never process the self-linked entry at the end */
                        break;
                }
+
+               processed++;
+               if (rx_limit-- < 0) {
+                       early_stop = 1;
+                       break;
+               }
+
                skb = bf->bf_skb;
                if (skb == NULL) {              /* XXX ??? can this happen */
                        printk("%s: no skbuff (%s)\n", dev->name, __func__);
@@ -5626,6 +5671,7 @@
                                sc->sc_stats.ast_rx_phyerr++;
                                phyerr = rs->rs_phyerr & 0x1f;
                                sc->sc_stats.ast_rx_phy[phyerr]++;
+                               goto rx_next;
                        }
                        if (rs->rs_status & HAL_RXERR_DECRYPT) {
                                /*
@@ -5829,9 +5875,29 @@
                STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
                ATH_RXBUF_UNLOCK_IRQ(sc);
        } while (ath_rxbuf_init(sc, bf) == 0);
+       if (!early_stop) {
+               /* Check if more data is received while we were
+                * processing the descriptor chain.
+                */
+               ATH_DISABLE_INTR();
+               if (sc->sc_isr & HAL_INT_RX) {
+                       sc->sc_isr &= ~HAL_INT_RX;
+                       ATH_ENABLE_INTR();
+                       ath_uapsd_processtriggers(sc);
+                       goto process_rx_again;
+               }
+               netif_rx_complete(dev);
+
+               sc->sc_imask |= HAL_INT_RX;
+               ath_hal_intrset(ah, sc->sc_imask);
+               ATH_ENABLE_INTR();
+       }
+
+       *budget -= processed;
        
        /* rx signal state monitoring */
        ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
+       return early_stop;
 #undef PA2DESC
 }
 
@@ -6107,22 +6173,22 @@
                                }
                        }
 
-                       ATH_TXBUF_LOCK_IRQ(sc);                                 
+                       ATH_TXBUF_LOCK_BH(sc);                                  
                        bf = STAILQ_FIRST(&sc->sc_grppollbuf);
                        if (bf != NULL)
                                STAILQ_REMOVE_HEAD(&sc->sc_grppollbuf, bf_list);
                        else {
                                DPRINTF(sc, ATH_DEBUG_XMIT, "%s: No more TxBufs\n", __func__);
-                               ATH_TXBUF_UNLOCK_IRQ_EARLY(sc);
+                               ATH_TXBUF_UNLOCK_BH(sc);
                                return;
                        }
                        /* XXX use a counter and leave at least one for mgmt frames */
                        if (STAILQ_EMPTY(&sc->sc_grppollbuf)) {                         
                                DPRINTF(sc, ATH_DEBUG_XMIT, "%s: No more TxBufs left\n", __func__);
-                               ATH_TXBUF_UNLOCK_IRQ_EARLY(sc);
+                               ATH_TXBUF_UNLOCK_BH(sc);
                                return;
                        }                                       
-                       ATH_TXBUF_UNLOCK_IRQ(sc);
+                       ATH_TXBUF_UNLOCK_BH(sc);
 
                        bf->bf_skbaddr = bus_map_single(sc->sc_bdev,
                                skb->data, skb->len, BUS_DMA_TODEVICE);
@@ -6588,9 +6654,9 @@
                dev_kfree_skb(lastbuf->bf_skb);
                lastbuf->bf_skb = NULL;
                ieee80211_unref_node(&lastbuf->bf_node);
-               ATH_TXBUF_LOCK_IRQ(sc);
+               ATH_TXBUF_LOCK_BH(sc);
                STAILQ_INSERT_TAIL(&sc->sc_txbuf, lastbuf, bf_list);
-               ATH_TXBUF_UNLOCK_IRQ(sc);
+               ATH_TXBUF_UNLOCK_BH(sc);
                
                /*
                 *  move oldest from overflow to delivery
@@ -7411,9 +7477,6 @@
                        if (sc->sc_reapcount > ATH_TXBUF_FREE_THRESHOLD) {
                                if (!sc->sc_dfswait)
                                        netif_start_queue(sc->sc_dev);
-                               DPRINTF(sc, ATH_DEBUG_TX_PROC,
-                                       "%s: tx tasklet restart the queue\n",
-                                       __func__);
                                sc->sc_reapcount = 0;
                                sc->sc_devstopped = 0;
                        } else
@@ -7448,11 +7511,22 @@
        struct net_device *dev = (struct net_device *)data;
        struct ath_softc *sc = dev->priv;
 
+process_tx_again:
        if (txqactive(sc->sc_ah, 0))
                ath_tx_processq(sc, &sc->sc_txq[0]);
        if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
                ath_tx_processq(sc, sc->sc_cabq);
 
+       ATH_DISABLE_INTR();
+       if (sc->sc_isr & HAL_INT_TX) {
+               sc->sc_isr &= ~HAL_INT_TX;
+               ATH_ENABLE_INTR();
+               goto process_tx_again;
+       }
+       sc->sc_imask |= HAL_INT_TX;
+       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+       ATH_ENABLE_INTR();
+
        netif_wake_queue(dev);
 
        if (sc->sc_softled)
@@ -7469,6 +7543,7 @@
        struct net_device *dev = (struct net_device *)data;
        struct ath_softc *sc = dev->priv;
 
+process_tx_again:
        /*
         * Process each active queue.
         */
@@ -7489,6 +7564,16 @@
        if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum))
                ath_tx_processq(sc, sc->sc_uapsdq);
 
+       ATH_DISABLE_INTR();
+       if (sc->sc_isr & HAL_INT_TX) {
+               sc->sc_isr &= ~HAL_INT_TX;
+               ATH_ENABLE_INTR();
+               goto process_tx_again;
+       }
+       sc->sc_imask |= HAL_INT_TX;
+       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+       ATH_ENABLE_INTR();
+
        netif_wake_queue(dev);
 
        if (sc->sc_softled)
@@ -7506,6 +7591,7 @@
        unsigned int i;
 
        /* Process each active queue. */
+process_tx_again:
        for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
                        ath_tx_processq(sc, &sc->sc_txq[i]);
@@ -7514,6 +7600,16 @@
                ath_tx_processq(sc, sc->sc_xrtxq);
 #endif
 
+       ATH_DISABLE_INTR();
+       if (sc->sc_isr & HAL_INT_TX) {
+               sc->sc_isr &= ~HAL_INT_TX;
+               ATH_ENABLE_INTR();
+               goto process_tx_again;
+       }
+       sc->sc_imask |= HAL_INT_TX;
+       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+       ATH_ENABLE_INTR();
+
        netif_wake_queue(dev);
 
        if (sc->sc_softled)
@@ -7612,6 +7708,7 @@
 ath_draintxq(struct ath_softc *sc)
 {
        struct ath_hal *ah = sc->sc_ah;
+       int npend = 0;
        unsigned int i;
 
        /* XXX return value */
@@ -9144,9 +9241,9 @@
        dev->mtu = mtu;
        if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
                /* NB: the rx buffers may need to be reallocated */
-               tasklet_disable(&sc->sc_rxtq);
+               netif_poll_disable(dev);
                error = ath_reset(dev);
-               tasklet_enable(&sc->sc_rxtq);
+               netif_poll_enable(dev);
        }
        ATH_UNLOCK(sc);
 
diff -ur madwifi.old/ath/if_athvar.h madwifi.dev/ath/if_athvar.h
--- madwifi.old/ath/if_athvar.h 2007-05-21 07:49:54.563132960 +0200
+++ madwifi.dev/ath/if_athvar.h 2007-05-21 07:50:22.814838048 +0200
@@ -48,6 +48,10 @@
 #include "if_athioctl.h"
 #include "net80211/ieee80211.h"                /* XXX for WME_NUM_AC */
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define irqs_disabled()                        0
+#endif
+
 /*
  * Deduce if tasklets are available.  If not then
  * fall back to using the immediate work queue.
@@ -478,8 +482,12 @@
 #define        ATH_TXQ_LOCK_DESTROY(_tq)       
 #define        ATH_TXQ_LOCK(_tq)               spin_lock(&(_tq)->axq_lock)
 #define        ATH_TXQ_UNLOCK(_tq)             spin_unlock(&(_tq)->axq_lock)
-#define        ATH_TXQ_LOCK_BH(_tq)            spin_lock_bh(&(_tq)->axq_lock)
-#define        ATH_TXQ_UNLOCK_BH(_tq)          spin_unlock_bh(&(_tq)->axq_lock)
+#define        ATH_TXQ_LOCK_BH(_tq) \
+       if (!irqs_disabled()) \
+               spin_lock_bh(&(_tq)->axq_lock)
+#define        ATH_TXQ_UNLOCK_BH(_tq) \
+       if (!irqs_disabled()) \
+               spin_unlock_bh(&(_tq)->axq_lock)
 #define ATH_TXQ_LOCK_IRQ(_tq)          do {    \
        unsigned long __axq_lockflags;          \
        spin_lock_irqsave(&(_tq)->axq_lock, __axq_lockflags);
@@ -623,7 +631,6 @@
        struct ath_buf *sc_rxbufcur;            /* current rx buffer */
        u_int32_t *sc_rxlink;                   /* link ptr in last RX desc */
        spinlock_t sc_rxbuflock; 
-       struct ATH_TQ_STRUCT sc_rxtq;           /* rx intr tasklet */
        struct ATH_TQ_STRUCT sc_rxorntq;        /* rxorn intr tasklet */
        u_int8_t sc_defant;                     /* current default antenna */
        u_int8_t sc_rxotherant;                 /* rx's on non-default antenna*/
@@ -636,6 +643,7 @@
        u_int sc_txintrperiod;                  /* tx interrupt batching */
        struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
        struct ath_txq *sc_ac2q[WME_NUM_AC];    /* WME AC -> h/w qnum */ 
+       HAL_INT sc_isr;                         /* unmasked ISR state */
        struct ATH_TQ_STRUCT sc_txtq;           /* tx intr tasklet */
        u_int8_t sc_grppoll_str[GRPPOLL_RATE_STR_LEN];  
        struct ath_descdma sc_bdma;             /* beacon descriptors */
@@ -701,8 +709,12 @@
 #define        ATH_TXBUF_LOCK_DESTROY(_sc)
 #define        ATH_TXBUF_LOCK(_sc)             spin_lock(&(_sc)->sc_txbuflock)
 #define        ATH_TXBUF_UNLOCK(_sc)           spin_unlock(&(_sc)->sc_txbuflock)
-#define        ATH_TXBUF_LOCK_BH(_sc)          spin_lock_bh(&(_sc)->sc_txbuflock)
-#define        ATH_TXBUF_UNLOCK_BH(_sc)        spin_unlock_bh(&(_sc)->sc_txbuflock)
+#define        ATH_TXBUF_LOCK_BH(_sc) \
+       if (!irqs_disabled()) \
+               spin_lock_bh(&(_sc)->sc_txbuflock)
+#define        ATH_TXBUF_UNLOCK_BH(_sc) \
+       if (!irqs_disabled()) \
+               spin_unlock_bh(&(_sc)->sc_txbuflock)
 #define        ATH_TXBUF_LOCK_IRQ(_sc)         do {    \
        unsigned long __txbuflockflags;         \
        spin_lock_irqsave(&(_sc)->sc_txbuflock, __txbuflockflags);
@@ -720,8 +732,12 @@
 #define        ATH_RXBUF_LOCK_DESTROY(_sc)
 #define        ATH_RXBUF_LOCK(_sc)             spin_lock(&(_sc)->sc_rxbuflock)
 #define        ATH_RXBUF_UNLOCK(_sc)           spin_unlock(&(_sc)->sc_rxbuflock)
-#define        ATH_RXBUF_LOCK_BH(_sc)          spin_lock_bh(&(_sc)->sc_rxbuflock)
-#define        ATH_RXBUF_UNLOCK_BH(_sc)        spin_unlock_bh(&(_sc)->sc_rxbuflock)
+#define        ATH_RXBUF_LOCK_BH(_sc) \
+       if (!irqs_disabled()) \
+               spin_lock_bh(&(_sc)->sc_rxbuflock)
+#define        ATH_RXBUF_UNLOCK_BH(_sc) \
+       if (!irqs_disabled()) \
+               spin_unlock_bh(&(_sc)->sc_rxbuflock)
 #define        ATH_RXBUF_LOCK_IRQ(_sc)         do {    \
        unsigned long __rxbuflockflags;         \
        spin_lock_irqsave(&(_sc)->sc_rxbuflock, __rxbuflockflags);
@@ -731,6 +747,8 @@
 #define        ATH_RXBUF_UNLOCK_IRQ_EARLY(_sc)         \
        spin_unlock_irqrestore(&(_sc)->sc_rxbuflock, __rxbuflockflags);
 
+#define ATH_DISABLE_INTR               local_irq_disable
+#define ATH_ENABLE_INTR                local_irq_enable
 
 /* Protects the device from concurrent accesses */
 #define        ATH_LOCK_INIT(_sc)              init_MUTEX(&(_sc)->sc_lock)
diff -ur madwifi.old/net80211/ieee80211_beacon.c madwifi.dev/net80211/ieee80211_beacon.c
--- madwifi.old/net80211/ieee80211_beacon.c     2007-01-31 11:41:05.000000000 +0100
+++ madwifi.dev/net80211/ieee80211_beacon.c     2007-05-21 07:50:22.815837896 +0200
@@ -286,7 +286,7 @@
        int len_changed = 0;
        u_int16_t capinfo;
 
-       IEEE80211_LOCK(ic);
+       IEEE80211_BEACON_LOCK(ic);
 
        if ((ic->ic_flags & IEEE80211_F_DOTH) &&
            (vap->iv_flags & IEEE80211_F_CHANSWITCH) &&
@@ -547,7 +547,7 @@
                vap->iv_flags_ext &= ~IEEE80211_FEXT_APPIE_UPDATE;
        }
 
-       IEEE80211_UNLOCK(ic);
+       IEEE80211_BEACON_UNLOCK(ic);
 
        return len_changed;
 }
diff -ur madwifi.old/net80211/ieee80211_input.c madwifi.dev/net80211/ieee80211_input.c
--- madwifi.old/net80211/ieee80211_input.c      2007-05-21 07:49:54.527138432 +0200
+++ madwifi.dev/net80211/ieee80211_input.c      2007-05-21 07:50:22.816837744 +0200
@@ -1155,8 +1155,9 @@
                if (ni->ni_vlan != 0 && vap->iv_vlgrp != NULL) {
                        /* attach vlan tag */
                        vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan);
-               } else
-                       netif_rx(skb);
+               } else {
+                       netif_receive_skb(skb);
+               }
                dev->last_rx = jiffies;
        }
 }
@@ -3657,9 +3658,9 @@
        }
 
        /* Okay, take the first queued packet and put it out... */
-       IEEE80211_NODE_SAVEQ_LOCK(ni);
+       IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
        IEEE80211_NODE_SAVEQ_DEQUEUE(ni, skb, qlen);
-       IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+       IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
        if (skb == NULL) {
                IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_POWER, wh->i_addr2,
                        "%s", "recv ps-poll, but queue empty");
diff -ur madwifi.old/net80211/ieee80211_linux.h madwifi.dev/net80211/ieee80211_linux.h
--- madwifi.old/net80211/ieee80211_linux.h      2007-05-21 07:49:54.528138280 +0200
+++ madwifi.dev/net80211/ieee80211_linux.h      2007-05-21 07:50:22.817837592 +0200
@@ -31,6 +31,10 @@
 
 #include <linux/wireless.h>
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define irqs_disabled()                        0
+#endif
+
 /*
  * Task deferral
  *
@@ -86,8 +90,12 @@
 } while (0)
 #define        IEEE80211_UNLOCK_IRQ_EARLY(_ic)                                 \
        spin_unlock_irqrestore(&(_ic)->ic_comlock, __ilockflags);
-#define IEEE80211_LOCK_BH(_ic) spin_lock_bh(&(_ic)->ic_comlock)
-#define IEEE80211_UNLOCK_BH(_ic) spin_unlock_bh(&(_ic)->ic_comlock)
+#define IEEE80211_LOCK_BH(_ic) \
+       if (!irqs_disabled()) \
+               spin_lock_bh(&(_ic)->ic_comlock)
+#define IEEE80211_UNLOCK_BH(_ic) \
+       if (!irqs_disabled()) \
+               spin_unlock_bh(&(_ic)->ic_comlock)
 #define IEEE80211_LOCK(_ic)    spin_lock(&(_ic)->ic_comlock)
 #define IEEE80211_UNLOCK(_ic)  spin_unlock(&(_ic)->ic_comlock)
 
@@ -104,15 +112,22 @@
 #define IEEE80211_VAPS_LOCK_DESTROY(_ic)
 #define IEEE80211_VAPS_LOCK(_ic)       spin_lock(&(_ic)->ic_vapslock);
 #define IEEE80211_VAPS_UNLOCK(_ic)     spin_unlock(&(_ic)->ic_vapslock);
-#define IEEE80211_VAPS_LOCK_BH(_ic)    spin_lock_bh(&(_ic)->ic_vapslock);
-#define IEEE80211_VAPS_UNLOCK_BH(_ic)  spin_unlock_bh(&(_ic)->ic_vapslock);
-#define IEEE80211_VAPS_LOCK_IRQ(_ic)   do {    \
-       int _vaps_lockflags;                    \
-       spin_lock_irqsave(&(_ic)->ic_vapslock, _vaps_lockflags);
-#define IEEE80211_VAPS_UNLOCK_IRQ(_ic) \
-       spin_unlock_irqrestore(&(_ic)->ic_vapslock, _vaps_lockflags); \
-} while (0)
-#define IEEE80211_VAPS_UNLOCK_IRQ_EARLY(_ic)   spin_unlock_irqrestore(&(_ic)->ic_vapslock, _vaps_lockflags)
+#define IEEE80211_VAPS_LOCK_BH(_ic) \
+       if (!irqs_disabled()) \
+               spin_lock_bh(&(_ic)->ic_vapslock);
+#define IEEE80211_VAPS_UNLOCK_BH(_ic) \
+       if (!irqs_disabled()) \
+               spin_unlock_bh(&(_ic)->ic_vapslock);
+#define IEEE80211_VAPS_LOCK_IRQ(_ic) do {                              \
+       unsigned long __vlockflags=0;                           \
+       unsigned int __vlocked=0;                               \
+       __vlocked=spin_is_locked(&(_ic)->ic_vapslock);          \
+       if(!__vlocked) spin_lock_irqsave(&(_ic)->ic_vapslock, __vlockflags);
+#define IEEE80211_VAPS_UNLOCK_IRQ(_ic)                                 \
+       if(!__vlocked) spin_unlock_irqrestore(&(_ic)->ic_vapslock, __vlockflags);       \
+} while (0);
+#define IEEE80211_VAPS_UNLOCK_IRQ_EARLY(_ic) \
+       if (!__vlocked) spin_unlock_irqrestore(&(_ic)->ic_vapslock, _vaps_lockflags)
 
 #if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
 #define IEEE80211_VAPS_LOCK_ASSERT(_ic) \
@@ -122,6 +137,11 @@
 #define IEEE80211_VAPS_LOCK_ASSERT(_ic)
 #endif
 
+/*
+ * Beacon locking definitions; piggyback on com lock.
+ */
+#define        IEEE80211_BEACON_LOCK(_ic)         IEEE80211_LOCK_IRQ(_ic)
+#define        IEEE80211_BEACON_UNLOCK(_ic)       IEEE80211_UNLOCK_IRQ(_ic)
 
 /*
  * Node locking definitions.
@@ -191,8 +211,12 @@
 typedef spinlock_t ieee80211_scan_lock_t;
 #define        IEEE80211_SCAN_LOCK_INIT(_nt, _name) spin_lock_init(&(_nt)->nt_scanlock)
 #define        IEEE80211_SCAN_LOCK_DESTROY(_nt)
-#define        IEEE80211_SCAN_LOCK_BH(_nt)     spin_lock_bh(&(_nt)->nt_scanlock)
-#define        IEEE80211_SCAN_UNLOCK_BH(_nt)   spin_unlock_bh(&(_nt)->nt_scanlock)
+#define        IEEE80211_SCAN_LOCK_BH(_nt) \
+       if (!irqs_disabled()) \
+               spin_lock_bh(&(_nt)->nt_scanlock)
+#define        IEEE80211_SCAN_UNLOCK_BH(_nt) \
+       if (!irqs_disabled()) \
+               spin_unlock_bh(&(_nt)->nt_scanlock)
 #define        IEEE80211_SCAN_LOCK_IRQ(_nt)    do {    \
        unsigned long __scan_lockflags;         \
        spin_lock_irqsave(&(_nt)->nt_scanlock, __scan_lockflags);
@@ -217,8 +241,12 @@
 #define        ACL_LOCK_DESTROY(_as)
 #define        ACL_LOCK(_as)                   spin_lock(&(_as)->as_lock)
 #define        ACL_UNLOCK(_as)                 spin_unlock(&(_as)->as_lock)
-#define        ACL_LOCK_BH(_as)                spin_lock_bh(&(_as)->as_lock)
-#define        ACL_UNLOCK_BH(_as)              spin_unlock_bh(&(_as)->as_lock)
+#define        ACL_LOCK_BH(_as) \
+       if (!irqs_disabled()) \
+               spin_lock_bh(&(_as)->as_lock)
+#define        ACL_UNLOCK_BH(_as) \
+       if (!irqs_disabled()) \
+               spin_unlock_bh(&(_as)->as_lock)
 
 #if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
 #define        ACL_LOCK_ASSERT(_as) \
diff -ur madwifi.old/net80211/ieee80211_node.c madwifi.dev/net80211/ieee80211_node.c
--- madwifi.old/net80211/ieee80211_node.c       2007-05-21 07:49:54.555134176 +0200
+++ madwifi.dev/net80211/ieee80211_node.c       2007-05-21 07:50:22.818837440 +0200
@@ -1570,7 +1570,7 @@
        struct ieee80211_node *ni;
        u_int gen;
 
-       IEEE80211_SCAN_LOCK_IRQ(nt);
+       IEEE80211_SCAN_LOCK_BH(nt);
        gen = ++nt->nt_scangen;
        
 restart:
@@ -1590,7 +1590,7 @@
        }
        IEEE80211_NODE_TABLE_UNLOCK_IRQ(nt);
 
-       IEEE80211_SCAN_UNLOCK_IRQ(nt);
+       IEEE80211_SCAN_UNLOCK_BH(nt);
 }
 EXPORT_SYMBOL(ieee80211_iterate_dev_nodes);
 
diff -ur madwifi.old/net80211/ieee80211_power.c madwifi.dev/net80211/ieee80211_power.c
--- madwifi.old/net80211/ieee80211_power.c      2007-05-21 07:49:54.532137672 +0200
+++ madwifi.dev/net80211/ieee80211_power.c      2007-05-21 07:50:22.818837440 +0200
@@ -147,7 +147,7 @@
 #endif
                struct sk_buff *skb;
 
-               IEEE80211_NODE_SAVEQ_LOCK(ni);
+               IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
                while ((skb = skb_peek(&ni->ni_savedq)) != NULL &&
                     M_AGE_GET(skb) < IEEE80211_INACT_WAIT) {
                        IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
@@ -159,7 +159,7 @@
                }
                if (skb != NULL)
                        M_AGE_SUB(skb, IEEE80211_INACT_WAIT);
-               IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+               IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
 
                IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
                        "discard %u frames for age", discard);
@@ -185,7 +185,7 @@
        KASSERT(aid < vap->iv_max_aid,
                ("bogus aid %u, max %u", aid, vap->iv_max_aid));
 
-       IEEE80211_LOCK(ni->ni_ic);
+       IEEE80211_BEACON_LOCK(ni->ni_ic);
        if (set != (isset(vap->iv_tim_bitmap, aid) != 0)) {
                if (set) {
                        setbit(vap->iv_tim_bitmap, aid);
@@ -196,7 +196,7 @@
                }
                vap->iv_flags |= IEEE80211_F_TIMUPDATE;
        }
-       IEEE80211_UNLOCK(ni->ni_ic);
+       IEEE80211_BEACON_UNLOCK(ni->ni_ic);
 }
 
 /*
@@ -297,9 +297,9 @@
                struct sk_buff *skb;
                int qlen;
 
-               IEEE80211_NODE_SAVEQ_LOCK(ni);
+               IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
                IEEE80211_NODE_SAVEQ_DEQUEUE(ni, skb, qlen);
-               IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+               IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
                if (skb == NULL)
                        break;
                /* 
@@ -361,9 +361,9 @@
                        for (;;) {
                                struct sk_buff *skb;
 
-                               IEEE80211_NODE_SAVEQ_LOCK(ni);
+                               IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
                                skb = __skb_dequeue(&ni->ni_savedq);
-                               IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+                               IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
                                if (skb == NULL)
                                        break;
                                ieee80211_parent_queue_xmit(skb);
diff -ur madwifi.old/net80211/ieee80211_proto.c madwifi.dev/net80211/ieee80211_proto.c
--- madwifi.old/net80211/ieee80211_proto.c      2007-05-21 07:49:54.574131288 +0200
+++ madwifi.dev/net80211/ieee80211_proto.c      2007-05-21 07:50:22.819837288 +0200
@@ -635,9 +635,9 @@
 {
        struct ieee80211com *ic = vap->iv_ic;
 
-       IEEE80211_LOCK(ic);
+       IEEE80211_BEACON_LOCK(ic);
        ieee80211_wme_initparams_locked(vap);
-       IEEE80211_UNLOCK(ic);
+       IEEE80211_BEACON_UNLOCK(ic);
 }
 
 void
@@ -920,9 +920,9 @@
        struct ieee80211com *ic = vap->iv_ic;
 
        if (ic->ic_caps & IEEE80211_C_WME) {
-               IEEE80211_LOCK(ic);
+               IEEE80211_BEACON_LOCK(ic);
                ieee80211_wme_updateparams_locked(vap);
-               IEEE80211_UNLOCK(ic);
+               IEEE80211_BEACON_UNLOCK(ic);
        }
 }
 
diff -ur madwifi.old/net80211/ieee80211_scan_sta.c madwifi.dev/net80211/ieee80211_scan_sta.c
--- madwifi.old/net80211/ieee80211_scan_sta.c   2006-09-20 10:45:13.000000000 +0200
+++ madwifi.dev/net80211/ieee80211_scan_sta.c   2007-05-21 07:50:22.819837288 +0200
@@ -163,9 +163,11 @@
 {
        struct sta_table *st = ss->ss_priv;
 
-       spin_lock(&st->st_lock);
+       if (!irqs_disabled())
+               spin_lock_bh(&st->st_lock);
        sta_flush_table(st);
-       spin_unlock(&st->st_lock);
+       if (!irqs_disabled())
+               spin_unlock_bh(&st->st_lock);
        ss->ss_last = 0;
        return 0;
 }
@@ -215,7 +217,8 @@
        int hash;
 
        hash = STA_HASH(macaddr);
-       spin_lock(&st->st_lock);  
+       if (!irqs_disabled())
+               spin_lock_bh(&st->st_lock);  
        LIST_FOREACH(se, &st->st_hash[hash], se_hash)
                if (IEEE80211_ADDR_EQ(se->base.se_macaddr, macaddr) &&
                    sp->ssid[1] == se->base.se_ssid[1] && 
@@ -225,7 +228,7 @@
        MALLOC(se, struct sta_entry *, sizeof(struct sta_entry),
                M_80211_SCAN, M_NOWAIT | M_ZERO);
        if (se == NULL) {
-               spin_unlock(&st->st_lock);
+               spin_unlock_bh(&st->st_lock);
                return 0;
        }
        se->se_scangen = st->st_scangen-1;
@@ -287,7 +290,8 @@
        se->se_seen = 1;
        se->se_notseen = 0;
 
-       spin_unlock(&st->st_lock);
+       if (!irqs_disabled())
+               spin_unlock_bh(&st->st_lock);
 
        /*
         * If looking for a quick choice and nothing's
@@ -1063,7 +1067,8 @@
        u_int gen;
        int res = 0;
 
-       spin_lock(&st->st_scanlock);
+       if (!irqs_disabled())
+               spin_lock_bh(&st->st_scanlock);
        gen = st->st_scangen++;
 restart:
        spin_lock(&st->st_lock);
@@ -1086,7 +1091,8 @@
        spin_unlock(&st->st_lock);
 
  done:
-       spin_unlock(&st->st_scanlock);
+       if (!irqs_disabled())
+               spin_unlock_bh(&st->st_scanlock);
 
        return res;
 }
@@ -1235,7 +1241,8 @@
        bestchan = NULL;
        bestrssi = -1;
 
-       spin_lock(&st->st_lock);
+       if (!irqs_disabled())
+               spin_lock_bh(&st->st_lock);
        for (i = 0; i < ss->ss_last; i++) {
                c = ss->ss_chans[i];
                maxrssi = 0;
@@ -1248,7 +1255,8 @@
                if (bestchan == NULL || maxrssi < bestrssi)
                        bestchan = c;
        }
-       spin_unlock(&st->st_lock);
+       if (!irqs_disabled())
+               spin_unlock_bh(&st->st_lock);
 
        return bestchan;
 }