diff -ur madwifi.old/ath/if_ath.c madwifi.dev/ath/if_ath.c
--- madwifi.old/ath/if_ath.c    2007-05-31 06:48:28.561525488 +0200
+++ madwifi.dev/ath/if_ath.c    2007-05-31 06:50:07.143538744 +0200
@@ -167,7 +167,7 @@
        int, u_int32_t);
 static void ath_setdefantenna(struct ath_softc *, u_int);
 static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int);
-static void ath_rx_tasklet(TQUEUE_ARG);
+static int ath_rx_poll(struct net_device *dev, int *budget);
 static int ath_hardstart(struct sk_buff *, struct net_device *);
 static int ath_mgtstart(struct ieee80211com *, struct sk_buff *);
 #ifdef ATH_SUPERG_COMP
@@ -429,7 +429,6 @@
        ATH_TXBUF_LOCK_INIT(sc);
        ATH_RXBUF_LOCK_INIT(sc);
 
-       ATH_INIT_TQUEUE(&sc->sc_rxtq,     ath_rx_tasklet,       dev);
 ATH_INIT_TQUEUE(&sc->sc_txtq,     ath_tx_tasklet,       dev);
 ATH_INIT_TQUEUE(&sc->sc_bmisstq,  ath_bmiss_tasklet,    dev);
 ATH_INIT_TQUEUE(&sc->sc_bstucktq, ath_bstuck_tasklet,   dev);
@@ -685,6 +684,8 @@
        dev->set_mac_address = ath_set_mac_address;
        dev->change_mtu = ath_change_mtu;
        dev->tx_queue_len = ATH_TXBUF - 1;              /* 1 for mgmt frame */
+       dev->poll = ath_rx_poll;
+       dev->weight = 64;
 #ifdef USE_HEADERLEN_RESV
        dev->hard_header_len += sizeof(struct ieee80211_qosframe) +
                                sizeof(struct llc) +
@@ -1643,6 +1644,7 @@
         */
        ath_hal_getisr(ah, &status);            /* NB: clears ISR too */
        DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
+       sc->sc_isr = status;
        status &= sc->sc_imask;                 /* discard unasked for bits */
        if (status & HAL_INT_FATAL) {
                sc->sc_stats.ast_hardware++;
@@ -1678,7 +1680,12 @@
                if (status & HAL_INT_RX) {
                        sc->sc_tsf = ath_hal_gettsf64(ah);
                        ath_uapsd_processtriggers(sc);
-                       ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, &needmark);
+                       sc->sc_isr &= ~HAL_INT_RX;
+                       if (netif_rx_schedule_prep(dev)) {
+                               sc->sc_imask &= ~HAL_INT_RX;
+                               ath_hal_intrset(ah, sc->sc_imask);
+                               __netif_rx_schedule(dev);
+                       }
                }
                if (status & HAL_INT_TX) {
 #ifdef ATH_SUPERG_DYNTURBO
@@ -1704,6 +1711,11 @@
                                }
                        } 
 #endif
+                       /* disable transmit interrupt */
+                       sc->sc_isr &= ~HAL_INT_TX;
+                       ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_TX);
+                       sc->sc_imask &= ~HAL_INT_TX;
+
                        ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
                }
                if (status & HAL_INT_BMISS) {
@@ -2166,12 +2178,13 @@
         * Insert the frame on the outbound list and
         * pass it on to the hardware.
         */
-       ATH_TXQ_LOCK(txq);
+       ATH_TXQ_LOCK_IRQ(txq);
        if (ni && ni->ni_vap && txq == &ATH_VAP(ni->ni_vap)->av_mcastq) {
                /*
                 * The CAB queue is started from the SWBA handler since
                 * frames only go out on DTIM and to avoid possible races.
                 */
+               sc->sc_imask &= ~HAL_INT_SWBA;
                ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_SWBA);
                ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
                DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: txq depth = %d\n", __func__, txq->axq_depth);
@@ -2187,6 +2200,7 @@
                                ito64(bf->bf_daddr), bf->bf_desc);
                }
                txq->axq_link = &lastds->ds_link;
+               sc->sc_imask |= HAL_INT_SWBA;
                ath_hal_intrset(ah, sc->sc_imask);
        } else {
                ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
@@ -2222,7 +2236,7 @@
                        }
                }
        }
-       ATH_TXQ_UNLOCK(txq);
+       ATH_TXQ_UNLOCK_IRQ(txq);
 
        sc->sc_devstats.tx_packets++;
        sc->sc_devstats.tx_bytes += framelen;
@@ -2373,12 +2387,18 @@
        unsigned int pktlen;
        int framecnt;
 
+       /*
+        * NB: using _BH style locking even though this function may be called
+        *     at interrupt time (within tasklet or bh). This should be harmless
+        *     and this function calls others (i.e., ath_tx_start()) which do
+        *     the same.
+        */
        for (;;) {
-               ATH_TXQ_LOCK(txq);
+               ATH_TXQ_LOCK_BH(txq);
 
                bf_ff = TAILQ_LAST(&txq->axq_stageq, axq_headtype);
                if ((!bf_ff) || ath_ff_flushdonetest(txq, bf_ff)) {
-                       ATH_TXQ_UNLOCK(txq);
+                       ATH_TXQ_UNLOCK_BH(txq);
                        break;
                }
 
@@ -2388,7 +2408,7 @@
                ATH_NODE(ni)->an_tx_ffbuf[bf_ff->bf_skb->priority] = NULL;
                TAILQ_REMOVE(&txq->axq_stageq, bf_ff, bf_stagelist);
 
-               ATH_TXQ_UNLOCK(txq);
+               ATH_TXQ_UNLOCK_BH(txq);
 
                /* encap and xmit */
                bf_ff->bf_skb = ieee80211_encap(ni, bf_ff->bf_skb, &framecnt);
@@ -2409,15 +2429,16 @@
                }
                bf_ff->bf_node = NULL;
 
-               ATH_TXBUF_LOCK_IRQ(sc);
+               ATH_TXBUF_LOCK_BH(sc);
                STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf_ff, bf_list);
-               ATH_TXBUF_UNLOCK_IRQ(sc);
+               ATH_TXBUF_UNLOCK_BH(sc);
        }
+       ATH_TXQ_UNLOCK_BH(txq);
 }
 #endif
 
 #define ATH_HARDSTART_GET_TX_BUF_WITH_LOCK                             \
-       ATH_TXBUF_LOCK_IRQ(sc);                                         \
+       ATH_TXBUF_LOCK_BH(sc);                                          \
        bf = STAILQ_FIRST(&sc->sc_txbuf);                               \
        if (bf != NULL) {                                               \
                STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);             \
@@ -2432,11 +2453,23 @@
                sc->sc_devstopped = 1;                                  \
                ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL);                \
        }                                                               \
-       ATH_TXBUF_UNLOCK_IRQ(sc);                                       \
+
+#define ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF                      \
+       ATH_TXBUF_UNLOCK_BH(sc);                                        \
+       if (bf == NULL) {               /* NB: should not happen */     \
+           DPRINTF(sc,ATH_DEBUG_XMIT,"%s: discard, no xmit buf\n", __func__);                                                                          \
+           sc->sc_stats.ast_tx_nobuf++;                                \
+           goto hardstart_fail;                                        \
+       }
+
+#define ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_ON                       \
+       ATH_TXBUF_UNLOCK_BH(sc);                                        \
        if (bf == NULL) {               /* NB: should not happen */     \
                DPRINTF(sc,ATH_DEBUG_XMIT,                              \
                        "%s: discard, no xmit buf\n", __func__);        \
+           ATH_TXQ_UNLOCK_BH(txq);                                     \
                sc->sc_stats.ast_tx_nobuf++;                            \
+               goto hardstart_fail;                                    \
        }
 
 /*
@@ -2498,6 +2531,7 @@
        if (M_FLAG_GET(skb, M_UAPSD)) {
                /* bypass FF handling */
                ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+               ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF;
                if (bf == NULL)
                        goto hardstart_fail;
                goto ff_bypass;
@@ -2519,7 +2553,7 @@
        /* NB: use this lock to protect an->an_ff_txbuf in athff_can_aggregate()
        *     call too.
        */
-       ATH_TXQ_LOCK(txq);
+       ATH_TXQ_LOCK_BH(txq);
        if (athff_can_aggregate(sc, eh, an, skb, vap->iv_fragthreshold, &ff_flush)) {
 
                if (an->an_tx_ffbuf[skb->priority]) { /* i.e., frame on the staging queue */
@@ -2529,7 +2563,7 @@
                        TAILQ_REMOVE(&txq->axq_stageq, bf, bf_stagelist);
                        an->an_tx_ffbuf[skb->priority] = NULL;
 
-                       ATH_TXQ_UNLOCK(txq);
+                       ATH_TXQ_UNLOCK_BH(txq);
 
                        /*
                         * chain skbs and add FF magic
@@ -2556,8 +2590,9 @@
                         *     to give the buffer back.
                         */
                        ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+                       ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_ON;
                        if (bf == NULL) {
-                               ATH_TXQ_UNLOCK(txq);
+                               ATH_TXQ_UNLOCK_BH(txq);
                                goto hardstart_fail;
                        }
                        DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
@@ -2570,7 +2605,7 @@
 
                        TAILQ_INSERT_HEAD(&txq->axq_stageq, bf, bf_stagelist);
 
-                       ATH_TXQ_UNLOCK(txq);
+                       ATH_TXQ_UNLOCK_BH(txq);
 
                        return 0;
                }
@@ -2581,7 +2616,7 @@
                        TAILQ_REMOVE(&txq->axq_stageq, bf_ff, bf_stagelist);
                        an->an_tx_ffbuf[skb->priority] = NULL;
 
-                       ATH_TXQ_UNLOCK(txq);
+                       ATH_TXQ_UNLOCK_BH(txq);
 
                        /* encap and xmit */
                        bf_ff->bf_skb = ieee80211_encap(ni, bf_ff->bf_skb, &framecnt);
@@ -2611,9 +2646,9 @@
                        }
                        bf_ff->bf_node = NULL;
 
-                       ATH_TXBUF_LOCK(sc);
+                       ATH_TXBUF_LOCK_BH(sc);
                        STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf_ff, bf_list);
-                       ATH_TXBUF_UNLOCK(sc);
+                       ATH_TXBUF_UNLOCK_BH(sc);
                        goto ff_flushdone;
                }
                /*
@@ -2623,14 +2658,13 @@
                else if (an->an_tx_ffbuf[skb->priority]) {
                        DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
                                "%s: Out-Of-Order fast-frame\n", __func__);
-                       ATH_TXQ_UNLOCK(txq);
+                       ATH_TXQ_UNLOCK_BH(txq);
                } else
-                       ATH_TXQ_UNLOCK(txq);
+                       ATH_TXQ_UNLOCK_BH(txq);
 
        ff_flushdone:
                ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
-               if (bf == NULL)
-                       goto hardstart_fail;
+               ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF;
        }
 
 ff_bypass:
@@ -2638,6 +2672,7 @@
 #else /* ATH_SUPERG_FF */
 
        ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+       ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF;
 
 #endif /* ATH_SUPERG_FF */
 
@@ -2659,7 +2694,7 @@
                 *  Allocate 1 ath_buf for each frame given 1 was 
                 *  already alloc'd
                 */
-               ATH_TXBUF_LOCK(sc);
+               ATH_TXBUF_LOCK_BH(sc);
                for (bfcnt = 1; bfcnt < framecnt; ++bfcnt) {
                        if ((tbf = STAILQ_FIRST(&sc->sc_txbuf)) != NULL) {
                                STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
@@ -2680,11 +2715,11 @@
                                        STAILQ_INSERT_TAIL(&sc->sc_txbuf, tbf, bf_list);
                                }
                        }
-                       ATH_TXBUF_UNLOCK(sc);
+                       ATH_TXBUF_UNLOCK_BH(sc);
                        STAILQ_INIT(&bf_head);
                        goto hardstart_fail;
                }
-               ATH_TXBUF_UNLOCK(sc);
+               ATH_TXBUF_UNLOCK_BH(sc);
 
                while ((bf = STAILQ_FIRST(&bf_head)) != NULL && skb != NULL) {
                        unsigned int nextfraglen = 0;
@@ -2720,7 +2755,7 @@
 
 hardstart_fail:
        if (!STAILQ_EMPTY(&bf_head)) {
-               ATH_TXBUF_LOCK(sc);
+               ATH_TXBUF_LOCK_BH(sc);
                STAILQ_FOREACH_SAFE(tbf, &bf_head, bf_list, tempbf) {
                        tbf->bf_skb = NULL;
                        tbf->bf_node = NULL;
@@ -2730,7 +2765,7 @@
 
                        STAILQ_INSERT_TAIL(&sc->sc_txbuf, tbf, bf_list);
                }
-               ATH_TXBUF_UNLOCK(sc);
+               ATH_TXBUF_UNLOCK_BH(sc);
        }
 
        /* free sk_buffs */
@@ -2773,7 +2808,7 @@
        /*
         * Grab a TX buffer and associated resources.
         */
-       ATH_TXBUF_LOCK_IRQ(sc);
+       ATH_TXBUF_LOCK_BH(sc);
        bf = STAILQ_FIRST(&sc->sc_txbuf);
        if (bf != NULL)
                STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
@@ -2784,7 +2819,7 @@
                sc->sc_devstopped=1;
                ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL);
        }
-       ATH_TXBUF_UNLOCK_IRQ(sc);
+       ATH_TXBUF_UNLOCK_BH(sc);
        if (bf == NULL) {
                printk("ath_mgtstart: discard, no xmit buf\n");
                sc->sc_stats.ast_tx_nobufmgt++;
@@ -2813,9 +2848,9 @@
                bf->bf_skb = NULL;
                bf->bf_node = NULL;
 
-               ATH_TXBUF_LOCK_IRQ(sc);
+               ATH_TXBUF_LOCK_BH(sc);
                STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
-               ATH_TXBUF_UNLOCK_IRQ(sc);
+               ATH_TXBUF_UNLOCK_BH(sc);
        }
        dev_kfree_skb_any(skb);
        skb = NULL;
@@ -3283,10 +3318,10 @@
         *
         * XXX Using in_softirq is not right since we might
         * be called from other soft irq contexts than
-        * ath_rx_tasklet.
+        * ath_rx_poll
         */
        if (!in_softirq())
-               tasklet_disable(&sc->sc_rxtq);
+               netif_poll_disable(dev);
        netif_stop_queue(dev);
 }
 
@@ -3299,7 +3334,7 @@
        DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
        netif_start_queue(dev);
        if (!in_softirq())              /* NB: see above */
-               tasklet_enable(&sc->sc_rxtq);
+               netif_poll_enable(dev);
 }
 
 /*
@@ -4866,9 +4901,9 @@
                bf->bf_node = NULL;
                bf->bf_desc->ds_link = 0;
                
-               ATH_TXBUF_LOCK_IRQ(sc);
+               ATH_TXBUF_LOCK_BH(sc);
                STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
-               ATH_TXBUF_UNLOCK_IRQ(sc);
+               ATH_TXBUF_UNLOCK_BH(sc);
 
                an->an_uapsd_overflowqdepth--;
        }
@@ -5544,13 +5579,12 @@
        sc->sc_rxotherant = 0;
 }
 
-static void
-ath_rx_tasklet(TQUEUE_ARG data)
+static int
+ath_rx_poll(struct net_device *dev, int *budget)
 {
 #define        PA2DESC(_sc, _pa) \
        ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
                ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
-       struct net_device *dev = (struct net_device *)data;
        struct ath_buf *bf;
        struct ath_softc *sc = dev->priv;
        struct ieee80211com *ic = &sc->sc_ic;
@@ -5562,12 +5596,15 @@
        unsigned int len;
        int type;
        u_int phyerr;
+       u_int processed = 0, early_stop = 0;
+       int rx_limit = dev->quota;
 
        /* Let the 802.11 layer know about the new noise floor */
        sc->sc_channoise = ath_hal_get_channel_noise(ah, &(sc->sc_curchan));
        ic->ic_channoise = sc->sc_channoise;
        
        DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s\n", __func__);
+process_rx_again:
        do {
                bf = STAILQ_FIRST(&sc->sc_rxbuf);
                if (bf == NULL) {               /* XXX ??? can this happen */
@@ -5591,6 +5628,13 @@
                        /* NB: never process the self-linked entry at the end */
                        break;
                }
+
+               processed++;
+               if (rx_limit-- < 0) {
+                       early_stop = 1;
+                       break;
+               }
+
                skb = bf->bf_skb;
                if (skb == NULL) {              /* XXX ??? can this happen */
                        printk("%s: no skbuff (%s)\n", dev->name, __func__);
@@ -5629,6 +5673,7 @@
                                sc->sc_stats.ast_rx_phyerr++;
                                phyerr = rs->rs_phyerr & 0x1f;
                                sc->sc_stats.ast_rx_phy[phyerr]++;
+                               goto rx_next;
                        }
                        if (rs->rs_status & HAL_RXERR_DECRYPT) {
                                /*
@@ -5840,9 +5885,29 @@
                STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
                ATH_RXBUF_UNLOCK_IRQ(sc);
        } while (ath_rxbuf_init(sc, bf) == 0);
+       if (!early_stop) {
+               /* Check if more data is received while we were
+                * processing the descriptor chain.
+                */
+               ATH_DISABLE_INTR();
+               if (sc->sc_isr & HAL_INT_RX) {
+                       sc->sc_isr &= ~HAL_INT_RX;
+                       ATH_ENABLE_INTR();
+                       ath_uapsd_processtriggers(sc);
+                       goto process_rx_again;
+               }
+               netif_rx_complete(dev);
+
+               sc->sc_imask |= HAL_INT_RX;
+               ath_hal_intrset(ah, sc->sc_imask);
+               ATH_ENABLE_INTR();
+       }
+
+       *budget -= processed;
        
        /* rx signal state monitoring */
        ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
+       return early_stop;
 #undef PA2DESC
 }
 
@@ -6118,22 +6183,22 @@
                                }
                        }
 
-                       ATH_TXBUF_LOCK_IRQ(sc);                                 
+                       ATH_TXBUF_LOCK_BH(sc);                                  
                        bf = STAILQ_FIRST(&sc->sc_grppollbuf);
                        if (bf != NULL)
                                STAILQ_REMOVE_HEAD(&sc->sc_grppollbuf, bf_list);
                        else {
                                DPRINTF(sc, ATH_DEBUG_XMIT, "%s: No more TxBufs\n", __func__);
-                               ATH_TXBUF_UNLOCK_IRQ_EARLY(sc);
+                               ATH_TXBUF_UNLOCK_BH(sc);
                                return;
                        }
                        /* XXX use a counter and leave at least one for mgmt frames */
                        if (STAILQ_EMPTY(&sc->sc_grppollbuf)) {                         
                                DPRINTF(sc, ATH_DEBUG_XMIT, "%s: No more TxBufs left\n", __func__);
-                               ATH_TXBUF_UNLOCK_IRQ_EARLY(sc);
+                               ATH_TXBUF_UNLOCK_BH(sc);
                                return;
                        }                                       
-                       ATH_TXBUF_UNLOCK_IRQ(sc);
+                       ATH_TXBUF_UNLOCK_BH(sc);
 
                        bf->bf_skbaddr = bus_map_single(sc->sc_bdev,
                                skb->data, skb->len, BUS_DMA_TODEVICE);
@@ -6599,9 +6664,9 @@
                dev_kfree_skb(lastbuf->bf_skb);
                lastbuf->bf_skb = NULL;
                ieee80211_unref_node(&lastbuf->bf_node);
-               ATH_TXBUF_LOCK_IRQ(sc);
+               ATH_TXBUF_LOCK_BH(sc);
                STAILQ_INSERT_TAIL(&sc->sc_txbuf, lastbuf, bf_list);
-               ATH_TXBUF_UNLOCK_IRQ(sc);
+               ATH_TXBUF_UNLOCK_BH(sc);
                
                /*
                 *  move oldest from overflow to delivery
@@ -7426,9 +7491,6 @@
                        if (sc->sc_reapcount > ATH_TXBUF_FREE_THRESHOLD) {
                                if (!sc->sc_dfswait)
                                        netif_start_queue(sc->sc_dev);
-                               DPRINTF(sc, ATH_DEBUG_TX_PROC,
-                                       "%s: tx tasklet restart the queue\n",
-                                       __func__);
                                sc->sc_reapcount = 0;
                                sc->sc_devstopped = 0;
                        } else
@@ -7463,11 +7525,22 @@
        struct net_device *dev = (struct net_device *)data;
        struct ath_softc *sc = dev->priv;
 
+process_tx_again:
        if (txqactive(sc->sc_ah, 0))
                ath_tx_processq(sc, &sc->sc_txq[0]);
        if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
                ath_tx_processq(sc, sc->sc_cabq);
 
+       ATH_DISABLE_INTR();
+       if (sc->sc_isr & HAL_INT_TX) {
+               sc->sc_isr &= ~HAL_INT_TX;
+               ATH_ENABLE_INTR();
+               goto process_tx_again;
+       }
+       sc->sc_imask |= HAL_INT_TX;
+       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+       ATH_ENABLE_INTR();
+
        netif_wake_queue(dev);
 
        if (sc->sc_softled)
@@ -7484,6 +7557,7 @@
        struct net_device *dev = (struct net_device *)data;
        struct ath_softc *sc = dev->priv;
 
+process_tx_again:
        /*
         * Process each active queue.
         */
@@ -7504,6 +7578,16 @@
        if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum))
                ath_tx_processq(sc, sc->sc_uapsdq);
 
+       ATH_DISABLE_INTR();
+       if (sc->sc_isr & HAL_INT_TX) {
+               sc->sc_isr &= ~HAL_INT_TX;
+               ATH_ENABLE_INTR();
+               goto process_tx_again;
+       }
+       sc->sc_imask |= HAL_INT_TX;
+       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+       ATH_ENABLE_INTR();
+
        netif_wake_queue(dev);
 
        if (sc->sc_softled)
@@ -7521,6 +7605,7 @@
        unsigned int i;
 
        /* Process each active queue. */
+process_tx_again:
        for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
                        ath_tx_processq(sc, &sc->sc_txq[i]);
@@ -7529,6 +7614,16 @@
                ath_tx_processq(sc, sc->sc_xrtxq);
 #endif
 
+       ATH_DISABLE_INTR();
+       if (sc->sc_isr & HAL_INT_TX) {
+               sc->sc_isr &= ~HAL_INT_TX;
+               ATH_ENABLE_INTR();
+               goto process_tx_again;
+       }
+       sc->sc_imask |= HAL_INT_TX;
+       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+       ATH_ENABLE_INTR();
+
       netif_wake_queue(dev);
 
        if (sc->sc_softled)
@@ -7627,6 +7722,7 @@
 ath_draintxq(struct ath_softc *sc)
 {
        struct ath_hal *ah = sc->sc_ah;
+       int npend = 0;
        unsigned int i;
 
        /* XXX return value */
@@ -9160,9 +9256,9 @@
        dev->mtu = mtu;
        if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
                /* NB: the rx buffers may need to be reallocated */
-               tasklet_disable(&sc->sc_rxtq);
+               netif_poll_disable(dev);
                error = ath_reset(dev);
-               tasklet_enable(&sc->sc_rxtq);
+               netif_poll_enable(dev);
        }
        ATH_UNLOCK(sc);
 
Only in madwifi.dev/ath: if_ath.c.orig
diff -ur madwifi.old/ath/if_athvar.h madwifi.dev/ath/if_athvar.h
--- madwifi.old/ath/if_athvar.h 2007-05-31 06:48:28.555526400 +0200
+++ madwifi.dev/ath/if_athvar.h 2007-05-31 06:48:57.847073408 +0200
@@ -48,6 +48,10 @@
 #include "if_athioctl.h"
 #include "net80211/ieee80211.h"                /* XXX for WME_NUM_AC */
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define irqs_disabled()                        0
+#endif
+
 /*
  * Deduce if tasklets are available.  If not then
  * fall back to using the immediate work queue.
@@ -478,8 +482,12 @@
 #define        ATH_TXQ_LOCK_DESTROY(_tq)       
 #define        ATH_TXQ_LOCK(_tq)               spin_lock(&(_tq)->axq_lock)
 #define        ATH_TXQ_UNLOCK(_tq)             spin_unlock(&(_tq)->axq_lock)
-#define        ATH_TXQ_LOCK_BH(_tq)            spin_lock_bh(&(_tq)->axq_lock)
-#define        ATH_TXQ_UNLOCK_BH(_tq)          spin_unlock_bh(&(_tq)->axq_lock)
+#define        ATH_TXQ_LOCK_BH(_tq) \
+       if (!irqs_disabled()) \
+               spin_lock_bh(&(_tq)->axq_lock)
+#define        ATH_TXQ_UNLOCK_BH(_tq) \
+       if (!irqs_disabled()) \
+               spin_unlock_bh(&(_tq)->axq_lock)
 #define ATH_TXQ_LOCK_IRQ(_tq)          do {    \
        unsigned long __axq_lockflags;          \
        spin_lock_irqsave(&(_tq)->axq_lock, __axq_lockflags);
@@ -624,7 +632,6 @@
        struct ath_buf *sc_rxbufcur;            /* current rx buffer */
        u_int32_t *sc_rxlink;                   /* link ptr in last RX desc */
        spinlock_t sc_rxbuflock; 
-       struct ATH_TQ_STRUCT sc_rxtq;           /* rx intr tasklet */
        struct ATH_TQ_STRUCT sc_rxorntq;        /* rxorn intr tasklet */
        u_int8_t sc_defant;                     /* current default antenna */
        u_int8_t sc_rxotherant;                 /* rx's on non-default antenna*/
@@ -637,6 +644,7 @@
        u_int sc_txintrperiod;                  /* tx interrupt batching */
        struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
        struct ath_txq *sc_ac2q[WME_NUM_AC];    /* WME AC -> h/w qnum */ 
+       HAL_INT sc_isr;                         /* unmasked ISR state */
        struct ATH_TQ_STRUCT sc_txtq;           /* tx intr tasklet */
        u_int8_t sc_grppoll_str[GRPPOLL_RATE_STR_LEN];  
        struct ath_descdma sc_bdma;             /* beacon descriptors */
@@ -703,8 +711,12 @@
 #define        ATH_TXBUF_LOCK_DESTROY(_sc)
 #define        ATH_TXBUF_LOCK(_sc)             spin_lock(&(_sc)->sc_txbuflock)
 #define        ATH_TXBUF_UNLOCK(_sc)           spin_unlock(&(_sc)->sc_txbuflock)
-#define        ATH_TXBUF_LOCK_BH(_sc)          spin_lock_bh(&(_sc)->sc_txbuflock)
-#define        ATH_TXBUF_UNLOCK_BH(_sc)        spin_unlock_bh(&(_sc)->sc_txbuflock)
+#define        ATH_TXBUF_LOCK_BH(_sc) \
+       if (!irqs_disabled()) \
+               spin_lock_bh(&(_sc)->sc_txbuflock)
+#define        ATH_TXBUF_UNLOCK_BH(_sc) \
+       if (!irqs_disabled()) \
+               spin_unlock_bh(&(_sc)->sc_txbuflock)
 #define        ATH_TXBUF_LOCK_IRQ(_sc)         do {    \
        unsigned long __txbuflockflags;         \
        spin_lock_irqsave(&(_sc)->sc_txbuflock, __txbuflockflags);
@@ -722,8 +734,12 @@
 #define        ATH_RXBUF_LOCK_DESTROY(_sc)
 #define        ATH_RXBUF_LOCK(_sc)             spin_lock(&(_sc)->sc_rxbuflock)
 #define        ATH_RXBUF_UNLOCK(_sc)           spin_unlock(&(_sc)->sc_rxbuflock)
-#define        ATH_RXBUF_LOCK_BH(_sc)          spin_lock_bh(&(_sc)->sc_rxbuflock)
-#define        ATH_RXBUF_UNLOCK_BH(_sc)        spin_unlock_bh(&(_sc)->sc_rxbuflock)
+#define        ATH_RXBUF_LOCK_BH(_sc) \
+       if (!irqs_disabled()) \
+               spin_lock_bh(&(_sc)->sc_rxbuflock)
+#define        ATH_RXBUF_UNLOCK_BH(_sc) \
+       if (!irqs_disabled()) \
+               spin_unlock_bh(&(_sc)->sc_rxbuflock)
 #define        ATH_RXBUF_LOCK_IRQ(_sc)         do {    \
        unsigned long __rxbuflockflags;         \
        spin_lock_irqsave(&(_sc)->sc_rxbuflock, __rxbuflockflags);
@@ -733,6 +749,8 @@
 #define        ATH_RXBUF_UNLOCK_IRQ_EARLY(_sc)         \
        spin_unlock_irqrestore(&(_sc)->sc_rxbuflock, __rxbuflockflags);
 
+#define ATH_DISABLE_INTR               local_irq_disable
+#define ATH_ENABLE_INTR                local_irq_enable
 
 /* Protects the device from concurrent accesses */
 #define        ATH_LOCK_INIT(_sc)              init_MUTEX(&(_sc)->sc_lock)
Only in madwifi.dev/ath: if_athvar.h.orig
diff -ur madwifi.old/net80211/ieee80211_beacon.c madwifi.dev/net80211/ieee80211_beacon.c
--- madwifi.old/net80211/ieee80211_beacon.c     2007-05-31 06:48:28.555526400 +0200
+++ madwifi.dev/net80211/ieee80211_beacon.c     2007-05-31 06:48:57.847073408 +0200
@@ -286,7 +286,7 @@
        int len_changed = 0;
        u_int16_t capinfo;
 
-       IEEE80211_LOCK(ic);
+       IEEE80211_BEACON_LOCK(ic);
 
        if ((ic->ic_flags & IEEE80211_F_DOTH) &&
            (vap->iv_flags & IEEE80211_F_CHANSWITCH) &&
@@ -547,7 +547,7 @@
                vap->iv_flags_ext &= ~IEEE80211_FEXT_APPIE_UPDATE;
        }
 
-       IEEE80211_UNLOCK(ic);
+       IEEE80211_BEACON_UNLOCK(ic);
 
        return len_changed;
 }
diff -ur madwifi.old/net80211/ieee80211_input.c madwifi.dev/net80211/ieee80211_input.c
--- madwifi.old/net80211/ieee80211_input.c      2007-05-31 06:48:28.557526096 +0200
+++ madwifi.dev/net80211/ieee80211_input.c      2007-05-31 06:48:57.849073104 +0200
@@ -1169,8 +1169,9 @@
                if (ni->ni_vlan != 0 && vap->iv_vlgrp != NULL) {
                        /* attach vlan tag */
                        vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan);
-               } else
-                       netif_rx(skb);
+               } else {
+                       netif_receive_skb(skb);
+               }
                dev->last_rx = jiffies;
        }
 }
@@ -3675,9 +3676,9 @@
        }
 
        /* Okay, take the first queued packet and put it out... */
-       IEEE80211_NODE_SAVEQ_LOCK(ni);
+       IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
        IEEE80211_NODE_SAVEQ_DEQUEUE(ni, skb, qlen);
-       IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+       IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
        if (skb == NULL) {
                IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_POWER, wh->i_addr2,
                        "%s", "recv ps-poll, but queue empty");
Only in madwifi.dev/net80211: ieee80211_input.c.orig
diff -ur madwifi.old/net80211/ieee80211_linux.h madwifi.dev/net80211/ieee80211_linux.h
--- madwifi.old/net80211/ieee80211_linux.h      2007-05-24 19:31:37.000000000 +0200
+++ madwifi.dev/net80211/ieee80211_linux.h      2007-05-31 06:48:57.849073104 +0200
@@ -31,6 +31,10 @@
 
 #include <linux/wireless.h>
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define irqs_disabled()                        0
+#endif
+
 /*
  * Task deferral
  *
@@ -86,8 +90,12 @@
 } while (0)
 #define        IEEE80211_UNLOCK_IRQ_EARLY(_ic)                                 \
        spin_unlock_irqrestore(&(_ic)->ic_comlock, __ilockflags);
-#define IEEE80211_LOCK_BH(_ic) spin_lock_bh(&(_ic)->ic_comlock)
-#define IEEE80211_UNLOCK_BH(_ic) spin_unlock_bh(&(_ic)->ic_comlock)
+#define IEEE80211_LOCK_BH(_ic) \
+       if (!irqs_disabled()) \
+               spin_lock_bh(&(_ic)->ic_comlock)
+#define IEEE80211_UNLOCK_BH(_ic) \
+       if (!irqs_disabled()) \
+               spin_unlock_bh(&(_ic)->ic_comlock)
 #define IEEE80211_LOCK(_ic)    spin_lock(&(_ic)->ic_comlock)
 #define IEEE80211_UNLOCK(_ic)  spin_unlock(&(_ic)->ic_comlock)
 
@@ -104,15 +112,22 @@
 #define IEEE80211_VAPS_LOCK_DESTROY(_ic)
 #define IEEE80211_VAPS_LOCK(_ic)       spin_lock(&(_ic)->ic_vapslock);
 #define IEEE80211_VAPS_UNLOCK(_ic)     spin_unlock(&(_ic)->ic_vapslock);
-#define IEEE80211_VAPS_LOCK_BH(_ic)    spin_lock_bh(&(_ic)->ic_vapslock);
-#define IEEE80211_VAPS_UNLOCK_BH(_ic)  spin_unlock_bh(&(_ic)->ic_vapslock);
-#define IEEE80211_VAPS_LOCK_IRQ(_ic)   do {    \
-       int _vaps_lockflags;                    \
-       spin_lock_irqsave(&(_ic)->ic_vapslock, _vaps_lockflags);
-#define IEEE80211_VAPS_UNLOCK_IRQ(_ic) \
-       spin_unlock_irqrestore(&(_ic)->ic_vapslock, _vaps_lockflags); \
-} while (0)
-#define IEEE80211_VAPS_UNLOCK_IRQ_EARLY(_ic)   spin_unlock_irqrestore(&(_ic)->ic_vapslock, _vaps_lockflags)
+#define IEEE80211_VAPS_LOCK_BH(_ic) \
+       if (!irqs_disabled()) \
+               spin_lock_bh(&(_ic)->ic_vapslock);
+#define IEEE80211_VAPS_UNLOCK_BH(_ic) \
+       if (!irqs_disabled()) \
+               spin_unlock_bh(&(_ic)->ic_vapslock);
+#define IEEE80211_VAPS_LOCK_IRQ(_ic) do {                              \
+       unsigned long __vlockflags=0;                           \
+       unsigned int __vlocked=0;                               \
+       __vlocked=spin_is_locked(&(_ic)->ic_vapslock);          \
+       if(!__vlocked) spin_lock_irqsave(&(_ic)->ic_vapslock, __vlockflags);
+#define IEEE80211_VAPS_UNLOCK_IRQ(_ic)                                 \
+       if(!__vlocked) spin_unlock_irqrestore(&(_ic)->ic_vapslock, __vlockflags);       \
+} while (0);
+#define IEEE80211_VAPS_UNLOCK_IRQ_EARLY(_ic) \
+       if (!__vlocked) spin_unlock_irqrestore(&(_ic)->ic_vapslock, __vlockflags)
 
 #if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
 #define IEEE80211_VAPS_LOCK_ASSERT(_ic) \
@@ -122,6 +137,11 @@
 #define IEEE80211_VAPS_LOCK_ASSERT(_ic)
 #endif
 
+/*
+ * Beacon locking definitions; piggyback on com lock.
+ */
+#define        IEEE80211_BEACON_LOCK(_ic)         IEEE80211_LOCK_IRQ(_ic)
+#define        IEEE80211_BEACON_UNLOCK(_ic)       IEEE80211_UNLOCK_IRQ(_ic)
 
 /*
  * Node locking definitions.
@@ -191,8 +211,12 @@
 typedef spinlock_t ieee80211_scan_lock_t;
 #define        IEEE80211_SCAN_LOCK_INIT(_nt, _name) spin_lock_init(&(_nt)->nt_scanlock)
 #define        IEEE80211_SCAN_LOCK_DESTROY(_nt)
-#define        IEEE80211_SCAN_LOCK_BH(_nt)     spin_lock_bh(&(_nt)->nt_scanlock)
-#define        IEEE80211_SCAN_UNLOCK_BH(_nt)   spin_unlock_bh(&(_nt)->nt_scanlock)
+#define        IEEE80211_SCAN_LOCK_BH(_nt) \
+       if (!irqs_disabled()) \
+               spin_lock_bh(&(_nt)->nt_scanlock)
+#define        IEEE80211_SCAN_UNLOCK_BH(_nt) \
+       if (!irqs_disabled()) \
+               spin_unlock_bh(&(_nt)->nt_scanlock)
 #define        IEEE80211_SCAN_LOCK_IRQ(_nt)    do {    \
        unsigned long __scan_lockflags;         \
        spin_lock_irqsave(&(_nt)->nt_scanlock, __scan_lockflags);
@@ -217,8 +241,12 @@
 #define        ACL_LOCK_DESTROY(_as)
 #define        ACL_LOCK(_as)                   spin_lock(&(_as)->as_lock)
 #define        ACL_UNLOCK(_as)                 spin_unlock(&(_as)->as_lock)
-#define        ACL_LOCK_BH(_as)                spin_lock_bh(&(_as)->as_lock)
-#define        ACL_UNLOCK_BH(_as)              spin_unlock_bh(&(_as)->as_lock)
+#define        ACL_LOCK_BH(_as) \
+       if (!irqs_disabled()) \
+               spin_lock_bh(&(_as)->as_lock)
+#define        ACL_UNLOCK_BH(_as) \
+       if (!irqs_disabled()) \
+               spin_unlock_bh(&(_as)->as_lock)
 
 #if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
 #define        ACL_LOCK_ASSERT(_as) \
diff -ur madwifi.old/net80211/ieee80211_node.c madwifi.dev/net80211/ieee80211_node.c
--- madwifi.old/net80211/ieee80211_node.c       2007-05-31 06:48:28.558525944 +0200
+++ madwifi.dev/net80211/ieee80211_node.c       2007-05-31 06:48:57.868070216 +0200
@@ -1584,7 +1584,7 @@
        struct ieee80211_node *ni;
        u_int gen;
 
-       IEEE80211_SCAN_LOCK_IRQ(nt);
+       IEEE80211_SCAN_LOCK_BH(nt);
        gen = ++nt->nt_scangen;
        
restart:
@@ -1604,7 +1604,7 @@
        }
        IEEE80211_NODE_TABLE_UNLOCK_IRQ(nt);
 
-       IEEE80211_SCAN_UNLOCK_IRQ(nt);
+       IEEE80211_SCAN_UNLOCK_BH(nt);
 }
 EXPORT_SYMBOL(ieee80211_iterate_dev_nodes);
 
Only in madwifi.dev/net80211: ieee80211_node.c.orig
diff -ur madwifi.old/net80211/ieee80211_power.c madwifi.dev/net80211/ieee80211_power.c
--- madwifi.old/net80211/ieee80211_power.c      2007-05-24 19:31:37.000000000 +0200
+++ madwifi.dev/net80211/ieee80211_power.c      2007-05-31 06:48:57.868070216 +0200
@@ -147,7 +147,7 @@
 #endif
                struct sk_buff *skb;
 
-               IEEE80211_NODE_SAVEQ_LOCK(ni);
+               IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
                while ((skb = skb_peek(&ni->ni_savedq)) != NULL &&
                     M_AGE_GET(skb) < IEEE80211_INACT_WAIT) {
                        IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
@@ -159,7 +159,7 @@
                }
                if (skb != NULL)
                        M_AGE_SUB(skb, IEEE80211_INACT_WAIT);
-               IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+               IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
 
                IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
                        "discard %u frames for age", discard);
@@ -185,7 +185,7 @@
        KASSERT(aid < vap->iv_max_aid,
                ("bogus aid %u, max %u", aid, vap->iv_max_aid));
 
-       IEEE80211_LOCK(ni->ni_ic);
+       IEEE80211_BEACON_LOCK(ni->ni_ic);
        if (set != (isset(vap->iv_tim_bitmap, aid) != 0)) {
                if (set) {
                        setbit(vap->iv_tim_bitmap, aid);
@@ -196,7 +196,7 @@
                }
                vap->iv_flags |= IEEE80211_F_TIMUPDATE;
        }
-       IEEE80211_UNLOCK(ni->ni_ic);
+       IEEE80211_BEACON_UNLOCK(ni->ni_ic);
 }
 
 /*
@@ -295,9 +295,9 @@
                        struct sk_buff *skb;
                        int qlen;
 
-                       IEEE80211_NODE_SAVEQ_LOCK(ni);
+                       IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
                        IEEE80211_NODE_SAVEQ_DEQUEUE(ni, skb, qlen);
-                       IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+                       IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
                        if (skb == NULL)
                                break;
                        /* 
@@ -362,9 +362,9 @@
                        for (;;) {
                                struct sk_buff *skb;
 
-                               IEEE80211_NODE_SAVEQ_LOCK(ni);
+                               IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
                                skb = __skb_dequeue(&ni->ni_savedq);
-                               IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+                               IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
                                if (skb == NULL)
                                        break;
                                ieee80211_parent_queue_xmit(skb);
diff -ur madwifi.old/net80211/ieee80211_proto.c madwifi.dev/net80211/ieee80211_proto.c
--- madwifi.old/net80211/ieee80211_proto.c      2007-05-31 06:48:28.564525032 +0200
+++ madwifi.dev/net80211/ieee80211_proto.c      2007-05-31 06:48:57.869070064 +0200
@@ -635,9 +635,9 @@
 {
        struct ieee80211com *ic = vap->iv_ic;
 
-       IEEE80211_LOCK(ic);
+       IEEE80211_BEACON_LOCK(ic);
        ieee80211_wme_initparams_locked(vap);
-       IEEE80211_UNLOCK(ic);
+       IEEE80211_BEACON_UNLOCK(ic);
 }
 
 void
@@ -920,9 +920,9 @@
        struct ieee80211com *ic = vap->iv_ic;
 
        if (ic->ic_caps & IEEE80211_C_WME) {
-               IEEE80211_LOCK(ic);
+               IEEE80211_BEACON_LOCK(ic);
                ieee80211_wme_updateparams_locked(vap);
-               IEEE80211_UNLOCK(ic);
+               IEEE80211_BEACON_UNLOCK(ic);
        }
 }
 
diff -ur madwifi.old/net80211/ieee80211_scan_sta.c madwifi.dev/net80211/ieee80211_scan_sta.c
--- madwifi.old/net80211/ieee80211_scan_sta.c   2007-05-21 17:53:39.000000000 +0200
+++ madwifi.dev/net80211/ieee80211_scan_sta.c   2007-05-31 06:48:57.870069912 +0200
@@ -163,9 +163,11 @@
 {
        struct sta_table *st = ss->ss_priv;
 
-       spin_lock(&st->st_lock);
+       if (!irqs_disabled())
+               spin_lock_bh(&st->st_lock);
        sta_flush_table(st);
-       spin_unlock(&st->st_lock);
+       if (!irqs_disabled())
+               spin_unlock_bh(&st->st_lock);
        ss->ss_last = 0;
        return 0;
 }
@@ -215,7 +217,8 @@
        int hash;
 
        hash = STA_HASH(macaddr);
-       spin_lock(&st->st_lock);  
+       if (!irqs_disabled())
+               spin_lock_bh(&st->st_lock);  
        LIST_FOREACH(se, &st->st_hash[hash], se_hash)
                if (IEEE80211_ADDR_EQ(se->base.se_macaddr, macaddr) &&
                    sp->ssid[1] == se->base.se_ssid[1] && 
@@ -225,7 +228,7 @@
        MALLOC(se, struct sta_entry *, sizeof(struct sta_entry),
                M_80211_SCAN, M_NOWAIT | M_ZERO);
        if (se == NULL) {
-               spin_unlock(&st->st_lock);
+               spin_unlock_bh(&st->st_lock);
                return 0;
        }
        se->se_scangen = st->st_scangen-1;
@@ -287,7 +290,8 @@
        se->se_seen = 1;
        se->se_notseen = 0;
 
-       spin_unlock(&st->st_lock);
+       if (!irqs_disabled())
+               spin_unlock_bh(&st->st_lock);
 
        /*
        * If looking for a quick choice and nothing's
@@ -1063,7 +1067,8 @@
        u_int gen;
        int res = 0;
 
-       spin_lock(&st->st_scanlock);
+       if (!irqs_disabled())
+               spin_lock_bh(&st->st_scanlock);
        gen = st->st_scangen++;
restart:
        spin_lock(&st->st_lock);
@@ -1086,7 +1091,8 @@
        spin_unlock(&st->st_lock);
 
  done:
-       spin_unlock(&st->st_scanlock);
+       if (!irqs_disabled())
+               spin_unlock_bh(&st->st_scanlock);
 
        return res;
 }
@@ -1235,7 +1241,8 @@
        bestchan = NULL;
        bestrssi = -1;
 
-       spin_lock(&st->st_lock);
+       if (!irqs_disabled())
+               spin_lock_bh(&st->st_lock);
        for (i = 0; i < ss->ss_last; i++) {
                c = ss->ss_chans[i];
                maxrssi = 0;
@@ -1248,7 +1255,8 @@
                if (bestchan == NULL || maxrssi < bestrssi)
                        bestchan = c;
        }
-       spin_unlock(&st->st_lock);
+       if (!irqs_disabled())
+               spin_unlock_bh(&st->st_lock);
 
        return bestchan;
 }