/*
 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <net/mac80211.h>
#include <linux/sched.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"

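/* Each TX frame occupies one pre-allocated queue item. An item lives on
 * exactly one of three lists: the free pool, the queue of frames waiting
 * for transmission, or the list of frames pending in the firmware.
 * queue_timestamp records when the frame was queued, xmit_timestamp when
 * it was fetched for transmission. generation counts how many times the
 * slot has been reused, so stale packet IDs can be detected.
 */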
/* private */ struct cw1200_queue_item {
	struct list_head	head;
	struct sk_buff		*skb;
	u32			packet_id;
	unsigned long		queue_timestamp;
	unsigned long		xmit_timestamp;
	struct cw1200_txpriv	txpriv;
	u8			generation;
};

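/* Reference-counted TX flow control. The first lock stops the matching
 * mac80211 queue, the last unlock wakes it again. Both helpers expect
 * queue->lock to be held by the caller.
 */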
static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	if (queue->tx_locked_cnt++ == 0) {
		pr_debug("[TX] Queue %d is locked.\n",
			 queue->queue_id);
		ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
	}
}

static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	BUG_ON(!queue->tx_locked_cnt);
	if (--queue->tx_locked_cnt == 0) {
		pr_debug("[TX] Queue %d is unlocked.\n",
			 queue->queue_id);
		ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
	}
}

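/* A packet ID encodes everything needed to find and validate a queue item:
 * bits 31:24 queue generation, 23:16 queue ID, 15:8 item generation,
 * 7:0 item index into the pool. The two generation fields let the driver
 * reject IDs that refer to a cleared queue or a reused slot.
 */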
static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
					 u8 *queue_id, u8 *item_generation,
					 u8 *item_id)
{
	*item_id		= (packet_id >>  0) & 0xFF;
	*item_generation	= (packet_id >>  8) & 0xFF;
	*queue_id		= (packet_id >> 16) & 0xFF;
	*queue_generation	= (packet_id >> 24) & 0xFF;
}

static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
					    u8 item_generation, u8 item_id)
{
	return ((u32)item_id << 0) |
		((u32)item_generation << 8) |
		((u32)queue_id << 16) |
		((u32)queue_generation << 24);
}

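/* Garbage collection runs in two phases: items to be dropped are copied
 * onto a private list under queue->lock, and the skb destructor is only
 * invoked by cw1200_queue_post_gc() after the lock has been released.
 */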
static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
				 struct list_head *gc_list)
{
	struct cw1200_queue_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, gc_list, head) {
		list_del(&item->head);
		stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
		kfree(item);
	}
}

static void cw1200_queue_register_post_gc(struct list_head *gc_list,
					  struct cw1200_queue_item *item)
{
	struct cw1200_queue_item *gc_item;
	gc_item = kmalloc(sizeof(struct cw1200_queue_item),
			GFP_ATOMIC);
	BUG_ON(!gc_item);
	memcpy(gc_item, item, sizeof(struct cw1200_queue_item));
	list_add_tail(&gc_item->head, gc_list);
}

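/* Drop frames that have exceeded their TTL. The queue is ordered by
 * enqueue time, so the scan stops at the first unexpired item. Expired
 * items are collected on @head for destruction after the lock is
 * dropped. While the queue remains overfull the timer is re-armed for
 * the next expiry; once it drains to half capacity the overfull state
 * is cleared and, if @unlock is set, TX is woken. Called with
 * queue->lock held.
 */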
static void __cw1200_queue_gc(struct cw1200_queue *queue,
			      struct list_head *head,
			      bool unlock)
{
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item = NULL, *tmp;
	bool wakeup_stats = false;

	list_for_each_entry_safe(item, tmp, &queue->queue, head) {
		if (jiffies - item->queue_timestamp < queue->ttl)
			break;
		--queue->num_queued;
		--queue->link_map_cache[item->txpriv.link_id];
		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
		cw1200_debug_tx_ttl(stats->priv);
		cw1200_queue_register_post_gc(head, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}

	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);

	if (queue->overfull) {
		if (queue->num_queued <= (queue->capacity >> 1)) {
			queue->overfull = false;
			if (unlock)
				__cw1200_queue_unlock(queue);
		} else if (item) {
			unsigned long tmo = item->queue_timestamp + queue->ttl;
			mod_timer(&queue->gc, tmo);
			cw1200_pm_stay_awake(&stats->priv->pm_state,
					     tmo - jiffies);
		}
	}
}

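/* Timer callback; the queue pointer is passed via timer->data. */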
static void cw1200_queue_gc(unsigned long arg)
{
	LIST_HEAD(list);
	struct cw1200_queue *queue =
		(struct cw1200_queue *)arg;

	spin_lock_bh(&queue->lock);
	__cw1200_queue_gc(queue, &list, true);
	spin_unlock_bh(&queue->lock);
	cw1200_queue_post_gc(queue->stats, &list);
}

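/* Initialize the counters shared by all TX queues. link_map_cache[]
 * tracks the number of queued frames per link ID across every queue.
 */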
int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
			    size_t map_capacity,
			    cw1200_queue_skb_dtor_t skb_dtor,
			    struct cw1200_common *priv)
{
	memset(stats, 0, sizeof(*stats));
	stats->map_capacity = map_capacity;
	stats->skb_dtor = skb_dtor;
	stats->priv = priv;
	spin_lock_init(&stats->lock);
	init_waitqueue_head(&stats->wait_link_id_empty);

	stats->link_map_cache = kzalloc(sizeof(int) * map_capacity,
					GFP_KERNEL);
	if (!stats->link_map_cache)
		return -ENOMEM;

	return 0;
}

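/* Set up one TX queue: a pool of @capacity items, all starting on the
 * free list, plus a per-link counter map. @ttl is measured in jiffies.
 */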
int cw1200_queue_init(struct cw1200_queue *queue,
		      struct cw1200_queue_stats *stats,
		      u8 queue_id,
		      size_t capacity,
		      unsigned long ttl)
{
	size_t i;

	memset(queue, 0, sizeof(*queue));
	queue->stats = stats;
	queue->capacity = capacity;
	queue->queue_id = queue_id;
	queue->ttl = ttl;
	INIT_LIST_HEAD(&queue->queue);
	INIT_LIST_HEAD(&queue->pending);
	INIT_LIST_HEAD(&queue->free_pool);
	spin_lock_init(&queue->lock);
	init_timer(&queue->gc);
	queue->gc.data = (unsigned long)queue;
	queue->gc.function = cw1200_queue_gc;

	queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity,
			GFP_KERNEL);
	if (!queue->pool)
		return -ENOMEM;

	queue->link_map_cache = kzalloc(sizeof(int) * stats->map_capacity,
			GFP_KERNEL);
	if (!queue->link_map_cache) {
		kfree(queue->pool);
		queue->pool = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < capacity; ++i)
		list_add_tail(&queue->pool[i].head, &queue->free_pool);

	return 0;
}

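/* Drop every queued and pending frame. Bumping queue->generation
 * invalidates all outstanding packet IDs, so later requeue/remove calls
 * for those frames fail with -ENOENT instead of touching reused slots.
 */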
int cw1200_queue_clear(struct cw1200_queue *queue)
{
	int i;
	LIST_HEAD(gc_list);
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item, *tmp;

	spin_lock_bh(&queue->lock);
	queue->generation++;
	list_splice_tail_init(&queue->queue, &queue->pending);
	list_for_each_entry_safe(item, tmp, &queue->pending, head) {
		WARN_ON(!item->skb);
		cw1200_queue_register_post_gc(&gc_list, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}
	queue->num_queued = 0;
	queue->num_pending = 0;

	spin_lock_bh(&stats->lock);
	for (i = 0; i < stats->map_capacity; ++i) {
		stats->num_queued -= queue->link_map_cache[i];
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	spin_unlock_bh(&stats->lock);
	if (queue->overfull) {
		queue->overfull = false;
		__cw1200_queue_unlock(queue);
	}
	spin_unlock_bh(&queue->lock);
	wake_up(&stats->wait_link_id_empty);
	cw1200_queue_post_gc(stats, &gc_list);
	return 0;
}

void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
{
	kfree(stats->link_map_cache);
	stats->link_map_cache = NULL;
}

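/* Flush remaining frames and stop the GC timer before the queue memory
 * is released.
 */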
void cw1200_queue_deinit(struct cw1200_queue *queue)
{
	cw1200_queue_clear(queue);
	del_timer_sync(&queue->gc);
	INIT_LIST_HEAD(&queue->free_pool);
	kfree(queue->pool);
	kfree(queue->link_map_cache);
	queue->pool = NULL;
	queue->link_map_cache = NULL;
	queue->capacity = 0;
}

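/* Count frames waiting for transmission on the links in @link_id_map.
 * An all-ones map is a wildcard and uses the cheap global counters;
 * pending frames are excluded either way, since link_map_cache[] is
 * decremented as soon as a frame moves to the pending list.
 */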
size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
				   u32 link_id_map)
{
	size_t ret;
	int i, bit;
	size_t map_capacity = queue->stats->map_capacity;

	if (!link_id_map)
		return 0;

	spin_lock_bh(&queue->lock);
	if (link_id_map == (u32)-1) {
		ret = queue->num_queued - queue->num_pending;
	} else {
		ret = 0;
		for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
			if (link_id_map & bit)
				ret += queue->link_map_cache[i];
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

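/* Enqueue a frame. A fresh packet ID is stamped into the item, and the
 * queue is marked overfull when it comes within num_present_cpus() - 1
 * slots of capacity: TX is then locked and the GC timer fired at once.
 * Returns -ENOENT if the free pool is unexpectedly empty.
 */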
int cw1200_queue_put(struct cw1200_queue *queue,
		     struct sk_buff *skb,
		     struct cw1200_txpriv *txpriv)
{
	int ret = 0;
	LIST_HEAD(gc_list);
	struct cw1200_queue_stats *stats = queue->stats;

	if (txpriv->link_id >= queue->stats->map_capacity)
		return -EINVAL;

	spin_lock_bh(&queue->lock);
	if (!WARN_ON(list_empty(&queue->free_pool))) {
		struct cw1200_queue_item *item = list_first_entry(
			&queue->free_pool, struct cw1200_queue_item, head);
		BUG_ON(item->skb);

		list_move_tail(&item->head, &queue->queue);
		item->skb = skb;
		item->txpriv = *txpriv;
		item->generation = 0;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		item->queue_timestamp = jiffies;

		++queue->num_queued;
		++queue->link_map_cache[txpriv->link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[txpriv->link_id];
		spin_unlock_bh(&stats->lock);

		/* TX may happen in parallel sometimes.
		 * Leave extra queue slots so we don't overflow.
		 */
		if (!queue->overfull &&
		    queue->num_queued >=
		    (queue->capacity - (num_present_cpus() - 1))) {
			queue->overfull = true;
			__cw1200_queue_lock(queue);
			mod_timer(&queue->gc, jiffies);
		}
	} else {
		ret = -ENOENT;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

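/* Fetch the oldest queued frame for any link in @link_id_map and move it
 * to the pending list. On success the WSM header, mac80211 TX info and
 * driver txpriv are returned, the packet ID is written into the WSM TX
 * request, and the transmit timestamp is recorded.
 */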
int cw1200_queue_get(struct cw1200_queue *queue,
		     u32 link_id_map,
		     struct wsm_tx **tx,
		     struct ieee80211_tx_info **tx_info,
		     const struct cw1200_txpriv **txpriv)
{
	int ret = -ENOENT;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	bool wakeup_stats = false;

	spin_lock_bh(&queue->lock);
	list_for_each_entry(item, &queue->queue, head) {
		if (link_id_map & BIT(item->txpriv.link_id)) {
			ret = 0;
			break;
		}
	}

	if (!WARN_ON(ret)) {
		*tx = (struct wsm_tx *)item->skb->data;
		*tx_info = IEEE80211_SKB_CB(item->skb);
		*txpriv = &item->txpriv;
		(*tx)->packet_id = item->packet_id;
		list_move_tail(&item->head, &queue->pending);
		++queue->num_pending;
		--queue->link_map_cache[item->txpriv.link_id];
		item->xmit_timestamp = jiffies;

		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
	}
	spin_unlock_bh(&queue->lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	return ret;
}

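/* Return a single pending frame, identified by @packet_id, to the TX
 * queue. The item generation is bumped and a new packet ID encoded, so
 * the old ID can no longer match the slot.
 */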
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		item->generation = ++item_generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
							    queue_id,
							    item_generation,
							    item_id);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

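/* Return every pending frame to the TX queue. Walking the pending list
 * in reverse while inserting at the queue head keeps the original
 * ordering and places requeued (older) frames ahead of newly queued
 * ones.
 */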
int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
	struct cw1200_queue_item *item, *tmp;
	struct cw1200_queue_stats *stats = queue->stats;
	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		++item->generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);

	return 0;
}

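/* Release a pending frame after the firmware has confirmed it. The
 * packet ID is validated against both generations before the slot is
 * recycled; the skb destructor runs after the queue lock is dropped.
 */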
int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	struct sk_buff *gc_skb = NULL;
	struct cw1200_txpriv gc_txpriv;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		gc_txpriv = item->txpriv;
		gc_skb = item->skb;
		item->skb = NULL;
		--queue->num_pending;
		--queue->num_queued;
		++queue->num_sent;
		++item->generation;
		/* Use list_move rather than list_move_tail so the
		 * just-freed slot is reused first while its cache line
		 * is still warm.
		 */
		list_move(&item->head, &queue->free_pool);

		if (queue->overfull &&
		    (queue->num_queued <= (queue->capacity >> 1))) {
			queue->overfull = false;
			__cw1200_queue_unlock(queue);
		}
	}
	spin_unlock_bh(&queue->lock);

	if (gc_skb)
		stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);

	return ret;
}

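/* Look up the skb and txpriv of a pending frame by packet ID without
 * dequeuing it, applying the same generation checks as requeue/remove.
 */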
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
			 struct sk_buff **skb,
			 const struct cw1200_txpriv **txpriv)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		*skb = item->skb;
		*txpriv = &item->txpriv;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

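/* Externally visible lock/unlock, taking queue->lock around the counted
 * helpers above.
 */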
void cw1200_queue_lock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_lock(queue);
	spin_unlock_bh(&queue->lock);
}

void cw1200_queue_unlock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_unlock(queue);
	spin_unlock_bh(&queue->lock);
}

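/* Report whether any frames are pending, and lower *timestamp to the
 * oldest transmit time among them, excluding @pending_frame_id. This
 * lets a caller detect frames that have been stuck in the firmware for
 * too long.
 */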
bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
				     unsigned long *timestamp,
				     u32 pending_frame_id)
{
	struct cw1200_queue_item *item;
	bool ret;

	spin_lock_bh(&queue->lock);
	ret = !list_empty(&queue->pending);
	if (ret) {
		list_for_each_entry(item, &queue->pending, head) {
			if (item->packet_id != pending_frame_id)
				if (time_before(item->xmit_timestamp,
						*timestamp))
					*timestamp = item->xmit_timestamp;
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

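/* Check whether any queue holds frames waiting for transmission on the
 * links in @link_id_map; an all-ones map tests the global counter.
 */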
bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
				 u32 link_id_map)
{
	bool empty = true;

	spin_lock_bh(&stats->lock);
	if (link_id_map == (u32)-1) {
		empty = stats->num_queued == 0;
	} else {
		int i;
		for (i = 0; i < stats->map_capacity; ++i) {
			if (link_id_map & BIT(i)) {
				if (stats->link_map_cache[i]) {
					empty = false;
					break;
				}
			}
		}
	}
	spin_unlock_bh(&stats->lock);

	return empty;
}