/* drivers/infiniband/core/fmr_pool.c (Linux-libre 5.0.10-gnu, librecmc/linux-libre.git) */
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <rdma/ib_fmr_pool.h>

#include "core_priv.h"

#define PFX "fmr_pool: "

enum {
        IB_FMR_MAX_REMAPS = 32,

        IB_FMR_HASH_BITS  = 8,
        IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
        IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};

/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < pool->max_remaps) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_pool_map_phys() occurs before the FMR is remapped.  In
 * this case we just increment the ref_count and remove the FMR from
 * free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */
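
/*
 * Lifecycle summary (a condensed restatement of the rules above):
 *
 *  - ib_fmr_pool_map_phys() takes an FMR off free_list and maps it
 *    (ref_count becomes 1), or revives a cached FMR from either list.
 *  - ib_fmr_pool_unmap() drops ref_count; when it reaches 0 the FMR
 *    returns to free_list (remap_count < max_remaps) or dirty_list.
 *  - The cleanup worker batch-unmaps everything on dirty_list, resets
 *    remap_count to 0 and splices those FMRs back onto free_list.
 */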

struct ib_fmr_pool {
        spinlock_t                pool_lock;

        int                       pool_size;
        int                       max_pages;
        int                       max_remaps;
        int                       dirty_watermark;
        int                       dirty_len;
        struct list_head          free_list;
        struct list_head          dirty_list;
        struct hlist_head        *cache_bucket;

        void                     (*flush_function)(struct ib_fmr_pool *pool,
                                                   void               *arg);
        void                     *flush_arg;

        struct kthread_worker     *worker;
        struct kthread_work       work;

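        /*
         * Flush handshake: ib_flush_fmr_pool() bumps req_ser and uses
         * the new value as its ticket; the cleanup worker bumps
         * flush_ser after each completed batch release.  A flusher is
         * done waiting once flush_ser has caught up with its ticket.
         */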
        atomic_t                  req_ser;
        atomic_t                  flush_ser;

        wait_queue_head_t         force_wait;
};

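/*
 * Hash on the first page only; ib_fmr_cache_lookup() disambiguates
 * collisions by comparing the full page list and the I/O virtual
 * address.
 */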
static inline u32 ib_fmr_hash(u64 first_page)
{
        return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
                IB_FMR_HASH_MASK;
}

/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
                                                      u64 *page_list,
                                                      int  page_list_len,
                                                      u64  io_virtual_address)
{
        struct hlist_head *bucket;
        struct ib_pool_fmr *fmr;

        if (!pool->cache_bucket)
                return NULL;

        bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

        hlist_for_each_entry(fmr, bucket, cache_node)
                if (io_virtual_address == fmr->io_virtual_address &&
                    page_list_len      == fmr->page_list_len      &&
                    !memcmp(page_list, fmr->page_list,
                            page_list_len * sizeof(*page_list)))
                        return fmr;

        return NULL;
}

static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
        int                 ret;
        struct ib_pool_fmr *fmr;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);

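        /*
         * Two lists are built in parallel below: the pool wrappers
         * move to unmap_list via their "list" member, while the
         * underlying struct ib_fmrs are chained onto fmr_list, which
         * is what ib_unmap_fmr() actually consumes.
         */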
        spin_lock_irq(&pool->pool_lock);

        list_for_each_entry(fmr, &pool->dirty_list, list) {
                hlist_del_init(&fmr->cache_node);
                fmr->remap_count = 0;
                list_add_tail(&fmr->fmr->list, &fmr_list);

#ifdef DEBUG
                if (fmr->ref_count != 0) {
                        pr_warn(PFX "Unmapping FMR %p with ref count %d\n",
                                fmr, fmr->ref_count);
                }
#endif
        }

        list_splice_init(&pool->dirty_list, &unmap_list);
        pool->dirty_len = 0;

        spin_unlock_irq(&pool->pool_lock);

        if (list_empty(&unmap_list))
                return;

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);

        spin_lock_irq(&pool->pool_lock);
        list_splice(&unmap_list, &pool->free_list);
        spin_unlock_irq(&pool->pool_lock);
}

static void ib_fmr_cleanup_func(struct kthread_work *work)
{
        struct ib_fmr_pool *pool = container_of(work, struct ib_fmr_pool, work);

        ib_fmr_batch_release(pool);
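        /*
         * Bump flush_ser only after the batch release has completed,
         * so that waiters in ib_flush_fmr_pool() cannot wake up
         * before their dirty FMRs have actually been unmapped.
         */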
        atomic_inc(&pool->flush_ser);
        wake_up_interruptible(&pool->force_wait);

        if (pool->flush_function)
                pool->flush_function(pool, pool->flush_arg);

        if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0)
                kthread_queue_work(pool->worker, &pool->work);
}

/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd: Protection domain for FMRs
 * @params: FMR pool parameters
 *
 * Create a pool of FMRs.  Returns a pointer to the new pool on
 * success, or an ERR_PTR()-encoded error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
                                       struct ib_fmr_pool_param *params)
{
        struct ib_device   *device;
        struct ib_fmr_pool *pool;
        int i;
        int ret;
        int max_remaps;

        if (!params)
                return ERR_PTR(-EINVAL);

        device = pd->device;
        if (!device->ops.alloc_fmr    || !device->ops.dealloc_fmr  ||
            !device->ops.map_phys_fmr || !device->ops.unmap_fmr) {
                dev_info(&device->dev, "Device does not support FMRs\n");
                return ERR_PTR(-ENOSYS);
        }

        if (!device->attrs.max_map_per_fmr)
                max_remaps = IB_FMR_MAX_REMAPS;
        else
                max_remaps = device->attrs.max_map_per_fmr;

        pool = kmalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        pool->cache_bucket   = NULL;
        pool->flush_function = params->flush_function;
        pool->flush_arg      = params->flush_arg;

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->dirty_list);

        if (params->cache) {
                pool->cache_bucket =
                        kmalloc_array(IB_FMR_HASH_SIZE,
                                      sizeof(*pool->cache_bucket),
                                      GFP_KERNEL);
                if (!pool->cache_bucket) {
                        ret = -ENOMEM;
                        goto out_free_pool;
                }

                for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
                        INIT_HLIST_HEAD(pool->cache_bucket + i);
        }

        pool->pool_size       = 0;
        pool->max_pages       = params->max_pages_per_fmr;
        pool->max_remaps      = max_remaps;
        pool->dirty_watermark = params->dirty_watermark;
        pool->dirty_len       = 0;
        spin_lock_init(&pool->pool_lock);
        atomic_set(&pool->req_ser,   0);
        atomic_set(&pool->flush_ser, 0);
        init_waitqueue_head(&pool->force_wait);

        pool->worker =
                kthread_create_worker(0, "ib_fmr(%s)", dev_name(&device->dev));
        if (IS_ERR(pool->worker)) {
                pr_warn(PFX "couldn't start cleanup kthread worker\n");
                ret = PTR_ERR(pool->worker);
                goto out_free_pool;
        }
        kthread_init_work(&pool->work, ib_fmr_cleanup_func);

        {
                struct ib_pool_fmr *fmr;
                struct ib_fmr_attr fmr_attr = {
                        .max_pages  = params->max_pages_per_fmr,
                        .max_maps   = pool->max_remaps,
                        .page_shift = params->page_shift
                };
                int bytes_per_fmr = sizeof(*fmr);

                if (pool->cache_bucket)
                        bytes_per_fmr += params->max_pages_per_fmr * sizeof(u64);

                for (i = 0; i < params->pool_size; ++i) {
                        fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
                        if (!fmr)
                                goto out_fail;

                        fmr->pool             = pool;
                        fmr->remap_count      = 0;
                        fmr->ref_count        = 0;
                        INIT_HLIST_NODE(&fmr->cache_node);

                        fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
                        if (IS_ERR(fmr->fmr)) {
                                pr_warn(PFX "fmr_create failed for FMR %d\n",
                                        i);
                                kfree(fmr);
                                goto out_fail;
                        }

                        list_add_tail(&fmr->list, &pool->free_list);
                        ++pool->pool_size;
                }
        }

        return pool;

 out_free_pool:
        kfree(pool->cache_bucket);
        kfree(pool);

        return ERR_PTR(ret);

 out_fail:
        ib_destroy_fmr_pool(pool);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);
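
/*
 * Typical usage, as a minimal sketch: "my_pd" and all parameter
 * values below are illustrative placeholders, not taken from any
 * particular ULP.
 *
 *      struct ib_fmr_pool_param params = {
 *              .max_pages_per_fmr = 64,
 *              .page_shift        = PAGE_SHIFT,
 *              .access            = IB_ACCESS_LOCAL_WRITE |
 *                                   IB_ACCESS_REMOTE_WRITE,
 *              .pool_size         = 32,
 *              .dirty_watermark   = 8,
 *              .cache             = 1,
 *      };
 *      struct ib_fmr_pool *pool;
 *
 *      pool = ib_create_fmr_pool(my_pd, &params);
 *      if (IS_ERR(pool))
 *              return PTR_ERR(pool);
 */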

/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool: FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
        struct ib_pool_fmr *fmr;
        struct ib_pool_fmr *tmp;
        LIST_HEAD(fmr_list);
        int                 i;

        kthread_destroy_worker(pool->worker);
        ib_fmr_batch_release(pool);

        i = 0;
        list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
                if (fmr->remap_count) {
                        INIT_LIST_HEAD(&fmr_list);
                        list_add_tail(&fmr->fmr->list, &fmr_list);
                        ib_unmap_fmr(&fmr_list);
                }
                ib_dealloc_fmr(fmr->fmr);
                list_del(&fmr->list);
                kfree(fmr);
                ++i;
        }

        if (i < pool->pool_size)
                pr_warn(PFX "pool still has %d regions registered\n",
                        pool->pool_size - i);

        kfree(pool->cache_bucket);
        kfree(pool);
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool: FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
        int serial;
        struct ib_pool_fmr *fmr, *next;

        /*
         * The free_list holds FMRs that may have been used
         * but have not been remapped enough times to be dirty.
         * Put them on the dirty list now so that the cleanup
         * worker will reap them too.
         */
        spin_lock_irq(&pool->pool_lock);
        list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
                if (fmr->remap_count > 0)
                        list_move(&fmr->list, &pool->dirty_list);
        }
        spin_unlock_irq(&pool->pool_lock);

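        /*
         * Take a ticket, kick the cleanup worker, and wait until the
         * worker's completion counter catches up with the ticket.
         */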
        serial = atomic_inc_return(&pool->req_ser);
        kthread_queue_work(pool->worker, &pool->work);

        if (wait_event_interruptible(pool->force_wait,
                                     atomic_read(&pool->flush_ser) - serial >= 0))
                return -EINTR;

        return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);

/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool_handle: FMR pool to allocate FMR from
 * @page_list: List of pages to map
 * @list_len: Number of pages in @page_list
 * @io_virtual_address: I/O virtual address for new FMR
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                                         u64                *page_list,
                                         int                 list_len,
                                         u64                 io_virtual_address)
{
        struct ib_fmr_pool *pool = pool_handle;
        struct ib_pool_fmr *fmr;
        unsigned long       flags;
        int                 result;

        if (list_len < 1 || list_len > pool->max_pages)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&pool->pool_lock, flags);
        fmr = ib_fmr_cache_lookup(pool,
                                  page_list,
                                  list_len,
                                  io_virtual_address);
        if (fmr) {
                /* found in cache */
                ++fmr->ref_count;
                if (fmr->ref_count == 1) {
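                        /*
                         * Going from 0 to 1: the FMR was idle on
                         * free_list or dirty_list; unlink it so it
                         * can be neither handed out twice nor reaped
                         * while in use.
                         */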
                        list_del(&fmr->list);
                }

                spin_unlock_irqrestore(&pool->pool_lock, flags);

                return fmr;
        }

        if (list_empty(&pool->free_list)) {
                spin_unlock_irqrestore(&pool->pool_lock, flags);
                return ERR_PTR(-EAGAIN);
        }

        fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
        list_del(&fmr->list);
        hlist_del_init(&fmr->cache_node);
        spin_unlock_irqrestore(&pool->pool_lock, flags);

        result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
                                 io_virtual_address);

        if (result) {
                spin_lock_irqsave(&pool->pool_lock, flags);
                list_add(&fmr->list, &pool->free_list);
                spin_unlock_irqrestore(&pool->pool_lock, flags);

                pr_warn(PFX "fmr_map returns %d\n", result);

                return ERR_PTR(result);
        }

        ++fmr->remap_count;
        fmr->ref_count = 1;

        if (pool->cache_bucket) {
                fmr->io_virtual_address = io_virtual_address;
                fmr->page_list_len      = list_len;
                memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

                spin_lock_irqsave(&pool->pool_lock, flags);
                hlist_add_head(&fmr->cache_node,
                               pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
                spin_unlock_irqrestore(&pool->pool_lock, flags);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);
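
/*
 * Typical map/unmap cycle, as a sketch: "pool", "dma_pages", "npages"
 * and "iova" are hypothetical names standing in for the caller's own
 * state.
 *
 *      struct ib_pool_fmr *pfmr;
 *
 *      pfmr = ib_fmr_pool_map_phys(pool, dma_pages, npages, iova);
 *      if (IS_ERR(pfmr))
 *              return PTR_ERR(pfmr);
 *
 *      ... post work requests using pfmr->fmr->lkey / pfmr->fmr->rkey ...
 *
 *      ib_fmr_pool_unmap(pfmr);
 */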

/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr: FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
        struct ib_fmr_pool *pool;
        unsigned long flags;

        pool = fmr->pool;

        spin_lock_irqsave(&pool->pool_lock, flags);

        --fmr->ref_count;
        if (!fmr->ref_count) {
                if (fmr->remap_count < pool->max_remaps) {
                        list_add_tail(&fmr->list, &pool->free_list);
                } else {
                        list_add_tail(&fmr->list, &pool->dirty_list);
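                        /*
                         * Enough dirty FMRs have accumulated; kick
                         * the cleanup worker to batch-release them.
                         */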
                        if (++pool->dirty_len >= pool->dirty_watermark) {
                                atomic_inc(&pool->req_ser);
                                kthread_queue_work(pool->worker, &pool->work);
                        }
                }
        }

#ifdef DEBUG
        if (fmr->ref_count < 0)
                pr_warn(PFX "FMR %p has ref count %d < 0\n",
                        fmr, fmr->ref_count);
#endif

        spin_unlock_irqrestore(&pool->pool_lock, flags);
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);