/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/nospec.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>
#include <rdma/rdma_netlink.h>
#include "core_priv.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");
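/* Upper bound applied to user-requested listen backlogs in ucma_listen();
 * tunable at runtime through the net/rdma_ucm/max_backlog sysctl registered
 * below.
 */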
static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	u32			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	/* mark that device is in process of destroying the internal HW
	 * resources, protected by the ctx_table lock
	 */
	int			closing;
	/* sync between removal event and id destroy, protected by file mut */
	int			destroying;
	struct work_struct	close_work;
};
struct ucma_multicast {
	struct ucma_context	*ctx;
	u32			id;
	int			events_reported;

	u64			uid;
	u8			join_state;
	struct list_head	list;
	struct sockaddr_storage	addr;
};
struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};
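/* ctx_table and multicast_table map the u32 IDs handed out to user space back
 * to their kernel objects; xa_alloc() in the allocators below assigns the IDs.
 */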
static DEFINE_XARRAY_ALLOC(ctx_table);
static DEFINE_XARRAY_ALLOC(multicast_table);

static const struct file_operations ucma_fops;
static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = xa_load(&ctx_table, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}
static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	xa_lock(&ctx_table);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	xa_unlock(&ctx_table);
	return ctx;
}
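/* Drop a reference taken by ucma_get_ctx(); the final put completes ctx->comp
 * so a destroy or close path blocked in wait_for_completion() can proceed.
 */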
static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}
/*
 * Same as ucma_get_ctx but requires that ->cm_id->device is valid, e.g. that
 * the CM_ID is bound to a local device.
 */
static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
{
	struct ucma_context *ctx = ucma_get_ctx(file, id);

	if (IS_ERR(ctx))
		return ctx;
	if (!ctx->cm_id->device) {
		ucma_put_ctx(ctx);
		return ERR_PTR(-EINVAL);
	}
	return ctx;
}
static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}
static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* once all inflight tasks are finished, we close all underlying
	 * resources. The context is still alive till its explicit destruction
	 * by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}
static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mc->ctx = ctx;
	if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, GFP_KERNEL))
		goto error;

	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}
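/* The two helpers below flatten kernel event parameters into the fixed-size
 * ABI structures embedded in rdma_ucm_event_resp that are copied to user
 * space by ucma_get_event().
 */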
static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}
static void ucma_copy_ud_event(struct ib_device *device,
			       struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}
static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}
/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only if the context points to this cm_id does it own it and can it
	 * be queued to be closed; otherwise the cm_id is an inflight one that
	 * is part of that context's event list, pending to be detached and
	 * reattached to its new context as part of ucma_get_event, and is
	 * handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		xa_lock(&ctx_table);
		ctx->closing = 1;
		xa_unlock(&ctx_table);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}
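/*
 * rdma_cm event callback: queue the event on the owning file's event_list and
 * wake any reader blocked in ucma_get_event() or poll(). For a
 * CONNECT_REQUEST that cannot be queued, the non-zero return tells the CM to
 * destroy the new id.
 */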
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud,
				   &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context. This can only happen if an error occurs on a
		 * new connection before the user accepts it. This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}
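/*
 * Blocking read path for events: waits on poll_wait until an event is queued,
 * then copies it out. A CONNECT_REQUEST event allocates the new context here,
 * so the id returned to user space is owned by this file.
 */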
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	/*
	 * Old 32 bit user space does not send the 4 byte padding in the
	 * reserved field. We don't care, allow it to keep working.
	 */
	if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &uevent->resp,
			 min_t(size_t, out_len, sizeof(uevent->resp)))) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}
static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}
static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct rdma_cm_id *cm_id;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	cm_id = __rdma_create_id(current->nsproxy->net_ns,
				 ucma_event_handler, ctx, cmd.ps, qp_type, NULL);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}

	ctx->cm_id = cm_id;
	return 0;

err2:
	rdma_destroy_id(cm_id);
err1:
	xa_erase(&ctx_table, ctx->id);
	mutex_lock(&file->mut);
	list_del(&ctx->list);
	mutex_unlock(&file->mut);
	kfree(ctx);
	return ret;
}
static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		xa_erase(&multicast_table, mc->id);
		kfree(mc);
	}
	mutex_unlock(&ctx->file->mut);
}
static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}
/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to cleanup the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing. We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context pending events list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	xa_lock(&ctx_table);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		__xa_erase(&ctx_table, ctx->id);
	xa_unlock(&ctx_table);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it's guaranteed that there is no inflight
	 * closing task */
	xa_lock(&ctx_table);
	if (!ctx->closing) {
		xa_unlock(&ctx_table);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		xa_unlock(&ctx_table);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved || !cmd.addr_size ||
	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
	    !rdma_addr_size_in6(&cmd.dst_addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved ||
	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}
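/* The three helpers below translate the kernel's route representation into
 * the user-visible ABI layout for IB, RoCE and iWarp ports respectively.
 */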
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}
static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}
static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}
static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}
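/* Fill in the device-dependent part of an address query; a no-op until the
 * id is bound to a device.
 */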
static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}
static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {
		struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
			struct sa_path_rec ib;

			sa_convert_path_opa_to_ib(&ib, rec);
			ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
		} else {
			ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
		}
	}

	if (copy_to_user(response, resp, struct_size(resp, path_data, i)))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}
static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
			       NULL);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, NULL,
			       (union ib_gid *)&addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = u64_to_user_ptr(cmd.response);
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}
static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}
static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = __rdma_accept(ctx->cm_id, NULL, NULL);

	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.qp_state > IB_QPS_ERR)
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}
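/* Per-id option setters; each checks that optlen matches the option's type
 * before touching the cm_id.
 */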
static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_ACK_TIMEOUT:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_ack_timeout(ctx->cm_id, *((u8 *)optval));
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	if (!ctx->cm_id->device)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	sa_path.rec_type = SA_PATH_REC_TYPE_IB;
	ib_sa_unpack_path(path_data->path_rec, &sa_path);

	if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
		struct sa_path_rec opa;

		sa_convert_path_ib_to_opa(&opa, &sa_path);
		ret = rdma_set_ib_path(ctx->cm_id, &opa);
	} else {
		ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
	}
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}
static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user(u64_to_user_ptr(cmd.optval),
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->cm_id->device)
		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);

	ucma_put_ctx(ctx);
	return ret;
}
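/*
 * Common tail of the two join commands: validates the address and join
 * flags, allocates the ucma_multicast, and only publishes it in
 * multicast_table once the response has reached user space.
 */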
static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->addr_size != rdma_addr_size(addr))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user(u64_to_user_ptr(cmd->response),
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	xa_store(&multicast_table, mc->id, mc, 0);

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	xa_erase(&multicast_table, mc->id);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
	if (!join_cmd.addr_size)
		return -EINVAL;

	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}
static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	return ucma_process_join(file, &cmd, out_len);
}
static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	xa_lock(&multicast_table);
	mc = xa_load(&multicast_table, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		__xa_erase(&multicast_table, mc->id);
	xa_unlock(&multicast_table);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}
static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutexes based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}
static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}
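/* Move a ctx, together with any events it has pending, from one open instance
 * of the rdma_cm device to another; user-space CM libraries typically use this
 * to hand an id to a child process after fork().
 */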
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;
	if (f.file->f_op != &ucma_fops) {
		ret = -EINVAL;
		goto file_put;
	}

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	xa_lock(&ctx_table);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	xa_unlock(&ctx_table);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};
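/*
 * write() is the command transport: each write carries one rdma_ucm_cmd_hdr
 * followed by hdr.in bytes of command payload, demultiplexed through
 * ucma_cmd_table above.
 */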
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;
	hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table));

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}
static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = EPOLLIN | EPOLLRDNORM;

	return mask;
}
/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return stream_open(inode, filp);
}
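/*
 * Tear down every context still owned by this file. The loop drops file->mut
 * before destroying each id because, as noted above ucma_free_ctx(), calling
 * rdma_destroy_id() while holding that mutex can deadlock against handlers.
 */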
static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		xa_erase(&ctx_table, ctx->id);
		flush_workqueue(file->close_wq);
		/* Once the ctx has been marked as destroying and the
		 * workqueue flushed, we are safe from any inflight handlers
		 * that might queue another closing task.
		 */
		xa_lock(&ctx_table);
		if (!ctx->closing) {
			xa_unlock(&ctx_table);
			ucma_put_ctx(ctx);
			wait_for_completion(&ctx->comp);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			xa_unlock(&ctx_table);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}
static const struct file_operations ucma_fops = {
	.owner 	 = THIS_MODULE,
	.open 	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll    = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
};
static int ucma_get_global_nl_info(struct ib_client_nl_info *res)
{
	res->abi = RDMA_USER_CM_ABI_VERSION;
	res->cdev = ucma_misc.this_device;
	return 0;
}

static struct ib_client rdma_cma_client = {
	.name = "rdma_cm",
	.get_global_nl_info = ucma_get_global_nl_info,
};
MODULE_ALIAS_RDMA_CLIENT("rdma_cm");
static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		pr_err("rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}

	ret = ib_register_client(&rdma_cma_client);
	if (ret)
		goto err3;

	return 0;
err3:
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}
static void __exit ucma_cleanup(void)
{
	ib_unregister_client(&rdma_cma_client);
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
}

module_init(ucma_init);
module_exit(ucma_cleanup);