// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2015-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/export.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
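
/* dprintk() call sites in this file log under the SVCXPRT debug
 * facility, enabled via the sunrpc module's rpc_debug flags.
 */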

static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
						 struct net *net);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void svc_rdma_secure_port(struct svc_rqst *);
static void svc_rdma_kill_temp_xprt(struct svc_xprt *);
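
/* Server-side transport method table, invoked by the generic svc_xprt
 * layer: ->xpo_accept picks up connections queued by the RDMA-CM
 * callback, while ->xpo_recvfrom and ->xpo_sendto move RPC messages on
 * connected transports.
 */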
static const struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_read_payload = svc_rdma_read_payload,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
	.xpo_secure_port = svc_rdma_secure_port,
	.xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
	.xcl_ident = XPRT_TRANSPORT_RDMA,
};

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	trace_svcrdma_qp_error(event, (struct sockaddr *)&xprt->xpt_remote);
	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		break;

	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
		break;
	}
}

static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
						 struct net *net)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return NULL;
	}
	svc_xprt_init(net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_send_ctxts);
	init_llist_head(&cma_xprt->sc_recv_ctxts);
	INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_send_lock);
	spin_lock_init(&cma_xprt->sc_rw_ctxt_lock);

	/*
	 * Note that this implies that the underlying transport support
	 * has some form of congestion control (see RFC 7530 section 3.1
	 * paragraph 2). For now, we assume that all supported RDMA
	 * transports are suitable here.
	 */
	set_bit(XPT_CONG_CTRL, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}
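
/* Parse the transport-specific RDMA-CM private data, if the client
 * sent any. It advertises the client's proposed inline buffer sizes
 * and whether Send With Invalidate may be used (the rpcrdma CM
 * private message format; see RFC 8797).
 */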
static void
svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
			       struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		newxprt->sc_snd_w_inv = pmsg->cp_flags &
					RPCRDMA_CMP_F_SND_W_INV_OK;

		dprintk("svcrdma: client send_size %u, recv_size %u "
			"remote inv %ssupported\n",
			rpcrdma_decode_buffer_size(pmsg->cp_send_size),
			rpcrdma_decode_buffer_size(pmsg->cp_recv_size),
			newxprt->sc_snd_w_inv ? "" : "un");
	}
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the accept method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id,
			       struct rdma_conn_param *param)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = svc_rdma_create_xprt(listen_xprt->sc_xprt.xpt_server,
				       listen_xprt->sc_xprt.xpt_net);
	if (!newxprt)
		return;
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	svc_rdma_parse_connect_private(newxprt, param);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = param->initiator_depth;

	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	/* The remote port is arbitrary and not under the control of the
	 * client ULP. Set it to a fixed value so that the DRC continues
	 * to be effective after a reconnect.
	 */
	rpc_set_port((struct sockaddr *)&newxprt->sc_xprt.xpt_remote, 0);

	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events will
 * either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, cma_id->context,
			rdma_event_msg(event->event), event->event);
		handle_connect_req(cma_id, &event->param.conn);
		break;
	default:
		/* NB: No device removal upcall for INADDR_ANY listeners */
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}

	return 0;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svcxprt_rdma *rdma = cma_id->context;
	struct svc_xprt *xprt = &rdma->sc_xprt;

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
		svc_xprt_put(xprt);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, xprt,
			rdma_event_msg(event->event), event->event);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
		svc_xprt_put(xprt);
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA listener\n");
	if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}
	cma_xprt = svc_rdma_create_xprt(serv, net);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
	strcpy(cma_xprt->sc_xprt.xpt_remotebuf, "listener");

	listen_id = rdma_create_id(net, rdma_listen_handler, cma_xprt,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	/* Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
#if IS_ENABLED(CONFIG_IPV6)
	ret = rdma_set_afonly(listen_id, 1);
	if (ret) {
		dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
		goto err1;
	}
#endif
	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

/*
 * This is the xpo_accept function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct rpcrdma_connect_private pmsg;
	struct ib_qp_init_attr qp_attr;
	unsigned int ctxts, rq_depth;
	struct ib_device *dev;
	int ret = 0;
	RPC_IFDEBUG(struct sockaddr *sap);

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	dev = newxprt->sc_cm_id->device;
	newxprt->sc_port_num = newxprt->sc_cm_id->port_num;

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	/* Transport header, head iovec, tail iovec */
	newxprt->sc_max_send_sges = 3;
	/* Add one SGE per page list entry */
	newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
	if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
		newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
	newxprt->sc_max_req_size = svcrdma_max_req_size;
	newxprt->sc_max_requests = svcrdma_max_requests;
	newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
	rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;
	if (rq_depth > dev->attrs.max_qp_wr) {
		pr_warn("svcrdma: reducing receive depth to %d\n",
			dev->attrs.max_qp_wr);
		rq_depth = dev->attrs.max_qp_wr;
		newxprt->sc_max_requests = rq_depth - 2;
		newxprt->sc_max_bc_requests = 2;
	}
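	/* Advertise the final request limit to the client as the
	 * flow-control credit value carried in each RPC-over-RDMA
	 * header; pre-encode it once in network byte order.
	 */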
	newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
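	/* Send WRs and rdma_rw contexts (used for RDMA Read and Write
	 * chunks) share the Send Queue, so size the SQ for the sum of
	 * the two.
	 */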
	ctxts = rdma_rw_mr_factor(dev, newxprt->sc_port_num, RPCSVC_MAXPAGES);
	ctxts *= newxprt->sc_max_requests;
	newxprt->sc_sq_depth = rq_depth + ctxts;
	if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr) {
		pr_warn("svcrdma: reducing send depth to %d\n",
			dev->attrs.max_qp_wr);
		newxprt->sc_sq_depth = dev->attrs.max_qp_wr;
	}
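	/* sc_sq_avail tracks free SQ slots; senders that find it
	 * exhausted wait on sc_send_wait until completions release
	 * capacity (see svc_rdma_has_wspace below).
	 */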
	atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);

	newxprt->sc_pd = ib_alloc_pd(dev, 0);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth,
					    IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq =
		ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.port_num = newxprt->sc_port_num;
	qp_attr.cap.max_rdma_ctxs = ctxts;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth - ctxts;
	qp_attr.cap.max_recv_wr = rq_depth;
	qp_attr.cap.max_send_sge = newxprt->sc_max_send_sges;
	qp_attr.cap.max_recv_sge = 1;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n",
		newxprt->sc_cm_id, newxprt->sc_pd);
	dprintk("    cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
		qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
	dprintk("    cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
		qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
		goto errout;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		newxprt->sc_snd_w_inv = false;
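	/* Sanity check: this transport requires a device that speaks
	 * either iWARP or IB/RoCE.
	 */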
	if (!rdma_protocol_iwarp(dev, newxprt->sc_port_num) &&
	    !rdma_ib_or_roce(dev, newxprt->sc_port_num))
		goto errout;

	if (!svc_rdma_post_recvs(newxprt))
		goto errout;

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/* Construct RDMA-CM private message */
	pmsg.cp_magic = rpcrdma_cmp_magic;
	pmsg.cp_version = RPCRDMA_CMP_VERSION;
	pmsg.cp_flags = 0;
	pmsg.cp_send_size = pmsg.cp_recv_size =
		rpcrdma_encode_buffer_size(newxprt->sc_max_req_size);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
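	/* Bound the initiator depth by both the client's advertised
	 * inbound read limit (saved in handle_connect_req) and the
	 * local device's max_qp_init_rd_atom.
	 */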
	conn_param.initiator_depth = min_t(int, newxprt->sc_ord,
					   dev->attrs.max_qp_init_rd_atom);
	if (!conn_param.initiator_depth) {
		dprintk("svcrdma: invalid ORD setting\n");
		ret = -EINVAL;
		goto errout;
	}
	conn_param.private_data = &pmsg;
	conn_param.private_data_len = sizeof(pmsg);
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret)
		goto errout;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	dprintk("svcrdma: new connection %p accepted:\n", newxprt);
	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	dprintk("    local address   : %pIS:%u\n", sap, rpc_get_port(sap));
	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	dprintk("    remote address  : %pIS:%u\n", sap, rpc_get_port(sap));
	dprintk("    max_sge         : %d\n", newxprt->sc_max_send_sges);
	dprintk("    sq_depth        : %d\n", newxprt->sc_sq_depth);
	dprintk("    rdma_rw_ctxs    : %d\n", ctxts);
	dprintk("    max_requests    : %d\n", newxprt->sc_max_requests);
	dprintk("    ord             : %d\n", conn_param.initiator_depth);
#endif

	trace_svcrdma_xprt_accept(&newxprt->sc_xprt);
	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	trace_svcrdma_xprt_fail(&newxprt->sc_xprt);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	struct svc_xprt *xprt = &rdma->sc_xprt;

	trace_svcrdma_xprt_free(xprt);

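	/* Quiesce the QP first so that no completion handlers can run
	 * while resources are torn down below.
	 */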
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_drain_qp(rdma->sc_qp);

	svc_rdma_flush_recv_queues(rdma);

	/* Final put of backchannel client transport */
	if (xprt->xpt_bc_xprt) {
		xprt_put(xprt->xpt_bc_xprt);
		xprt->xpt_bc_xprt = NULL;
	}

	svc_rdma_destroy_rw_ctxts(rdma);
	svc_rdma_send_ctxts_destroy(rdma);
	svc_rdma_recv_ctxts_destroy(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_free_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_free_cq(rdma->sc_rq_cq);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	schedule_work(&rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}
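
/* RDMA connections have no notion of a privileged port, so requests
 * arriving on an RDMA transport are always marked as coming from a
 * secure port.
 */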
static void svc_rdma_secure_port(struct svc_rqst *rqstp)
{
	set_bit(RQ_SECURE, &rqstp->rq_flags);
}
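
/* No-op: svcrdma does not currently implement closing idle
 * connections when asked to kill temporary transports.
 */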
static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
{
}