// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::pages. The consumed elements of
 * the rq_pages array are set to NULL; svc_alloc_arg refills these
 * slots before the next svc_rdma_recvfrom call.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst
 * (see rdma_read_complete() below).
 */
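
/* Receive context life cycle, as a rough sketch of the code below:
 * a free svc_rdma_recv_ctxt is taken from sc_recv_ctxts (or
 * allocated fresh) and posted as a Receive WR. When the Receive
 * completes, svc_rdma_wc_receive() queues the ctxt on sc_rq_dto_q.
 * If RDMA Reads were needed, the ctxt later reappears on
 * sc_read_complete_q. svc_rdma_recv_ctxt_put() finally returns it
 * to sc_recv_ctxts, or destroys it if rc_temp is set.
 */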

#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	ctxt->rc_temp = false;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	spin_lock(&rdma->sc_recv_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma->sc_recv_lock);

out:
	ctxt->rc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_recv_lock);

	ctxt = svc_rdma_recv_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	unsigned int i;

	for (i = 0; i < ctxt->rc_page_count; i++)
		put_page(ctxt->rc_pages[i]);

	if (!ctxt->rc_temp) {
		spin_lock(&rdma->sc_recv_lock);
		list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts);
		spin_unlock(&rdma->sc_recv_lock);
	} else
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
}
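
/* Post a single Receive WR. Note the reference accounting sketched
 * here: a reference on the svc_xprt is taken before posting, and the
 * matching put is done either below, when posting fails, or in
 * svc_rdma_wc_receive() once the WR completes.
 */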
static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *ctxt)
{
	int ret;

	svc_xprt_get(&rdma->sc_xprt);
	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
	trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
	if (ret)
		goto err_post;
	return 0;

err_post:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	svc_xprt_put(&rdma->sc_xprt);
	return ret;
}

static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	ctxt = svc_rdma_recv_ctxt_get(rdma);
	if (!ctxt)
		return -ENOMEM;
	return __svc_rdma_post_recv(rdma, ctxt);
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	unsigned int i;
	int ret;

	for (i = 0; i < rdma->sc_max_requests; i++) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			return false;
		ctxt->rc_temp = true;
		ret = __svc_rdma_post_recv(rdma, ctxt);
		if (ret)
			return false;
	}
	return true;
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Receive completion handler could be running.
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	trace_svcrdma_wc_receive(wc);

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	if (svc_rdma_post_recv(rdma))
		goto post_err;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
				   ctxt->rc_recv_sge.addr,
				   wc->byte_len, DMA_FROM_DEVICE);

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	spin_unlock(&rdma->sc_rq_dto_lock);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	goto out;

flushed:
post_err:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&rdma->sc_xprt);
out:
	svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}
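
/* Set up rqstp->rq_arg to describe a freshly received message:
 * the head iovec covers the whole Receive buffer, and the page
 * array and tail stay empty until a Read chunk payload (if any)
 * is pulled over and assembled by rdma_read_complete().
 */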
static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;
}

/* This accommodates the largest possible Write chunk,
 * in one segment.
 */
#define MAX_BYTES_WRITE_SEG	((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))

/* This accommodates the largest possible Position-Zero
 * Read chunk or Reply chunk, in one segment.
 */
#define MAX_BYTES_SPECIAL_SEG	((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))
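
/* For reference, an RDMA segment on the wire (RFC 8166) is four
 * XDR words: a one-word handle (R_key), a one-word length, and a
 * two-word (8-byte) offset. A Read segment prefixes that with a
 * one-word Position field. This is why the parsers below step over
 * one word for the handle, check one word of length, then skip two
 * words of offset.
 */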

/* Sanity check the Read list.
 *
 * Implementation limits:
 * - This implementation supports only one Read chunk.
 *
 * Sanity checks:
 * - Read list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 40 Read segments for a 1KB inline
 * threshold.
 *
 * Returns pointer to the following Write list.
 */
static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
{
	u32 position;
	bool first;

	first = true;
	while (*p++ != xdr_zero) {
		if (first) {
			position = be32_to_cpup(p++);
			first = false;
		} else if (be32_to_cpup(p++) != position) {
			return NULL;
		}
		p++;	/* handle */
		if (be32_to_cpup(p++) > MAX_BYTES_SPECIAL_SEG)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}
	return p;
}

/* The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 60 Write segments for a 1KB inline
 * threshold.
 */
static __be32 *xdr_check_write_chunk(__be32 *p, const __be32 *end,
				     u32 maxlen)
{
	u32 i, segcount;

	segcount = be32_to_cpup(p++);
	for (i = 0; i < segcount; i++) {
		p++;	/* handle */
		if (be32_to_cpup(p++) > maxlen)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}

	return p;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following Reply chunk.
 */
static __be32 *xdr_check_write_list(__be32 *p, const __be32 *end)
{
	u32 chcount;

	chcount = 0;
	while (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_WRITE_SEG);
		if (!p)
			return NULL;
		if (chcount++ > 1)
			return NULL;
	}
	return p;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following RPC header.
 */
static __be32 *xdr_check_reply_chunk(__be32 *p, const __be32 *end)
{
	if (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_SPECIAL_SEG);
		if (!p)
			return NULL;
	}
	return p;
}
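
/* To illustrate what the three checkers above consume: a chunk-less
 * RDMA_MSG transport header is just seven XDR words, the four fixed
 * fields followed by an empty Read list, Write list, and Reply chunk:
 *
 *	{ xid, vers, credits, rdma_msg, 0, 0, 0 } <RPC message follows>
 */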

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one R_key to invalidate.
 *
 * If there is exactly one distinct R_key in the received transport
 * header, set rc_inv_rkey to that R_key. Otherwise, set it to zero.
 *
 * Perform this operation while the received transport header is
 * still in the CPU cache.
 */
static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
				  struct svc_rdma_recv_ctxt *ctxt)
{
	__be32 inv_rkey, *p;
	u32 i, segcount;

	ctxt->rc_inv_rkey = 0;

	if (!rdma->sc_snd_w_inv)
		return;

	inv_rkey = xdr_zero;
	p = ctxt->rc_recv_buf;
	p += rpcrdma_fixed_maxsz;

	/* Read list */
	while (*p++ != xdr_zero) {
		p++;	/* position */
		if (inv_rkey == xdr_zero)
			inv_rkey = *p;
		else if (inv_rkey != *p)
			return;
		p += 4;
	}

	/* Write list */
	while (*p++ != xdr_zero) {
		segcount = be32_to_cpup(p++);
		for (i = 0; i < segcount; i++) {
			if (inv_rkey == xdr_zero)
				inv_rkey = *p;
			else if (inv_rkey != *p)
				return;
			p += 4;
		}
	}

	/* Reply chunk */
	if (*p++ != xdr_zero) {
		segcount = be32_to_cpup(p++);
		for (i = 0; i < segcount; i++) {
			if (inv_rkey == xdr_zero)
				inv_rkey = *p;
			else if (inv_rkey != *p)
				return;
			p += 4;
		}
	}

	ctxt->rc_inv_rkey = be32_to_cpu(inv_rkey);
}

/* On entry, xdr->head[0].iov_base points to first byte in the
 * RPC-over-RDMA header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
{
	__be32 *p, *end, *rdma_argp;
	unsigned int hdr_len;

	/* Verify that there's enough bytes for header + something */
	if (rq_arg->len <= RPCRDMA_HDRLEN_ERR)
		goto out_short;

	rdma_argp = rq_arg->head[0].iov_base;
	if (*(rdma_argp + 1) != rpcrdma_version)
		goto out_version;

	switch (*(rdma_argp + 3)) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;

	case rdma_done:
		goto out_drop;

	case rdma_error:
		goto out_drop;

	default:
		goto out_proc;
	}

	end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len);
	p = xdr_check_read_list(rdma_argp + 4, end);
	if (!p)
		goto out_inval;
	p = xdr_check_write_list(p, end);
	if (!p)
		goto out_inval;
	p = xdr_check_reply_chunk(p, end);
	if (!p)
		goto out_inval;
	if (p > end)
		goto out_inval;

	rq_arg->head[0].iov_base = p;
	hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short(rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers(rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop(rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc(rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse(rdma_argp);
	return -EINVAL;
}
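
/* Worked example: for the chunk-less RDMA_MSG header sketched
 * earlier, the three chunk-list checkers each consume one xdr_zero
 * word, leaving p at rdma_argp + 7. svc_rdma_xdr_decode_req() then
 * returns a 28-byte header length, and rq_arg describes only the
 * RPC message proper.
 */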

static void rdma_read_complete(struct svc_rqst *rqstp,
			       struct svc_rdma_recv_ctxt *head)
{
	int page_no;

	/* Move Read chunk pages to rqstp so that they will be released
	 * when svc_process is done with them.
	 */
	for (page_no = 0; page_no < head->rc_page_count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->rc_pages[page_no];
	}
	head->rc_page_count = 0;

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
	rqstp->rq_arg.page_len = head->rc_arg.page_len;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->rc_arg.head[0];
	rqstp->rq_arg.tail[0] = head->rc_arg.tail[0];
	rqstp->rq_arg.len = head->rc_arg.len;
	rqstp->rq_arg.buflen = head->rc_arg.buflen;
}
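
/* Construct and post an RDMA_ERROR reply. As implemented below,
 * err_vers reports the (single) supported protocol version when the
 * request's version field was bad; any other decoding failure is
 * reported as err_chunk.
 */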
static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
				__be32 *rdma_argp, int status)
{
	struct svc_rdma_send_ctxt *ctxt;
	unsigned int length;
	__be32 *p;
	int ret;

	ctxt = svc_rdma_send_ctxt_get(xprt);
	if (!ctxt)
		return;

	p = ctxt->sc_xprt_buf;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = xprt->sc_fc_credits;
	*p++ = rdma_error;
	switch (status) {
	case -EPROTONOSUPPORT:
		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p++ = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		*p++ = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}
	length = (unsigned long)p - (unsigned long)ctxt->sc_xprt_buf;
	svc_rdma_sync_reply_hdr(xprt, ctxt, length);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	ret = svc_rdma_send(xprt, &ctxt->sc_send_wr);
	if (ret)
		svc_rdma_send_ctxt_put(xprt, ctxt);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt,
					  __be32 *rdma_resp)
{
	__be32 *p;

	if (!xprt->xpt_bc_xprt)
		return false;

	p = rdma_resp + 3;
	if (*p++ != rdma_msg)
		return false;

	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* XID sanity */
	if (*p++ != *rdma_resp)
		return false;
	/* call direction */
	if (*p == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return 0.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	__be32 *p;
	int ret;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (!ctxt) {
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		return 0;
	}
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	atomic_inc(&rdma_stat_recv);

	svc_rdma_build_arg_xdr(rqstp, ctxt);

	/* Prevent svc_xprt_release from releasing pages in rq_pages
	 * if we return 0 or an error.
	 */
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_next_page = rqstp->rq_respages;

	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;
	rqstp->rq_xprt_hlen = ret;

	if (svc_rdma_is_backchannel_reply(xprt, p)) {
		ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
					       &rqstp->rq_arg);
		svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
		return ret;
	}
	svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

	p += rpcrdma_fixed_maxsz;
	if (*p != xdr_zero)
		goto out_readchunk;

complete:
	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return rqstp->rq_arg.len;

out_readchunk:
	ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
	if (ret < 0)
		goto out_postfail;
	return 0;

out_err:
	svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_postfail:
	if (ret == -EINVAL)
		svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return ret;

out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}