// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

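        /* The caller's @reqs is a hint only: the advertised limit is
         * capped at half of RPCRDMA_BACKWARD_WRS, presumably so the
         * remaining posted backward-direction WRs stay available for
         * sending replies.
         */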
        r_xprt->rx_buf.rb_bc_srv_max_requests = RPCRDMA_BACKWARD_WRS >> 1;
        trace_xprtrdma_cb_setup(r_xprt, reqs);
        return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        size_t maxmsg;

        maxmsg = min_t(unsigned int, ep->rep_inline_send, ep->rep_inline_recv);
        maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
        return maxmsg - RPCRDMA_HDRLEN_MIN;
}

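/**
 * xprt_rdma_bc_max_slots - Return number of backchannel slots
 * @xprt: transport
 *
 * Returns the number of concurrent incoming backchannel requests
 * this transport accepts, as set during xprt_rdma_bc_setup().
 */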
unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *xprt)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

        return r_xprt->rx_buf.rb_bc_srv_max_requests;
}

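/* Marshal an RPC/RDMA transport header for a backchannel reply and
 * prepare the Send SGEs covering both the header and the reply
 * payload in @rqst's rq_snd_buf.
 */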
static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        __be32 *p;

        rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
        xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
                        rdmab_data(req->rl_rdmabuf), rqst);

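        /* Reserve all seven XDR words (28 octets) of the fixed
         * transport header: xid, version, credits, proc, and the
         * three empty chunk lists encoded below.
         */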
        p = xdr_reserve_space(&req->rl_stream, 28);
        if (unlikely(!p))
                return -EIO;
        *p++ = rqst->rq_xid;
        *p++ = rpcrdma_version;
        *p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
        *p++ = rdma_msg;
        *p++ = xdr_zero;
        *p++ = xdr_zero;
        *p = xdr_zero;

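        /* rpcrdma_noch: the reply is sent entirely inline, with no
         * read, write, or reply chunks.
         */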
        if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
                                      &rqst->rq_snd_buf, rpcrdma_noch))
                return -EIO;

        trace_xprtrdma_cb_reply(rqst);
        return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *      %0 if the RPC message has been sent
 *      %-ENOTCONN if the caller should reconnect and call again
 *      %-EIO if a permanent error occurred and the request was not
 *              sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
        struct rpc_xprt *xprt = rqst->rq_xprt;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        int rc;

        if (!xprt_connected(xprt))
                return -ENOTCONN;

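        /* A backchannel reply consumes a congestion window slot;
         * bail out if one cannot be reserved.
         */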
        if (!xprt_request_get_cong(xprt, rqst))
                return -EBADSLT;

        rc = rpcrdma_bc_marshal_reply(rqst);
        if (rc < 0)
                goto failed_marshal;

        if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
                goto drop_connection;
        return 0;

failed_marshal:
        if (rc != -ENOTCONN)
                return rc;
drop_connection:
        xprt_rdma_close(xprt);
        return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
        struct rpc_rqst *rqst, *tmp;

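        /* Walk the pre-allocated rqst pool, releasing the lock
         * around each rpcrdma_req_destroy() so buffer release
         * never happens while the spinlock is held.
         */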
        spin_lock(&xprt->bc_pa_lock);
        list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
                list_del(&rqst->rq_bc_pa_list);
                spin_unlock(&xprt->bc_pa_lock);

                rpcrdma_req_destroy(rpcr_to_rdmar(rqst));

                spin_lock(&xprt->bc_pa_lock);
        }
        spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct rpc_xprt *xprt = rqst->rq_xprt;

        rpcrdma_recv_buffer_put(req->rl_reply);
        req->rl_reply = NULL;

        spin_lock(&xprt->bc_pa_lock);
        list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
        spin_unlock(&xprt->bc_pa_lock);
        xprt_put(xprt);
}

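/* Return a free backchannel rqst from the transport's pool, or
 * allocate a fresh one if the pool is empty. Returns NULL when the
 * RPCRDMA_BACKWARD_WRS allocation cap has been reached or allocation
 * fails.
 */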
static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
{
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        size_t size;

        spin_lock(&xprt->bc_pa_lock);
        rqst = list_first_entry_or_null(&xprt->bc_pa_list, struct rpc_rqst,
                                        rq_bc_pa_list);
        if (!rqst)
                goto create_req;
        list_del(&rqst->rq_bc_pa_list);
        spin_unlock(&xprt->bc_pa_lock);
        return rqst;

create_req:
        spin_unlock(&xprt->bc_pa_lock);

        /* Set a limit to prevent a remote from overrunning our resources.
         */
        if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS)
                return NULL;

        size = min_t(size_t, r_xprt->rx_ep.rep_inline_recv, PAGE_SIZE);
        req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
        if (!req)
                return NULL;

        xprt->bc_alloc_count++;
        rqst = &req->rl_slot;
        rqst->rq_xprt = xprt;
        __set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
        xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(req->rl_sendbuf), size);
        return rqst;
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server's
 *      forechannel currently ignores them
 *    o The ULP manages a replay cache (e.g., NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
                             struct rpcrdma_rep *rep)
{
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct svc_serv *bc_serv;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        struct xdr_buf *buf;
        size_t size;
        __be32 *p;

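        /* A zero-length inline decode consumes no bytes; it simply
         * returns the current decode position, which here is the
         * start of the received RPC call header (the XID).
         */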
        p = xdr_inline_decode(&rep->rr_stream, 0);
        size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
        pr_info("RPC:       %s: callback XID %08x, length=%zu\n",
                __func__, be32_to_cpup(p), size);
        pr_info("RPC:       %s: %*ph\n", __func__, (int)size, p);
#endif

        rqst = rpcrdma_bc_rqst_get(r_xprt);
        if (!rqst)
                goto out_overflow;

        rqst->rq_reply_bytes_recvd = 0;
        rqst->rq_xid = *p;

        rqst->rq_private_buf.len = size;

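        /* Point rq_rcv_buf's head at the received call message so
         * the ULP decodes the backchannel request in place.
         */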
        buf = &rqst->rq_rcv_buf;
        memset(buf, 0, sizeof(*buf));
        buf->head[0].iov_base = p;
        buf->head[0].iov_len = size;
        buf->len = size;

        /* The receive buffer has to be hooked to the rpcrdma_req
         * so that it is not released while the req is pointing
         * to its buffer, and so that it can be reposted after
         * the Upper Layer is done decoding it.
         */
        req = rpcr_to_rdmar(rqst);
        req->rl_reply = rep;
        trace_xprtrdma_cb_call(rqst);

        /* Queue rqst for ULP's callback service */
        bc_serv = xprt->bc_serv;
        xprt_get(xprt);
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
        spin_unlock(&bc_serv->sv_cb_lock);

        wake_up(&bc_serv->sv_cb_waitq);

        r_xprt->rx_stats.bcall_count++;
        return;

out_overflow:
        pr_warn("RPC/RDMA backchannel overflow\n");
        xprt_force_disconnect(xprt);
        /* This receive buffer gets reposted automatically
         * when the connection is re-established.
         */
        return;
}