// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */
/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */
/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_sync or frwr_unmap_async).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mrs immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */
/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR:	The MR was being registered when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * FLUSHED_LI:	The MR was being invalidated when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * When frwr_map encounters FLUSHED and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mrs list when recovery is
 * complete. frwr_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
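
/* Illustrative call sequence: a sketch of how a caller might drive the
 * entry points below for one RPC. Only the frwr_* functions are defined
 * in this file; the surrounding variables and control flow here are
 * assumptions for illustration.
 *
 *	seg = frwr_map(r_xprt, seg, nsegs, writing, xid, &mr);
 *	if (IS_ERR(seg))
 *		return PTR_ERR(seg);		// -EAGAIN or -EIO
 *	...					// repeat for each chunk segment
 *	rc = frwr_send(ia, req);		// REG_MR WRs chained before the Send
 *	...					// reply arrives
 *	frwr_reminv(rep, &req->rl_registered);	// server-side invalidation, if any
 *	frwr_unmap_sync(r_xprt, req);		// fence any remaining MRs
 */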
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/**
 * frwr_is_supported - Check if device supports FRWR
 * @device: interface adapter to check
 *
 * Returns true if device supports FRWR, otherwise false
 */
bool frwr_is_supported(struct ib_device *device)
{
	struct ib_device_attr *attrs = &device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		device->name);
	return false;
}
/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_init_mr
 *
 */
void frwr_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

/* MRs are dynamically allocated, so simply clean up and release the MR.
 * A replacement MR will subsequently be allocated on demand.
 */
static void
frwr_mr_recycle_worker(struct work_struct *work)
{
	struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	spin_lock(&r_xprt->rx_buf.rb_mrlock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_mrlock);

	frwr_release_mr(mr);
}
/* frwr_reset - Place MRs back on the free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
	while (!list_empty(&req->rl_registered)) {
		struct rpcrdma_mr *mr;

		mr = rpcrdma_mr_pop(&req->rl_registered);
		rpcrdma_mr_unmap_and_put(mr);
	}
}
/**
 * frwr_init_mr - Initialize one MR
 * @ia: interface adapter
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	unsigned int depth = ia->ri_max_frwr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	frmr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kcalloc(depth, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto out_list_err;

	mr->frwr.fr_mr = frmr;
	mr->mr_dir = DMA_NONE;
	INIT_LIST_HEAD(&mr->mr_list);
	INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
	init_completion(&mr->frwr.fr_linv_done);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(frmr);
	return -ENOMEM;
}
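
/* A sketch of on-demand MR creation using the helper above. The
 * allocation, flags, and list handling shown here are illustrative
 * assumptions; only frwr_init_mr comes from this file.
 *
 *	mr = kzalloc(sizeof(*mr), GFP_NOFS);
 *	if (!mr)
 *		break;
 *	rc = frwr_init_mr(ia, mr);
 *	if (rc) {
 *		kfree(mr);
 *		break;
 *	}
 *	mr->mr_xprt = r_xprt;
 *	...	// add mr to the transport's rb_mrs list
 */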
/**
 * frwr_open - Prepare an endpoint for use with FRWR
 * @ia: interface adapter this endpoint will use
 * @ep: endpoint to prepare
 *
 * On success, sets:
 *	ep->rep_attr.cap.max_send_wr
 *	ep->rep_attr.cap.max_recv_wr
 *	ep->rep_max_requests
 *	ia->ri_max_segs
 *
 * And these FRWR-related fields:
 *	ia->ri_max_frwr_depth
 *	ia->ri_mrtype
 *
 * On failure, a negative errno is returned.
 */
int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep)
{
	struct ib_device_attr *attrs = &ia->ri_id->device->attrs;
	int max_qp_wr, depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > 1)
		ia->ri_max_frwr_depth = attrs->max_sge_rd;
	else
		ia->ri_max_frwr_depth = attrs->max_fast_reg_page_list_len;
	if (ia->ri_max_frwr_depth > RPCRDMA_MAX_DATA_SEGS)
		ia->ri_max_frwr_depth = RPCRDMA_MAX_DATA_SEGS;
	dprintk("RPC: %s: max FR page list depth = %u\n",
		__func__, ia->ri_max_frwr_depth);

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ia->ri_max_frwr_depth;
		} while (delta > 0);
	}

	max_qp_wr = ia->ri_id->device->attrs.max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->rep_max_requests > max_qp_wr)
		ep->rep_max_requests = max_qp_wr;
	ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
	if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
		ep->rep_max_requests = max_qp_wr / depth;
		if (!ep->rep_max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
	}
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->rep_attr.cap.max_recv_wr = ep->rep_max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				ia->ri_max_frwr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ia->ri_max_segs += 2;
	if (ia->ri_max_segs > RPCRDMA_MAX_HDR_SEGS)
		ia->ri_max_segs = RPCRDMA_MAX_HDR_SEGS;
	return 0;
}
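
/* Worked example of the sizing above, using assumed values (these
 * numbers are for illustration only):
 *
 *	RPCRDMA_MAX_DATA_SEGS = 64, ri_max_frwr_depth = 16
 *	delta = 64 - 16 = 48; the loop runs three times, so
 *	depth = 7 + 3 * 2 = 13
 *
 *	With rep_max_requests = 128, max_send_wr = 128 * 13 = 1664.
 *	If max_qp_wr (after reserving backchannel and drain WRs) is
 *	only 1000, rep_max_requests becomes 1000 / 13 = 76 and
 *	max_send_wr becomes 76 * 13 = 988, plus RPCRDMA_BACKWARD_WRS + 1.
 */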
/**
 * frwr_maxpages - Compute size of largest payload
 * @r_xprt: transport
 *
 * Returns maximum size of an RPC message, in pages.
 *
 * FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     (ia->ri_max_segs - 2) * ia->ri_max_frwr_depth);
}
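
/* For example, with the assumed values above (ri_max_frwr_depth = 16,
 * RPCRDMA_MAX_DATA_SEGS = 64), frwr_open computes ri_max_segs =
 * 64 / 16 + 2 = 6, so the largest payload is
 * min(64, (6 - 2) * 16) = 64 pages.
 */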
/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @out: initialized MR
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, the prepared MR is planted in @out.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_mr *mr;
	struct ib_mr *ibmr;
	struct ib_reg_wr *reg_wr;
	int i, n;
	u8 key;

	mr = rpcrdma_mr_get(r_xprt);
	if (!mr)
		goto out_getmr_err;

	if (nsegs > ia->ri_max_frwr_depth)
		nsegs = ia->ri_max_frwr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents =
		ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;

	ibmr = mr->frwr.fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mr->mr_nents))
		goto out_mapmr_err;

	/* Place the RPC's XID in the upper 32 bits of the MR's iova,
	 * and bump the key portion of the rkey for this use of the MR.
	 */
	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &mr->frwr.fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	*out = mr;
	return seg;

out_getmr_err:
	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
	return ERR_PTR(-EAGAIN);

out_dmamap_err:
	mr->mr_dir = DMA_NONE;
	trace_xprtrdma_frwr_sgerr(mr, i);
	rpcrdma_mr_put(mr);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	rpcrdma_mr_recycle(mr);
	return ERR_PTR(-EIO);
}
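
/* A sketch of how a marshaling loop might consume frwr_map. The loop
 * and the helper names other than frwr_map are assumptions for
 * illustration:
 *
 *	do {
 *		seg = frwr_map(r_xprt, seg, nsegs, writing,
 *			       rqst->rq_xid, &mr);
 *		if (IS_ERR(seg))
 *			return PTR_ERR(seg);
 *		rpcrdma_mr_push(mr, &req->rl_registered);
 *		nsegs -= mr->mr_nents;
 *	} while (nsegs);
 */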
/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_fastreg(wc, frwr);
	/* The MR will get recycled when the associated req is retransmitted */
}
/**
 * frwr_send - post Send WR containing the RPC Call message
 * @ia: interface adapter
 * @req: Prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the result of ib_post_send.
 */
int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_sendctx->sc_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}

	/* If ib_post_send fails, the next ->send_request for
	 * @req will queue these MRs for recovery.
	 */
	return ib_post_send(ia->ri_id->qp, post_wr, NULL);
}
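
/* Shape of the WR chain posted above (sketch): for a request with two
 * registered MRs, the chain is built by prepending, so it is posted as
 *
 *	REG_MR(mr2) -> REG_MR(mr1) -> Send
 *
 * and the provider executes every registration before the Send WR that
 * references those MRs.
 */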
/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_remoteinv(mr);
			rpcrdma_mr_unmap_and_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}
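
/* A sketch of the reply path's use of frwr_reminv; the caller shown is
 * an assumption. When the reply's receive completion indicated that the
 * server performed remote invalidation, the handler can do
 *
 *	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
 *		frwr_reminv(rep, &req->rl_registered);
 *
 * so the matching MR skips a LOCAL_INV round trip, and frwr_unmap_*
 * handles whatever remains on rl_registered.
 */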
static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
	if (wc->status != IB_WC_SUCCESS)
		rpcrdma_mr_recycle(mr);
	else
		rpcrdma_mr_unmap_and_put(mr);
}
/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li(wc, frwr);
	__frwr_release_mr(wc, mr);
}
/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_wake(wc, frwr);
	complete(&frwr->fr_linv_done);
	__frwr_release_mr(wc, mr);
}
/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them. It
 * also ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while (!list_empty(&req->rl_registered)) {
		mr = rpcrdma_mr_pop(&req->rl_registered);

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);
	trace_xprtrdma_post_send(req, rc);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so don't wait in that case.
	 */
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		list_del_init(&mr->mr_list);
		rpcrdma_mr_recycle(mr);
	}
}
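
/* Shape of the LOCAL_INV chain posted by frwr_unmap_sync for three MRs
 * (sketch):
 *
 *	LOCAL_INV(mr1)  ->  LOCAL_INV(mr2)  ->  LOCAL_INV(mr3)
 *	frwr_wc_localinv    frwr_wc_localinv    frwr_wc_localinv_wake
 *
 * Every WR is signaled, but only the final completion performs the
 * wake; Send Queue ordering guarantees the earlier WRs have completed
 * by then.
 */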
/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_done(wc, frwr);
	rpcrdma_complete_rqst(frwr->fr_req->rl_reply);
	__frwr_release_mr(wc, mr);
}
/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while (!list_empty(&req->rl_registered)) {
		mr = rpcrdma_mr_pop(&req->rl_registered);

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		frwr->fr_req = req;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last completion will wake up the
	 * RPC waiter.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);
	trace_xprtrdma_post_send(req, rc);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		rpcrdma_mr_recycle(mr);
	}

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so wake here in that case.
	 */
	rpcrdma_complete_rqst(req->rl_reply);
}