/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/**
 * This file contains Asynchronous System Trap (AST) handlers and related
 * LDLM request-processing routines.
 *
 * An AST is a callback issued on a lock when its state is changed. There are
 * several different types of ASTs (callbacks) registered for each lock:
 *
 * - completion AST: when a lock is enqueued by some process, but cannot be
 *   granted immediately due to other conflicting locks on the same resource,
 *   the completion AST is sent to notify the caller when the lock is
 *   finally granted;
 *
 * - blocking AST: when a lock is granted to some process, if another process
 *   enqueues a conflicting (blocking) lock on a resource, a blocking AST is
 *   sent to notify the holder(s) of the lock(s) of the conflicting lock
 *   request. The lock holder(s) must release their lock(s) on that resource in
 *   a timely manner or be evicted by the server.
 *
 * - glimpse AST: this is used when a process wants information about a lock
 *   (i.e. the lock value block (LVB)) but does not necessarily require holding
 *   the lock. If the resource is locked, the lock holder(s) are sent glimpse
 *   ASTs and the LVB is returned to the caller, and the lock holder(s) may
 *   CANCEL their lock(s) if they are idle. If the resource is not locked, the
 *   server may grant the lock.
 *
 * (A minimal sketch of wiring these callbacks up follows the includes below.)
 */
#define DEBUG_SUBSYSTEM S_LDLM

#include <lustre_errno.h>
#include <lustre_dlm.h>
#include <obd_class.h>

#include "ldlm_internal.h"
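
/*
 * Minimal sketch (not part of the original file) of how the three AST
 * types described in the header comment are wired up on the client.
 * The lcs_* slots are the same ones filled from struct ldlm_enqueue_info
 * in ldlm_cli_enqueue() below.  Guarded by a hypothetical
 * LDLM_USAGE_SKETCHES macro so it is compiled out by default.
 */
#ifdef LDLM_USAGE_SKETCHES
static int example_blocking_ast(struct ldlm_lock *lock,
				struct ldlm_lock_desc *desc,
				void *data, int flag)
{
	/* A real handler must release the lock in a timely manner or
	 * risk eviction by the server.
	 */
	return 0;
}

static const struct ldlm_callback_suite example_cbs = {
	.lcs_completion	= ldlm_completion_ast,	/* "lock is now granted" */
	.lcs_blocking	= example_blocking_ast,	/* "please drop your lock" */
	.lcs_glimpse	= NULL,			/* no LVB glimpsing here */
};
#endif /* LDLM_USAGE_SKETCHES */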
unsigned int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT;
module_param(ldlm_enqueue_min, uint, 0644);
MODULE_PARM_DESC(ldlm_enqueue_min, "lock enqueue timeout minimum");

/* Controls whether, on the client side, cached locks are cancelled before replay. */
unsigned int ldlm_cancel_unused_locks_before_replay = 1;
static void interrupted_completion_wait(void *data)
{
}

struct lock_wait_data {
	struct ldlm_lock *lwd_lock;
	__u32 lwd_conn_cnt;
};

struct ldlm_async_args {
	struct lustre_handle lock_handle;
};
/**
 * ldlm_request_bufsize
 *
 * @count: number of ldlm handles
 * @type:  LDLM opcode
 *
 * If opcode == LDLM_ENQUEUE, 1 slot is already occupied, so
 * LDLM_LOCKREQ_HANDLES - 1 slots are available.
 * Otherwise, LDLM_LOCKREQ_HANDLES slots are available.
 *
 * Return: size of the request buffer
 */
static int ldlm_request_bufsize(int count, int type)
{
	int avail = LDLM_LOCKREQ_HANDLES;

	if (type == LDLM_ENQUEUE)
		avail -= LDLM_ENQUEUE_CANCEL_OFF;

	if (count > avail)
		avail = (count - avail) * sizeof(struct lustre_handle);
	else
		avail = 0;

	return sizeof(struct ldlm_request) + avail;
}
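
/*
 * Worked example for the sizing above (illustrative sketch, compiled out
 * by default via the hypothetical LDLM_USAGE_SKETCHES macro).  An
 * LDLM_ENQUEUE request gives up LDLM_ENQUEUE_CANCEL_OFF of the
 * LDLM_LOCKREQ_HANDLES slots that come "for free" inside struct
 * ldlm_request, so only handles beyond that remainder enlarge the buffer.
 */
#ifdef LDLM_USAGE_SKETCHES
static int example_enqueue_bufsize(int ncancels)
{
	/* ncancels <= LDLM_LOCKREQ_HANDLES - LDLM_ENQUEUE_CANCEL_OFF
	 * costs nothing extra beyond sizeof(struct ldlm_request).
	 */
	return ldlm_request_bufsize(ncancels, LDLM_ENQUEUE);
}
#endif /* LDLM_USAGE_SKETCHES */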
static int ldlm_expired_completion_wait(void *data)
{
	struct lock_wait_data *lwd = data;
	struct ldlm_lock *lock = lwd->lwd_lock;
	struct obd_import *imp;
	struct obd_device *obd;

	if (!lock->l_conn_export) {
		static unsigned long next_dump, last_dump;

		LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
			   (s64)lock->l_last_activity,
			   (s64)(ktime_get_real_seconds() -
				 lock->l_last_activity));
		if (cfs_time_after(cfs_time_current(), next_dump)) {
			last_dump = next_dump;
			next_dump = cfs_time_shift(300);
			ldlm_namespace_dump(D_DLMTRACE,
					    ldlm_lock_to_ns(lock));
			if (last_dump == 0)
				libcfs_debug_dumplog();
		}
		return 0;
	}

	obd = lock->l_conn_export->exp_obd;
	imp = obd->u.cli.cl_import;
	ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
	LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago), entering recovery for %s@%s",
		   (s64)lock->l_last_activity,
		   (s64)(ktime_get_real_seconds() - lock->l_last_activity),
		   obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);

	return 0;
}
/**
 * Calculate the Completion timeout (covering enqueue, BL AST, data flush,
 * lock cancel, and their replies). Used for lock completion timeout on the
 * client side.
 *
 * \param[in] lock	lock which is waiting for the completion callback
 *
 * \retval	timeout in seconds to wait for the server reply
 */
/* We use the same basis for both server side and client side functions
 * from a single node.
 */
static unsigned int ldlm_cp_timeout(struct ldlm_lock *lock)
{
	unsigned int timeout;

	if (AT_OFF)
		return obd_timeout;

	/*
	 * Wait a long time for enqueue - server may have to callback a
	 * lock from another client. Server will evict the other client if it
	 * doesn't respond reasonably, and then give us the lock.
	 */
	timeout = at_get(ldlm_lock_to_ns_at(lock));
	return max(3 * timeout, ldlm_enqueue_min);
}
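
/*
 * Numeric sketch of the rule above (illustrative only): with an adaptive
 * estimate of, say, 10 seconds from at_get() and ldlm_enqueue_min left at
 * its default of OBD_TIMEOUT_DEFAULT, the wait is max(3 * 10,
 * ldlm_enqueue_min), i.e. the configured floor wins unless the adaptive
 * estimate grows large.
 */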
/**
 * Helper function for ldlm_completion_ast(), updating timings when lock is
 * actually granted.
 */
static int ldlm_completion_tail(struct ldlm_lock *lock, void *data)
{
	long delay;
	int result = 0;

	if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) {
		LDLM_DEBUG(lock, "client-side enqueue: destroyed");
		result = -EIO;
	} else if (data == ldlm_completion_ast) {
		LDLM_DEBUG(lock, "client-side enqueue: granted");
	} else {
		/* Take into AT only CP RPC, not immediately granted locks */
		delay = ktime_get_real_seconds() - lock->l_last_activity;
		LDLM_DEBUG(lock, "client-side enqueue: granted after %lds",
			   delay);

		/* Update our time estimate */
		at_measured(ldlm_lock_to_ns_at(lock), delay);
	}
	return result;
}
/**
 * Implementation of ->l_completion_ast() for a client, that doesn't wait
 * until lock is granted. Suitable for locks enqueued through ptlrpcd or
 * other threads that cannot block for long.
 */
int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
{
	if (flags == LDLM_FL_WAIT_NOREPROC) {
		LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
		return 0;
	}

	if (!(flags & LDLM_FL_BLOCKED_MASK)) {
		wake_up(&lock->l_waitq);
		return ldlm_completion_tail(lock, data);
	}

	LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, going forward");
	return 0;
}
EXPORT_SYMBOL(ldlm_completion_ast_async);
/**
 * Generic LDLM "completion" AST. This is called in several cases:
 *
 * - when a reply to an ENQUEUE RPC is received from the server
 *   (ldlm_cli_enqueue_fini()). Lock might be granted or not granted at
 *   this point (determined by flags);
 *
 * - when LDLM_CP_CALLBACK RPC comes to client to notify it that lock has
 *   been granted;
 *
 * - when ldlm_lock_match(LDLM_FL_LVB_READY) is about to wait until lock
 *   gets correct lvb;
 *
 * - to force all locks when resource is destroyed (cleanup_resource());
 *
 * - during lock conversion (not used currently).
 *
 * If lock is not granted in the first case, this function waits until second
 * or penultimate cases happen in some other thread.
 */
int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
{
	/* XXX ALLOCATE - 160 bytes */
	struct lock_wait_data lwd;
	struct obd_device *obd;
	struct obd_import *imp = NULL;
	struct l_wait_info lwi;
	__u32 timeout;
	int rc = 0;

	if (flags == LDLM_FL_WAIT_NOREPROC) {
		LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
		goto noreproc;
	}

	if (!(flags & LDLM_FL_BLOCKED_MASK)) {
		wake_up(&lock->l_waitq);
		return 0;
	}

	LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping");

noreproc:

	obd = class_exp2obd(lock->l_conn_export);

	/* if this is a local lock, then there is no import */
	if (obd)
		imp = obd->u.cli.cl_import;

	timeout = ldlm_cp_timeout(lock);

	lwd.lwd_lock = lock;
	lock->l_last_activity = ktime_get_real_seconds();

	if (ldlm_is_no_timeout(lock)) {
		LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
		lwi = LWI_INTR(interrupted_completion_wait, &lwd);
	} else {
		lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
				       ldlm_expired_completion_wait,
				       interrupted_completion_wait, &lwd);
	}

	if (imp) {
		spin_lock(&imp->imp_lock);
		lwd.lwd_conn_cnt = imp->imp_conn_cnt;
		spin_unlock(&imp->imp_lock);
	}

	if (OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
				 OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
		ldlm_set_fail_loc(lock);
		rc = -EINTR;
	} else {
		/* Go to sleep until the lock is granted or cancelled. */
		rc = l_wait_event(lock->l_waitq,
				  is_granted_or_cancelled(lock), &lwi);
	}

	if (rc) {
		LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
			   rc);
		return rc;
	}

	return ldlm_completion_tail(lock, data);
}
EXPORT_SYMBOL(ldlm_completion_ast);
static void failed_lock_cleanup(struct ldlm_namespace *ns,
				struct ldlm_lock *lock, int mode)
{
	int need_cancel = 0;

	/* Set a flag to prevent us from sending a CANCEL (bug 407) */
	lock_res_and_lock(lock);
	/* Check that lock is not granted or failed, we might race. */
	if ((lock->l_req_mode != lock->l_granted_mode) &&
	    !ldlm_is_failed(lock)) {
		/* Make sure that this lock will not be found by raced
		 * bl_ast and -EINVAL reply is sent to server anyways.
		 * bug 17645
		 */
		lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
				 LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
		need_cancel = 1;
	}
	unlock_res_and_lock(lock);

	if (need_cancel)
		LDLM_DEBUG(lock,
			   "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING");
	else
		LDLM_DEBUG(lock, "lock was granted or failed in race");

	/* XXX - HACK because we shouldn't call ldlm_lock_destroy()
	 *	 from llite/file.c/ll_file_flock().
	 */
	/* This code makes up for the fact that we do not have a blocking
	 * handler on a client for flock locks. As such, this is the place
	 * where we must completely kill failed locks (those that were
	 * interrupted and those that were waiting to be granted when the
	 * server evicted us).
	 */
	if (lock->l_resource->lr_type == LDLM_FLOCK) {
		lock_res_and_lock(lock);
		if (!ldlm_is_destroyed(lock)) {
			ldlm_resource_unlink_lock(lock);
			ldlm_lock_decref_internal_nolock(lock, mode);
			ldlm_lock_destroy_nolock(lock);
		}
		unlock_res_and_lock(lock);
	} else {
		ldlm_lock_decref_internal(lock, mode);
	}
}
/**
 * Finishing portion of client lock enqueue code.
 *
 * Called after receiving reply from server.
 */
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
			  enum ldlm_type type, __u8 with_policy,
			  enum ldlm_mode mode,
			  __u64 *flags, void *lvb, __u32 lvb_len,
			  const struct lustre_handle *lockh, int rc)
{
	struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
	int is_replay = *flags & LDLM_FL_REPLAY;
	struct ldlm_lock *lock;
	struct ldlm_reply *reply;
	int cleanup_phase = 1;

	lock = ldlm_handle2lock(lockh);
	/* ldlm_cli_enqueue is holding a reference on this lock. */
	if (!lock) {
		LASSERT(type == LDLM_FLOCK);
		return -ENOLCK;
	}

	LASSERTF(ergo(lvb_len != 0, lvb_len == lock->l_lvb_len),
		 "lvb_len = %d, l_lvb_len = %d\n", lvb_len, lock->l_lvb_len);

	if (rc != ELDLM_OK) {
		LASSERT(!is_replay);
		LDLM_DEBUG(lock, "client-side enqueue END (%s)",
			   rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");

		if (rc != ELDLM_LOCK_ABORTED)
			goto cleanup;
	}

	/* Before we return, swab the reply */
	reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
	if (!reply) {
		rc = -EPROTO;
		goto cleanup;
	}

	if (lvb_len > 0) {
		int size = 0;

		size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
					    RCL_SERVER);
		if (size < 0) {
			LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", size);
			rc = size;
			goto cleanup;
		} else if (unlikely(size > lvb_len)) {
			LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
				   lvb_len, size);
			rc = -EINVAL;
			goto cleanup;
		}
		lvb_len = size;
	}

	if (rc == ELDLM_LOCK_ABORTED) {
		if (lvb_len > 0 && lvb)
			rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
					   lvb, lvb_len);
		if (rc == 0)
			rc = ELDLM_LOCK_ABORTED;
		goto cleanup;
	}

	/* lock enqueued on the server */
	cleanup_phase = 0;

	lock_res_and_lock(lock);
	/* Key change rehash lock in per-export hash with new key */
	if (exp->exp_lock_hash) {
		/* In the function below, .hs_keycmp resolves to
		 * ldlm_export_lock_keycmp()
		 */
		/* coverity[overrun-buffer-val] */
		cfs_hash_rehash_key(exp->exp_lock_hash,
				    &lock->l_remote_handle,
				    &reply->lock_handle,
				    &lock->l_exp_hash);
	} else {
		lock->l_remote_handle = reply->lock_handle;
	}

	*flags = ldlm_flags_from_wire(reply->lock_flags);
	lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
					      LDLM_FL_INHERIT_MASK);
	unlock_res_and_lock(lock);

	CDEBUG(D_INFO, "local: %p, remote cookie: %#llx, flags: 0x%llx\n",
	       lock, reply->lock_handle.cookie, *flags);

	/* If enqueue returned a blocked lock but the completion handler has
	 * already run, then it fixed up the resource and we don't need to do it
	 * again.
	 */
	if ((*flags) & LDLM_FL_LOCK_CHANGED) {
		int newmode = reply->lock_desc.l_req_mode;

		LASSERT(!is_replay);
		if (newmode && newmode != lock->l_req_mode) {
			LDLM_DEBUG(lock, "server returned different mode %s",
				   ldlm_lockname[newmode]);
			lock->l_req_mode = newmode;
		}

		if (!ldlm_res_eq(&reply->lock_desc.l_resource.lr_name,
				 &lock->l_resource->lr_name)) {
			CDEBUG(D_INFO, "remote intent success, locking " DLDLMRES
			       " instead of " DLDLMRES "\n",
			       PLDLMRES(&reply->lock_desc.l_resource),
			       PLDLMRES(lock->l_resource));

			rc = ldlm_lock_change_resource(ns, lock,
					&reply->lock_desc.l_resource.lr_name);
			if (rc || !lock->l_resource) {
				rc = -ENOMEM;
				goto cleanup;
			}
			LDLM_DEBUG(lock, "client-side enqueue, new resource");
		}
		if (with_policy)
			if (!(type == LDLM_IBITS &&
			      !(exp_connect_flags(exp) & OBD_CONNECT_IBITS)))
				/* We assume lock type cannot change on server*/
				ldlm_convert_policy_to_local(exp,
						lock->l_resource->lr_type,
						&reply->lock_desc.l_policy_data,
						&lock->l_policy_data);
		if (type != LDLM_PLAIN)
			LDLM_DEBUG(lock,
				   "client-side enqueue, new policy data");
	}

	if ((*flags) & LDLM_FL_AST_SENT) {
		lock_res_and_lock(lock);
		lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
		unlock_res_and_lock(lock);
		LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
	}

	/* If the lock has already been granted by a completion AST, don't
	 * clobber the LVB with an older one.
	 */
	if (lvb_len > 0) {
		/* We must lock or a racing completion might update lvb without
		 * letting us know and we'll clobber the correct value.
		 * Cannot unlock after the check either, as that still leaves
		 * a tiny window for completion to get in
		 */
		lock_res_and_lock(lock);
		if (lock->l_req_mode != lock->l_granted_mode)
			rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
					   lock->l_lvb_data, lvb_len);
		unlock_res_and_lock(lock);
		if (rc < 0) {
			cleanup_phase = 1;
			goto cleanup;
		}
	}

	if (!is_replay) {
		rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
		if (lock->l_completion_ast) {
			int err = lock->l_completion_ast(lock, *flags, NULL);

			if (!rc)
				rc = err;
			if (rc)
				cleanup_phase = 1;
		}
	}

	if (lvb_len > 0 && lvb) {
		/* Copy the LVB here, and not earlier, because the completion
		 * AST (if any) can override what we got in the reply
		 */
		memcpy(lvb, lock->l_lvb_data, lvb_len);
	}

	LDLM_DEBUG(lock, "client-side enqueue END");
cleanup:
	if (cleanup_phase == 1 && rc)
		failed_lock_cleanup(ns, lock, mode);
	/* Put lock 2 times, the second reference is held by ldlm_cli_enqueue */
	LDLM_LOCK_PUT(lock);
	LDLM_LOCK_RELEASE(lock);
	return rc;
}
EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
/**
 * Estimate number of lock handles that would fit into request of given
 * size.  PAGE_SIZE-512 is to allow TCP/IP and LNET headers to fit into
 * a single page on the send/receive side. XXX: 512 should be changed to
 * more adequate value.
 */
static inline int ldlm_req_handles_avail(int req_size, int off)
{
	int avail;

	avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size;
	if (likely(avail >= 0))
		avail /= (int)sizeof(struct lustre_handle);
	else
		avail = 0;
	avail += LDLM_LOCKREQ_HANDLES - off;

	return avail;
}

static inline int ldlm_capsule_handles_avail(struct req_capsule *pill,
					     enum req_location loc,
					     int off)
{
	u32 size = req_capsule_msg_size(pill, loc);

	return ldlm_req_handles_avail(size, off);
}

static inline int ldlm_format_handles_avail(struct obd_import *imp,
					    const struct req_format *fmt,
					    enum req_location loc, int off)
{
	u32 size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);

	return ldlm_req_handles_avail(size, off);
}
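
/*
 * Usage sketch (illustrative only, compiled out via the hypothetical
 * LDLM_USAGE_SKETCHES macro): estimating how many cancel handles fit in
 * one CANCEL RPC, mirroring the call made from ldlm_cli_cancel_req()
 * below.
 */
#ifdef LDLM_USAGE_SKETCHES
static int example_cancel_handles_avail(struct obd_export *exp)
{
	return ldlm_format_handles_avail(class_exp2cliimp(exp),
					 &RQF_LDLM_CANCEL, RCL_CLIENT, 0);
}
#endif /* LDLM_USAGE_SKETCHES */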
/**
 * Cancel LRU locks and pack them into the enqueue request. Also pack the
 * given \a count locks in \a cancels.
 *
 * This is to be called by functions preparing their own requests that
 * might contain lists of locks to cancel in addition to actual operation
 * that needs to be performed.
 */
int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
		      int version, int opc, int canceloff,
		      struct list_head *cancels, int count)
{
	struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
	struct req_capsule *pill = &req->rq_pill;
	struct ldlm_request *dlm = NULL;
	int flags, avail, to_free, pack = 0;
	LIST_HEAD(head);
	int rc;

	if (!cancels)
		cancels = &head;
	if (ns_connect_cancelset(ns)) {
		/* Estimate the amount of available space in the request. */
		req_capsule_filled_sizes(pill, RCL_CLIENT);
		avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);

		flags = ns_connect_lru_resize(ns) ?
			LDLM_LRU_FLAG_LRUR_NO_WAIT : LDLM_LRU_FLAG_AGED;
		to_free = !ns_connect_lru_resize(ns) &&
			  opc == LDLM_ENQUEUE ? 1 : 0;

		/* Cancel LRU locks here _only_ if the server supports
		 * EARLY_CANCEL. Otherwise we have to send extra CANCEL
		 * RPC, which will make us slower.
		 */
		if (avail > count)
			count += ldlm_cancel_lru_local(ns, cancels, to_free,
						       avail - count, 0, flags);
		if (avail > count)
			pack = count;
		else
			pack = avail;
		req_capsule_set_size(pill, &RMF_DLM_REQ, RCL_CLIENT,
				     ldlm_request_bufsize(pack, opc));
	}

	rc = ptlrpc_request_pack(req, version, opc);
	if (rc) {
		ldlm_lock_list_put(cancels, l_bl_ast, count);
		return rc;
	}

	if (ns_connect_cancelset(ns)) {
		if (canceloff) {
			dlm = req_capsule_client_get(pill, &RMF_DLM_REQ);
			LASSERT(dlm);
			/* Skip first lock handler in ldlm_request_pack(),
			 * this method will increment @lock_count according
			 * to the lock handle amount actually written to
			 * the buffer.
			 */
			dlm->lock_count = canceloff;
		}
		/* Pack into the request @pack lock handles. */
		ldlm_cli_cancel_list(cancels, pack, req, 0);
		/* Prepare and send separate cancel RPC for others. */
		ldlm_cli_cancel_list(cancels, count - pack, NULL, 0);
	} else {
		ldlm_lock_list_put(cancels, l_bl_ast, count);
	}
	return 0;
}
EXPORT_SYMBOL(ldlm_prep_elc_req);
int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
			  struct list_head *cancels, int count)
{
	return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
				 LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
}
EXPORT_SYMBOL(ldlm_prep_enqueue_req);

static struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp,
						int lvb_len)
{
	struct ptlrpc_request *req;
	int rc;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
	if (!req)
		return ERR_PTR(-ENOMEM);

	rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
	if (rc) {
		ptlrpc_request_free(req);
		return ERR_PTR(rc);
	}

	req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len);
	ptlrpc_request_set_replen(req);
	return req;
}
/**
 * Client-side lock enqueue.
 *
 * If a request has some specific initialisation it is passed in \a reqp,
 * otherwise it is created in ldlm_cli_enqueue.
 *
 * Supports sync and async requests, pass \a async flag accordingly. If a
 * request was created in ldlm_cli_enqueue and it is the async request,
 * pass it to the caller in \a reqp.
 */
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
		     struct ldlm_enqueue_info *einfo,
		     const struct ldlm_res_id *res_id,
		     union ldlm_policy_data const *policy, __u64 *flags,
		     void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
		     struct lustre_handle *lockh, int async)
{
	struct ldlm_namespace *ns;
	struct ldlm_lock *lock;
	struct ldlm_request *body;
	int is_replay = *flags & LDLM_FL_REPLAY;
	int req_passed_in = 1;
	int rc, err;
	struct ptlrpc_request *req;

	ns = exp->exp_obd->obd_namespace;

	/* If we're replaying this lock, just check some invariants.
	 * If we're creating a new lock, get everything all setup nicely.
	 */
	if (is_replay) {
		lock = ldlm_handle2lock_long(lockh, 0);
		LASSERT(lock);
		LDLM_DEBUG(lock, "client-side enqueue START");
		LASSERT(exp == lock->l_conn_export);
	} else {
		const struct ldlm_callback_suite cbs = {
			.lcs_completion	= einfo->ei_cb_cp,
			.lcs_blocking	= einfo->ei_cb_bl,
			.lcs_glimpse	= einfo->ei_cb_gl
		};
		lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
					einfo->ei_mode, &cbs, einfo->ei_cbdata,
					lvb_len, lvb_type);
		if (IS_ERR(lock))
			return PTR_ERR(lock);
		/* for the local lock, add the reference */
		ldlm_lock_addref_internal(lock, einfo->ei_mode);
		ldlm_lock2handle(lock, lockh);
		if (policy)
			lock->l_policy_data = *policy;

		if (einfo->ei_type == LDLM_EXTENT) {
			/* extent lock without policy is a bug */
			if (!policy)
				LBUG();

			lock->l_req_extent = policy->l_extent;
		}
		LDLM_DEBUG(lock, "client-side enqueue START, flags %llx",
			   *flags);
	}

	lock->l_conn_export = exp;
	lock->l_export = NULL;
	lock->l_blocking_ast = einfo->ei_cb_bl;
	lock->l_flags |= (*flags & (LDLM_FL_NO_LRU | LDLM_FL_EXCL));
	lock->l_last_activity = ktime_get_real_seconds();

	/* lock not sent to server yet */
	if (!reqp || !*reqp) {
		req = ldlm_enqueue_pack(exp, lvb_len);
		if (IS_ERR(req)) {
			failed_lock_cleanup(ns, lock, einfo->ei_mode);
			LDLM_LOCK_RELEASE(lock);
			return PTR_ERR(req);
		}

		req_passed_in = 0;
		if (reqp)
			*reqp = req;
	} else {
		int len;

		req = *reqp;
		len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ,
					   RCL_CLIENT);
		LASSERTF(len >= sizeof(*body), "buflen[%d] = %d, not %d\n",
			 DLM_LOCKREQ_OFF, len, (int)sizeof(*body));
	}

	/* Dump lock data into the request buffer */
	body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
	ldlm_lock2desc(lock, &body->lock_desc);
	body->lock_flags = ldlm_flags_to_wire(*flags);
	body->lock_handle[0] = *lockh;

	if (async) {
		LASSERT(reqp);
		return 0;
	}

	LDLM_DEBUG(lock, "sending request");

	rc = ptlrpc_queue_wait(req);

	err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0,
				    einfo->ei_mode, flags, lvb, lvb_len,
				    lockh, rc);

	/* If ldlm_cli_enqueue_fini did not find the lock, we need to free
	 * one reference that we took
	 */
	if (err == -ENOLCK)
		LDLM_LOCK_RELEASE(lock);
	else
		rc = err;

	if (!req_passed_in && req) {
		ptlrpc_req_finished(req);
		if (reqp)
			*reqp = NULL;
	}

	return rc;
}
EXPORT_SYMBOL(ldlm_cli_enqueue);
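
/*
 * Usage sketch for ldlm_cli_enqueue() (illustrative only, compiled out
 * via the hypothetical LDLM_USAGE_SKETCHES macro; assumes a connected
 * export @exp and a filled-in resource id @res_id).  A synchronous
 * PLAIN-lock enqueue with the stock completion AST and a caller-supplied
 * blocking AST:
 */
#ifdef LDLM_USAGE_SKETCHES
static int example_enqueue(struct obd_export *exp,
			   const struct ldlm_res_id *res_id,
			   ldlm_blocking_callback blocking_cb)
{
	struct ldlm_enqueue_info einfo = {
		.ei_type	= LDLM_PLAIN,
		.ei_mode	= LCK_PR,
		.ei_cb_bl	= blocking_cb,
		.ei_cb_cp	= ldlm_completion_ast,
	};
	struct lustre_handle lockh = { 0 };
	__u64 flags = 0;

	/* async == 0: wait for the reply and run _fini() internally */
	return ldlm_cli_enqueue(exp, NULL, &einfo, res_id, NULL, &flags,
				NULL, 0, LVB_T_NONE, &lockh, 0);
}
#endif /* LDLM_USAGE_SKETCHES */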
/**
 * Cancel locks locally.
 * Returns:
 * \retval LDLM_FL_LOCAL_ONLY if there is no need for a CANCEL RPC to the server
 * \retval LDLM_FL_CANCELING otherwise;
 * \retval LDLM_FL_BL_AST if there is a need for a separate CANCEL RPC.
 */
static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
{
	__u64 rc = LDLM_FL_LOCAL_ONLY;

	if (lock->l_conn_export) {
		bool local_only;

		LDLM_DEBUG(lock, "client-side cancel");
		/* Set this flag to prevent others from getting new references*/
		lock_res_and_lock(lock);
		ldlm_set_cbpending(lock);
		local_only = !!(lock->l_flags &
				(LDLM_FL_LOCAL_ONLY | LDLM_FL_CANCEL_ON_BLOCK));
		ldlm_cancel_callback(lock);
		rc = ldlm_is_bl_ast(lock) ? LDLM_FL_BL_AST : LDLM_FL_CANCELING;
		unlock_res_and_lock(lock);

		if (local_only) {
			CDEBUG(D_DLMTRACE, "not sending request (at caller's instruction)\n");
			rc = LDLM_FL_LOCAL_ONLY;
		}
		ldlm_lock_cancel(lock);
	} else {
		LDLM_ERROR(lock, "Trying to cancel local lock");
		LBUG();
	}

	return rc;
}
/**
 * Pack \a count locks in \a head into ldlm_request buffer of request \a req.
 */
static void ldlm_cancel_pack(struct ptlrpc_request *req,
			     struct list_head *head, int count)
{
	struct ldlm_request *dlm;
	struct ldlm_lock *lock;
	int max, packed = 0;

	dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
	LASSERT(dlm);

	/* Check the room in the request buffer. */
	max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
		sizeof(struct ldlm_request);
	max /= sizeof(struct lustre_handle);
	max += LDLM_LOCKREQ_HANDLES;
	LASSERT(max >= dlm->lock_count + count);

	/* XXX: it would be better to pack lock handles grouped by resource
	 * so that the server cancel would call filter_lvbo_update() less
	 * frequently.
	 */
	list_for_each_entry(lock, head, l_bl_ast) {
		if (!count--)
			break;
		LASSERT(lock->l_conn_export);
		/* Pack the lock handle to the given request buffer. */
		LDLM_DEBUG(lock, "packing");
		dlm->lock_handle[dlm->lock_count++] = lock->l_remote_handle;
		packed++;
	}
	CDEBUG(D_DLMTRACE, "%d locks packed\n", packed);
}
/**
 * Prepare and send a batched cancel RPC. It will include \a count lock
 * handles of locks given in \a cancels list.
 */
static int ldlm_cli_cancel_req(struct obd_export *exp,
			       struct list_head *cancels,
			       int count, enum ldlm_cancel_flags flags)
{
	struct ptlrpc_request *req = NULL;
	struct obd_import *imp;
	int free, sent = 0;
	int rc = 0;

	LASSERT(exp);
	LASSERT(count > 0);

	CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val);

	if (CFS_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_RACE))
		return count;

	free = ldlm_format_handles_avail(class_exp2cliimp(exp),
					 &RQF_LDLM_CANCEL, RCL_CLIENT, 0);
	if (count > free)
		count = free;

	while (1) {
		imp = class_exp2cliimp(exp);
		if (!imp || imp->imp_invalid) {
			CDEBUG(D_DLMTRACE,
			       "skipping cancel on invalid import %p\n", imp);
			return count;
		}

		req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL);
		if (!req) {
			rc = -ENOMEM;
			goto out;
		}

		req_capsule_filled_sizes(&req->rq_pill, RCL_CLIENT);
		req_capsule_set_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT,
				     ldlm_request_bufsize(count, LDLM_CANCEL));

		rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CANCEL);
		if (rc) {
			ptlrpc_request_free(req);
			goto out;
		}

		req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
		req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
		ptlrpc_at_set_req_timeout(req);

		ldlm_cancel_pack(req, cancels, count);

		ptlrpc_request_set_replen(req);
		if (flags & LCF_ASYNC) {
			ptlrpcd_add_req(req);
			sent = count;
			goto out;
		}

		rc = ptlrpc_queue_wait(req);
		if (rc == LUSTRE_ESTALE) {
			CDEBUG(D_DLMTRACE, "client/server (nid %s) out of sync -- not fatal\n",
			       libcfs_nid2str(req->rq_import->imp_connection->c_peer.nid));
			rc = 0;
		} else if (rc == -ETIMEDOUT && /* check there was no reconnect*/
			   req->rq_import_generation == imp->imp_generation) {
			ptlrpc_req_finished(req);
			continue;
		} else if (rc != ELDLM_OK) {
			/* -ESHUTDOWN is common on umount */
			CDEBUG_LIMIT(rc == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
				     "Got rc %d from cancel RPC: canceling anyway\n",
				     rc);
			break;
		}
		sent = count;
		break;
	}

	ptlrpc_req_finished(req);
out:
	return sent ? sent : rc;
}
static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
{
	return &imp->imp_obd->obd_namespace->ns_pool;
}

/**
 * Update client's OBD pool related fields with new SLV and Limit from \a req.
 */
int ldlm_cli_update_pool(struct ptlrpc_request *req)
{
	struct obd_device *obd;
	__u64 new_slv;
	__u32 new_limit;

	if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
		     !imp_connect_lru_resize(req->rq_import))) {
		/*
		 * Do nothing for corner cases.
		 */
		return 0;
	}

	/* In some cases RPC may contain SLV and limit zeroed out. This
	 * is the case when server does not support LRU resize feature.
	 * This is also possible in some recovery cases when server-side
	 * reqs have no reference to the OBD export and thus access to
	 * server-side namespace is not possible.
	 */
	if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
	    lustre_msg_get_limit(req->rq_repmsg) == 0) {
		DEBUG_REQ(D_HA, req,
			  "Zero SLV or Limit found (SLV: %llu, Limit: %u)",
			  lustre_msg_get_slv(req->rq_repmsg),
			  lustre_msg_get_limit(req->rq_repmsg));
		return 0;
	}

	new_limit = lustre_msg_get_limit(req->rq_repmsg);
	new_slv = lustre_msg_get_slv(req->rq_repmsg);
	obd = req->rq_import->imp_obd;

	/* Set new SLV and limit in OBD fields to make them accessible
	 * to the pool thread. We do not access obd_namespace and pool
	 * directly here as there is no reliable way to make sure that
	 * they are still alive at cleanup time. Evil races are possible
	 * which may cause Oops at that time.
	 */
	write_lock(&obd->obd_pool_lock);
	obd->obd_pool_slv = new_slv;
	obd->obd_pool_limit = new_limit;
	write_unlock(&obd->obd_pool_lock);

	return 0;
}
EXPORT_SYMBOL(ldlm_cli_update_pool);
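
/*
 * Read-side counterpart sketch (illustrative only, compiled out via the
 * hypothetical LDLM_USAGE_SKETCHES macro): a consumer such as the pool
 * thread would pick these values up under the same rwlock.
 */
#ifdef LDLM_USAGE_SKETCHES
static void example_read_pool_fields(struct obd_device *obd,
				     __u64 *slv, __u32 *limit)
{
	read_lock(&obd->obd_pool_lock);
	*slv = obd->obd_pool_slv;
	*limit = obd->obd_pool_limit;
	read_unlock(&obd->obd_pool_lock);
}
#endif /* LDLM_USAGE_SKETCHES */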
/**
 * Client side lock cancel.
 *
 * Lock must not have any readers or writers by this time.
 */
int ldlm_cli_cancel(const struct lustre_handle *lockh,
		    enum ldlm_cancel_flags cancel_flags)
{
	struct obd_export *exp;
	int avail, flags, count = 1;
	__u64 rc = 0;
	struct ldlm_namespace *ns;
	struct ldlm_lock *lock;
	LIST_HEAD(cancels);

	lock = ldlm_handle2lock_long(lockh, 0);
	if (!lock) {
		LDLM_DEBUG_NOLOCK("lock is already being destroyed");
		return 0;
	}

	lock_res_and_lock(lock);
	/* Lock is being canceled and the caller doesn't want to wait */
	if (ldlm_is_canceling(lock) && (cancel_flags & LCF_ASYNC)) {
		unlock_res_and_lock(lock);
		LDLM_LOCK_RELEASE(lock);
		return 0;
	}

	ldlm_set_canceling(lock);
	unlock_res_and_lock(lock);

	rc = ldlm_cli_cancel_local(lock);
	if (rc == LDLM_FL_LOCAL_ONLY || cancel_flags & LCF_LOCAL) {
		LDLM_LOCK_RELEASE(lock);
		return 0;
	}
	/* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
	 * RPC which goes to canceld portal, so we can cancel other LRU locks
	 * here and send them all as one LDLM_CANCEL RPC.
	 */
	LASSERT(list_empty(&lock->l_bl_ast));
	list_add(&lock->l_bl_ast, &cancels);

	exp = lock->l_conn_export;
	if (exp_connect_cancelset(exp)) {
		avail = ldlm_format_handles_avail(class_exp2cliimp(exp),
						  &RQF_LDLM_CANCEL,
						  RCL_CLIENT, 0);
		LASSERT(avail > 0);

		ns = ldlm_lock_to_ns(lock);
		flags = ns_connect_lru_resize(ns) ?
			LDLM_LRU_FLAG_LRUR : LDLM_LRU_FLAG_AGED;
		count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
					       LCF_BL_AST, flags);
	}
	ldlm_cli_cancel_list(&cancels, count, NULL, cancel_flags);
	return 0;
}
EXPORT_SYMBOL(ldlm_cli_cancel);
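
/*
 * Usage sketch (illustrative only, compiled out via the hypothetical
 * LDLM_USAGE_SKETCHES macro): a caller that has already dropped its last
 * reader/writer reference cancels the lock via its handle.
 */
#ifdef LDLM_USAGE_SKETCHES
static int example_cancel(const struct lustre_handle *lockh)
{
	/* readers/writers must already be zero at this point */
	return ldlm_cli_cancel(lockh, 0);
}
#endif /* LDLM_USAGE_SKETCHES */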
/**
 * Locally cancel up to \a count locks in list \a cancels.
 * Return the number of cancelled locks.
 */
int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
			       enum ldlm_cancel_flags flags)
{
	LIST_HEAD(head);
	struct ldlm_lock *lock, *next;
	int left = 0, bl_ast = 0;
	__u64 rc;

	left = count;
	list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
		if (left-- == 0)
			break;

		if (flags & LCF_LOCAL) {
			rc = LDLM_FL_LOCAL_ONLY;
			ldlm_lock_cancel(lock);
		} else {
			rc = ldlm_cli_cancel_local(lock);
		}
		/* Until we have compound requests and can send LDLM_CANCEL
		 * requests batched with generic RPCs, we need to send cancels
		 * with the LDLM_FL_BL_AST flag in a separate RPC from
		 * the one being generated now.
		 */
		if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
			LDLM_DEBUG(lock, "Cancel lock separately");
			list_del_init(&lock->l_bl_ast);
			list_add(&lock->l_bl_ast, &head);
			bl_ast++;
			continue;
		}
		if (rc == LDLM_FL_LOCAL_ONLY) {
			/* CANCEL RPC should not be sent to server. */
			list_del_init(&lock->l_bl_ast);
			LDLM_LOCK_RELEASE(lock);
			count--;
		}
	}
	if (bl_ast > 0) {
		count -= bl_ast;
		ldlm_cli_cancel_list(&head, bl_ast, NULL, 0);
	}

	return count;
}
/**
 * Cancel as many locks as possible w/o sending any RPCs (e.g. to write back
 * dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g.
 * readahead requests, ...)
 */
static enum ldlm_policy_res
ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
			   int unused, int added, int count)
{
	enum ldlm_policy_res result = LDLM_POLICY_CANCEL_LOCK;

	/* don't check added & count since we want to process all locks
	 * from the unused list.
	 * It's fine to not take lock to access lock->l_resource since
	 * the lock has already been granted so it won't change.
	 */
	switch (lock->l_resource->lr_type) {
	case LDLM_EXTENT:
	case LDLM_IBITS:
		if (ns->ns_cancel && ns->ns_cancel(lock) != 0)
			break;
	default:
		result = LDLM_POLICY_SKIP_LOCK;
		lock_res_and_lock(lock);
		ldlm_set_skipped(lock);
		unlock_res_and_lock(lock);
		break;
	}

	return result;
}
/**
 * Callback function for LRU-resize policy. Decides whether to keep
 * \a lock in LRU for current LRU size \a unused, number added in current
 * scan \a added and number of locks to be preferably canceled \a count.
 *
 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
 *
 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
 */
static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
						    struct ldlm_lock *lock,
						    int unused, int added,
						    int count)
{
	unsigned long cur = cfs_time_current();
	struct ldlm_pool *pl = &ns->ns_pool;
	__u64 slv, lvf, lv;
	unsigned long la;

	/* Stop LRU processing when we reach past @count or have checked all
	 * locks in LRU.
	 */
	if (count && added >= count)
		return LDLM_POLICY_KEEP_LOCK;

	/*
	 * Regardless of the lock value, it doesn't make sense to keep a lock
	 * which has been unused for ns_max_age time.
	 */
	if (cfs_time_after(cfs_time_current(),
			   cfs_time_add(lock->l_last_used, ns->ns_max_age)))
		return LDLM_POLICY_CANCEL_LOCK;

	slv = ldlm_pool_get_slv(pl);
	lvf = ldlm_pool_get_lvf(pl);
	la = cfs_duration_sec(cfs_time_sub(cur, lock->l_last_used));
	lv = lvf * la * unused;

	/* Inform pool about current CLV to see it via debugfs. */
	ldlm_pool_set_clv(pl, lv);

	/* Stop when SLV has not yet come from server or lv is smaller than
	 * it is.
	 */
	if (slv == 0 || lv < slv)
		return LDLM_POLICY_KEEP_LOCK;

	return LDLM_POLICY_CANCEL_LOCK;
}
/**
 * Callback function for debugfs used policy. Makes decision whether to keep
 * \a lock in LRU for current LRU size \a unused, number added in current scan
 * \a added and number of locks to be preferably canceled \a count.
 *
 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
 *
 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
 */
static enum ldlm_policy_res ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
						      struct ldlm_lock *lock,
						      int unused, int added,
						      int count)
{
	/* Stop LRU processing when we reach past @count or have checked all
	 * locks in LRU.
	 */
	return (added >= count) ?
		LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
/**
 * Callback function for aged policy. Makes decision whether to keep \a lock in
 * LRU for current LRU size \a unused, number added in current scan \a added
 * and number of locks to be preferably canceled \a count.
 *
 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
 *
 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
 */
static enum ldlm_policy_res ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
						    struct ldlm_lock *lock,
						    int unused, int added,
						    int count)
{
	if ((added >= count) &&
	    time_before(cfs_time_current(),
			cfs_time_add(lock->l_last_used, ns->ns_max_age)))
		return LDLM_POLICY_KEEP_LOCK;

	return LDLM_POLICY_CANCEL_LOCK;
}

static enum ldlm_policy_res
ldlm_cancel_lrur_no_wait_policy(struct ldlm_namespace *ns,
				struct ldlm_lock *lock,
				int unused, int added,
				int count)
{
	enum ldlm_policy_res result;

	result = ldlm_cancel_lrur_policy(ns, lock, unused, added, count);
	if (result == LDLM_POLICY_KEEP_LOCK)
		return result;

	return ldlm_cancel_no_wait_policy(ns, lock, unused, added, count);
}
/**
 * Callback function for default policy. Makes decision whether to keep \a lock
 * in LRU for current LRU size \a unused, number added in current scan \a added
 * and number of locks to be preferably canceled \a count.
 *
 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
 *
 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
 */
static enum ldlm_policy_res
ldlm_cancel_default_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
			   int unused, int added, int count)
{
	/* Stop LRU processing when we reach past count or have checked all
	 * locks in LRU.
	 */
	return (added >= count) ?
		LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}

typedef enum ldlm_policy_res (*ldlm_cancel_lru_policy_t)(
						      struct ldlm_namespace *,
						      struct ldlm_lock *, int,
						      int, int);

static ldlm_cancel_lru_policy_t
ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
{
	if (flags & LDLM_LRU_FLAG_NO_WAIT)
		return ldlm_cancel_no_wait_policy;

	if (ns_connect_lru_resize(ns)) {
		if (flags & LDLM_LRU_FLAG_SHRINK)
			/* We kill passed number of old locks. */
			return ldlm_cancel_passed_policy;
		else if (flags & LDLM_LRU_FLAG_LRUR)
			return ldlm_cancel_lrur_policy;
		else if (flags & LDLM_LRU_FLAG_PASSED)
			return ldlm_cancel_passed_policy;
		else if (flags & LDLM_LRU_FLAG_LRUR_NO_WAIT)
			return ldlm_cancel_lrur_no_wait_policy;
	} else {
		if (flags & LDLM_LRU_FLAG_AGED)
			return ldlm_cancel_aged_policy;
	}

	return ldlm_cancel_default_policy;
}
/**
 * - Free space in LRU for \a count new locks,
 *   redundant unused locks are canceled locally;
 * - also cancel locally unused aged locks;
 * - do not cancel more than \a max locks;
 * - GET the found locks and add them into the \a cancels list.
 *
 * A client lock can be added to the l_bl_ast list only when it is
 * marked LDLM_FL_CANCELING. Otherwise, somebody is already doing
 * CANCEL. There are the following use cases:
 * ldlm_cancel_resource_local(), ldlm_cancel_lru_local() and
 * ldlm_cli_cancel(), which check and set this flag properly. As any
 * attempt to cancel a lock relies on this flag, the l_bl_ast list is
 * accessed later without any special locking.
 *
 * Calling policies for enabled LRU resize:
 * ----------------------------------------
 * flags & LDLM_LRU_FLAG_LRUR - use LRU resize policy (SLV from server) to
 *				cancel not more than \a count locks;
 *
 * flags & LDLM_LRU_FLAG_PASSED - cancel \a count number of old locks (located
 *				  at the beginning of LRU list);
 *
 * flags & LDLM_LRU_FLAG_SHRINK - cancel not more than \a count locks according
 *				  to memory pressure policy function;
 *
 * flags & LDLM_LRU_FLAG_AGED - cancel \a count locks according to
 *				"aged policy".
 *
 * flags & LDLM_LRU_FLAG_NO_WAIT - cancel as many unused locks as possible
 *				   (typically before replaying locks) w/o
 *				   sending any RPCs or waiting for any
 *				   outstanding RPC to complete.
 */
static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
				 struct list_head *cancels, int count, int max,
				 int flags)
{
	ldlm_cancel_lru_policy_t pf;
	struct ldlm_lock *lock, *next;
	int added = 0, unused, remained;
	int no_wait = flags & (LDLM_LRU_FLAG_NO_WAIT | LDLM_LRU_FLAG_LRUR_NO_WAIT);

	spin_lock(&ns->ns_lock);
	unused = ns->ns_nr_unused;
	remained = unused;

	if (!ns_connect_lru_resize(ns))
		count += unused - ns->ns_max_unused;

	pf = ldlm_cancel_lru_policy(ns, flags);
	LASSERT(pf);

	while (!list_empty(&ns->ns_unused_list)) {
		enum ldlm_policy_res result;
		time_t last_use = 0;

		/* all unused locks */
		if (remained-- <= 0)
			break;

		/* For any flags, stop scanning if @max is reached. */
		if (max && added >= max)
			break;

		list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
					 l_lru) {
			/* No locks which got blocking requests. */
			LASSERT(!ldlm_is_bl_ast(lock));

			if (no_wait && ldlm_is_skipped(lock))
				/* already processed */
				continue;

			last_use = lock->l_last_used;
			if (last_use == cfs_time_current())
				continue;

			/* Somebody is already doing CANCEL. No need for this
			 * lock in LRU, do not traverse it again.
			 */
			if (!ldlm_is_canceling(lock))
				break;

			ldlm_lock_remove_from_lru_nolock(lock);
		}
		if (&lock->l_lru == &ns->ns_unused_list)
			break;

		LDLM_LOCK_GET(lock);
		spin_unlock(&ns->ns_lock);
		lu_ref_add(&lock->l_reference, __func__, current);

		/* Pass the lock through the policy filter and see if it
		 * should stay in LRU.
		 *
		 * Even for shrinker policy we stop scanning if
		 * we find a lock that should stay in the cache.
		 * We should take into account lock age anyway
		 * as a new lock is a valuable resource even if
		 * it has a low weight.
		 *
		 * That is, for shrinker policy we drop only
		 * old locks, but additionally choose them by
		 * their weight. Big extent locks will stay in
		 * the cache.
		 */
		result = pf(ns, lock, unused, added, count);
		if (result == LDLM_POLICY_KEEP_LOCK) {
			lu_ref_del(&lock->l_reference,
				   __func__, current);
			LDLM_LOCK_RELEASE(lock);
			spin_lock(&ns->ns_lock);
			break;
		}
		if (result == LDLM_POLICY_SKIP_LOCK) {
			lu_ref_del(&lock->l_reference,
				   __func__, current);
			LDLM_LOCK_RELEASE(lock);
			spin_lock(&ns->ns_lock);
			continue;
		}

		lock_res_and_lock(lock);
		/* Check flags again under the lock. */
		if (ldlm_is_canceling(lock) ||
		    (ldlm_lock_remove_from_lru_check(lock, last_use) == 0)) {
			/* Another thread is removing lock from LRU, or
			 * somebody is already doing CANCEL, or there
			 * is a blocking request which will send cancel
			 * by itself, or the lock is no longer unused or
			 * the lock has been used since the pf() call and
			 * pages could be put under it.
			 */
			unlock_res_and_lock(lock);
			lu_ref_del(&lock->l_reference,
				   __func__, current);
			LDLM_LOCK_RELEASE(lock);
			spin_lock(&ns->ns_lock);
			continue;
		}
		LASSERT(!lock->l_readers && !lock->l_writers);

		/* If we have chosen to cancel this lock voluntarily, we
		 * better send cancel notification to server, so that it
		 * frees appropriate state. This might lead to a race
		 * where while we are doing cancel here, server is also
		 * silently cancelling this lock.
		 */
		ldlm_clear_cancel_on_block(lock);

		/* Setting the CBPENDING flag is a little misleading,
		 * but prevents an important race; namely, once
		 * CBPENDING is set, the lock can accumulate no more
		 * readers/writers. Since readers and writers are
		 * already zero here, ldlm_lock_decref() won't see
		 * this flag and call l_blocking_ast
		 */
		lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;

		/* We can't re-add to l_lru as it confuses the
		 * refcounting in ldlm_lock_remove_from_lru() if an AST
		 * arrives after we drop lr_lock below. We use l_bl_ast
		 * and can't use l_pending_chain as it is used both on
		 * server and client nevertheless bug 5666 says it is
		 * used only on server
		 */
		LASSERT(list_empty(&lock->l_bl_ast));
		list_add(&lock->l_bl_ast, cancels);
		unlock_res_and_lock(lock);
		lu_ref_del(&lock->l_reference, __func__, current);
		spin_lock(&ns->ns_lock);
		added++;
		unused--;
	}
	spin_unlock(&ns->ns_lock);
	return added;
}
int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
			  struct list_head *cancels, int count, int max,
			  enum ldlm_cancel_flags cancel_flags, int flags)
{
	int added;

	added = ldlm_prepare_lru_list(ns, cancels, count, max, flags);
	if (added <= 0)
		return added;
	return ldlm_cli_cancel_list_local(cancels, added, cancel_flags);
}
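
/*
 * Usage sketch (illustrative only, compiled out via the hypothetical
 * LDLM_USAGE_SKETCHES macro), mirroring the replay path at the end of
 * this file: drain as many unused locks as possible locally, without
 * waiting on in-flight RPCs.
 */
#ifdef LDLM_USAGE_SKETCHES
static int example_drain_lru(struct ldlm_namespace *ns)
{
	LIST_HEAD(cancels);

	return ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
				     LCF_LOCAL, LDLM_LRU_FLAG_NO_WAIT);
}
#endif /* LDLM_USAGE_SKETCHES */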
/**
 * Cancel at least \a nr locks from given namespace LRU.
 *
 * When called with LCF_ASYNC the blocking callback will be handled
 * in a thread and this function will return after the thread has been
 * asked to call the callback. When called without LCF_ASYNC the blocking
 * callback will be performed in this function.
 */
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
		    enum ldlm_cancel_flags cancel_flags,
		    int flags)
{
	LIST_HEAD(cancels);
	int count, rc;

	/* Just prepare the list of locks, do not actually cancel them yet.
	 * Locks are cancelled later in a separate thread.
	 */
	count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
	rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
	if (rc == 0)
		return count;

	return 0;
}
/**
 * Find and cancel locally unused locks found on resource, matched to the
 * given policy, mode. GET the found locks and add them into the \a cancels
 * list.
 */
int ldlm_cancel_resource_local(struct ldlm_resource *res,
			       struct list_head *cancels,
			       union ldlm_policy_data *policy,
			       enum ldlm_mode mode, __u64 lock_flags,
			       enum ldlm_cancel_flags cancel_flags,
			       void *opaque)
{
	struct ldlm_lock *lock;
	int count = 0;

	lock_res(res);
	list_for_each_entry(lock, &res->lr_granted, l_res_link) {
		if (opaque && lock->l_ast_data != opaque) {
			LDLM_ERROR(lock, "data %p doesn't match opaque %p",
				   lock->l_ast_data, opaque);
			continue;
		}

		if (lock->l_readers || lock->l_writers)
			continue;

		/* If somebody is already doing CANCEL, or blocking AST came,
		 * skip this lock.
		 */
		if (ldlm_is_bl_ast(lock) || ldlm_is_canceling(lock))
			continue;

		if (lockmode_compat(lock->l_granted_mode, mode))
			continue;

		/* If policy is given and this is IBITS lock, add to list only
		 * those locks that match by policy.
		 */
		if (policy && (lock->l_resource->lr_type == LDLM_IBITS) &&
		    !(lock->l_policy_data.l_inodebits.bits &
		      policy->l_inodebits.bits))
			continue;

		/* See CBPENDING comment in ldlm_cancel_lru */
		lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
				 lock_flags;

		LASSERT(list_empty(&lock->l_bl_ast));
		list_add(&lock->l_bl_ast, cancels);
		LDLM_LOCK_GET(lock);
		count++;
	}
	unlock_res(res);

	return ldlm_cli_cancel_list_local(cancels, count, cancel_flags);
}
EXPORT_SYMBOL(ldlm_cancel_resource_local);
/**
 * Cancel client-side locks from a list and send/prepare cancel RPCs to the
 * server.
 * If \a req is NULL, send CANCEL request to server with handles of locks
 * in the \a cancels. If EARLY_CANCEL is not supported, send CANCEL requests
 * separately per lock.
 * If \a req is not NULL, put handles of locks in \a cancels into the request
 * buffer at the offset \a off.
 * Destroy \a cancels at the end.
 */
int ldlm_cli_cancel_list(struct list_head *cancels, int count,
			 struct ptlrpc_request *req,
			 enum ldlm_cancel_flags flags)
{
	struct ldlm_lock *lock;
	int res = 0;

	if (list_empty(cancels) || count == 0)
		return 0;

	/* XXX: requests (both batched and not) could be sent in parallel.
	 * Usually it is enough to have just 1 RPC, but it is possible that
	 * there are too many locks to be cancelled in LRU or on a resource.
	 * It would also speed up the case when the server does not support
	 * the feature.
	 */
	while (count) {
		LASSERT(!list_empty(cancels));
		lock = list_entry(cancels->next, struct ldlm_lock, l_bl_ast);
		LASSERT(lock->l_conn_export);

		if (exp_connect_cancelset(lock->l_conn_export)) {
			res = count;
			if (req)
				ldlm_cancel_pack(req, cancels, count);
			else
				res = ldlm_cli_cancel_req(lock->l_conn_export,
							  cancels, count,
							  flags);
		} else {
			res = ldlm_cli_cancel_req(lock->l_conn_export,
						  cancels, 1, flags);
		}

		if (res < 0) {
			CDEBUG_LIMIT(res == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
				     "%s: %d\n", __func__, res);
			res = count;
		}

		count -= res;
		ldlm_lock_list_put(cancels, l_bl_ast, res);
	}
	LASSERT(count == 0);
	return 0;
}
EXPORT_SYMBOL(ldlm_cli_cancel_list);
/**
 * Cancel all locks on a resource that have 0 readers/writers.
 *
 * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
 * to notify the server.
 */
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
				    const struct ldlm_res_id *res_id,
				    union ldlm_policy_data *policy,
				    enum ldlm_mode mode,
				    enum ldlm_cancel_flags flags,
				    void *opaque)
{
	struct ldlm_resource *res;
	LIST_HEAD(cancels);
	int count;
	int rc;

	res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
	if (IS_ERR(res)) {
		/* This is not a problem. */
		CDEBUG(D_INFO, "No resource %llu\n", res_id->name[0]);
		return 0;
	}

	LDLM_RESOURCE_ADDREF(res);
	count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
					   0, flags | LCF_BL_AST, opaque);
	rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
	if (rc != ELDLM_OK)
		CERROR("canceling unused lock " DLDLMRES ": rc = %d\n",
		       PLDLMRES(res), rc);

	LDLM_RESOURCE_DELREF(res);
	ldlm_resource_putref(res);
	return 0;
}
EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
struct ldlm_cli_cancel_arg {
	int	lc_flags;
	void   *lc_opaque;
};

static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs,
				       struct cfs_hash_bd *bd,
				       struct hlist_node *hnode, void *arg)
{
	struct ldlm_resource *res = cfs_hash_object(hs, hnode);
	struct ldlm_cli_cancel_arg *lc = arg;

	ldlm_cli_cancel_unused_resource(ldlm_res_to_ns(res), &res->lr_name,
					NULL, LCK_MINMODE,
					lc->lc_flags, lc->lc_opaque);
	/* must return 0 for hash iteration */
	return 0;
}

/**
 * Cancel all locks on a namespace (or a specific resource, if given)
 * that have 0 readers/writers.
 *
 * If flags & LCF_LOCAL, throw the locks away without trying
 * to notify the server.
 */
int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
			   const struct ldlm_res_id *res_id,
			   enum ldlm_cancel_flags flags, void *opaque)
{
	struct ldlm_cli_cancel_arg arg = {
		.lc_flags	= flags,
		.lc_opaque	= opaque,
	};

	if (!ns)
		return ELDLM_OK;

	if (res_id)
		return ldlm_cli_cancel_unused_resource(ns, res_id, NULL,
						       LCK_MINMODE, flags,
						       opaque);

	cfs_hash_for_each_nolock(ns->ns_rs_hash,
				 ldlm_cli_hash_cancel_unused, &arg, 0);
	return ELDLM_OK;
}
EXPORT_SYMBOL(ldlm_cli_cancel_unused);
/* Lock iterators. */

static int ldlm_resource_foreach(struct ldlm_resource *res,
				 ldlm_iterator_t iter, void *closure)
{
	struct list_head *tmp, *next;
	struct ldlm_lock *lock;
	int rc = LDLM_ITER_CONTINUE;

	if (!res)
		return LDLM_ITER_CONTINUE;

	lock_res(res);
	list_for_each_safe(tmp, next, &res->lr_granted) {
		lock = list_entry(tmp, struct ldlm_lock, l_res_link);

		if (iter(lock, closure) == LDLM_ITER_STOP) {
			rc = LDLM_ITER_STOP;
			goto out;
		}
	}

	list_for_each_safe(tmp, next, &res->lr_waiting) {
		lock = list_entry(tmp, struct ldlm_lock, l_res_link);

		if (iter(lock, closure) == LDLM_ITER_STOP) {
			rc = LDLM_ITER_STOP;
			goto out;
		}
	}
out:
	unlock_res(res);
	return rc;
}

struct iter_helper_data {
	ldlm_iterator_t iter;
	void *closure;
};

static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
{
	struct iter_helper_data *helper = closure;

	return helper->iter(lock, helper->closure);
}

static int ldlm_res_iter_helper(struct cfs_hash *hs, struct cfs_hash_bd *bd,
				struct hlist_node *hnode, void *arg)
{
	struct ldlm_resource *res = cfs_hash_object(hs, hnode);

	return ldlm_resource_foreach(res, ldlm_iter_helper, arg) ==
	       LDLM_ITER_STOP;
}

static void ldlm_namespace_foreach(struct ldlm_namespace *ns,
				   ldlm_iterator_t iter, void *closure)
{
	struct iter_helper_data helper = {
		.iter		= iter,
		.closure	= closure,
	};

	cfs_hash_for_each_nolock(ns->ns_rs_hash,
				 ldlm_res_iter_helper, &helper, 0);
}

/* non-blocking function to manipulate a lock whose cb_data is being put away.
 * return  0:  found no resource
 *       > 0:  must be LDLM_ITER_STOP/LDLM_ITER_CONTINUE.
 *       < 0:  errors
 */
int ldlm_resource_iterate(struct ldlm_namespace *ns,
			  const struct ldlm_res_id *res_id,
			  ldlm_iterator_t iter, void *data)
{
	struct ldlm_resource *res;
	int rc;

	LASSERTF(ns, "must pass in namespace\n");

	res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
	if (IS_ERR(res))
		return 0;

	LDLM_RESOURCE_ADDREF(res);
	rc = ldlm_resource_foreach(res, iter, data);
	LDLM_RESOURCE_DELREF(res);
	ldlm_resource_putref(res);
	return rc;
}
EXPORT_SYMBOL(ldlm_resource_iterate);
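
/*
 * Usage sketch (illustrative only, compiled out via the hypothetical
 * LDLM_USAGE_SKETCHES macro): counting the locks on one resource with
 * the iterator API above.
 */
#ifdef LDLM_USAGE_SKETCHES
static int example_count_iter(struct ldlm_lock *lock, void *closure)
{
	(*(int *)closure)++;
	return LDLM_ITER_CONTINUE;
}

static int example_count_locks(struct ldlm_namespace *ns,
			       const struct ldlm_res_id *res_id)
{
	int count = 0;

	ldlm_resource_iterate(ns, res_id, example_count_iter, &count);
	return count;
}
#endif /* LDLM_USAGE_SKETCHES */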
static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
{
	struct list_head *list = closure;

	/* we use l_pending_chain here, because it's unused on clients. */
	LASSERTF(list_empty(&lock->l_pending_chain),
		 "lock %p next %p prev %p\n",
		 lock, &lock->l_pending_chain.next,
		 &lock->l_pending_chain.prev);
	/* bug 9573: don't replay locks left after eviction, or
	 * bug 17614: locks being actively cancelled. Get a reference
	 * on a lock so that it does not disappear under us (e.g. due to cancel)
	 */
	if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_BL_DONE))) {
		list_add(&lock->l_pending_chain, list);
		LDLM_LOCK_GET(lock);
	}

	return LDLM_ITER_CONTINUE;
}
static int replay_lock_interpret(const struct lu_env *env,
				 struct ptlrpc_request *req,
				 struct ldlm_async_args *aa, int rc)
{
	struct ldlm_lock *lock;
	struct ldlm_reply *reply;
	struct obd_export *exp;

	atomic_dec(&req->rq_import->imp_replay_inflight);
	if (rc != ELDLM_OK)
		goto out;

	reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
	if (!reply) {
		rc = -EPROTO;
		goto out;
	}

	lock = ldlm_handle2lock(&aa->lock_handle);
	if (!lock) {
		CERROR("received replay ack for unknown local cookie %#llx remote cookie %#llx from server %s id %s\n",
		       aa->lock_handle.cookie, reply->lock_handle.cookie,
		       req->rq_export->exp_client_uuid.uuid,
		       libcfs_id2str(req->rq_peer));
		rc = -ESTALE;
		goto out;
	}

	/* Key change rehash lock in per-export hash with new key */
	exp = req->rq_export;
	if (exp && exp->exp_lock_hash) {
		/* In the function below, .hs_keycmp resolves to
		 * ldlm_export_lock_keycmp()
		 */
		/* coverity[overrun-buffer-val] */
		cfs_hash_rehash_key(exp->exp_lock_hash,
				    &lock->l_remote_handle,
				    &reply->lock_handle,
				    &lock->l_exp_hash);
	} else {
		lock->l_remote_handle = reply->lock_handle;
	}

	LDLM_DEBUG(lock, "replayed lock:");
	ptlrpc_import_recovery_state_machine(req->rq_import);
	LDLM_LOCK_PUT(lock);
out:
	if (rc != ELDLM_OK)
		ptlrpc_connect_import(req->rq_import);

	return rc;
}
static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
{
	struct ptlrpc_request *req;
	struct ldlm_async_args *aa;
	struct ldlm_request *body;
	int flags;

	/* Bug 11974: Do not replay a lock which is actively being canceled */
	if (ldlm_is_bl_done(lock)) {
		LDLM_DEBUG(lock, "Not replaying canceled lock:");
		return 0;
	}

	/* If this is reply-less callback lock, we cannot replay it, since
	 * server might have long dropped it, but notification of that event was
	 * lost by network. (and server granted conflicting lock already)
	 */
	if (ldlm_is_cancel_on_block(lock)) {
		LDLM_DEBUG(lock, "Not replaying reply-less lock:");
		ldlm_lock_cancel(lock);
		return 0;
	}

	/*
	 * If granted mode matches the requested mode, this lock is granted.
	 *
	 * If they differ, but we have a granted mode, then we were granted
	 * one mode and now want another: ergo, converting.
	 *
	 * If we haven't been granted anything and are on a resource list,
	 * then we're blocked/waiting.
	 *
	 * If we haven't been granted anything and we're NOT on a resource list,
	 * then we haven't got a reply yet and don't have a known disposition.
	 * This happens whenever a lock enqueue is the request that triggers
	 * recovery.
	 */
	if (lock->l_granted_mode == lock->l_req_mode)
		flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
	else if (lock->l_granted_mode)
		flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
	else if (!list_empty(&lock->l_res_link))
		flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
	else
		flags = LDLM_FL_REPLAY;

	req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE,
					LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
	if (!req)
		return -ENOMEM;

	/* We're part of recovery, so don't wait for it. */
	req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS;

	body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
	ldlm_lock2desc(lock, &body->lock_desc);
	body->lock_flags = ldlm_flags_to_wire(flags);

	ldlm_lock2handle(lock, &body->lock_handle[0]);
	if (lock->l_lvb_len > 0)
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_ENQUEUE_LVB);
	req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
			     lock->l_lvb_len);
	ptlrpc_request_set_replen(req);
	/* notify the server we've replayed all requests.
	 * also, we mark the request to be put on a dedicated
	 * queue to be processed after all request replays.
	 * bug 6063
	 */
	lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);

	LDLM_DEBUG(lock, "replaying lock:");

	atomic_inc(&req->rq_import->imp_replay_inflight);
	BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	aa->lock_handle = body->lock_handle[0];
	req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
	ptlrpcd_add_req(req);

	return 0;
}
/**
 * Cancel as many unused locks as possible before replay. Since we are
 * in recovery, we can't wait for any outstanding RPCs to send any RPC
 * by itself.
 *
 * Called only in recovery before replaying locks. There is no need to
 * replay locks that are unused. Since the clients may hold thousands of
 * cached unused locks, dropping the unused locks can greatly reduce the
 * load on the servers at recovery time.
 */
static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
{
	int canceled;
	LIST_HEAD(cancels);

	CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before replay for namespace %s (%d)\n",
	       ldlm_ns_name(ns), ns->ns_nr_unused);

	/* We don't need to care whether or not LRU resize is enabled
	 * because the LDLM_LRU_FLAG_NO_WAIT policy doesn't use the
	 * count parameter.
	 */
	canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
					 LCF_LOCAL, LDLM_LRU_FLAG_NO_WAIT);

	CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
	       canceled, ldlm_ns_name(ns));
}
int ldlm_replay_locks(struct obd_import *imp)
{
	struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
	LIST_HEAD(list);
	struct ldlm_lock *lock, *next;
	int rc = 0;

	LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);

	/* don't replay locks if import failed recovery */
	if (imp->imp_vbr_failed)
		return 0;

	/* ensure this doesn't fall to 0 before all have been queued */
	atomic_inc(&imp->imp_replay_inflight);

	if (ldlm_cancel_unused_locks_before_replay)
		ldlm_cancel_unused_locks_for_replay(ns);

	ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);

	list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
		list_del_init(&lock->l_pending_chain);
		if (rc) {
			LDLM_LOCK_RELEASE(lock);
			continue; /* or try to do the rest? */
		}
		rc = replay_one_lock(imp, lock);
		LDLM_LOCK_RELEASE(lock);
	}

	atomic_dec(&imp->imp_replay_inflight);

	return rc;
}