/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/**
 * This file contains Asynchronous System Trap (AST) handlers and related
 * LDLM request-processing routines.
 *
 * An AST is a callback issued on a lock when its state is changed. There are
 * several different types of ASTs (callbacks) registered for each lock:
 *
 * - completion AST: when a lock is enqueued by some process, but cannot be
 *   granted immediately due to other conflicting locks on the same resource,
 *   the completion AST is sent to notify the caller when the lock is
 *   eventually granted
 *
 * - blocking AST: when a lock is granted to some process, if another process
 *   enqueues a conflicting (blocking) lock on a resource, a blocking AST is
 *   sent to notify the holder(s) of the lock(s) of the conflicting lock
 *   request. The lock holder(s) must release their lock(s) on that resource in
 *   a timely manner or be evicted by the server.
 *
 * - glimpse AST: this is used when a process wants information about a lock
 *   (i.e. the lock value block (LVB)) but does not necessarily require holding
 *   the lock. If the resource is locked, the lock holder(s) are sent glimpse
 *   ASTs and the LVB is returned to the caller, and lock holder(s) may CANCEL
 *   their lock(s) if they are idle. If the resource is not locked, the server
 *   may grant the lock.
 */
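
/*
 * Illustrative sketch (not part of this file's logic): callers typically
 * supply these ASTs through a struct ldlm_enqueue_info, which
 * ldlm_cli_enqueue() below copies into a callback suite.  The handler
 * names here are hypothetical:
 *
 *	struct ldlm_enqueue_info einfo = {
 *		.ei_type  = LDLM_EXTENT,
 *		.ei_mode  = LCK_PR,
 *		.ei_cb_bl = my_blocking_ast,     (conflicting lock enqueued)
 *		.ei_cb_cp = ldlm_completion_ast, (lock eventually granted)
 *		.ei_cb_gl = my_glimpse_ast,      (LVB information requested)
 *	};
 */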

#define DEBUG_SUBSYSTEM S_LDLM

#include "../include/lustre_dlm.h"
#include "../include/obd_class.h"
#include "../include/obd.h"

#include "ldlm_internal.h"

int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT;
module_param(ldlm_enqueue_min, int, 0644);
MODULE_PARM_DESC(ldlm_enqueue_min, "lock enqueue timeout minimum");

/* On the client side: whether cached locks are canceled before replay */
unsigned int ldlm_cancel_unused_locks_before_replay = 1;

static void interrupted_completion_wait(void *data)
{
}

struct lock_wait_data {
        struct ldlm_lock *lwd_lock;
        __u32        lwd_conn_cnt;
};

struct ldlm_async_args {
        struct lustre_handle lock_handle;
};

static int ldlm_expired_completion_wait(void *data)
{
        struct lock_wait_data *lwd = data;
        struct ldlm_lock *lock = lwd->lwd_lock;
        struct obd_import *imp;
        struct obd_device *obd;

        if (lock->l_conn_export == NULL) {
                static unsigned long next_dump, last_dump;

                LCONSOLE_WARN("lock timed out (enqueued at %lld, %llds ago)\n",
                              (s64)lock->l_last_activity,
                              (s64)(ktime_get_real_seconds() -
                                    lock->l_last_activity));
                LDLM_DEBUG(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
                           (s64)lock->l_last_activity,
                           (s64)(ktime_get_real_seconds() -
                                 lock->l_last_activity));
                if (cfs_time_after(cfs_time_current(), next_dump)) {
                        last_dump = next_dump;
                        next_dump = cfs_time_shift(300);
                        ldlm_namespace_dump(D_DLMTRACE,
                                            ldlm_lock_to_ns(lock));
                        if (last_dump == 0)
                                libcfs_debug_dumplog();
                }
                return 0;
        }

        obd = lock->l_conn_export->exp_obd;
        imp = obd->u.cli.cl_import;
        ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
        LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago), entering recovery for %s@%s",
                   (s64)lock->l_last_activity,
                   (s64)(ktime_get_real_seconds() - lock->l_last_activity),
                   obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);

        return 0;
}

/* We use the same basis for both server side and client side functions
   from a single node. */
static int ldlm_get_enq_timeout(struct ldlm_lock *lock)
{
        int timeout = at_get(ldlm_lock_to_ns_at(lock));

        if (AT_OFF)
                return obd_timeout / 2;
        /* Since these are non-updating timeouts, we should be conservative.
           It would be nice to have some kind of "early reply" mechanism for
           lock callbacks too... */
        timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */
        return max(timeout, ldlm_enqueue_min);
}
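
/*
 * Worked example (illustrative, assuming adaptive timeouts are enabled):
 * with an AT estimate of 10s, the inflated value is min(at_max, 10 + 5) =
 * 15s, and the result is max(15, ldlm_enqueue_min).  With the default
 * ldlm_enqueue_min of OBD_TIMEOUT_DEFAULT (100s in common builds, an
 * assumption), the enqueue timeout works out to 100s, which
 * ldlm_completion_ast() below then doubles.
 */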

/**
 * Helper function for ldlm_completion_ast(), updating timings when lock is
 * actually granted.
 */
static int ldlm_completion_tail(struct ldlm_lock *lock)
{
        long delay;
        int  result;

        if (lock->l_flags & (LDLM_FL_DESTROYED | LDLM_FL_FAILED)) {
                LDLM_DEBUG(lock, "client-side enqueue: destroyed");
                result = -EIO;
        } else {
                delay = ktime_get_real_seconds() - lock->l_last_activity;
                LDLM_DEBUG(lock, "client-side enqueue: granted after %lds",
                           delay);

                /* Update our time estimate */
                at_measured(ldlm_lock_to_ns_at(lock),
                            delay);
                result = 0;
        }
        return result;
}

/**
 * Implementation of ->l_completion_ast() for a client that doesn't wait
 * until the lock is granted. Suitable for locks enqueued through ptlrpcd or
 * other threads that cannot block for long.
 */
int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
{
        if (flags == LDLM_FL_WAIT_NOREPROC) {
                LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
                return 0;
        }

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                wake_up(&lock->l_waitq);
                return ldlm_completion_tail(lock);
        }

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, going forward");
        return 0;
}
EXPORT_SYMBOL(ldlm_completion_ast_async);

/**
 * Generic LDLM "completion" AST. This is called in several cases:
 *
 *     - when a reply to an ENQUEUE RPC is received from the server
 *       (ldlm_cli_enqueue_fini()). Lock might be granted or not granted at
 *       this point (determined by flags);
 *
 *     - when LDLM_CP_CALLBACK RPC comes to the client to notify it that the
 *       lock has been granted;
 *
 *     - when ldlm_lock_match(LDLM_FL_LVB_READY) is about to wait until the
 *       lock gets a correct lvb;
 *
 *     - to force all locks when a resource is destroyed (cleanup_resource());
 *
 *     - during lock conversion (not used currently).
 *
 * If the lock is not granted in the first case, this function waits until
 * the second or the penultimate case happens in some other thread.
 */
int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
{
        /* XXX ALLOCATE - 160 bytes */
        struct lock_wait_data lwd;
        struct obd_device *obd;
        struct obd_import *imp = NULL;
        struct l_wait_info lwi;
        __u32 timeout;
        int rc = 0;

        if (flags == LDLM_FL_WAIT_NOREPROC) {
                LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
                goto noreproc;
        }

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                wake_up(&lock->l_waitq);
                return 0;
        }

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping");

noreproc:

        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, then there is no import */
        if (obd != NULL)
                imp = obd->u.cli.cl_import;

        /* Wait a long time for enqueue - server may have to callback a
           lock from another client.  Server will evict the other client if it
           doesn't respond reasonably, and then give us the lock. */
        timeout = ldlm_get_enq_timeout(lock) * 2;

        lwd.lwd_lock = lock;

        if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {
                LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
                lwi = LWI_INTR(interrupted_completion_wait, &lwd);
        } else {
                lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
                                       ldlm_expired_completion_wait,
                                       interrupted_completion_wait, &lwd);
        }

        if (imp != NULL) {
                spin_lock(&imp->imp_lock);
                lwd.lwd_conn_cnt = imp->imp_conn_cnt;
                spin_unlock(&imp->imp_lock);
        }

        if (OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
                                 OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
                lock->l_flags |= LDLM_FL_FAIL_LOC;
                rc = -EINTR;
        } else {
                /* Go to sleep until the lock is granted or cancelled. */
                rc = l_wait_event(lock->l_waitq,
                                  is_granted_or_cancelled(lock), &lwi);
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                return rc;
        }

        return ldlm_completion_tail(lock);
}
EXPORT_SYMBOL(ldlm_completion_ast);

static void failed_lock_cleanup(struct ldlm_namespace *ns,
                                struct ldlm_lock *lock, int mode)
{
        int need_cancel = 0;

        /* Set a flag to prevent us from sending a CANCEL (bug 407) */
        lock_res_and_lock(lock);
        /* Check that lock is not granted or failed, we might race. */
        if ((lock->l_req_mode != lock->l_granted_mode) &&
            !(lock->l_flags & LDLM_FL_FAILED)) {
                /* Make sure that this lock will not be found by raced
                 * bl_ast and -EINVAL reply is sent to server anyway.
                 * bug 17645 */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
                                 LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
                need_cancel = 1;
        }
        unlock_res_and_lock(lock);

        if (need_cancel)
                LDLM_DEBUG(lock,
                           "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING");
        else
                LDLM_DEBUG(lock, "lock was granted or failed in race");

        ldlm_lock_decref_internal(lock, mode);

        /* XXX - HACK because we shouldn't call ldlm_lock_destroy()
         *       from llite/file.c/ll_file_flock(). */
        /* This code makes up for the fact that we do not have a blocking
         * handler on the client for flock locks. As such, this is the place
         * where we must completely kill failed locks (both interrupted locks
         * and those that were waiting to be granted when the server evicted
         * us). */
        if (lock->l_resource->lr_type == LDLM_FLOCK) {
                lock_res_and_lock(lock);
                ldlm_resource_unlink_lock(lock);
                ldlm_lock_destroy_nolock(lock);
                unlock_res_and_lock(lock);
        }
}

/**
 * Finishing portion of client lock enqueue code.
 *
 * Called after receiving reply from server.
 */
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                          ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
                          __u64 *flags, void *lvb, __u32 lvb_len,
                          struct lustre_handle *lockh, int rc)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        int is_replay = *flags & LDLM_FL_REPLAY;
        struct ldlm_lock *lock;
        struct ldlm_reply *reply;
        int cleanup_phase = 1;
        int size = 0;

        lock = ldlm_handle2lock(lockh);
        /* ldlm_cli_enqueue is holding a reference on this lock. */
        if (!lock) {
                LASSERT(type == LDLM_FLOCK);
                return -ENOLCK;
        }

        LASSERTF(ergo(lvb_len != 0, lvb_len == lock->l_lvb_len),
                 "lvb_len = %d, l_lvb_len = %d\n", lvb_len, lock->l_lvb_len);

        if (rc != ELDLM_OK) {
                LASSERT(!is_replay);
                LDLM_DEBUG(lock, "client-side enqueue END (%s)",
                           rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");

                if (rc != ELDLM_LOCK_ABORTED)
                        goto cleanup;
        }

        /* Before we return, swab the reply */
        reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
        if (reply == NULL) {
                rc = -EPROTO;
                goto cleanup;
        }

        if (lvb_len != 0) {
                LASSERT(lvb != NULL);

                size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
                                            RCL_SERVER);
                if (size < 0) {
                        LDLM_ERROR(lock, "Failed to get lvb_len, rc = %d", size);
                        rc = size;
                        goto cleanup;
                } else if (unlikely(size > lvb_len)) {
                        LDLM_ERROR(lock, "Replied LVB is larger than expected, expected = %d, replied = %d",
                                   lvb_len, size);
                        rc = -EINVAL;
                        goto cleanup;
                }
        }

        if (rc == ELDLM_LOCK_ABORTED) {
                if (lvb_len != 0)
                        rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
                                           lvb, size);
                if (rc == 0)
                        rc = ELDLM_LOCK_ABORTED;
                goto cleanup;
        }

        /* lock enqueued on the server */
        cleanup_phase = 0;

        lock_res_and_lock(lock);
        /* Key change: rehash the lock in the per-export hash with the new key */
        if (exp->exp_lock_hash) {
                /* In the function below, .hs_keycmp resolves to
                 * ldlm_export_lock_keycmp() */
                /* coverity[overrun-buffer-val] */
                cfs_hash_rehash_key(exp->exp_lock_hash,
                                    &lock->l_remote_handle,
                                    &reply->lock_handle,
                                    &lock->l_exp_hash);
        } else {
                lock->l_remote_handle = reply->lock_handle;
        }

        *flags = ldlm_flags_from_wire(reply->lock_flags);
        lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
                                              LDLM_INHERIT_FLAGS);
        /* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
         * to wait with no timeout as well */
        lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
                                              LDLM_FL_NO_TIMEOUT);
        unlock_res_and_lock(lock);

        CDEBUG(D_INFO, "local: %p, remote cookie: %#llx, flags: 0x%llx\n",
               lock, reply->lock_handle.cookie, *flags);

        /* If enqueue returned a blocked lock but the completion handler has
         * already run, then it fixed up the resource and we don't need to do it
         * again. */
        if ((*flags) & LDLM_FL_LOCK_CHANGED) {
                int newmode = reply->lock_desc.l_req_mode;

                LASSERT(!is_replay);
                if (newmode && newmode != lock->l_req_mode) {
                        LDLM_DEBUG(lock, "server returned different mode %s",
                                   ldlm_lockname[newmode]);
                        lock->l_req_mode = newmode;
                }

                if (!ldlm_res_eq(&reply->lock_desc.l_resource.lr_name,
                                 &lock->l_resource->lr_name)) {
                        CDEBUG(D_INFO, "remote intent success, locking "DLDLMRES
                                       " instead of "DLDLMRES"\n",
                               PLDLMRES(&reply->lock_desc.l_resource),
                               PLDLMRES(lock->l_resource));

                        rc = ldlm_lock_change_resource(ns, lock,
                                        &reply->lock_desc.l_resource.lr_name);
                        if (rc || lock->l_resource == NULL) {
                                rc = -ENOMEM;
                                goto cleanup;
                        }
                        LDLM_DEBUG(lock, "client-side enqueue, new resource");
                }
                if (with_policy)
                        if (!(type == LDLM_IBITS &&
                              !(exp_connect_flags(exp) & OBD_CONNECT_IBITS)))
                              /* We assume lock type cannot change on server */
                                ldlm_convert_policy_to_local(exp,
                                                lock->l_resource->lr_type,
                                                &reply->lock_desc.l_policy_data,
                                                &lock->l_policy_data);
                if (type != LDLM_PLAIN)
                        LDLM_DEBUG(lock,
                                   "client-side enqueue, new policy data");
        }

        if ((*flags) & LDLM_FL_AST_SENT ||
            /* Cancel extent locks as soon as possible on a liblustre client,
             * because it cannot handle asynchronous ASTs robustly (see
             * bug 7311). */
            (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
                lock_res_and_lock(lock);
                lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
                unlock_res_and_lock(lock);
                LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
        }

        /* If the lock has already been granted by a completion AST, don't
         * clobber the LVB with an older one. */
        if (lvb_len != 0) {
                /* We must lock or a racing completion might update lvb without
                 * letting us know and we'll clobber the correct value.
                 * Cannot unlock after the check either, as that still leaves
                 * a tiny window for completion to get in */
                lock_res_and_lock(lock);
                if (lock->l_req_mode != lock->l_granted_mode)
                        rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
                                           lock->l_lvb_data, size);
                unlock_res_and_lock(lock);
                if (rc < 0) {
                        cleanup_phase = 1;
                        goto cleanup;
                }
        }

        if (!is_replay) {
                rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
                if (lock->l_completion_ast != NULL) {
                        int err = lock->l_completion_ast(lock, *flags, NULL);

                        if (!rc)
                                rc = err;
                        if (rc)
                                cleanup_phase = 1;
                }
        }

        if (lvb_len && lvb != NULL) {
                /* Copy the LVB here, and not earlier, because the completion
                 * AST (if any) can override what we got in the reply */
                memcpy(lvb, lock->l_lvb_data, lvb_len);
        }

        LDLM_DEBUG(lock, "client-side enqueue END");
cleanup:
        if (cleanup_phase == 1 && rc)
                failed_lock_cleanup(ns, lock, mode);
        /* Put the lock twice; the second reference is held by ldlm_cli_enqueue */
        LDLM_LOCK_PUT(lock);
        LDLM_LOCK_RELEASE(lock);
        return rc;
}
EXPORT_SYMBOL(ldlm_cli_enqueue_fini);

/**
 * Estimate the number of lock handles that would fit into a request of the
 * given size.  PAGE_SIZE-512 is to allow TCP/IP and LNET headers to fit into
 * a single page on the send/receive side. XXX: 512 should be changed to a
 * more adequate value.
 */
static inline int ldlm_req_handles_avail(int req_size, int off)
{
        int avail;

        avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size;
        if (likely(avail >= 0))
                avail /= (int)sizeof(struct lustre_handle);
        else
                avail = 0;
        avail += LDLM_LOCKREQ_HANDLES - off;

        return avail;
}
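
/*
 * Worked example (illustrative numbers): on a client with 4 KiB pages,
 * req_size = 1024 and off = 0, and assuming LDLM_MAXREQSIZE exceeds
 * PAGE_CACHE_SIZE - 512, avail = (4096 - 512) - 1024 = 2560 bytes.  With
 * an 8-byte struct lustre_handle that is 320 handles, plus the
 * LDLM_LOCKREQ_HANDLES slots already present in every ldlm_request.
 */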

static inline int ldlm_capsule_handles_avail(struct req_capsule *pill,
                                             enum req_location loc,
                                             int off)
{
        int size = req_capsule_msg_size(pill, loc);

        return ldlm_req_handles_avail(size, off);
}

static inline int ldlm_format_handles_avail(struct obd_import *imp,
                                            const struct req_format *fmt,
                                            enum req_location loc, int off)
{
        int size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);

        return ldlm_req_handles_avail(size, off);
}

/**
 * Cancel LRU locks and pack them into the enqueue request. Also pack the
 * given \a count locks from the \a cancels list.
 *
 * This is to be called by functions preparing their own requests that
 * might contain lists of locks to cancel in addition to the actual
 * operation that needs to be performed.
 */
int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
                      int version, int opc, int canceloff,
                      struct list_head *cancels, int count)
{
        struct ldlm_namespace   *ns = exp->exp_obd->obd_namespace;
        struct req_capsule      *pill = &req->rq_pill;
        struct ldlm_request     *dlm = NULL;
        int flags, avail, to_free, pack = 0;
        LIST_HEAD(head);
        int rc;

        if (cancels == NULL)
                cancels = &head;
        if (ns_connect_cancelset(ns)) {
                /* Estimate the amount of available space in the request. */
                req_capsule_filled_sizes(pill, RCL_CLIENT);
                avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);

                flags = ns_connect_lru_resize(ns) ?
                        LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
                to_free = !ns_connect_lru_resize(ns) &&
                          opc == LDLM_ENQUEUE ? 1 : 0;

                /* Cancel LRU locks here _only_ if the server supports
                 * EARLY_CANCEL. Otherwise we have to send an extra CANCEL
                 * RPC, which will make us slower. */
                if (avail > count)
                        count += ldlm_cancel_lru_local(ns, cancels, to_free,
                                                       avail - count, 0, flags);
                if (avail > count)
                        pack = count;
                else
                        pack = avail;
                req_capsule_set_size(pill, &RMF_DLM_REQ, RCL_CLIENT,
                                     ldlm_request_bufsize(pack, opc));
        }

        rc = ptlrpc_request_pack(req, version, opc);
        if (rc) {
                ldlm_lock_list_put(cancels, l_bl_ast, count);
                return rc;
        }

        if (ns_connect_cancelset(ns)) {
                if (canceloff) {
                        dlm = req_capsule_client_get(pill, &RMF_DLM_REQ);
                        LASSERT(dlm);
                        /* Skip the first lock handle in ldlm_request_pack();
                         * this method will increment @lock_count according
                         * to the number of lock handles actually written to
                         * the buffer. */
                        dlm->lock_count = canceloff;
                }
                /* Pack into the request @pack lock handles. */
                ldlm_cli_cancel_list(cancels, pack, req, 0);
                /* Prepare and send separate cancel RPC for others. */
                ldlm_cli_cancel_list(cancels, count - pack, NULL, 0);
        } else {
                ldlm_lock_list_put(cancels, l_bl_ast, count);
        }
        return 0;
}
EXPORT_SYMBOL(ldlm_prep_elc_req);

int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
                          struct list_head *cancels, int count)
{
        return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
                                 LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
}
EXPORT_SYMBOL(ldlm_prep_enqueue_req);

/**
 * Client-side lock enqueue.
 *
 * If a request has some specific initialisation it is passed in \a reqp,
 * otherwise it is created in ldlm_cli_enqueue.
 *
 * Supports sync and async requests; pass the \a async flag accordingly. If a
 * request was created in ldlm_cli_enqueue and it is an async request, it is
 * passed back to the caller in \a reqp.
 */
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
                     struct ldlm_enqueue_info *einfo,
                     const struct ldlm_res_id *res_id,
                     ldlm_policy_data_t const *policy, __u64 *flags,
                     void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
                     struct lustre_handle *lockh, int async)
{
        struct ldlm_namespace *ns;
        struct ldlm_lock      *lock;
        struct ldlm_request   *body;
        int                 is_replay = *flags & LDLM_FL_REPLAY;
        int                 req_passed_in = 1;
        int                 rc, err;
        struct ptlrpc_request *req;

        LASSERT(exp != NULL);

        ns = exp->exp_obd->obd_namespace;

        /* If we're replaying this lock, just check some invariants.
         * If we're creating a new lock, set everything up nicely. */
        if (is_replay) {
                lock = ldlm_handle2lock_long(lockh, 0);
                LASSERT(lock != NULL);
                LDLM_DEBUG(lock, "client-side enqueue START");
                LASSERT(exp == lock->l_conn_export);
        } else {
                const struct ldlm_callback_suite cbs = {
                        .lcs_completion = einfo->ei_cb_cp,
                        .lcs_blocking   = einfo->ei_cb_bl,
                        .lcs_glimpse    = einfo->ei_cb_gl
                };
                lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
                                        einfo->ei_mode, &cbs, einfo->ei_cbdata,
                                        lvb_len, lvb_type);
                if (lock == NULL)
                        return -ENOMEM;
                /* for the local lock, add the reference */
                ldlm_lock_addref_internal(lock, einfo->ei_mode);
                ldlm_lock2handle(lock, lockh);
                if (policy != NULL)
                        lock->l_policy_data = *policy;

                if (einfo->ei_type == LDLM_EXTENT)
                        lock->l_req_extent = policy->l_extent;
                LDLM_DEBUG(lock, "client-side enqueue START, flags %llx\n",
                           *flags);
        }

        lock->l_conn_export = exp;
        lock->l_export = NULL;
        lock->l_blocking_ast = einfo->ei_cb_bl;
        lock->l_flags |= (*flags & (LDLM_FL_NO_LRU | LDLM_FL_EXCL));

        /* lock not sent to server yet */

        if (reqp == NULL || *reqp == NULL) {
                req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
                                                &RQF_LDLM_ENQUEUE,
                                                LUSTRE_DLM_VERSION,
                                                LDLM_ENQUEUE);
                if (req == NULL) {
                        failed_lock_cleanup(ns, lock, einfo->ei_mode);
                        LDLM_LOCK_RELEASE(lock);
                        return -ENOMEM;
                }
                req_passed_in = 0;
                if (reqp)
                        *reqp = req;
        } else {
                int len;

                req = *reqp;
                len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ,
                                           RCL_CLIENT);
                LASSERTF(len >= sizeof(*body), "buflen[%d] = %d, not %d\n",
                         DLM_LOCKREQ_OFF, len, (int)sizeof(*body));
        }

        /* Dump lock data into the request buffer */
        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = ldlm_flags_to_wire(*flags);
        body->lock_handle[0] = *lockh;

        /* Continue as normal. */
        if (!req_passed_in) {
                if (lvb_len > 0)
                        req_capsule_extend(&req->rq_pill,
                                           &RQF_LDLM_ENQUEUE_LVB);
                req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
                                     lvb_len);
                ptlrpc_request_set_replen(req);
        }

        /*
         * Liblustre client doesn't get extent locks, except for O_APPEND case
         * where [0, OBD_OBJECT_EOF] lock is taken, or truncate, where
         * [i_size, OBD_OBJECT_EOF] lock is taken.
         */
        LASSERT(ergo(LIBLUSTRE_CLIENT, einfo->ei_type != LDLM_EXTENT ||
                     policy->l_extent.end == OBD_OBJECT_EOF));

        if (async) {
                LASSERT(reqp != NULL);
                return 0;
        }

        LDLM_DEBUG(lock, "sending request");

        rc = ptlrpc_queue_wait(req);

        err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0,
                                    einfo->ei_mode, flags, lvb, lvb_len,
                                    lockh, rc);

        /* If ldlm_cli_enqueue_fini did not find the lock, we need to free
         * one reference that we took */
        if (err == -ENOLCK)
                LDLM_LOCK_RELEASE(lock);
        else
                rc = err;

        if (!req_passed_in && req != NULL) {
                ptlrpc_req_finished(req);
                if (reqp)
                        *reqp = NULL;
        }

        return rc;
}
EXPORT_SYMBOL(ldlm_cli_enqueue);
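
/*
 * Minimal synchronous usage sketch (hypothetical caller, not taken from this
 * file; einfo, res_id and policy setup are elided):
 *
 *	struct lustre_handle lockh;
 *	__u64 flags = 0;
 *	int rc;
 *
 *	rc = ldlm_cli_enqueue(exp, NULL, &einfo, &res_id, &policy, &flags,
 *			      NULL, 0, LVB_T_NONE, &lockh, 0);
 *	if (rc == ELDLM_OK)
 *		ldlm_lock_decref(&lockh, einfo.ei_mode);
 */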

/**
 * Cancel locks locally.
 * Returns:
 * \retval LDLM_FL_LOCAL_ONLY if there is no need for a CANCEL RPC to the server;
 * \retval LDLM_FL_CANCELING otherwise;
 * \retval LDLM_FL_BL_AST if there is a need for a separate CANCEL RPC.
 */
static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
{
        __u64 rc = LDLM_FL_LOCAL_ONLY;

        if (lock->l_conn_export) {
                bool local_only;

                LDLM_DEBUG(lock, "client-side cancel");
                /* Set this flag to prevent others from getting new references */
                lock_res_and_lock(lock);
                lock->l_flags |= LDLM_FL_CBPENDING;
                local_only = !!(lock->l_flags &
                                (LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
                ldlm_cancel_callback(lock);
                rc = (lock->l_flags & LDLM_FL_BL_AST) ?
                        LDLM_FL_BL_AST : LDLM_FL_CANCELING;
                unlock_res_and_lock(lock);

                if (local_only) {
                        CDEBUG(D_DLMTRACE, "not sending request (at caller's instruction)\n");
                        rc = LDLM_FL_LOCAL_ONLY;
                }
                ldlm_lock_cancel(lock);
        } else {
                LDLM_ERROR(lock, "Trying to cancel local lock");
                LBUG();
        }

        return rc;
}

/**
 * Pack \a count locks in \a head into ldlm_request buffer of request \a req.
 */
static void ldlm_cancel_pack(struct ptlrpc_request *req,
                             struct list_head *head, int count)
{
        struct ldlm_request *dlm;
        struct ldlm_lock *lock;
        int max, packed = 0;

        dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        LASSERT(dlm != NULL);

        /* Check the room in the request buffer. */
        max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
                sizeof(struct ldlm_request);
        max /= sizeof(struct lustre_handle);
        max += LDLM_LOCKREQ_HANDLES;
        LASSERT(max >= dlm->lock_count + count);

        /* XXX: it would be better to pack lock handles grouped by resource,
         * so that the server cancel would call filter_lvbo_update() less
         * frequently. */
        list_for_each_entry(lock, head, l_bl_ast) {
                if (!count--)
                        break;
                LASSERT(lock->l_conn_export);
                /* Pack the lock handle to the given request buffer. */
                LDLM_DEBUG(lock, "packing");
                dlm->lock_handle[dlm->lock_count++] = lock->l_remote_handle;
                packed++;
        }
        CDEBUG(D_DLMTRACE, "%d locks packed\n", packed);
}

/**
 * Prepare and send a batched cancel RPC. It will include \a count lock
 * handles of the locks given in the \a cancels list. */
static int ldlm_cli_cancel_req(struct obd_export *exp,
                               struct list_head *cancels,
                               int count, ldlm_cancel_flags_t flags)
{
        struct ptlrpc_request *req = NULL;
        struct obd_import *imp;
        int free, sent = 0;
        int rc = 0;

        LASSERT(exp != NULL);
        LASSERT(count > 0);

        CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val);

        if (CFS_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_RACE))
                return count;

        free = ldlm_format_handles_avail(class_exp2cliimp(exp),
                                         &RQF_LDLM_CANCEL, RCL_CLIENT, 0);
        if (count > free)
                count = free;

        while (1) {
                imp = class_exp2cliimp(exp);
                if (imp == NULL || imp->imp_invalid) {
                        CDEBUG(D_DLMTRACE,
                               "skipping cancel on invalid import %p\n", imp);
                        return count;
                }

                req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL);
                if (req == NULL) {
                        rc = -ENOMEM;
                        goto out;
                }

                req_capsule_filled_sizes(&req->rq_pill, RCL_CLIENT);
                req_capsule_set_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT,
                                     ldlm_request_bufsize(count, LDLM_CANCEL));

                rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CANCEL);
                if (rc) {
                        ptlrpc_request_free(req);
                        goto out;
                }

                req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
                req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
                ptlrpc_at_set_req_timeout(req);

                ldlm_cancel_pack(req, cancels, count);

                ptlrpc_request_set_replen(req);
                if (flags & LCF_ASYNC) {
                        ptlrpcd_add_req(req);
                        sent = count;
                        goto out;
                }

                rc = ptlrpc_queue_wait(req);
                if (rc == LUSTRE_ESTALE) {
                        CDEBUG(D_DLMTRACE, "client/server (nid %s) out of sync -- not fatal\n",
                               libcfs_nid2str(req->rq_import->
                                              imp_connection->c_peer.nid));
                        rc = 0;
                } else if (rc == -ETIMEDOUT && /* check there was no reconnect */
                           req->rq_import_generation == imp->imp_generation) {
                        ptlrpc_req_finished(req);
                        continue;
                } else if (rc != ELDLM_OK) {
                        /* -ESHUTDOWN is common on umount */
                        CDEBUG_LIMIT(rc == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
                                     "Got rc %d from cancel RPC: canceling anyway\n",
                                     rc);
                        break;
                }
                sent = count;
                break;
        }

        ptlrpc_req_finished(req);
out:
        return sent ? sent : rc;
}

static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
{
        LASSERT(imp != NULL);
        return &imp->imp_obd->obd_namespace->ns_pool;
}

/**
 * Update client's OBD pool related fields with new SLV and Limit from \a req.
 */
int ldlm_cli_update_pool(struct ptlrpc_request *req)
{
        struct obd_device *obd;
        __u64 new_slv;
        __u32 new_limit;

        if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
                     !imp_connect_lru_resize(req->rq_import))) {
                /*
                 * Do nothing for corner cases.
                 */
                return 0;
        }

        /* In some cases an RPC may contain SLV and limit zeroed out. This
         * is the case when the server does not support the LRU resize
         * feature. This is also possible in some recovery cases when
         * server-side reqs have no reference to the OBD export and thus
         * access to the server-side namespace is not possible. */
        if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
            lustre_msg_get_limit(req->rq_repmsg) == 0) {
                DEBUG_REQ(D_HA, req,
                          "Zero SLV or Limit found (SLV: %llu, Limit: %u)",
                          lustre_msg_get_slv(req->rq_repmsg),
                          lustre_msg_get_limit(req->rq_repmsg));
                return 0;
        }

        new_limit = lustre_msg_get_limit(req->rq_repmsg);
        new_slv = lustre_msg_get_slv(req->rq_repmsg);
        obd = req->rq_import->imp_obd;

        /* Set new SLV and limit in OBD fields to make them accessible
         * to the pool thread. We do not access obd_namespace and pool
         * directly here as there is no reliable way to make sure that
         * they are still alive at cleanup time. Evil races are possible
         * which may cause Oops at that time. */
        write_lock(&obd->obd_pool_lock);
        obd->obd_pool_slv = new_slv;
        obd->obd_pool_limit = new_limit;
        write_unlock(&obd->obd_pool_lock);

        return 0;
}
EXPORT_SYMBOL(ldlm_cli_update_pool);

/**
 * Client side lock cancel.
 *
 * Lock must not have any readers or writers by this time.
 */
int ldlm_cli_cancel(struct lustre_handle *lockh,
                    ldlm_cancel_flags_t cancel_flags)
{
        struct obd_export *exp;
        int avail, flags, count = 1;
        __u64 rc = 0;
        struct ldlm_namespace *ns;
        struct ldlm_lock *lock;
        LIST_HEAD(cancels);

        /* concurrent cancels on the same handle can happen */
        lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
        if (lock == NULL) {
                LDLM_DEBUG_NOLOCK("lock is already being destroyed\n");
                return 0;
        }

        rc = ldlm_cli_cancel_local(lock);
        if (rc == LDLM_FL_LOCAL_ONLY || cancel_flags & LCF_LOCAL) {
                LDLM_LOCK_RELEASE(lock);
                return 0;
        }
        /* Even if the lock is marked as LDLM_FL_BL_AST, this is an
         * LDLM_CANCEL RPC, which goes to the canceld portal, so we can
         * cancel other LRU locks here and send them all as one LDLM_CANCEL
         * RPC. */
        LASSERT(list_empty(&lock->l_bl_ast));
        list_add(&lock->l_bl_ast, &cancels);

        exp = lock->l_conn_export;
        if (exp_connect_cancelset(exp)) {
                avail = ldlm_format_handles_avail(class_exp2cliimp(exp),
                                                  &RQF_LDLM_CANCEL,
                                                  RCL_CLIENT, 0);
                LASSERT(avail > 0);

                ns = ldlm_lock_to_ns(lock);
                flags = ns_connect_lru_resize(ns) ?
                        LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
                count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
                                               LCF_BL_AST, flags);
        }
        ldlm_cli_cancel_list(&cancels, count, NULL, cancel_flags);
        return 0;
}
EXPORT_SYMBOL(ldlm_cli_cancel);

/**
 * Locally cancel up to \a count locks in list \a cancels.
 * Return the number of cancelled locks.
 */
int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
                               ldlm_cancel_flags_t flags)
{
        LIST_HEAD(head);
        struct ldlm_lock *lock, *next;
        int left = 0, bl_ast = 0;
        __u64 rc;

        left = count;
        list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
                if (left-- == 0)
                        break;

                if (flags & LCF_LOCAL) {
                        rc = LDLM_FL_LOCAL_ONLY;
                        ldlm_lock_cancel(lock);
                } else {
                        rc = ldlm_cli_cancel_local(lock);
                }
                /* Until we have compound requests and can send LDLM_CANCEL
                 * requests batched with generic RPCs, we need to send cancels
                 * with the LDLM_FL_BL_AST flag in a separate RPC from
                 * the one being generated now. */
                if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
                        LDLM_DEBUG(lock, "Cancel lock separately");
                        list_del_init(&lock->l_bl_ast);
                        list_add(&lock->l_bl_ast, &head);
                        bl_ast++;
                        continue;
                }
                if (rc == LDLM_FL_LOCAL_ONLY) {
                        /* CANCEL RPC should not be sent to server. */
                        list_del_init(&lock->l_bl_ast);
                        LDLM_LOCK_RELEASE(lock);
                        count--;
                }
        }
        if (bl_ast > 0) {
                count -= bl_ast;
                ldlm_cli_cancel_list(&head, bl_ast, NULL, 0);
        }

        return count;
}
EXPORT_SYMBOL(ldlm_cli_cancel_list_local);

/**
 * Cancel as many locks as possible w/o sending any RPCs (e.g. to write back
 * dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g.
 * readahead requests, ...)
 */
static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
                                                    struct ldlm_lock *lock,
                                                    int unused, int added,
                                                    int count)
{
        ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
        ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;

        lock_res_and_lock(lock);

        /* don't check added & count since we want to process all locks
         * from the unused list */
        switch (lock->l_resource->lr_type) {
        case LDLM_EXTENT:
        case LDLM_IBITS:
                if (cb && cb(lock))
                        break;
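                /* fall through: no recovery callback, or the callback
                 * declined to cancel this lock */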
        default:
                result = LDLM_POLICY_SKIP_LOCK;
                lock->l_flags |= LDLM_FL_SKIPPED;
                break;
        }

        unlock_res_and_lock(lock);
        return result;
}

/**
 * Callback function for LRU-resize policy. Decides whether to keep
 * \a lock in LRU for the current LRU size \a unused, the number of locks
 * added in the current scan \a added, and the number of locks to be
 * preferably canceled \a count.
 *
 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
 *
 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
 */
static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
                                                 struct ldlm_lock *lock,
                                                 int unused, int added,
                                                 int count)
{
        unsigned long cur = cfs_time_current();
        struct ldlm_pool *pl = &ns->ns_pool;
        __u64 slv, lvf, lv;
        unsigned long la;

        /* Stop LRU processing when we reach past @count or have checked all
         * locks in LRU. */
        if (count && added >= count)
                return LDLM_POLICY_KEEP_LOCK;

        slv = ldlm_pool_get_slv(pl);
        lvf = ldlm_pool_get_lvf(pl);
        la = cfs_duration_sec(cfs_time_sub(cur,
                              lock->l_last_used));
        lv = lvf * la * unused;

        /* Inform pool about current CLV to see it via debugfs. */
        ldlm_pool_set_clv(pl, lv);

        /* Stop when the SLV has not yet come from the server, or lv is
         * smaller than it. */
        return (slv == 0 || lv < slv) ?
                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
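
/*
 * Worked example (illustrative numbers): with a lock volume factor
 * lvf = 100, a lock last used la = 30 seconds ago and unused = 1000 locks
 * in the LRU, the current lock volume is lv = 100 * 30 * 1000 = 3,000,000.
 * The lock is canceled only once lv reaches the server-supplied SLV, so
 * recently used locks on lightly loaded clients stay cached longer.
 */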

/**
 * Callback function for the debugfs-controlled policy. Decides whether to
 * keep \a lock in LRU for the current LRU size \a unused, the number of
 * locks added in the current scan \a added, and the number of locks to be
 * preferably canceled \a count.
 *
 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
 *
 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
 */
static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
                                                   struct ldlm_lock *lock,
                                                   int unused, int added,
                                                   int count)
{
        /* Stop LRU processing when we reach past @count or have checked all
         * locks in LRU. */
        return (added >= count) ?
                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}

/**
 * Callback function for aged policy. Decides whether to keep \a lock in
 * LRU for the current LRU size \a unused, the number of locks added in the
 * current scan \a added, and the number of locks to be preferably canceled
 * \a count.
 *
 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
 *
 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
 */
static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
                                                 struct ldlm_lock *lock,
                                                 int unused, int added,
                                                 int count)
{
        /* Stop LRU processing if a young lock is found and we reach past
         * count */
        return ((added >= count) &&
                time_before(cfs_time_current(),
                            cfs_time_add(lock->l_last_used, ns->ns_max_age))) ?
                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}

/**
 * Callback function for default policy. Decides whether to keep \a lock in
 * LRU for the current LRU size \a unused, the number of locks added in the
 * current scan \a added, and the number of locks to be preferably canceled
 * \a count.
 *
 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
 *
 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
 */
static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
                                                    struct ldlm_lock *lock,
                                                    int unused, int added,
                                                    int count)
{
        /* Stop LRU processing when we reach past count or have checked all
         * locks in LRU. */
        return (added >= count) ?
                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}

typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
                                                      struct ldlm_lock *, int,
                                                      int, int);

static ldlm_cancel_lru_policy_t
ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
{
        if (flags & LDLM_CANCEL_NO_WAIT)
                return ldlm_cancel_no_wait_policy;

        if (ns_connect_lru_resize(ns)) {
                if (flags & LDLM_CANCEL_SHRINK)
                        /* We kill the passed number of old locks. */
                        return ldlm_cancel_passed_policy;
                else if (flags & LDLM_CANCEL_LRUR)
                        return ldlm_cancel_lrur_policy;
                else if (flags & LDLM_CANCEL_PASSED)
                        return ldlm_cancel_passed_policy;
        } else {
                if (flags & LDLM_CANCEL_AGED)
                        return ldlm_cancel_aged_policy;
        }

        return ldlm_cancel_default_policy;
}

/**
 * - Free space in LRU for \a count new locks,
 *   redundant unused locks are canceled locally;
 * - also cancel locally unused aged locks;
 * - do not cancel more than \a max locks;
 * - GET the found locks and add them into the \a cancels list.
 *
 * A client lock can be added to the l_bl_ast list only when it is
 * marked LDLM_FL_CANCELING. Otherwise, somebody is already doing
 * CANCEL.  There are the following use cases:
 * ldlm_cancel_resource_local(), ldlm_cancel_lru_local() and
 * ldlm_cli_cancel(), which check and set this flag properly. As any
 * attempt to cancel a lock relies on this flag, the l_bl_ast list is
 * accessed later without any special locking.
 *
 * Calling policies for enabled LRU resize:
 * ----------------------------------------
 * flags & LDLM_CANCEL_LRUR - use LRU resize policy (SLV from server) to
 *                          cancel not more than \a count locks;
 *
 * flags & LDLM_CANCEL_PASSED - cancel \a count number of old locks (located at
 *                            the beginning of the LRU list);
 *
 * flags & LDLM_CANCEL_SHRINK - cancel not more than \a count locks according to
 *                            the memory pressure policy function;
 *
 * flags & LDLM_CANCEL_AGED - cancel \a count locks according to the "aged
 *                          policy";
 *
 * flags & LDLM_CANCEL_NO_WAIT - cancel as many unused locks as possible
 *                             (typically before replaying locks) w/o
 *                             sending any RPCs or waiting for any
 *                             outstanding RPC to complete.
 */
1294 static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
1295                                  struct list_head *cancels, int count, int max,
1296                                  int flags)
1297 {
1298         ldlm_cancel_lru_policy_t pf;
1299         struct ldlm_lock *lock, *next;
1300         int added = 0, unused, remained;
1301
1302         spin_lock(&ns->ns_lock);
1303         unused = ns->ns_nr_unused;
1304         remained = unused;
1305
1306         if (!ns_connect_lru_resize(ns))
1307                 count += unused - ns->ns_max_unused;
1308
1309         pf = ldlm_cancel_lru_policy(ns, flags);
1310         LASSERT(pf != NULL);
1311
1312         while (!list_empty(&ns->ns_unused_list)) {
1313                 ldlm_policy_res_t result;
1314
1315                 /* all unused locks */
1316                 if (remained-- <= 0)
1317                         break;
1318
1319                 /* For any flags, stop scanning if @max is reached. */
1320                 if (max && added >= max)
1321                         break;
1322
1323                 list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
1324                                              l_lru) {
1325                         /* Locks that got a blocking request never sit in the LRU. */
1326                         LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
1327
1328                         if (flags & LDLM_CANCEL_NO_WAIT &&
1329                             lock->l_flags & LDLM_FL_SKIPPED)
1330                                 /* already processed */
1331                                 continue;
1332
1333                         /* Stop at the first lock nobody is canceling yet;
1334                          * CANCELing locks are dropped from the LRU below. */
1335                         if (!(lock->l_flags & LDLM_FL_CANCELING))
1336                                 break;
1337
1338                         ldlm_lock_remove_from_lru_nolock(lock);
1339                 }
1340                 if (&lock->l_lru == &ns->ns_unused_list)
1341                         break;
1342
1343                 LDLM_LOCK_GET(lock);
1344                 spin_unlock(&ns->ns_lock);
1345                 lu_ref_add(&lock->l_reference, __func__, current);
1346
1347                 /* Pass the lock through the policy filter and see if it
1348                  * should stay in LRU.
1349                  *
1350                  * Even for shrinker policy we stop scanning if
1351                  * we find a lock that should stay in the cache.
1352                  * We should take into account lock age anyway
1353                  * as a new lock is a valuable resource even if
1354                  * it has a low weight.
1355                  *
1356                  * That is, for shrinker policy we drop only
1357                  * old locks, but additionally choose them by
1358                  * their weight. Big extent locks will stay in
1359                  * the cache. */
1360                 result = pf(ns, lock, unused, added, count);
1361                 if (result == LDLM_POLICY_KEEP_LOCK) {
1362                         lu_ref_del(&lock->l_reference,
1363                                    __func__, current);
1364                         LDLM_LOCK_RELEASE(lock);
1365                         spin_lock(&ns->ns_lock);
1366                         break;
1367                 }
1368                 if (result == LDLM_POLICY_SKIP_LOCK) {
1369                         lu_ref_del(&lock->l_reference,
1370                                    __func__, current);
1371                         LDLM_LOCK_RELEASE(lock);
1372                         spin_lock(&ns->ns_lock);
1373                         continue;
1374                 }
1375
1376                 lock_res_and_lock(lock);
1377                 /* Check flags again under the lock. */
1378                 if ((lock->l_flags & LDLM_FL_CANCELING) ||
1379                     (ldlm_lock_remove_from_lru(lock) == 0)) {
1380                         /* Another thread is removing lock from LRU, or
1381                          * somebody is already doing CANCEL, or there
1382                          * is a blocking request which will send cancel
1383                          * by itself, or the lock is no longer unused. */
1384                         unlock_res_and_lock(lock);
1385                         lu_ref_del(&lock->l_reference,
1386                                    __func__, current);
1387                         LDLM_LOCK_RELEASE(lock);
1388                         spin_lock(&ns->ns_lock);
1389                         continue;
1390                 }
1391                 LASSERT(!lock->l_readers && !lock->l_writers);
1392
1393                 /* If we have chosen to cancel this lock voluntarily, we
1394                  * had better send a cancel notification to the server so
1395                  * that it frees the appropriate state. This might lead to
1396                  * a race where, while we are doing the cancel here, the
1397                  * server is also silently cancelling this lock. */
1398                 lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
1399
1400                 /* Setting the CBPENDING flag is a little misleading,
1401                  * but prevents an important race; namely, once
1402                  * CBPENDING is set, the lock can accumulate no more
1403                  * readers/writers. Since readers and writers are
1404                  * already zero here, ldlm_lock_decref() won't see
1405                  * this flag and call l_blocking_ast. */
1406                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
1407
1408                 /* We can't re-add to l_lru as it confuses the
1409                  * refcounting in ldlm_lock_remove_from_lru() if an AST
1410                  * arrives after we drop lr_lock below. We use l_bl_ast
1411                  * instead, and can't use l_pending_chain because it is
1412                  * used on both the server and the client, even though
1413                  * bug 5666 says it is used only on the server. */
1414                 LASSERT(list_empty(&lock->l_bl_ast));
1415                 list_add(&lock->l_bl_ast, cancels);
1416                 unlock_res_and_lock(lock);
1417                 lu_ref_del(&lock->l_reference, __func__, current);
1418                 spin_lock(&ns->ns_lock);
1419                 added++;
1420                 unused--;
1421         }
1422         spin_unlock(&ns->ns_lock);
1423         return added;
1424 }
1425
1426 int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
1427                           struct list_head *cancels, int count, int max,
1428                           ldlm_cancel_flags_t cancel_flags, int flags)
1429 {
1430         int added;
1431
1432         added = ldlm_prepare_lru_list(ns, cancels, count, max, flags);
1433         if (added <= 0)
1434                 return added;
1435         return ldlm_cli_cancel_list_local(cancels, added, cancel_flags);
1436 }
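
/*
 * Illustrative sketch, not part of the original file: trimming a client
 * namespace by locally canceling up to 32 locks chosen by the aged
 * policy, mirroring the LCF_LOCAL usage seen in
 * ldlm_cancel_unused_locks_for_replay() below.
 */
static void example_trim_lru(struct ldlm_namespace *ns)
{
	LIST_HEAD(cancels);
	int canceled;

	canceled = ldlm_cancel_lru_local(ns, &cancels, 0, 32,
					 LCF_LOCAL, LDLM_CANCEL_AGED);
	CDEBUG(D_DLMTRACE, "locally canceled %d aged locks\n", canceled);
}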
1437
1438 /**
1439  * Cancel at least \a nr locks from given namespace LRU.
1440  *
1441  * When called with LCF_ASYNC the blocking callback will be handled
1442  * in a thread and this function will return after the thread has been
1443  * asked to call the callback.  When called without LCF_ASYNC the
1444  * blocking callback will be performed in this function.
1445  */
1446 int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
1447                     ldlm_cancel_flags_t cancel_flags,
1448                     int flags)
1449 {
1450         LIST_HEAD(cancels);
1451         int count, rc;
1452
1453         /* Just prepare the list of locks, do not actually cancel them yet.
1454          * Locks are cancelled later in a separate thread. */
1455         count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
1456         rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
1457         if (rc == 0)
1458                 return count;
1459
1460         return 0;
1461 }
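
/*
 * Illustrative sketch, not part of the original file: how a
 * memory-pressure shrinker might drive ldlm_cancel_lru(). With LCF_ASYNC
 * the blocking callbacks are deferred to the ldlm_bl threads, so the
 * call itself does not block.
 */
static void example_shrink_namespace(struct ldlm_namespace *ns, int nr)
{
	/* Prepare up to nr locks and hand them to a blocking thread. */
	ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
}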
1462
1463 /**
1464  * Find and cancel locally unused locks found on resource, matched to the
1465  * given policy, mode. GET the found locks and add them into the \a cancels
1466  * list.
1467  */
1468 int ldlm_cancel_resource_local(struct ldlm_resource *res,
1469                                struct list_head *cancels,
1470                                ldlm_policy_data_t *policy,
1471                                ldlm_mode_t mode, __u64 lock_flags,
1472                                ldlm_cancel_flags_t cancel_flags, void *opaque)
1473 {
1474         struct ldlm_lock *lock;
1475         int count = 0;
1476
1477         lock_res(res);
1478         list_for_each_entry(lock, &res->lr_granted, l_res_link) {
1479                 if (opaque != NULL && lock->l_ast_data != opaque) {
1480                         LDLM_ERROR(lock, "data %p doesn't match opaque %p",
1481                                    lock->l_ast_data, opaque);
1482                         continue;
1483                 }
1484
1485                 if (lock->l_readers || lock->l_writers)
1486                         continue;
1487
1488                 /* If somebody is already doing CANCEL, or a blocking AST
1489                  * came, skip this lock. */
1490                 if (lock->l_flags & LDLM_FL_BL_AST ||
1491                     lock->l_flags & LDLM_FL_CANCELING)
1492                         continue;
1493
1494                 if (lockmode_compat(lock->l_granted_mode, mode))
1495                         continue;
1496
1497                 /* If a policy is given and this is an IBITS lock, add it
1498                  * to the list only if it matches the policy. */
1499                 if (policy && (lock->l_resource->lr_type == LDLM_IBITS) &&
1500                     !(lock->l_policy_data.l_inodebits.bits &
1501                       policy->l_inodebits.bits))
1502                         continue;
1503
1504                 /* See CBPENDING comment in ldlm_cancel_lru */
1505                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
1506                                  lock_flags;
1507
1508                 LASSERT(list_empty(&lock->l_bl_ast));
1509                 list_add(&lock->l_bl_ast, cancels);
1510                 LDLM_LOCK_GET(lock);
1511                 count++;
1512         }
1513         unlock_res(res);
1514
1515         return ldlm_cli_cancel_list_local(cancels, count, cancel_flags);
1516 }
1517 EXPORT_SYMBOL(ldlm_cancel_resource_local);
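
/*
 * Illustrative sketch, not part of the original file: gathering unused
 * IBITS locks on one resource that intersect a given bit mask, in the
 * style of early lock cancellation. The 0x1 bit is a made-up
 * placeholder; real callers use the MDS_INODELOCK_* constants.
 */
static int example_elc_resource(struct ldlm_resource *res,
				struct list_head *cancels)
{
	ldlm_policy_data_t policy = {
		.l_inodebits = { .bits = 0x1 },		/* hypothetical bit */
	};

	/* GETs each matching unused lock and adds it to \a cancels. */
	return ldlm_cancel_resource_local(res, cancels, &policy, LCK_EX,
					  0, LCF_BL_AST, NULL);
}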
1518
1519 /**
1520  * Cancel client-side locks from a list and send/prepare cancel RPCs to the
1521  * server.
1522  * If \a req is NULL, send a CANCEL request to the server with the handles
1523  * of the locks in \a cancels. If EARLY_CANCEL is not supported, send a
1524  * separate CANCEL request per lock.
1525  * If \a req is not NULL, put the handles of the locks in \a cancels into
1526  * the request buffer at offset \a off.
1527  * Destroy \a cancels at the end.
1528  */
1529 int ldlm_cli_cancel_list(struct list_head *cancels, int count,
1530                          struct ptlrpc_request *req, ldlm_cancel_flags_t flags)
1531 {
1532         struct ldlm_lock *lock;
1533         int res = 0;
1534
1535         if (list_empty(cancels) || count == 0)
1536                 return 0;
1537
1538         /* XXX: requests (both batched and not) could be sent in parallel.
1539          * Usually it is enough to have just 1 RPC, but it is possible that
1540          * there are too many locks to be cancelled in LRU or on a resource.
1541          * It would also speed up the case when the server does not support
1542          * the feature. */
1543         while (count > 0) {
1544                 LASSERT(!list_empty(cancels));
1545                 lock = list_entry(cancels->next, struct ldlm_lock,
1546                                       l_bl_ast);
1547                 LASSERT(lock->l_conn_export);
1548
1549                 if (exp_connect_cancelset(lock->l_conn_export)) {
1550                         res = count;
1551                         if (req)
1552                                 ldlm_cancel_pack(req, cancels, count);
1553                         else
1554                                 res = ldlm_cli_cancel_req(lock->l_conn_export,
1555                                                           cancels, count,
1556                                                           flags);
1557                 } else {
1558                         res = ldlm_cli_cancel_req(lock->l_conn_export,
1559                                                   cancels, 1, flags);
1560                 }
1561
1562                 if (res < 0) {
1563                         CDEBUG_LIMIT(res == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
1564                                      "ldlm_cli_cancel_list: %d\n", res);
1565                         res = count;
1566                 }
1567
1568                 count -= res;
1569                 ldlm_lock_list_put(cancels, l_bl_ast, res);
1570         }
1571         LASSERT(count == 0);
1572         return 0;
1573 }
1574 EXPORT_SYMBOL(ldlm_cli_cancel_list);
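
/*
 * Illustrative sketch, not part of the original file, mirroring the
 * ldlm_cli_cancel_unused_resource() flow below: gather unused locks on a
 * resource, then let ldlm_cli_cancel_list() send the CANCEL RPC(s),
 * since with req == NULL there is no request to pack the handles into.
 */
static int example_cancel_resource(struct ldlm_resource *res)
{
	LIST_HEAD(cancels);
	int count;

	count = ldlm_cancel_resource_local(res, &cancels, NULL, LCK_MINMODE,
					   0, LCF_BL_AST, NULL);
	return ldlm_cli_cancel_list(&cancels, count, NULL, 0);
}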
1575
1576 /**
1577  * Cancel all locks on a resource that have 0 readers/writers.
1578  *
1579  * If flags & LCF_LOCAL, throw the locks away without trying
1580  * to notify the server. */
1581 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
1582                                     const struct ldlm_res_id *res_id,
1583                                     ldlm_policy_data_t *policy,
1584                                     ldlm_mode_t mode,
1585                                     ldlm_cancel_flags_t flags,
1586                                     void *opaque)
1587 {
1588         struct ldlm_resource *res;
1589         LIST_HEAD(cancels);
1590         int count;
1591         int rc;
1592
1593         res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
1594         if (res == NULL) {
1595                 /* This is not a problem. */
1596                 CDEBUG(D_INFO, "No resource %llu\n", res_id->name[0]);
1597                 return 0;
1598         }
1599
1600         LDLM_RESOURCE_ADDREF(res);
1601         count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
1602                                            0, flags | LCF_BL_AST, opaque);
1603         rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
1604         if (rc != ELDLM_OK)
1605                 CERROR("canceling unused lock "DLDLMRES": rc = %d\n",
1606                        PLDLMRES(res), rc);
1607
1608         LDLM_RESOURCE_DELREF(res);
1609         ldlm_resource_putref(res);
1610         return 0;
1611 }
1612 EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
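
/*
 * Illustrative sketch, not part of the original file: dropping all
 * unused locks on a single resource identified by name. The res_id
 * value is a made-up placeholder.
 */
static void example_cancel_one_resource(struct ldlm_namespace *ns)
{
	struct ldlm_res_id res_id = { .name = { 0x1234 } }; /* hypothetical */

	ldlm_cli_cancel_unused_resource(ns, &res_id, NULL, LCK_MINMODE,
					LCF_ASYNC, NULL);
}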
1613
1614 struct ldlm_cli_cancel_arg {
1615         int     lc_flags;
1616         void   *lc_opaque;
1617 };
1618
1619 static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs,
1620                                        struct cfs_hash_bd *bd,
1621                                        struct hlist_node *hnode, void *arg)
1622 {
1623         struct ldlm_resource       *res = cfs_hash_object(hs, hnode);
1624         struct ldlm_cli_cancel_arg     *lc = arg;
1625
1626         ldlm_cli_cancel_unused_resource(ldlm_res_to_ns(res), &res->lr_name,
1627                                         NULL, LCK_MINMODE,
1628                                         lc->lc_flags, lc->lc_opaque);
1629         /* must return 0 for hash iteration */
1630         return 0;
1631 }
1632
1633 /**
1634  * Cancel all locks on a namespace (or a specific resource, if given)
1635  * that have 0 readers/writers.
1636  *
1637  * If flags & LCF_LOCAL, throw the locks away without trying
1638  * to notify the server. */
1639 int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
1640                            const struct ldlm_res_id *res_id,
1641                            ldlm_cancel_flags_t flags, void *opaque)
1642 {
1643         struct ldlm_cli_cancel_arg arg = {
1644                 .lc_flags       = flags,
1645                 .lc_opaque      = opaque,
1646         };
1647
1648         if (ns == NULL)
1649                 return ELDLM_OK;
1650
1651         if (res_id != NULL) {
1652                 return ldlm_cli_cancel_unused_resource(ns, res_id, NULL,
1653                                                        LCK_MINMODE, flags,
1654                                                        opaque);
1655         } else {
1656                 cfs_hash_for_each_nolock(ns->ns_rs_hash,
1657                                          ldlm_cli_hash_cancel_unused, &arg);
1658                 return ELDLM_OK;
1659         }
1660 }
1661 EXPORT_SYMBOL(ldlm_cli_cancel_unused);
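
/*
 * Illustrative sketch, not part of the original file: flushing every
 * unused lock in a namespace locally, e.g. during cleanup, without
 * notifying the server.
 */
static void example_flush_namespace(struct ldlm_namespace *ns)
{
	/* res_id == NULL walks ns_rs_hash and covers every resource. */
	ldlm_cli_cancel_unused(ns, NULL, LCF_LOCAL, NULL);
}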
1662
1663 /* Lock iterators. */
1664
1665 static int ldlm_resource_foreach(struct ldlm_resource *res,
1666                                  ldlm_iterator_t iter, void *closure)
1667 {
1668         struct list_head *tmp, *next;
1669         struct ldlm_lock *lock;
1670         int rc = LDLM_ITER_CONTINUE;
1671
1672         if (!res)
1673                 return LDLM_ITER_CONTINUE;
1674
1675         lock_res(res);
1676         list_for_each_safe(tmp, next, &res->lr_granted) {
1677                 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1678
1679                 if (iter(lock, closure) == LDLM_ITER_STOP) {
1680                         rc = LDLM_ITER_STOP;
1681                         goto out;
1682                 }
1683         }
1684
1685         list_for_each_safe(tmp, next, &res->lr_waiting) {
1686                 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1687
1688                 if (iter(lock, closure) == LDLM_ITER_STOP) {
1689                         rc = LDLM_ITER_STOP;
1690                         goto out;
1691                 }
1692         }
1693  out:
1694         unlock_res(res);
1695         return rc;
1696 }
1697
1698 struct iter_helper_data {
1699         ldlm_iterator_t iter;
1700         void *closure;
1701 };
1702
1703 static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
1704 {
1705         struct iter_helper_data *helper = closure;
1706
1707         return helper->iter(lock, helper->closure);
1708 }
1709
1710 static int ldlm_res_iter_helper(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1711                                 struct hlist_node *hnode, void *arg)
1712
1713 {
1714         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1715
1716         return ldlm_resource_foreach(res, ldlm_iter_helper, arg) ==
1717                LDLM_ITER_STOP;
1718 }
1719
1720 static void ldlm_namespace_foreach(struct ldlm_namespace *ns,
1721                                    ldlm_iterator_t iter, void *closure)
1722
1723 {
1724         struct iter_helper_data helper = {
1725                 .iter           = iter,
1726                 .closure        = closure,
1727         };
1728
1729         cfs_hash_for_each_nolock(ns->ns_rs_hash,
1730                                  ldlm_res_iter_helper, &helper);
1731
1732 }
1733
1734 /* Non-blocking function to manipulate a lock whose cb_data is being put away.
1735  * Returns  0:  no resource found
1736  *        > 0:  LDLM_ITER_STOP or LDLM_ITER_CONTINUE
1737  *        < 0:  error
1738  */
1739 int ldlm_resource_iterate(struct ldlm_namespace *ns,
1740                           const struct ldlm_res_id *res_id,
1741                           ldlm_iterator_t iter, void *data)
1742 {
1743         struct ldlm_resource *res;
1744         int rc;
1745
1746         if (ns == NULL) {
1747                 CERROR("must pass in namespace\n");
1748                 LBUG();
1749         }
1750
1751         res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
1752         if (res == NULL)
1753                 return 0;
1754
1755         LDLM_RESOURCE_ADDREF(res);
1756         rc = ldlm_resource_foreach(res, iter, data);
1757         LDLM_RESOURCE_DELREF(res);
1758         ldlm_resource_putref(res);
1759         return rc;
1760 }
1761 EXPORT_SYMBOL(ldlm_resource_iterate);
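
/*
 * Illustrative sketch, not part of the original file: an ldlm_iterator_t
 * callback counting the granted and waiting locks on one resource
 * through ldlm_resource_iterate().
 */
static int example_count_cb(struct ldlm_lock *lock, void *closure)
{
	(*(int *)closure)++;
	return LDLM_ITER_CONTINUE;	/* LDLM_ITER_STOP ends the walk */
}

static int example_count_locks(struct ldlm_namespace *ns,
			       const struct ldlm_res_id *res_id)
{
	int count = 0;

	/* Returns 0 when the resource does not exist. */
	ldlm_resource_iterate(ns, res_id, example_count_cb, &count);
	return count;
}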
1762
1763 /* Lock replay */
1764
1765 static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
1766 {
1767         struct list_head *list = closure;
1768
1769         /* we use l_pending_chain here, because it's unused on clients. */
1770         LASSERTF(list_empty(&lock->l_pending_chain),
1771                  "lock %p next %p prev %p\n",
1772                  lock, &lock->l_pending_chain.next,
1773                  &lock->l_pending_chain.prev);
1774         /* Don't replay locks left after eviction (bug 9573) or locks
1775          * being actively cancelled (bug 17614). Get a reference on the
1776          * lock so that it does not disappear under us (e.g. due to cancel)
1777          */
1778         if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
1779                 list_add(&lock->l_pending_chain, list);
1780                 LDLM_LOCK_GET(lock);
1781         }
1782
1783         return LDLM_ITER_CONTINUE;
1784 }
1785
1786 static int replay_lock_interpret(const struct lu_env *env,
1787                                  struct ptlrpc_request *req,
1788                                  struct ldlm_async_args *aa, int rc)
1789 {
1790         struct ldlm_lock     *lock;
1791         struct ldlm_reply    *reply;
1792         struct obd_export    *exp;
1793
1794         atomic_dec(&req->rq_import->imp_replay_inflight);
1795         if (rc != ELDLM_OK)
1796                 goto out;
1797
1798         reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
1799         if (reply == NULL) {
1800                 rc = -EPROTO;
1801                 goto out;
1802         }
1803
1804         lock = ldlm_handle2lock(&aa->lock_handle);
1805         if (!lock) {
1806                 CERROR("received replay ack for unknown local cookie %#llx remote cookie %#llx from server %s id %s\n",
1807                        aa->lock_handle.cookie, reply->lock_handle.cookie,
1808                        req->rq_export->exp_client_uuid.uuid,
1809                        libcfs_id2str(req->rq_peer));
1810                 rc = -ESTALE;
1811                 goto out;
1812         }
1813
1814         /* Key change: rehash the lock in the per-export hash with the new key */
1815         exp = req->rq_export;
1816         if (exp && exp->exp_lock_hash) {
1817                 /* In the function below, .hs_keycmp resolves to
1818                  * ldlm_export_lock_keycmp() */
1819                 /* coverity[overrun-buffer-val] */
1820                 cfs_hash_rehash_key(exp->exp_lock_hash,
1821                                     &lock->l_remote_handle,
1822                                     &reply->lock_handle,
1823                                     &lock->l_exp_hash);
1824         } else {
1825                 lock->l_remote_handle = reply->lock_handle;
1826         }
1827
1828         LDLM_DEBUG(lock, "replayed lock:");
1829         ptlrpc_import_recovery_state_machine(req->rq_import);
1830         LDLM_LOCK_PUT(lock);
1831 out:
1832         if (rc != ELDLM_OK)
1833                 ptlrpc_connect_import(req->rq_import);
1834
1835         return rc;
1836 }
1837
1838 static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
1839 {
1840         struct ptlrpc_request *req;
1841         struct ldlm_async_args *aa;
1842         struct ldlm_request   *body;
1843         int flags;
1844
1845         /* Bug 11974: Do not replay a lock which is actively being canceled */
1846         if (lock->l_flags & LDLM_FL_CANCELING) {
1847                 LDLM_DEBUG(lock, "Not replaying canceled lock:");
1848                 return 0;
1849         }
1850
1851         /* If this is a reply-less callback lock, we cannot replay it, since
1852          * the server might long since have dropped it, with the notification
1853          * lost on the network (and a conflicting lock already granted). */
1854         if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
1855                 LDLM_DEBUG(lock, "Not replaying reply-less lock:");
1856                 ldlm_lock_cancel(lock);
1857                 return 0;
1858         }
1859
1860         /*
1861          * If granted mode matches the requested mode, this lock is granted.
1862          *
1863          * If they differ, but we have a granted mode, then we were granted
1864          * one mode and now want another: ergo, converting.
1865          *
1866          * If we haven't been granted anything and are on a resource list,
1867          * then we're blocked/waiting.
1868          *
1869          * If we haven't been granted anything and we're NOT on a resource list,
1870          * then we haven't got a reply yet and don't have a known disposition.
1871          * This happens whenever a lock enqueue is the request that triggers
1872          * recovery.
1873          */
1874         if (lock->l_granted_mode == lock->l_req_mode)
1875                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
1876         else if (lock->l_granted_mode)
1877                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
1878         else if (!list_empty(&lock->l_res_link))
1879                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
1880         else
1881                 flags = LDLM_FL_REPLAY;
1882
1883         req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE,
1884                                         LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
1885         if (req == NULL)
1886                 return -ENOMEM;
1887
1888         /* We're part of recovery, so don't wait for it. */
1889         req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS;
1890
1891         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1892         ldlm_lock2desc(lock, &body->lock_desc);
1893         body->lock_flags = ldlm_flags_to_wire(flags);
1894
1895         ldlm_lock2handle(lock, &body->lock_handle[0]);
1896         if (lock->l_lvb_len > 0)
1897                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_ENQUEUE_LVB);
1898         req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
1899                              lock->l_lvb_len);
1900         ptlrpc_request_set_replen(req);
1901         /* Notify the server we've replayed all requests.
1902          * Also, we mark the request to be put on a dedicated
1903          * queue to be processed after all request replays.
1904          * bug 6063 */
1905         lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
1906
1907         LDLM_DEBUG(lock, "replaying lock:");
1908
1909         atomic_inc(&req->rq_import->imp_replay_inflight);
1910         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1911         aa = ptlrpc_req_async_args(req);
1912         aa->lock_handle = body->lock_handle[0];
1913         req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
1914         ptlrpcd_add_req(req);
1915
1916         return 0;
1917 }
1918
1919 /**
1920  * Cancel as many unused locks as possible before replay. Since we are
1921  * in recovery, we can neither wait for any outstanding RPC to complete
1922  * nor send any new RPC to the server.
1923  *
1924  * Called only in recovery before replaying locks. There is no need to
1925  * replay locks that are unused. Since clients may hold thousands of
1926  * cached unused locks, dropping them can greatly reduce the load on
1927  * the servers at recovery time.
1928  */
1929 static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
1930 {
1931         int canceled;
1932         LIST_HEAD(cancels);
1933
1934         CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before replay for namespace %s (%d)\n",
1935                ldlm_ns_name(ns), ns->ns_nr_unused);
1936
1937         /* We don't need to care whether or not LRU resize is enabled
1938          * because the LDLM_CANCEL_NO_WAIT policy doesn't use the
1939          * count parameter */
1940         canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
1941                                          LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
1942
1943         CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
1944                            canceled, ldlm_ns_name(ns));
1945 }
1946
1947 int ldlm_replay_locks(struct obd_import *imp)
1948 {
1949         struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
1950         LIST_HEAD(list);
1951         struct ldlm_lock *lock, *next;
1952         int rc = 0;
1953
1954         LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
1955
1956         /* don't replay locks if import failed recovery */
1957         if (imp->imp_vbr_failed)
1958                 return 0;
1959
1960         /* ensure this doesn't fall to 0 before all have been queued */
1961         atomic_inc(&imp->imp_replay_inflight);
1962
1963         if (ldlm_cancel_unused_locks_before_replay)
1964                 ldlm_cancel_unused_locks_for_replay(ns);
1965
1966         ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
1967
1968         list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
1969                 list_del_init(&lock->l_pending_chain);
1970                 if (rc) {
1971                         LDLM_LOCK_RELEASE(lock);
1972                         continue; /* or try to do the rest? */
1973                 }
1974                 rc = replay_one_lock(imp, lock);
1975                 LDLM_LOCK_RELEASE(lock);
1976         }
1977
1978         atomic_dec(&imp->imp_replay_inflight);
1979
1980         return rc;
1981 }
1982 EXPORT_SYMBOL(ldlm_replay_locks);
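
/*
 * Illustrative sketch, not part of the original file: ldlm_replay_locks()
 * is normally driven by the import recovery state machine rather than
 * called directly; a recovery path would invoke it roughly like this
 * once the import reaches the lock-replay state.
 */
static int example_replay_stage(struct obd_import *imp)
{
	/* Queues a replay RPC per retained lock; returns the first error
	 * from replay_one_lock(), 0 otherwise. */
	return ldlm_replay_locks(imp);
}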