These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
kernel/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2010, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ldlm/ldlm_resource.c
37  *
38  * Author: Phil Schwan <phil@clusterfs.com>
39  * Author: Peter Braam <braam@clusterfs.com>
40  */
41
42 #define DEBUG_SUBSYSTEM S_LDLM
43 #include "../include/lustre_dlm.h"
44 #include "../include/lustre_fid.h"
45 #include "../include/obd_class.h"
46 #include "ldlm_internal.h"
47
48 struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
49
50 int ldlm_srv_namespace_nr;
51 int ldlm_cli_namespace_nr;
52
53 struct mutex ldlm_srv_namespace_lock;
54 LIST_HEAD(ldlm_srv_namespace_list);
55
56 struct mutex ldlm_cli_namespace_lock;
57 /* Client Namespaces that have active resources in them.
58  * Once all resources go away, ldlm_poold moves such namespaces to the
59  * inactive list */
60 LIST_HEAD(ldlm_cli_active_namespace_list);
61 /* Client namespaces that don't have any locks in them */
62 static LIST_HEAD(ldlm_cli_inactive_namespace_list);
63
64 static struct dentry *ldlm_debugfs_dir;
65 static struct dentry *ldlm_ns_debugfs_dir;
66 struct dentry *ldlm_svc_debugfs_dir;
67
68 /* During a debug dump, print at most this many granted locks for one
69  * resource to avoid a DDoS. */
70 static unsigned int ldlm_dump_granted_max = 256;
71
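/* Writing anything to the dump_namespaces debugfs file dumps all server-side
 * and client-side namespaces to the debug log (D_DLMTRACE). */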
72 static ssize_t
73 lprocfs_wr_dump_ns(struct file *file, const char __user *buffer,
74                    size_t count, loff_t *off)
75 {
76         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
77         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
78         return count;
79 }
80
81 LPROC_SEQ_FOPS_WR_ONLY(ldlm, dump_ns);
82
83 LPROC_SEQ_FOPS_RW_TYPE(ldlm_rw, uint);
84
85 static struct lprocfs_vars ldlm_debugfs_list[] = {
86         { "dump_namespaces", &ldlm_dump_ns_fops, NULL, 0222 },
87         { "dump_granted_max", &ldlm_rw_uint_fops, &ldlm_dump_granted_max },
88         { NULL }
89 };
90
91 int ldlm_debugfs_setup(void)
92 {
93         int rc;
94
95         ldlm_debugfs_dir = ldebugfs_register(OBD_LDLM_DEVICENAME,
96                                              debugfs_lustre_root,
97                                              NULL, NULL);
98         if (IS_ERR_OR_NULL(ldlm_debugfs_dir)) {
99                 CERROR("LProcFS failed in ldlm-init\n");
100                 rc = ldlm_debugfs_dir ? PTR_ERR(ldlm_debugfs_dir) : -ENOMEM;
101                 goto err;
102         }
103
104         ldlm_ns_debugfs_dir = ldebugfs_register("namespaces",
105                                                 ldlm_debugfs_dir,
106                                                 NULL, NULL);
107         if (IS_ERR_OR_NULL(ldlm_ns_debugfs_dir)) {
108                 CERROR("LProcFS failed in ldlm-init\n");
109                 rc = ldlm_ns_debugfs_dir ? PTR_ERR(ldlm_ns_debugfs_dir)
110                                          : -ENOMEM;
111                 goto err_type;
112         }
113
114         ldlm_svc_debugfs_dir = ldebugfs_register("services",
115                                                  ldlm_debugfs_dir,
116                                                  NULL, NULL);
117         if (IS_ERR_OR_NULL(ldlm_svc_debugfs_dir)) {
118                 CERROR("LProcFS failed in ldlm-init\n");
119                 rc = ldlm_svc_debugfs_dir ? PTR_ERR(ldlm_svc_debugfs_dir)
120                                           : -ENOMEM;
121                 goto err_ns;
122         }
123
124         rc = ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
125
126         return 0;
127
128 err_ns:
129         ldebugfs_remove(&ldlm_ns_debugfs_dir);
130 err_type:
131         ldebugfs_remove(&ldlm_debugfs_dir);
132 err:
133         ldlm_svc_debugfs_dir = NULL;
134         ldlm_ns_debugfs_dir = NULL;
135         ldlm_debugfs_dir = NULL;
136         return rc;
137 }
138
139 void ldlm_debugfs_cleanup(void)
140 {
141         if (!IS_ERR_OR_NULL(ldlm_svc_debugfs_dir))
142                 ldebugfs_remove(&ldlm_svc_debugfs_dir);
143
144         if (!IS_ERR_OR_NULL(ldlm_ns_debugfs_dir))
145                 ldebugfs_remove(&ldlm_ns_debugfs_dir);
146
147         if (!IS_ERR_OR_NULL(ldlm_debugfs_dir))
148                 ldebugfs_remove(&ldlm_debugfs_dir);
149
150         ldlm_svc_debugfs_dir = NULL;
151         ldlm_ns_debugfs_dir = NULL;
152         ldlm_debugfs_dir = NULL;
153 }
154
155 static ssize_t resource_count_show(struct kobject *kobj, struct attribute *attr,
156                                    char *buf)
157 {
158         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
159                                                  ns_kobj);
160         __u64             res = 0;
161         struct cfs_hash_bd        bd;
162         int                 i;
163
164         /* result is not strictly consistent */
165         cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
166                 res += cfs_hash_bd_count_get(&bd);
167         return sprintf(buf, "%lld\n", res);
168 }
169 LUSTRE_RO_ATTR(resource_count);
170
171 static ssize_t lock_count_show(struct kobject *kobj, struct attribute *attr,
172                                char *buf)
173 {
174         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
175                                                  ns_kobj);
176         __u64             locks;
177
178         locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
179                                         LPROCFS_FIELDS_FLAGS_SUM);
180         return sprintf(buf, "%lld\n", locks);
181 }
182 LUSTRE_RO_ATTR(lock_count);
183
184 static ssize_t lock_unused_count_show(struct kobject *kobj,
185                                       struct attribute *attr,
186                                       char *buf)
187 {
188         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
189                                                  ns_kobj);
190
191         return sprintf(buf, "%d\n", ns->ns_nr_unused);
192 }
193 LUSTRE_RO_ATTR(lock_unused_count);
194
195 static ssize_t lru_size_show(struct kobject *kobj, struct attribute *attr,
196                              char *buf)
197 {
198         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
199                                                  ns_kobj);
200         __u32 *nr = &ns->ns_max_unused;
201
202         if (ns_connect_lru_resize(ns))
203                 nr = &ns->ns_nr_unused;
204         return sprintf(buf, "%u", *nr);
205 }
206
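/* lru_size accepts "clear" to cancel all unused locks in the namespace, or a
 * number: a non-zero value sets a fixed LRU size (disabling server-driven LRU
 * resize), while 0 re-enables LRU resize when the server supports it. */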
207 static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
208                               const char *buffer, size_t count)
209 {
210         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
211                                                  ns_kobj);
212         unsigned long tmp;
213         int lru_resize;
214         int err;
215
216         if (strncmp(buffer, "clear", 5) == 0) {
217                 CDEBUG(D_DLMTRACE,
218                        "dropping all unused locks from namespace %s\n",
219                        ldlm_ns_name(ns));
220                 if (ns_connect_lru_resize(ns)) {
221                         int canceled, unused  = ns->ns_nr_unused;
222
223                         /* Try to cancel all @ns_nr_unused locks. */
224                         canceled = ldlm_cancel_lru(ns, unused, 0,
225                                                    LDLM_CANCEL_PASSED);
226                         if (canceled < unused) {
227                                 CDEBUG(D_DLMTRACE,
228                                        "not all requested locks are canceled, requested: %d, canceled: %d\n",
229                                        unused,
230                                        canceled);
231                                 return -EINVAL;
232                         }
233                 } else {
234                         tmp = ns->ns_max_unused;
235                         ns->ns_max_unused = 0;
236                         ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
237                         ns->ns_max_unused = tmp;
238                 }
239                 return count;
240         }
241
242         err = kstrtoul(buffer, 10, &tmp);
243         if (err != 0) {
244                 CERROR("lru_size: invalid value written\n");
245                 return -EINVAL;
246         }
247         lru_resize = (tmp == 0);
248
249         if (ns_connect_lru_resize(ns)) {
250                 if (!lru_resize)
251                         ns->ns_max_unused = (unsigned int)tmp;
252
253                 if (tmp > ns->ns_nr_unused)
254                         tmp = ns->ns_nr_unused;
255                 tmp = ns->ns_nr_unused - tmp;
256
257                 CDEBUG(D_DLMTRACE,
258                        "changing namespace %s unused locks from %u to %u\n",
259                        ldlm_ns_name(ns), ns->ns_nr_unused,
260                        (unsigned int)tmp);
261                 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);
262
263                 if (!lru_resize) {
264                         CDEBUG(D_DLMTRACE,
265                                "disable lru_resize for namespace %s\n",
266                                ldlm_ns_name(ns));
267                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
268                 }
269         } else {
270                 CDEBUG(D_DLMTRACE,
271                        "changing namespace %s max_unused from %u to %u\n",
272                        ldlm_ns_name(ns), ns->ns_max_unused,
273                        (unsigned int)tmp);
274                 ns->ns_max_unused = (unsigned int)tmp;
275                 ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
276
277                 /* Make sure that LRU resize was originally supported before
278                  * turning it on here. */
279                 if (lru_resize &&
280                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
281                         CDEBUG(D_DLMTRACE,
282                                "enable lru_resize for namespace %s\n",
283                                ldlm_ns_name(ns));
284                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
285                 }
286         }
287
288         return count;
289 }
290 LUSTRE_RW_ATTR(lru_size);
291
292 static ssize_t lru_max_age_show(struct kobject *kobj, struct attribute *attr,
293                                 char *buf)
294 {
295         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
296                                                  ns_kobj);
297
298         return sprintf(buf, "%u", ns->ns_max_age);
299 }
300
301 static ssize_t lru_max_age_store(struct kobject *kobj, struct attribute *attr,
302                                  const char *buffer, size_t count)
303 {
304         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
305                                                  ns_kobj);
306         unsigned long tmp;
307         int err;
308
309         err = kstrtoul(buffer, 10, &tmp);
310         if (err != 0)
311                 return -EINVAL;
312
313         ns->ns_max_age = tmp;
314
315         return count;
316 }
317 LUSTRE_RW_ATTR(lru_max_age);
318
319 static ssize_t early_lock_cancel_show(struct kobject *kobj,
320                                       struct attribute *attr,
321                                       char *buf)
322 {
323         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
324                                                  ns_kobj);
325
326         return sprintf(buf, "%d\n", ns_connect_cancelset(ns));
327 }
328
329 static ssize_t early_lock_cancel_store(struct kobject *kobj,
330                                        struct attribute *attr,
331                                        const char *buffer,
332                                        size_t count)
333 {
334         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
335                                                  ns_kobj);
336         unsigned long supp = -1;
337         int rc;
338
339         rc = kstrtoul(buffer, 10, &supp);
340         if (rc < 0)
341                 return rc;
342
343         if (supp == 0)
344                 ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
345         else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
346                 ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
347         return count;
348 }
349 LUSTRE_RW_ATTR(early_lock_cancel);
350
351 /* These are for namespaces in /sys/fs/lustre/ldlm/namespaces/ */
352 static struct attribute *ldlm_ns_attrs[] = {
353         &lustre_attr_resource_count.attr,
354         &lustre_attr_lock_count.attr,
355         &lustre_attr_lock_unused_count.attr,
356         &lustre_attr_lru_size.attr,
357         &lustre_attr_lru_max_age.attr,
358         &lustre_attr_early_lock_cancel.attr,
359         NULL,
360 };
361
362 static void ldlm_ns_release(struct kobject *kobj)
363 {
364         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
365                                                  ns_kobj);
366         complete(&ns->ns_kobj_unregister);
367 }
368
369 static struct kobj_type ldlm_ns_ktype = {
370         .default_attrs  = ldlm_ns_attrs,
371         .sysfs_ops      = &lustre_sysfs_ops,
372         .release        = ldlm_ns_release,
373 };
374
375 static void ldlm_namespace_debugfs_unregister(struct ldlm_namespace *ns)
376 {
377         if (IS_ERR_OR_NULL(ns->ns_debugfs_entry))
378                 CERROR("dlm namespace %s has no procfs dir?\n",
379                        ldlm_ns_name(ns));
380         else
381                 ldebugfs_remove(&ns->ns_debugfs_entry);
382
383         if (ns->ns_stats != NULL)
384                 lprocfs_free_stats(&ns->ns_stats);
385 }
386
387 static void ldlm_namespace_sysfs_unregister(struct ldlm_namespace *ns)
388 {
389         kobject_put(&ns->ns_kobj);
390         wait_for_completion(&ns->ns_kobj_unregister);
391 }
392
393 static int ldlm_namespace_sysfs_register(struct ldlm_namespace *ns)
394 {
395         int err;
396
397         ns->ns_kobj.kset = ldlm_ns_kset;
398         init_completion(&ns->ns_kobj_unregister);
399         err = kobject_init_and_add(&ns->ns_kobj, &ldlm_ns_ktype, NULL,
400                                    "%s", ldlm_ns_name(ns));
401
402         ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
403         if (ns->ns_stats == NULL) {
404                 kobject_put(&ns->ns_kobj);
405                 return -ENOMEM;
406         }
407
408         lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
409                              LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
410
411         return err;
412 }
413
414 static int ldlm_namespace_debugfs_register(struct ldlm_namespace *ns)
415 {
416         struct dentry *ns_entry;
417
418         if (!IS_ERR_OR_NULL(ns->ns_debugfs_entry)) {
419                 ns_entry = ns->ns_debugfs_entry;
420         } else {
421                 ns_entry = debugfs_create_dir(ldlm_ns_name(ns),
422                                               ldlm_ns_debugfs_dir);
423                 if (ns_entry == NULL)
424                         return -ENOMEM;
425                 ns->ns_debugfs_entry = ns_entry;
426         }
427
428         return 0;
429 }
430
431 #undef MAX_STRING_SIZE
432
433 static struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
434 {
435         LASSERT(res);
436         LASSERT(res != LP_POISON);
437         atomic_inc(&res->lr_refcount);
438         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
439                atomic_read(&res->lr_refcount));
440         return res;
441 }
442
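/* Default resource hash: sum the components of the resource name and mask
 * the result to the hash table size. */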
443 static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
444                                   const void *key, unsigned mask)
445 {
446         const struct ldlm_res_id     *id  = key;
447         unsigned                val = 0;
448         unsigned                i;
449
450         for (i = 0; i < RES_NAME_SIZE; i++)
451                 val += id->name[i];
452         return val & mask;
453 }
454
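/* FID-based hash used for MDC/MDT namespaces: rebuild the FID from the
 * resource name, flatten it to 32 bits, and combine it with the plain name
 * hash before masking to the table size. */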
455 static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
456                                       const void *key, unsigned mask)
457 {
458         const struct ldlm_res_id *id = key;
459         struct lu_fid       fid;
460         __u32          hash;
461         __u32          val;
462
463         fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
464         fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
465         fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
466
467         hash = fid_flatten32(&fid);
468         hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
469         if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
470                 val = id->name[LUSTRE_RES_ID_HSH_OFF];
471                 hash += (val >> 5) + (val << 11);
472         } else {
473                 val = fid_oid(&fid);
474         }
475         hash = hash_long(hash, hs->hs_bkt_bits);
476         /* give me another random factor */
477         hash -= hash_long((unsigned long)hs, val % 11 + 3);
478
479         hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
480         hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
481
482         return hash & mask;
483 }
484
485 static void *ldlm_res_hop_key(struct hlist_node *hnode)
486 {
487         struct ldlm_resource   *res;
488
489         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
490         return &res->lr_name;
491 }
492
493 static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
494 {
495         struct ldlm_resource   *res;
496
497         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
498         return ldlm_res_eq((const struct ldlm_res_id *)key,
499                            (const struct ldlm_res_id *)&res->lr_name);
500 }
501
502 static void *ldlm_res_hop_object(struct hlist_node *hnode)
503 {
504         return hlist_entry(hnode, struct ldlm_resource, lr_hash);
505 }
506
507 static void ldlm_res_hop_get_locked(struct cfs_hash *hs,
508                                     struct hlist_node *hnode)
509 {
510         struct ldlm_resource *res;
511
512         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
513         ldlm_resource_getref(res);
514 }
515
516 static void ldlm_res_hop_put_locked(struct cfs_hash *hs,
517                                     struct hlist_node *hnode)
518 {
519         struct ldlm_resource *res;
520
521         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
522         /* cfs_hash_for_each_nolock() is the only path that calls this. */
523         ldlm_resource_putref_locked(res);
524 }
525
526 static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
527 {
528         struct ldlm_resource *res;
529
530         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
531         ldlm_resource_putref(res);
532 }
533
534 static struct cfs_hash_ops ldlm_ns_hash_ops = {
535         .hs_hash        = ldlm_res_hop_hash,
536         .hs_key         = ldlm_res_hop_key,
537         .hs_keycmp      = ldlm_res_hop_keycmp,
538         .hs_keycpy      = NULL,
539         .hs_object      = ldlm_res_hop_object,
540         .hs_get         = ldlm_res_hop_get_locked,
541         .hs_put_locked  = ldlm_res_hop_put_locked,
542         .hs_put         = ldlm_res_hop_put
543 };
544
545 static struct cfs_hash_ops ldlm_ns_fid_hash_ops = {
546         .hs_hash        = ldlm_res_hop_fid_hash,
547         .hs_key         = ldlm_res_hop_key,
548         .hs_keycmp      = ldlm_res_hop_keycmp,
549         .hs_keycpy      = NULL,
550         .hs_object      = ldlm_res_hop_object,
551         .hs_get         = ldlm_res_hop_get_locked,
552         .hs_put_locked  = ldlm_res_hop_put_locked,
553         .hs_put         = ldlm_res_hop_put
554 };
555
556 struct ldlm_ns_hash_def {
557         ldlm_ns_type_t  nsd_type;
558         /** hash bucket bits */
559         unsigned        nsd_bkt_bits;
560         /** hash bits */
561         unsigned        nsd_all_bits;
562         /** hash operations */
563         struct cfs_hash_ops *nsd_hops;
564 };
565
566 static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] = {
567         {
568                 .nsd_type       = LDLM_NS_TYPE_MDC,
569                 .nsd_bkt_bits   = 11,
570                 .nsd_all_bits   = 16,
571                 .nsd_hops       = &ldlm_ns_fid_hash_ops,
572         },
573         {
574                 .nsd_type       = LDLM_NS_TYPE_MDT,
575                 .nsd_bkt_bits   = 14,
576                 .nsd_all_bits   = 21,
577                 .nsd_hops       = &ldlm_ns_fid_hash_ops,
578         },
579         {
580                 .nsd_type       = LDLM_NS_TYPE_OSC,
581                 .nsd_bkt_bits   = 8,
582                 .nsd_all_bits   = 12,
583                 .nsd_hops       = &ldlm_ns_hash_ops,
584         },
585         {
586                 .nsd_type       = LDLM_NS_TYPE_OST,
587                 .nsd_bkt_bits   = 11,
588                 .nsd_all_bits   = 17,
589                 .nsd_hops       = &ldlm_ns_hash_ops,
590         },
591         {
592                 .nsd_type       = LDLM_NS_TYPE_MGC,
593                 .nsd_bkt_bits   = 4,
594                 .nsd_all_bits   = 4,
595                 .nsd_hops       = &ldlm_ns_hash_ops,
596         },
597         {
598                 .nsd_type       = LDLM_NS_TYPE_MGT,
599                 .nsd_bkt_bits   = 4,
600                 .nsd_all_bits   = 4,
601                 .nsd_hops       = &ldlm_ns_hash_ops,
602         },
603         {
604                 .nsd_type       = LDLM_NS_TYPE_UNKNOWN,
605         },
606 };
607
608 /** Register \a ns in the list of namespaces */
609 static void ldlm_namespace_register(struct ldlm_namespace *ns,
610                                     ldlm_side_t client)
611 {
612         mutex_lock(ldlm_namespace_lock(client));
613         LASSERT(list_empty(&ns->ns_list_chain));
614         list_add(&ns->ns_list_chain, &ldlm_cli_inactive_namespace_list);
615         ldlm_namespace_nr_inc(client);
616         mutex_unlock(ldlm_namespace_lock(client));
617 }
618
619 /**
620  * Create and initialize new empty namespace.
621  */
622 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
623                                           ldlm_side_t client,
624                                           ldlm_appetite_t apt,
625                                           ldlm_ns_type_t ns_type)
626 {
627         struct ldlm_namespace *ns = NULL;
628         struct ldlm_ns_bucket *nsb;
629         struct ldlm_ns_hash_def    *nsd;
630         struct cfs_hash_bd        bd;
631         int                 idx;
632         int                 rc;
633
634         LASSERT(obd != NULL);
635
636         rc = ldlm_get_ref();
637         if (rc) {
638                 CERROR("ldlm_get_ref failed: %d\n", rc);
639                 return NULL;
640         }
641
642         for (idx = 0;; idx++) {
643                 nsd = &ldlm_ns_hash_defs[idx];
644                 if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
645                         CERROR("Unknown type %d for ns %s\n", ns_type, name);
646                         goto out_ref;
647                 }
648
649                 if (nsd->nsd_type == ns_type)
650                         break;
651         }
652
653         ns = kzalloc(sizeof(*ns), GFP_NOFS);
654         if (!ns)
655                 goto out_ref;
656
657         ns->ns_rs_hash = cfs_hash_create(name,
658                                          nsd->nsd_all_bits, nsd->nsd_all_bits,
659                                          nsd->nsd_bkt_bits, sizeof(*nsb),
660                                          CFS_HASH_MIN_THETA,
661                                          CFS_HASH_MAX_THETA,
662                                          nsd->nsd_hops,
663                                          CFS_HASH_DEPTH |
664                                          CFS_HASH_BIGNAME |
665                                          CFS_HASH_SPIN_BKTLOCK |
666                                          CFS_HASH_NO_ITEMREF);
667         if (ns->ns_rs_hash == NULL)
668                 goto out_ns;
669
670         cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
671                 nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
672                 at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
673                 nsb->nsb_namespace = ns;
674         }
675
676         ns->ns_obd      = obd;
677         ns->ns_appetite = apt;
678         ns->ns_client   = client;
679
680         INIT_LIST_HEAD(&ns->ns_list_chain);
681         INIT_LIST_HEAD(&ns->ns_unused_list);
682         spin_lock_init(&ns->ns_lock);
683         atomic_set(&ns->ns_bref, 0);
684         init_waitqueue_head(&ns->ns_waitq);
685
686         ns->ns_max_parallel_ast   = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
687         ns->ns_nr_unused          = 0;
688         ns->ns_max_unused        = LDLM_DEFAULT_LRU_SIZE;
689         ns->ns_max_age      = LDLM_DEFAULT_MAX_ALIVE;
690         ns->ns_orig_connect_flags = 0;
691         ns->ns_connect_flags      = 0;
692         ns->ns_stopping    = 0;
693
694         rc = ldlm_namespace_sysfs_register(ns);
695         if (rc != 0) {
696                 CERROR("Can't initialize ns sysfs, rc %d\n", rc);
697                 goto out_hash;
698         }
699
700         rc = ldlm_namespace_debugfs_register(ns);
701         if (rc != 0) {
702                 CERROR("Can't initialize ns proc, rc %d\n", rc);
703                 goto out_sysfs;
704         }
705
706         idx = ldlm_namespace_nr_read(client);
707         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
708         if (rc) {
709                 CERROR("Can't initialize lock pool, rc %d\n", rc);
710                 goto out_proc;
711         }
712
713         ldlm_namespace_register(ns, client);
714         return ns;
715 out_proc:
716         ldlm_namespace_debugfs_unregister(ns);
717 out_sysfs:
718         ldlm_namespace_sysfs_unregister(ns);
719         ldlm_namespace_cleanup(ns, 0);
720 out_hash:
721         cfs_hash_putref(ns->ns_rs_hash);
722 out_ns:
723         kfree(ns);
724 out_ref:
725         ldlm_put_ref();
726         return NULL;
727 }
728 EXPORT_SYMBOL(ldlm_namespace_new);
729
730 extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
731
732 /**
733  * Cancel and destroy all locks on a resource.
734  *
735  * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
736  * clean up.  This is currently only used for recovery, and we make
737  * certain assumptions as a result--notably, that we shouldn't cancel
738  * locks with refs.
739  */
740 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
741                              __u64 flags)
742 {
743         struct list_head *tmp;
744         int rc = 0;
745         bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
746
747         do {
748                 struct ldlm_lock *lock = NULL;
749                 struct lustre_handle lockh;
750
751                 /* First, look for a lock that has not been cleaned yet;
752                  * all cleaned locks are marked by the CLEANED flag. */
753                 lock_res(res);
754                 list_for_each(tmp, q) {
755                         lock = list_entry(tmp, struct ldlm_lock,
756                                               l_res_link);
757                         if (lock->l_flags & LDLM_FL_CLEANED) {
758                                 lock = NULL;
759                                 continue;
760                         }
761                         LDLM_LOCK_GET(lock);
762                         lock->l_flags |= LDLM_FL_CLEANED;
763                         break;
764                 }
765
766                 if (lock == NULL) {
767                         unlock_res(res);
768                         break;
769                 }
770
771                 /* Set CBPENDING so nothing in the cancellation path
772                  * can match this lock. */
773                 lock->l_flags |= LDLM_FL_CBPENDING;
774                 lock->l_flags |= LDLM_FL_FAILED;
775                 lock->l_flags |= flags;
776
777                 /* ... without sending a CANCEL message for local_only. */
778                 if (local_only)
779                         lock->l_flags |= LDLM_FL_LOCAL_ONLY;
780
781                 if (local_only && (lock->l_readers || lock->l_writers)) {
782                         /* This is a little bit gross, but much better than the
783                          * alternative: pretend that we got a blocking AST from
784                          * the server, so that when the lock is decref'd, it
785                          * will go away ... */
786                         unlock_res(res);
787                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
788                         if (lock->l_completion_ast)
789                                 lock->l_completion_ast(lock, 0, NULL);
790                         LDLM_LOCK_RELEASE(lock);
791                         continue;
792                 }
793
794                 unlock_res(res);
795                 ldlm_lock2handle(lock, &lockh);
796                 rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
797                 if (rc)
798                         CERROR("ldlm_cli_cancel: %d\n", rc);
799                 LDLM_LOCK_RELEASE(lock);
800         } while (1);
801 }
802
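/* cfs_hash iterator callback: cancel and destroy all granted and waiting
 * locks on one resource during namespace cleanup. */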
803 static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
804                                struct hlist_node *hnode, void *arg)
805 {
806         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
807         __u64 flags = *(__u64 *)arg;
808
809         cleanup_resource(res, &res->lr_granted, flags);
810         cleanup_resource(res, &res->lr_waiting, flags);
811
812         return 0;
813 }
814
815 static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
816                                   struct hlist_node *hnode, void *arg)
817 {
818         struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
819
820         lock_res(res);
821         CERROR("%s: namespace resource "DLDLMRES
822                " (%p) refcount nonzero (%d) after lock cleanup; forcing cleanup.\n",
823                ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
824                atomic_read(&res->lr_refcount) - 1);
825
826         ldlm_resource_dump(D_ERROR, res);
827         unlock_res(res);
828         return 0;
829 }
830
831 /**
832  * Cancel and destroy all locks in the namespace.
833  *
834  * Typically used during evictions when server notified client that it was
835  * evicted and all of its state needs to be destroyed.
836  * Also used during shutdown.
837  */
838 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
839 {
840         if (ns == NULL) {
841                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
842                 return ELDLM_OK;
843         }
844
845         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags);
846         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL);
847         return ELDLM_OK;
848 }
849 EXPORT_SYMBOL(ldlm_namespace_cleanup);
850
851 /**
852  * Attempts to free namespace.
853  *
854  * Only used when namespace goes away, like during an unmount.
855  */
856 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
857 {
858         /* At shutdown time, don't call the cancellation callback */
859         ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
860
861         if (atomic_read(&ns->ns_bref) > 0) {
862                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
863                 int rc;
864
865                 CDEBUG(D_DLMTRACE,
866                        "dlm namespace %s free waiting on refcount %d\n",
867                        ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
868 force_wait:
869                 if (force)
870                         lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
871
872                 rc = l_wait_event(ns->ns_waitq,
873                                   atomic_read(&ns->ns_bref) == 0, &lwi);
874
875                 /* Forced cleanups should be able to reclaim all references,
876                  * so it's safe to wait forever... we can't leak locks... */
877                 if (force && rc == -ETIMEDOUT) {
878                         LCONSOLE_ERROR("Forced cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
879                                        ldlm_ns_name(ns),
880                                        atomic_read(&ns->ns_bref), rc);
881                         goto force_wait;
882                 }
883
884                 if (atomic_read(&ns->ns_bref)) {
885                         LCONSOLE_ERROR("Cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
886                                        ldlm_ns_name(ns),
887                                        atomic_read(&ns->ns_bref), rc);
888                         return ELDLM_NAMESPACE_EXISTS;
889                 }
890                 CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
891                        ldlm_ns_name(ns));
892         }
893
894         return ELDLM_OK;
895 }
896
897 /**
898  * Performs various cleanups for passed \a ns to make it drop refc and be
899  * ready for freeing. Waits for refc == 0.
900  *
901  * The following is done:
902  * (0) Unregister \a ns from its list to make inaccessible for potential
903  * users like pools thread and others;
904  * (1) Clear all locks in \a ns.
905  */
906 void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
907                                struct obd_import *imp,
908                                int force)
909 {
910         int rc;
911
912         if (!ns)
913                 return;
914
915         spin_lock(&ns->ns_lock);
916         ns->ns_stopping = 1;
917         spin_unlock(&ns->ns_lock);
918
919         /*
920          * Can fail with -EINTR when force == 0 in which case try harder.
921          */
922         rc = __ldlm_namespace_free(ns, force);
923         if (rc != ELDLM_OK) {
924                 if (imp) {
925                         ptlrpc_disconnect_import(imp, 0);
926                         ptlrpc_invalidate_import(imp);
927                 }
928
929                 /*
930                  * With all requests dropped and the import inactive
931                  * we are guaranteed all reference will be dropped.
932                  */
933                 rc = __ldlm_namespace_free(ns, 1);
934                 LASSERT(rc == 0);
935         }
936 }
937
938 /** Unregister \a ns from the list of namespaces. */
939 static void ldlm_namespace_unregister(struct ldlm_namespace *ns,
940                                       ldlm_side_t client)
941 {
942         mutex_lock(ldlm_namespace_lock(client));
943         LASSERT(!list_empty(&ns->ns_list_chain));
944         /* Some asserts and possibly other parts of the code are still
945          * using list_empty(&ns->ns_list_chain). This is why it is
946          * important to use list_del_init() here. */
947         list_del_init(&ns->ns_list_chain);
948         ldlm_namespace_nr_dec(client);
949         mutex_unlock(ldlm_namespace_lock(client));
950 }
951
952 /**
953  * Frees memory structures related to \a ns. This is only done
954  * when ldlm_namespace_free_prior() successfully removed all resources
955  * referencing \a ns and its refc == 0.
956  */
957 void ldlm_namespace_free_post(struct ldlm_namespace *ns)
958 {
959         if (!ns)
960                 return;
961
962         /* Make sure that nobody can find this ns in its list. */
963         ldlm_namespace_unregister(ns, ns->ns_client);
964         /* Fini the pool _before_ the parent proc dir is removed. This is
965          * important because ldlm_pool_fini() removes its own proc dir, which
966          * is a child of @dir; removing it after @dir may cause an oops. */
967         ldlm_pool_fini(&ns->ns_pool);
968
969         ldlm_namespace_debugfs_unregister(ns);
970         ldlm_namespace_sysfs_unregister(ns);
971         cfs_hash_putref(ns->ns_rs_hash);
972         /* Namespace \a ns should not be on the list at this time; otherwise
973          * the poold thread may end up using \a ns after it has been
974          * freed. */
975         LASSERT(list_empty(&ns->ns_list_chain));
976         kfree(ns);
977         ldlm_put_ref();
978 }
979
980 void ldlm_namespace_get(struct ldlm_namespace *ns)
981 {
982         atomic_inc(&ns->ns_bref);
983 }
984 EXPORT_SYMBOL(ldlm_namespace_get);
985
986 /* This is only for callers that care about refcount */
987 static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
988 {
989         return atomic_inc_return(&ns->ns_bref);
990 }
991
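/* Release a namespace reference taken with ldlm_namespace_get(); wakes up
 * waiters (e.g. __ldlm_namespace_free()) once the count drops to zero. */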
992 void ldlm_namespace_put(struct ldlm_namespace *ns)
993 {
994         if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
995                 wake_up(&ns->ns_waitq);
996                 spin_unlock(&ns->ns_lock);
997         }
998 }
999 EXPORT_SYMBOL(ldlm_namespace_put);
1000
1001 /** Should be called with ldlm_namespace_lock(client) taken. */
1002 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
1003                                           ldlm_side_t client)
1004 {
1005         LASSERT(!list_empty(&ns->ns_list_chain));
1006         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1007         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
1008 }
1009
1010 /** Should be called with ldlm_namespace_lock(client) taken. */
1011 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
1012                                             ldlm_side_t client)
1013 {
1014         LASSERT(!list_empty(&ns->ns_list_chain));
1015         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1016         list_move_tail(&ns->ns_list_chain, &ldlm_cli_inactive_namespace_list);
1017 }
1018
1019 /** Should be called with ldlm_namespace_lock(client) taken. */
1020 struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
1021 {
1022         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1023         LASSERT(!list_empty(ldlm_namespace_list(client)));
1024         return container_of(ldlm_namespace_list(client)->next,
1025                 struct ldlm_namespace, ns_list_chain);
1026 }
1027
1028 /** Create and initialize new resource. */
1029 static struct ldlm_resource *ldlm_resource_new(void)
1030 {
1031         struct ldlm_resource *res;
1032         int idx;
1033
1034         res = kmem_cache_alloc(ldlm_resource_slab, GFP_NOFS | __GFP_ZERO);
1035         if (res == NULL)
1036                 return NULL;
1037
1038         INIT_LIST_HEAD(&res->lr_granted);
1039         INIT_LIST_HEAD(&res->lr_waiting);
1040
1041         /* Initialize interval trees for each lock mode. */
1042         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1043                 res->lr_itree[idx].lit_size = 0;
1044                 res->lr_itree[idx].lit_mode = 1 << idx;
1045                 res->lr_itree[idx].lit_root = NULL;
1046         }
1047
1048         atomic_set(&res->lr_refcount, 1);
1049         spin_lock_init(&res->lr_lock);
1050         lu_ref_init(&res->lr_reference);
1051
1052         /* The creator of the resource must unlock the mutex after LVB
1053          * initialization. */
1054         mutex_init(&res->lr_lvb_mutex);
1055         mutex_lock(&res->lr_lvb_mutex);
1056
1057         return res;
1058 }
1059
1060 /**
1061  * Return a reference to resource with given name, creating it if necessary.
1062  * Args: namespace with ns_lock unlocked
1063  * Locks: takes and releases NS hash-lock and res->lr_lock
1064  * Returns: referenced, unlocked ldlm_resource or NULL
1065  */
1066 struct ldlm_resource *
1067 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
1068                   const struct ldlm_res_id *name, ldlm_type_t type, int create)
1069 {
1070         struct hlist_node     *hnode;
1071         struct ldlm_resource *res;
1072         struct cfs_hash_bd       bd;
1073         __u64            version;
1074         int                   ns_refcount = 0;
1075
1076         LASSERT(ns != NULL);
1077         LASSERT(parent == NULL);
1078         LASSERT(ns->ns_rs_hash != NULL);
1079         LASSERT(name->name[0] != 0);
1080
1081         cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
1082         hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1083         if (hnode != NULL) {
1084                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1085                 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1086                 /* Synchronize with regard to resource creation. */
1087                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
1088                         mutex_lock(&res->lr_lvb_mutex);
1089                         mutex_unlock(&res->lr_lvb_mutex);
1090                 }
1091
1092                 if (unlikely(res->lr_lvb_len < 0)) {
1093                         ldlm_resource_putref(res);
1094                         res = NULL;
1095                 }
1096                 return res;
1097         }
1098
1099         version = cfs_hash_bd_version_get(&bd);
1100         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1101
1102         if (create == 0)
1103                 return NULL;
1104
1105         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
1106                  "type: %d\n", type);
1107         res = ldlm_resource_new();
1108         if (!res)
1109                 return NULL;
1110
1111         res->lr_ns_bucket  = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
1112         res->lr_name       = *name;
1113         res->lr_type       = type;
1114         res->lr_most_restr = LCK_NL;
1115
1116         cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1117         hnode = (version == cfs_hash_bd_version_get(&bd)) ?  NULL :
1118                 cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1119
1120         if (hnode != NULL) {
1121                 /* Someone won the race and already added the resource. */
1122                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1123                 /* Clean lu_ref for failed resource. */
1124                 lu_ref_fini(&res->lr_reference);
1125                 /* We have taken lr_lvb_mutex. Drop it. */
1126                 mutex_unlock(&res->lr_lvb_mutex);
1127                 kmem_cache_free(ldlm_resource_slab, res);
1128
1129                 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1130                 /* Synchronize with regard to resource creation. */
1131                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
1132                         mutex_lock(&res->lr_lvb_mutex);
1133                         mutex_unlock(&res->lr_lvb_mutex);
1134                 }
1135
1136                 if (unlikely(res->lr_lvb_len < 0)) {
1137                         ldlm_resource_putref(res);
1138                         res = NULL;
1139                 }
1140                 return res;
1141         }
1142         /* We won! Let's add the resource. */
1143         cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
1144         if (cfs_hash_bd_count_get(&bd) == 1)
1145                 ns_refcount = ldlm_namespace_get_return(ns);
1146
1147         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1148         if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
1149                 int rc;
1150
1151                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
1152                 rc = ns->ns_lvbo->lvbo_init(res);
1153                 if (rc < 0) {
1154                         CERROR("%s: lvbo_init failed for resource %#llx:%#llx: rc = %d\n",
1155                                ns->ns_obd->obd_name, name->name[0],
1156                                name->name[1], rc);
1157                         kfree(res->lr_lvb_data);
1158                         res->lr_lvb_data = NULL;
1159                         res->lr_lvb_len = rc;
1160                         mutex_unlock(&res->lr_lvb_mutex);
1161                         ldlm_resource_putref(res);
1162                         return NULL;
1163                 }
1164         }
1165
1166         /* The resource was created with lr_lvb_mutex held; drop it now. */
1167         mutex_unlock(&res->lr_lvb_mutex);
1168
1169         /* Let's see if we happened to be the very first resource in this
1170          * namespace. If so, and this is a client namespace, we need to move
1171          * the namespace into the active namespaces list to be patrolled by
1172          * the ldlm_poold. */
1173         if (ns_refcount == 1) {
1174                 mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1175                 ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
1176                 mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1177         }
1178
1179         return res;
1180 }
1181 EXPORT_SYMBOL(ldlm_resource_get);
1182
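/* Called with the hash bucket locked after lr_refcount dropped to zero:
 * remove the resource from the namespace hash and drop the namespace
 * reference if this was the last resource in the bucket. */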
1183 static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
1184                                          struct ldlm_resource *res)
1185 {
1186         struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
1187
1188         if (!list_empty(&res->lr_granted)) {
1189                 ldlm_resource_dump(D_ERROR, res);
1190                 LBUG();
1191         }
1192
1193         if (!list_empty(&res->lr_waiting)) {
1194                 ldlm_resource_dump(D_ERROR, res);
1195                 LBUG();
1196         }
1197
1198         cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
1199                                bd, &res->lr_hash);
1200         lu_ref_fini(&res->lr_reference);
1201         if (cfs_hash_bd_count_get(bd) == 0)
1202                 ldlm_namespace_put(nsb->nsb_namespace);
1203 }
1204
1205 /* Returns 1 if the resource was freed, 0 if it remains. */
1206 int ldlm_resource_putref(struct ldlm_resource *res)
1207 {
1208         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1209         struct cfs_hash_bd   bd;
1210
1211         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1212         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1213                res, atomic_read(&res->lr_refcount) - 1);
1214
1215         cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
1216         if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
1217                 __ldlm_resource_putref_final(&bd, res);
1218                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1219                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1220                         ns->ns_lvbo->lvbo_free(res);
1221                 kmem_cache_free(ldlm_resource_slab, res);
1222                 return 1;
1223         }
1224         return 0;
1225 }
1226 EXPORT_SYMBOL(ldlm_resource_putref);
1227
1228 /* Returns 1 if the resource was freed, 0 if it remains. */
1229 int ldlm_resource_putref_locked(struct ldlm_resource *res)
1230 {
1231         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1232
1233         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1234         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1235                res, atomic_read(&res->lr_refcount) - 1);
1236
1237         if (atomic_dec_and_test(&res->lr_refcount)) {
1238                 struct cfs_hash_bd bd;
1239
1240                 cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
1241                                 &res->lr_name, &bd);
1242                 __ldlm_resource_putref_final(&bd, res);
1243                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1244                 /* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF,
1245                  * so we can never get here via cfs_hash_del();
1246                  * cfs_hash_for_each_nolock() is the only path that leads
1247                  * here, and in that case it is safe to release
1248                  * cfs_hash_bd_lock. */
1249                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1250                         ns->ns_lvbo->lvbo_free(res);
1251                 kmem_cache_free(ldlm_resource_slab, res);
1252
1253                 cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1254                 return 1;
1255         }
1256         return 0;
1257 }
1258
1259 /**
1260  * Add a lock into a given resource into specified lock list.
1261  */
1262 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1263                             struct ldlm_lock *lock)
1264 {
1265         check_res_locked(res);
1266
1267         LDLM_DEBUG(lock, "About to add this lock:\n");
1268
1269         if (lock->l_flags & LDLM_FL_DESTROYED) {
1270                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1271                 return;
1272         }
1273
1274         LASSERT(list_empty(&lock->l_res_link));
1275
1276         list_add_tail(&lock->l_res_link, head);
1277 }
1278
1279 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1280 {
1281         int type = lock->l_resource->lr_type;
1282
1283         check_res_locked(lock->l_resource);
1284         if (type == LDLM_IBITS || type == LDLM_PLAIN)
1285                 ldlm_unlink_lock_skiplist(lock);
1286         else if (type == LDLM_EXTENT)
1287                 ldlm_extent_unlink_lock(lock);
1288         list_del_init(&lock->l_res_link);
1289 }
1290 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
1291
1292 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1293 {
1294         desc->lr_type = res->lr_type;
1295         desc->lr_name = res->lr_name;
1296 }
1297
1298 /**
1299  * Print information about all locks in all namespaces on this node to debug
1300  * log.
1301  */
1302 void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
1303 {
1304         struct list_head *tmp;
1305
1306         if (!((libcfs_debug | D_ERROR) & level))
1307                 return;
1308
1309         mutex_lock(ldlm_namespace_lock(client));
1310
1311         list_for_each(tmp, ldlm_namespace_list(client)) {
1312                 struct ldlm_namespace *ns;
1313
1314                 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
1315                 ldlm_namespace_dump(level, ns);
1316         }
1317
1318         mutex_unlock(ldlm_namespace_lock(client));
1319 }
1320 EXPORT_SYMBOL(ldlm_dump_all_namespaces);
1321
1322 static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1323                               struct hlist_node *hnode, void *arg)
1324 {
1325         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1326         int    level = (int)(unsigned long)arg;
1327
1328         lock_res(res);
1329         ldlm_resource_dump(level, res);
1330         unlock_res(res);
1331
1332         return 0;
1333 }
1334
1335 /**
1336  * Print information about all locks in this namespace on this node to debug
1337  * log.
1338  */
1339 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1340 {
1341         if (!((libcfs_debug | D_ERROR) & level))
1342                 return;
1343
1344         CDEBUG(level, "--- Namespace: %s (rc: %d, side: client)\n",
1345                ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
1346
1347         if (time_before(cfs_time_current(), ns->ns_next_dump))
1348                 return;
1349
1350         cfs_hash_for_each_nolock(ns->ns_rs_hash,
1351                                  ldlm_res_hash_dump,
1352                                  (void *)(unsigned long)level);
1353         spin_lock(&ns->ns_lock);
1354         ns->ns_next_dump = cfs_time_shift(10);
1355         spin_unlock(&ns->ns_lock);
1356 }
1357 EXPORT_SYMBOL(ldlm_namespace_dump);
1358
1359 /**
1360  * Print information about all locks in this resource to debug log.
1361  */
1362 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1363 {
1364         struct ldlm_lock *lock;
1365         unsigned int granted = 0;
1366
1367         CLASSERT(RES_NAME_SIZE == 4);
1368
1369         if (!((libcfs_debug | D_ERROR) & level))
1370                 return;
1371
1372         CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
1373                PLDLMRES(res), res, atomic_read(&res->lr_refcount));
1374
1375         if (!list_empty(&res->lr_granted)) {
1376                 CDEBUG(level, "Granted locks (in reverse order):\n");
1377                 list_for_each_entry_reverse(lock, &res->lr_granted,
1378                                                 l_res_link) {
1379                         LDLM_DEBUG_LIMIT(level, lock, "###");
1380                         if (!(level & D_CANTMASK) &&
1381                             ++granted > ldlm_dump_granted_max) {
1382                                 CDEBUG(level, "only dump %d granted locks to avoid DDOS.\n",
1383                                        granted);
1384                                 break;
1385                         }
1386                 }
1387         }
1388         if (!list_empty(&res->lr_waiting)) {
1389                 CDEBUG(level, "Waiting locks:\n");
1390                 list_for_each_entry(lock, &res->lr_waiting, l_res_link)
1391                         LDLM_DEBUG_LIMIT(level, lock, "###");
1392         }
1393 }