/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_pool.c
 *
 * Author: Yury Umanets <umka@clusterfs.com>
 */
/*
 * The idea of this code is rather simple. Every second, for each server
 * namespace we calculate the SLV - the server lock volume - from the current
 * number of granted locks, the grant speed for the past period, and so on;
 * that is, from the locking load. For simplicity, this SLV number may be
 * thought of as a flow definition. It is sent to clients at every opportunity
 * to let them know the current load situation on the server. Initially, the
 * SLV on the server is set to a maximum value, calculated so as to allow one
 * client to hold all locks of the limit ->pl_limit for 10h.
 *
 * On clients, the number of cached locks is no longer limited artificially in
 * any way, as it was before. Instead, the client calculates the CLV - the
 * client lock volume - for each lock and compares it with the last SLV
 * received from the server. The CLV is calculated as the number of locks in
 * the LRU * the lock's live time in seconds. If CLV > SLV, the lock is
 * canceled.
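 *
 * A worked example of the rule above (the numbers are hypothetical, not
 * defaults): with 1000 locks in the LRU, a lock that has lived for 30s has
 * CLV = 1000 * 30 = 30000. If the last SLV received from the server is
 * 20000, then CLV > SLV and the lock is canceled; against a lightly loaded
 * server advertising SLV = 100000, the lock stays cached.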
 *
 * The client also has an LVF - a lock volume factor - which regulates how
 * sensitive the client should be to the last SLV received from the server:
 * the higher the LVF, the more locks are canceled on the client. Its default
 * value is 1. Setting the LVF to 2 means that the client will cancel locks
 * twice as fast.
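 *
 * In terms of the comparison above, the LVF effectively scales the CLV, so
 * a lock is (roughly) canceled once LVF * CLV > SLV; continuing the example,
 * LVF = 2 would cancel the 30s-old lock even against SLV = 50000.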
 *
 * Locks on a client will be canceled more aggressively in these cases:
 * (1) the SLV is smaller, that is, the load is higher on the server;
 * (2) the client holds many locks (the more locks a client holds, the higher
 * the chance that some of them should be canceled);
 * (3) the client has old locks (taken some time ago).
 *
 * Thus, in the flow paradigm that we use to better understand the SLV, the
 * CLV is the volume of a particle in the flow described by the SLV. If the
 * flow is getting thinner, more and more particles fall outside of it, and
 * as the particles are locks, they should be canceled.
 *
 * The general idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
 * Andreas Dilger (adilger@clusterfs.com) proposed a few nice ideas, such as
 * using the LVF, and many cleanups. The flow definition, which allows an
 * easier understanding of the logic, belongs to Nikita Danilov
 * (nikita@clusterfs.com), as do many cleanups and fixes. The design and
 * implementation are by Yury Umanets (umka@clusterfs.com).
 *
 * Glossary for terms used:
 *
 * pl_limit - Number of allowed locks in pool. Applies to server and client
 * side (tunable);
 *
 * pl_granted - Number of granted locks (calculated);
 * pl_grant_rate - Number of granted locks for last T (calculated);
 * pl_cancel_rate - Number of canceled locks for last T (calculated);
 * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
 * pl_grant_plan - Planned number of granted locks for next T (calculated);
 * pl_server_lock_volume - Current server lock volume (calculated);
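 *
 * For example, if during the last period T the pool granted 150 locks and
 * canceled 100, then pl_grant_rate = 150, pl_cancel_rate = 100 and
 * pl_grant_speed = 150 - 100 = 50.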
 *
 * As can be seen from the list above, we have a few tunables which may
 * greatly affect behavior. They may all be modified via sysfs. However, they
 * also make it possible to construct several pre-defined behavior policies.
 * If none of the predefined policies suits the working pattern in use, a new
 * one may be "constructed" via the sysfs tunables.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#include "../include/lustre_dlm.h"
#include "../include/cl_object.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "ldlm_internal.h"

/*
 * 50 ldlm locks for 1MB of RAM.
 */
#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)
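
/*
 * Worked example for the macro above, assuming 4KB pages
 * (PAGE_CACHE_SHIFT == 12): NUM_CACHEPAGES >> (20 - 12) divides the page
 * count by 256 (the number of 4KB pages per MB), yielding RAM size in MB;
 * multiplying by 50 then gives 50 locks per MB, e.g. 51200 locks for 1GB.
 */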

/*
 * Maximal possible grant step plan in %.
 */
#define LDLM_POOL_MAX_GSP (30)

/*
 * Minimal possible grant step plan in %.
 */
#define LDLM_POOL_MIN_GSP (1)

/*
 * This controls the speed of reaching LDLM_POOL_MAX_GSP
 * with increasing thread period.
 */
#define LDLM_POOL_GSP_STEP_SHIFT (2)

/*
 * LDLM_POOL_GSP% of all locks is default GP.
 */
#define LDLM_POOL_GP(L) (((L) * LDLM_POOL_MAX_GSP) / 100)

/*
 * Max age for locks on clients.
 */
#define LDLM_POOL_MAX_AGE (36000)

/*
 * The granularity of SLV calculation.
 */
#define LDLM_POOL_SLV_SHIFT (10)

static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
	return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
}
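
/*
 * dru() is a divide-by-2^shift with optional round-up. For example,
 * dru(1000, 10, 0) == 1000 >> 10 == 0 (round down), while
 * dru(1000, 10, 1) == (1000 + 1023) >> 10 == 1 (round up).
 */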

static inline __u64 ldlm_pool_slv_max(__u32 L)
{
	/*
	 * Allow to have all locks for 1 client for 10 hrs.
	 * Formula is the following: limit * 10h / 1 client.
	 */
	__u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;

	return lim;
}
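
/*
 * E.g. for a limit L of 50000 locks, the maximal SLV is
 * 50000 * 36000 = 1800000000 (lock-seconds).
 */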

static inline __u64 ldlm_pool_slv_min(__u32 L)
{
	return 1;
}

enum {
	LDLM_POOL_FIRST_STAT = 0,
	LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
	LDLM_POOL_GRANT_STAT,
	LDLM_POOL_CANCEL_STAT,
	LDLM_POOL_GRANT_RATE_STAT,
	LDLM_POOL_CANCEL_RATE_STAT,
	LDLM_POOL_GRANT_PLAN_STAT,
	LDLM_POOL_SLV_STAT,
	LDLM_POOL_SHRINK_REQTD_STAT,
	LDLM_POOL_SHRINK_FREED_STAT,
	LDLM_POOL_RECALC_STAT,
	LDLM_POOL_TIMING_STAT,
	LDLM_POOL_LAST_STAT
};

static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
{
	return container_of(pl, struct ldlm_namespace, ns_pool);
}

/**
 * Calculates suggested grant_step in % of available locks for passed
 * \a period. This is later used in grant_plan calculations.
 */
static inline int ldlm_pool_t2gsp(unsigned int t)
{
	/*
	 * This yields a 1% grant step for thread periods short enough that
	 * (t >> LDLM_POOL_GSP_STEP_SHIFT) is 0, rising to 30% for longer
	 * periods.
	 *
	 * How this affects execution:
	 *
	 * - for a thread period of 1s we will have a grant_step of 1%, which
	 * is good from the pov of taking some load off the server and pushing
	 * it out to clients. A 1% grant_step means that the server will not
	 * allow clients to grab lots of locks in a short period of time while
	 * keeping all their old locks cached. Clients will always have to
	 * give some locks back if they want to take new ones;
	 *
	 * - for the default thread period of 10s we will have 23%, which
	 * means that clients will have enough room to take new locks without
	 * giving any back. All locks from this 23% which were not taken by
	 * clients in the current period will contribute to SLV growth, and a
	 * growing SLV means more locks cached on clients, until the limit or
	 * grant plan is reached.
	 */
	return LDLM_POOL_MAX_GSP -
	       ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
		(t >> LDLM_POOL_GSP_STEP_SHIFT));
}
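
/*
 * Example values from the formula above: t = 1 gives 30 - (29 >> 0) = 1%,
 * t = 10 gives 30 - (29 >> 2) = 23%, and t = 20 gives 30 - (29 >> 5) = 30%.
 */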

/**
 * Returns current \a pl limit.
 */
static __u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_limit);
}

/**
 * Sets passed \a limit to \a pl.
 */
static void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
	atomic_set(&pl->pl_limit, limit);
}

/**
 * Recalculates next stats on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
{
	int grant_plan = pl->pl_grant_plan;
	__u64 slv = pl->pl_server_lock_volume;
	int granted = atomic_read(&pl->pl_granted);
	int grant_rate = atomic_read(&pl->pl_grant_rate);
	int cancel_rate = atomic_read(&pl->pl_cancel_rate);

	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
			    slv);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
			    granted);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
			    grant_rate);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
			    grant_plan);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
			    cancel_rate);
}

/**
 * Sets SLV and Limit from ldlm_pl2ns(pl)->ns_obd to passed \a pl.
 */
static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
{
	struct obd_device *obd;

	/*
	 * Get new SLV and Limit from obd which is updated with incoming
	 * RPCs.
	 */
	obd = ldlm_pl2ns(pl)->ns_obd;
	LASSERT(obd != NULL);
	read_lock(&obd->obd_pool_lock);
	pl->pl_server_lock_volume = obd->obd_pool_slv;
	ldlm_pool_set_limit(pl, obd->obd_pool_limit);
	read_unlock(&obd->obd_pool_lock);
}

/**
 * Recalculates client side pool \a pl according to current SLV and Limit.
 */
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
	time64_t recalc_interval_sec;
	int ret;

	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec < pl->pl_recalc_period)
		return 0;

	spin_lock(&pl->pl_lock);
	/*
	 * Check if we need to recalc lists now.
	 */
	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec < pl->pl_recalc_period) {
		spin_unlock(&pl->pl_lock);
		return 0;
	}

	/*
	 * Make sure that pool knows last SLV and Limit from obd.
	 */
	ldlm_cli_pool_pop_slv(pl);

	spin_unlock(&pl->pl_lock);

	/*
	 * Do not cancel locks in case lru resize is disabled for this ns.
	 */
	if (!ns_connect_lru_resize(ldlm_pl2ns(pl))) {
		ret = 0;
		goto out;
	}

	/*
	 * While canceling locks on the client we do not need to maintain
	 * sharp timing; we only want to cancel locks asap according to the
	 * new SLV. This may be called when the SLV has changed a lot, which
	 * is why we do not take pl->pl_recalc_time into account here.
	 */
	ret = ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, LDLM_CANCEL_LRUR);

out:
	spin_lock(&pl->pl_lock);
	/*
	 * Time of LRU resizing might be longer than period,
	 * so update after LRU resizing rather than before it.
	 */
	pl->pl_recalc_time = ktime_get_real_seconds();
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
			    recalc_interval_sec);
	spin_unlock(&pl->pl_lock);
	return ret;
}

/**
 * This function is the main entry point for memory pressure handling on the
 * client side. Its main goal is to cancel some number of locks on the passed
 * \a pl according to \a nr and \a gfp_mask.
 */
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
				int nr, gfp_t gfp_mask)
{
	struct ldlm_namespace *ns;
	int unused;

	ns = ldlm_pl2ns(pl);

	/*
	 * Do not cancel locks in case lru resize is disabled for this ns.
	 */
	if (!ns_connect_lru_resize(ns))
		return 0;

	/*
	 * Make sure that pool knows last SLV and Limit from obd.
	 */
	ldlm_cli_pool_pop_slv(pl);

	spin_lock(&ns->ns_lock);
	unused = ns->ns_nr_unused;
	spin_unlock(&ns->ns_lock);

	if (nr == 0)
		return (unused / 100) * sysctl_vfs_cache_pressure;
	return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
}
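
/*
 * For example, with 5000 unused locks in the LRU and the default
 * sysctl_vfs_cache_pressure of 100, a counting call (nr == 0) above reports
 * (5000 / 100) * 100 == 5000 freeable locks.
 */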

static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
	.po_recalc = ldlm_cli_pool_recalc,
	.po_shrink = ldlm_cli_pool_shrink
};

/**
 * Pool recalc wrapper. Will call either client or server pool recalc
 * callback depending on which pool \a pl is used.
 */
static int ldlm_pool_recalc(struct ldlm_pool *pl)
{
	u32 recalc_interval_sec;
	int count;

	recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec <= 0)
		goto recalc;

	spin_lock(&pl->pl_lock);
	if (recalc_interval_sec > 0) {
		/*
		 * Update pool statistics every 1s.
		 */
		ldlm_pool_recalc_stats(pl);

		/*
		 * Zero out all rates and speed for the last period.
		 */
		atomic_set(&pl->pl_grant_rate, 0);
		atomic_set(&pl->pl_cancel_rate, 0);
	}
	spin_unlock(&pl->pl_lock);

recalc:
	if (pl->pl_ops->po_recalc != NULL) {
		count = pl->pl_ops->po_recalc(pl);
		lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
				    count);
	}
	recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() +
			      pl->pl_recalc_period;
	if (recalc_interval_sec <= 0) {
		/* Prevent too frequent recalculation. */
		CDEBUG(D_INFO,
		       "Negative interval(%d), too short period(%lld)",
		       recalc_interval_sec,
		       (s64)pl->pl_recalc_period);
		recalc_interval_sec = 1;
	}

	return recalc_interval_sec;
}
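
/*
 * Note that the interval returned above is consumed by
 * ldlm_pools_thread_main() below as the number of seconds to sleep before
 * the next recalc pass, so the clamp to 1s also bounds how often the pools
 * thread can wake up.
 */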

/**
 * Pool shrink wrapper. Will call either client or server pool shrink
 * callback depending on which pool \a pl is used. When nr == 0, just return
 * the number of freeable locks. Otherwise, return the number of canceled
 * locks.
 */
static int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
{
	int cancel = 0;

	if (pl->pl_ops->po_shrink != NULL) {
		cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
		if (nr > 0) {
			lprocfs_counter_add(pl->pl_stats,
					    LDLM_POOL_SHRINK_REQTD_STAT,
					    nr);
			lprocfs_counter_add(pl->pl_stats,
					    LDLM_POOL_SHRINK_FREED_STAT,
					    cancel);
			CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, shrunk %d\n",
			       pl->pl_name, nr, cancel);
		}
	}
	return cancel;
}

static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
	int granted, grant_rate, cancel_rate;
	int grant_speed, lvf;
	struct ldlm_pool *pl = m->private;
	__u64 slv, clv;
	__u32 limit;

	spin_lock(&pl->pl_lock);
	slv = pl->pl_server_lock_volume;
	clv = pl->pl_client_lock_volume;
	limit = ldlm_pool_get_limit(pl);
	granted = atomic_read(&pl->pl_granted);
	grant_rate = atomic_read(&pl->pl_grant_rate);
	cancel_rate = atomic_read(&pl->pl_cancel_rate);
	grant_speed = grant_rate - cancel_rate;
	lvf = atomic_read(&pl->pl_lock_volume_factor);
	spin_unlock(&pl->pl_lock);

	seq_printf(m, "LDLM pool state (%s):\n"
		      "  SLV: %llu\n"
		      "  CLV: %llu\n"
		      "  LVF: %d\n",
		   pl->pl_name, slv, clv, lvf);

	seq_printf(m, "  GR:  %d\n  CR:  %d\n  GS:  %d\n"
		      "  G:   %d\n  L:   %d\n",
		   grant_rate, cancel_rate, grant_speed,
		   granted, limit);
	return 0;
}

LPROC_SEQ_FOPS_RO(lprocfs_pool_state);

static ssize_t grant_speed_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
					    pl_kobj);
	int grant_speed;

	spin_lock(&pl->pl_lock);
	/* serialize with ldlm_pool_recalc */
	grant_speed = atomic_read(&pl->pl_grant_rate) -
		      atomic_read(&pl->pl_cancel_rate);
	spin_unlock(&pl->pl_lock);
	return sprintf(buf, "%d\n", grant_speed);
}
LUSTRE_RO_ATTR(grant_speed);

LDLM_POOL_SYSFS_READER_SHOW(grant_plan, int);
LUSTRE_RO_ATTR(grant_plan);

LDLM_POOL_SYSFS_READER_SHOW(recalc_period, int);
LDLM_POOL_SYSFS_WRITER_STORE(recalc_period, int);
LUSTRE_RW_ATTR(recalc_period);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(server_lock_volume, u64);
LUSTRE_RO_ATTR(server_lock_volume);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(limit, atomic);
LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(limit, atomic);
LUSTRE_RW_ATTR(limit);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(granted, atomic);
LUSTRE_RO_ATTR(granted);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(cancel_rate, atomic);
LUSTRE_RO_ATTR(cancel_rate);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(grant_rate, atomic);
LUSTRE_RO_ATTR(grant_rate);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(lock_volume_factor, atomic);
LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(lock_volume_factor, atomic);
LUSTRE_RW_ATTR(lock_volume_factor);

#define LDLM_POOL_ADD_VAR(name, var, ops)			\
	do {							\
		snprintf(var_name, MAX_STRING_SIZE, #name);	\
		pool_vars[0].data = var;			\
		pool_vars[0].fops = ops;			\
		ldebugfs_add_vars(pl->pl_debugfs_entry, pool_vars, NULL);\
	} while (0)

/* These are for pools in /sys/fs/lustre/ldlm/namespaces/.../pool */
static struct attribute *ldlm_pl_attrs[] = {
	&lustre_attr_grant_speed.attr,
	&lustre_attr_grant_plan.attr,
	&lustre_attr_recalc_period.attr,
	&lustre_attr_server_lock_volume.attr,
	&lustre_attr_limit.attr,
	&lustre_attr_granted.attr,
	&lustre_attr_cancel_rate.attr,
	&lustre_attr_grant_rate.attr,
	&lustre_attr_lock_volume_factor.attr,
	NULL,
};

static void ldlm_pl_release(struct kobject *kobj)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
					    pl_kobj);
	complete(&pl->pl_kobj_unregister);
}

static struct kobj_type ldlm_pl_ktype = {
	.default_attrs = ldlm_pl_attrs,
	.sysfs_ops = &lustre_sysfs_ops,
	.release = ldlm_pl_release,
};

static int ldlm_pool_sysfs_init(struct ldlm_pool *pl)
{
	struct ldlm_namespace *ns = ldlm_pl2ns(pl);
	int err;

	init_completion(&pl->pl_kobj_unregister);
	err = kobject_init_and_add(&pl->pl_kobj, &ldlm_pl_ktype, &ns->ns_kobj,
				   "pool");

	return err;
}

static int ldlm_pool_debugfs_init(struct ldlm_pool *pl)
{
	struct ldlm_namespace *ns = ldlm_pl2ns(pl);
	struct dentry *debugfs_ns_parent;
	struct lprocfs_vars pool_vars[2];
	char *var_name = NULL;
	int rc = 0;

	var_name = kzalloc(MAX_STRING_SIZE + 1, GFP_NOFS);
	if (!var_name)
		return -ENOMEM;

	debugfs_ns_parent = ns->ns_debugfs_entry;
	if (IS_ERR_OR_NULL(debugfs_ns_parent)) {
		CERROR("%s: debugfs entry is not initialized\n",
		       ldlm_ns_name(ns));
		rc = -EINVAL;
		goto out_free_name;
	}
	pl->pl_debugfs_entry = ldebugfs_register("pool", debugfs_ns_parent,
						 NULL, NULL);
	if (IS_ERR(pl->pl_debugfs_entry)) {
		CERROR("LdebugFS failed in ldlm-pool-init\n");
		rc = PTR_ERR(pl->pl_debugfs_entry);
		pl->pl_debugfs_entry = NULL;
		goto out_free_name;
	}

	var_name[MAX_STRING_SIZE] = '\0';
	memset(pool_vars, 0, sizeof(pool_vars));
	pool_vars[0].name = var_name;

	LDLM_POOL_ADD_VAR(state, pl, &lprocfs_pool_state_fops);

	pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
					   LDLM_POOL_FIRST_STAT, 0);
	if (!pl->pl_stats) {
		rc = -ENOMEM;
		goto out_free_name;
	}

	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "granted", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "grant", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "cancel", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "grant_rate", "locks/s");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "cancel_rate", "locks/s");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "grant_plan", "locks/s");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "slv", "slv");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "shrink_request", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "shrink_freed", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "recalc_freed", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "recalc_timing", "sec");
	rc = ldebugfs_register_stats(pl->pl_debugfs_entry, "stats",
				     pl->pl_stats);

out_free_name:
	kfree(var_name);
	return rc;
}

static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl)
{
	kobject_put(&pl->pl_kobj);
	wait_for_completion(&pl->pl_kobj_unregister);
}

static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
{
	if (pl->pl_stats != NULL) {
		lprocfs_free_stats(&pl->pl_stats);
		pl->pl_stats = NULL;
	}
	if (pl->pl_debugfs_entry != NULL) {
		ldebugfs_remove(&pl->pl_debugfs_entry);
		pl->pl_debugfs_entry = NULL;
	}
}

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
		   int idx, ldlm_side_t client)
{
	int rc;

	spin_lock_init(&pl->pl_lock);
	atomic_set(&pl->pl_granted, 0);
	pl->pl_recalc_time = ktime_get_seconds();
	atomic_set(&pl->pl_lock_volume_factor, 1);

	atomic_set(&pl->pl_grant_rate, 0);
	atomic_set(&pl->pl_cancel_rate, 0);
	pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);

	snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
		 ldlm_ns_name(ns), idx);

	ldlm_pool_set_limit(pl, 1);
	pl->pl_server_lock_volume = 0;
	pl->pl_ops = &ldlm_cli_pool_ops;
	pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
	pl->pl_client_lock_volume = 0;
	rc = ldlm_pool_debugfs_init(pl);
	if (rc)
		return rc;

	rc = ldlm_pool_sysfs_init(pl);
	if (rc)
		return rc;

	CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);

	return 0;
}
EXPORT_SYMBOL(ldlm_pool_init);

void ldlm_pool_fini(struct ldlm_pool *pl)
{
	ldlm_pool_sysfs_fini(pl);
	ldlm_pool_debugfs_fini(pl);

	/*
	 * Pool should not be used after this point. We can't free it here as
	 * it lives in struct ldlm_namespace, but we are still interested in
	 * catching any abnormal use.
	 */
	POISON(pl, 0x5a, sizeof(*pl));
}
EXPORT_SYMBOL(ldlm_pool_fini);

/**
 * Add new taken ldlm lock \a lock into pool \a pl accounting.
 */
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
	/*
	 * FLOCK locks are special in that they are almost never cancelled;
	 * instead, a special kind of lock is used to drop them. Also, there
	 * is no LRU for flock locks, so there is no point in tracking them
	 * anyway.
	 */
	if (lock->l_resource->lr_type == LDLM_FLOCK)
		return;

	atomic_inc(&pl->pl_granted);
	atomic_inc(&pl->pl_grant_rate);
	lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);

	/*
	 * Do not do pool recalc on the client side, as all locks which may
	 * potentially be canceled have already been packed into the
	 * enqueue/cancel RPC. Also, we do not want to run out of stack with
	 * too long call paths.
	 */
}
EXPORT_SYMBOL(ldlm_pool_add);

/**
 * Remove ldlm lock \a lock from pool \a pl accounting.
 */
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
	/*
	 * Filter out FLOCK locks. Read the comment in ldlm_pool_add() above.
	 */
	if (lock->l_resource->lr_type == LDLM_FLOCK)
		return;

	LASSERT(atomic_read(&pl->pl_granted) > 0);
	atomic_dec(&pl->pl_granted);
	atomic_inc(&pl->pl_cancel_rate);

	lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
}
EXPORT_SYMBOL(ldlm_pool_del);

/**
 * Returns current \a pl SLV.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
	__u64 slv;

	spin_lock(&pl->pl_lock);
	slv = pl->pl_server_lock_volume;
	spin_unlock(&pl->pl_lock);
	return slv;
}

/**
 * Sets passed \a clv to \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
	spin_lock(&pl->pl_lock);
	pl->pl_client_lock_volume = clv;
	spin_unlock(&pl->pl_lock);
}

/**
 * Returns current LVF from \a pl.
 */
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_lock_volume_factor);
}

static int ldlm_pool_granted(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_granted);
}

static struct ptlrpc_thread *ldlm_pools_thread;
static struct completion ldlm_pools_comp;

/*
 * count locks from all namespaces (if possible). Returns number of
 * cached locks.
 */
static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
{
	int total = 0, nr_ns;
	struct ldlm_namespace *ns;
	struct ldlm_namespace *ns_old = NULL; /* loop detection */
	void *cookie;

	if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
		return 0;

	CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
	       client == LDLM_NAMESPACE_CLIENT ? "client" : "server");

	cookie = cl_env_reenter();

	/*
	 * Find out how many resources we may release.
	 */
	for (nr_ns = ldlm_namespace_nr_read(client);
	     nr_ns > 0; nr_ns--) {
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			cl_env_reexit(cookie);
			return 0;
		}
		ns = ldlm_namespace_first_locked(client);

		if (ns == ns_old) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}

		if (ldlm_ns_empty(ns)) {
			ldlm_namespace_move_to_inactive_locked(ns, client);
			mutex_unlock(ldlm_namespace_lock(client));
			continue;
		}

		if (ns_old == NULL)
			ns_old = ns;

		ldlm_namespace_get(ns);
		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));
		total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
		ldlm_namespace_put(ns);
	}

	cl_env_reexit(cookie);
	return total;
}

static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
{
	unsigned long freed = 0;
	int tmp, nr_ns;
	struct ldlm_namespace *ns;
	void *cookie;

	if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
		return -1;

	cookie = cl_env_reenter();

	/*
	 * Shrink at least ldlm_namespace_nr_read(client) namespaces.
	 */
	for (tmp = nr_ns = ldlm_namespace_nr_read(client);
	     tmp > 0; tmp--) {
		int cancel, nr_locks;

		/*
		 * Do not call shrink under ldlm_namespace_lock(client)
		 */
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}
		ns = ldlm_namespace_first_locked(client);
		ldlm_namespace_get(ns);
		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));

		nr_locks = ldlm_pool_granted(&ns->ns_pool);
		/*
		 * We used to shrink proportionally, but with the new shrinker
		 * API we lost the total number of freeable locks.
		 */
		cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
		freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
		ldlm_namespace_put(ns);
	}
	cl_env_reexit(cookie);
	/*
	 * we only decrease the SLV in server pools shrinker, return
	 * SHRINK_STOP to kernel to avoid needless loop. LU-1128
	 */
	return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
}
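
/*
 * Illustration of the proportional split above: asked to scan nr = 128
 * locks across nr_ns = 4 namespaces, each iteration cancels at most
 * 1 + min(nr_locks, 128 / 4) = 33 locks from the namespace currently at
 * the head of the list before rotating it to the tail.
 */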

static unsigned long ldlm_pools_cli_count(struct shrinker *s,
					  struct shrink_control *sc)
{
	return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
					 struct shrink_control *sc)
{
	return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
			       sc->gfp_mask);
}

static int ldlm_pools_recalc(ldlm_side_t client)
{
	struct ldlm_namespace *ns;
	struct ldlm_namespace *ns_old = NULL;
	int nr;
	int time = 50; /* seconds of sleep if no active namespaces */

	/*
	 * Recalc at least ldlm_namespace_nr_read(client) namespaces.
	 */
	for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
		int skip;

		/*
		 * Lock the list, get first @ns in the list, getref, move it
		 * to the tail, unlock and call pool recalc. This way we avoid
		 * calling recalc under @ns lock, which is really good as we
		 * get rid of a potential deadlock on client nodes when
		 * canceling locks synchronously.
		 */
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}
		ns = ldlm_namespace_first_locked(client);

		if (ns_old == ns) { /* Full pass complete */
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}

		/* We got an empty namespace, need to move it back to inactive
		 * list.
		 * The race with parallel resource creation is fine:
		 * - If they do namespace_get before our check, we fail the
		 *   check and they move this item to the end of the list anyway
		 * - If we do the check and then they do namespace_get, then
		 *   we move the namespace to inactive and they will move
		 *   it back to active (synchronised by the lock, so no clash
		 *   there).
		 */
		if (ldlm_ns_empty(ns)) {
			ldlm_namespace_move_to_inactive_locked(ns, client);
			mutex_unlock(ldlm_namespace_lock(client));
			continue;
		}

		if (ns_old == NULL)
			ns_old = ns;

		spin_lock(&ns->ns_lock);
		/*
		 * skip ns which is being freed, and we don't want to increase
		 * its refcount again, not even temporarily. bz21519 & LU-499.
		 */
		if (ns->ns_stopping) {
			skip = 1;
		} else {
			skip = 0;
			ldlm_namespace_get(ns);
		}
		spin_unlock(&ns->ns_lock);

		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));

		/*
		 * After setup is done - recalc the pool.
		 */
		if (!skip) {
			int ttime = ldlm_pool_recalc(&ns->ns_pool);

			if (ttime < time)
				time = ttime;

			ldlm_namespace_put(ns);
		}
	}
	return time;
}

static int ldlm_pools_thread_main(void *arg)
{
	struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
	int c_time;

	thread_set_flags(thread, SVC_RUNNING);
	wake_up(&thread->t_ctl_waitq);

	CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
	       "ldlm_poold", current_pid());

	while (1) {
		struct l_wait_info lwi;

		/*
		 * Recalc all pools on this tick.
		 */
		c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);

		/*
		 * Wait until the next check time, or until we're
		 * stopped.
		 */
		lwi = LWI_TIMEOUT(cfs_time_seconds(c_time),
				  NULL, NULL);
		l_wait_event(thread->t_ctl_waitq,
			     thread_is_stopping(thread) ||
			     thread_is_event(thread),
			     &lwi);

		if (thread_test_and_clear_flags(thread, SVC_STOPPING))
			break;
		thread_test_and_clear_flags(thread, SVC_EVENT);
	}

	thread_set_flags(thread, SVC_STOPPED);
	wake_up(&thread->t_ctl_waitq);

	CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
	       "ldlm_poold", current_pid());

	complete_and_exit(&ldlm_pools_comp, 0);
}

static int ldlm_pools_thread_start(void)
{
	struct l_wait_info lwi = { 0 };
	struct task_struct *task;

	if (ldlm_pools_thread != NULL)
		return -EALREADY;

	ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS);
	if (!ldlm_pools_thread)
		return -ENOMEM;

	init_completion(&ldlm_pools_comp);
	init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);

	task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
			   "ldlm_poold");
	if (IS_ERR(task)) {
		CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
		kfree(ldlm_pools_thread);
		ldlm_pools_thread = NULL;
		return PTR_ERR(task);
	}
	l_wait_event(ldlm_pools_thread->t_ctl_waitq,
		     thread_is_running(ldlm_pools_thread), &lwi);

	return 0;
}

static void ldlm_pools_thread_stop(void)
{
	if (ldlm_pools_thread == NULL)
		return;

	thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
	wake_up(&ldlm_pools_thread->t_ctl_waitq);

	/*
	 * Make sure that the pools thread is finished before freeing @thread.
	 * This fixes a possible race and oops due to accessing freed memory
	 * in the pools thread.
	 */
	wait_for_completion(&ldlm_pools_comp);
	kfree(ldlm_pools_thread);
	ldlm_pools_thread = NULL;
}

static struct shrinker ldlm_pools_cli_shrinker = {
	.count_objects = ldlm_pools_cli_count,
	.scan_objects = ldlm_pools_cli_scan,
	.seeks = DEFAULT_SEEKS,
};

int ldlm_pools_init(void)
{
	int rc;

	rc = ldlm_pools_thread_start();
	if (rc == 0)
		register_shrinker(&ldlm_pools_cli_shrinker);

	return rc;
}
EXPORT_SYMBOL(ldlm_pools_init);

void ldlm_pools_fini(void)
{
	if (ldlm_pools_thread)
		unregister_shrinker(&ldlm_pools_cli_shrinker);

	ldlm_pools_thread_stop();
}
EXPORT_SYMBOL(ldlm_pools_fini);