These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / drivers / staging / lustre / lustre / ldlm / ldlm_pool.c
index a9f4833..1a4eef6 100644 (file)
  * pl_server_lock_volume - Current server lock volume (calculated);
  *
  * As it may be seen from list above, we have few possible tunables which may
- * affect behavior much. They all may be modified via proc. However, they also
+ * affect behavior much. They all may be modified via sysfs. However, they also
  * give a possibility for constructing few pre-defined behavior policies. If
  * none of predefines is suitable for a working pattern being used, new one may
- * be "constructed" via proc tunables.
+ * be "constructed" via sysfs tunables.
  */
 
 #define DEBUG_SUBSYSTEM S_LDLM
 #include "../include/obd_support.h"
 #include "ldlm_internal.h"
 
-
 /*
  * 50 ldlm locks for 1MB of RAM.
  */
@@ -214,67 +213,19 @@ static inline int ldlm_pool_t2gsp(unsigned int t)
 }
 
 /**
- * Recalculates next grant limit on passed \a pl.
- *
- * \pre ->pl_lock is locked.
+ * Returns current \a pl limit.
  */
-static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
+static __u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
 {
-       int granted, grant_step, limit;
-
-       limit = ldlm_pool_get_limit(pl);
-       granted = atomic_read(&pl->pl_granted);
-
-       grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
-       grant_step = ((limit - granted) * grant_step) / 100;
-       pl->pl_grant_plan = granted + grant_step;
-       limit = (limit * 5) >> 2;
-       if (pl->pl_grant_plan > limit)
-               pl->pl_grant_plan = limit;
+       return atomic_read(&pl->pl_limit);
 }
 
 /**
- * Recalculates next SLV on passed \a pl.
- *
- * \pre ->pl_lock is locked.
+ * Sets passed \a limit to \a pl.
  */
-static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
+static void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
 {
-       int granted;
-       int grant_plan;
-       int round_up;
-       __u64 slv;
-       __u64 slv_factor;
-       __u64 grant_usage;
-       __u32 limit;
-
-       slv = pl->pl_server_lock_volume;
-       grant_plan = pl->pl_grant_plan;
-       limit = ldlm_pool_get_limit(pl);
-       granted = atomic_read(&pl->pl_granted);
-       round_up = granted < limit;
-
-       grant_usage = max_t(int, limit - (granted - grant_plan), 1);
-
-       /*
-        * Find out SLV change factor which is the ratio of grant usage
-        * from limit. SLV changes as fast as the ratio of grant plan
-        * consumption. The more locks from grant plan are not consumed
-        * by clients in last interval (idle time), the faster grows
-        * SLV. And the opposite, the more grant plan is over-consumed
-        * (load time) the faster drops SLV.
-        */
-       slv_factor = grant_usage << LDLM_POOL_SLV_SHIFT;
-       do_div(slv_factor, limit);
-       slv = slv * slv_factor;
-       slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);
-
-       if (slv > ldlm_pool_slv_max(limit))
-               slv = ldlm_pool_slv_max(limit);
-       else if (slv < ldlm_pool_slv_min(limit))
-               slv = ldlm_pool_slv_min(limit);
-
-       pl->pl_server_lock_volume = slv;
+       atomic_set(&pl->pl_limit, limit);
 }
 
 /**
@@ -302,147 +253,6 @@ static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
                            cancel_rate);
 }
 
-/**
- * Sets current SLV into obd accessible via ldlm_pl2ns(pl)->ns_obd.
- */
-static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
-{
-       struct obd_device *obd;
-
-       /*
-        * Set new SLV in obd field for using it later without accessing the
-        * pool. This is required to avoid race between sending reply to client
-        * with new SLV and cleanup server stack in which we can't guarantee
-        * that namespace is still alive. We know only that obd is alive as
-        * long as valid export is alive.
-        */
-       obd = ldlm_pl2ns(pl)->ns_obd;
-       LASSERT(obd != NULL);
-       write_lock(&obd->obd_pool_lock);
-       obd->obd_pool_slv = pl->pl_server_lock_volume;
-       write_unlock(&obd->obd_pool_lock);
-}
-
-/**
- * Recalculates all pool fields on passed \a pl.
- *
- * \pre ->pl_lock is not locked.
- */
-static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
-{
-       time_t recalc_interval_sec;
-
-       recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
-       if (recalc_interval_sec < pl->pl_recalc_period)
-               return 0;
-
-       spin_lock(&pl->pl_lock);
-       recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
-       if (recalc_interval_sec < pl->pl_recalc_period) {
-               spin_unlock(&pl->pl_lock);
-               return 0;
-       }
-       /*
-        * Recalc SLV after last period. This should be done
-        * _before_ recalculating new grant plan.
-        */
-       ldlm_pool_recalc_slv(pl);
-
-       /*
-        * Make sure that pool informed obd of last SLV changes.
-        */
-       ldlm_srv_pool_push_slv(pl);
-
-       /*
-        * Update grant_plan for new period.
-        */
-       ldlm_pool_recalc_grant_plan(pl);
-
-       pl->pl_recalc_time = get_seconds();
-       lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
-                           recalc_interval_sec);
-       spin_unlock(&pl->pl_lock);
-       return 0;
-}
-
-/**
- * This function is used on server side as main entry point for memory
- * pressure handling. It decreases SLV on \a pl according to passed
- * \a nr and \a gfp_mask.
- *
- * Our goal here is to decrease SLV such a way that clients hold \a nr
- * locks smaller in next 10h.
- */
-static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
-                               int nr, gfp_t gfp_mask)
-{
-       __u32 limit;
-
-       /*
-        * VM is asking how many entries may be potentially freed.
-        */
-       if (nr == 0)
-               return atomic_read(&pl->pl_granted);
-
-       /*
-        * Client already canceled locks but server is already in shrinker
-        * and can't cancel anything. Let's catch this race.
-        */
-       if (atomic_read(&pl->pl_granted) == 0)
-               return 0;
-
-       spin_lock(&pl->pl_lock);
-
-       /*
-        * We want shrinker to possibly cause cancellation of @nr locks from
-        * clients or grant approximately @nr locks smaller next intervals.
-        *
-        * This is why we decreased SLV by @nr. This effect will only be as
-        * long as one re-calc interval (1s these days) and this should be
-        * enough to pass this decreased SLV to all clients. On next recalc
-        * interval pool will either increase SLV if locks load is not high
-        * or will keep on same level or even decrease again, thus, shrinker
-        * decreased SLV will affect next recalc intervals and this way will
-        * make locking load lower.
-        */
-       if (nr < pl->pl_server_lock_volume) {
-               pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
-       } else {
-               limit = ldlm_pool_get_limit(pl);
-               pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
-       }
-
-       /*
-        * Make sure that pool informed obd of last SLV changes.
-        */
-       ldlm_srv_pool_push_slv(pl);
-       spin_unlock(&pl->pl_lock);
-
-       /*
-        * We did not really free any memory here so far, it only will be
-        * freed later may be, so that we return 0 to not confuse VM.
-        */
-       return 0;
-}
-
-/**
- * Setup server side pool \a pl with passed \a limit.
- */
-static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
-{
-       struct obd_device *obd;
-
-       obd = ldlm_pl2ns(pl)->ns_obd;
-       LASSERT(obd != NULL && obd != LP_POISON);
-       LASSERT(obd->obd_type != LP_POISON);
-       write_lock(&obd->obd_pool_lock);
-       obd->obd_pool_limit = limit;
-       write_unlock(&obd->obd_pool_lock);
-
-       ldlm_pool_set_limit(pl, limit);
-       return 0;
-}
-
 /**
  * Sets SLV and Limit from ldlm_pl2ns(pl)->ns_obd tp passed \a pl.
  */
@@ -467,10 +277,10 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
  */
 static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 {
-       time_t recalc_interval_sec;
+       time64_t recalc_interval_sec;
        int ret;
 
-       recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+       recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period)
                return 0;
 
@@ -478,7 +288,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
        /*
         * Check if we need to recalc lists now.
         */
-       recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+       recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period) {
                spin_unlock(&pl->pl_lock);
                return 0;
@@ -513,7 +323,7 @@ out:
         * Time of LRU resizing might be longer than period,
         * so update after LRU resizing rather than before it.
         */
-       pl->pl_recalc_time = get_seconds();
+       pl->pl_recalc_time = ktime_get_real_seconds();
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                            recalc_interval_sec);
        spin_unlock(&pl->pl_lock);
@@ -554,12 +364,6 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
                return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
 }
 
-static const struct ldlm_pool_ops ldlm_srv_pool_ops = {
-       .po_recalc = ldlm_srv_pool_recalc,
-       .po_shrink = ldlm_srv_pool_shrink,
-       .po_setup  = ldlm_srv_pool_setup
-};
-
 static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
        .po_recalc = ldlm_cli_pool_recalc,
        .po_shrink = ldlm_cli_pool_shrink
@@ -569,12 +373,12 @@ static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
  * Pool recalc wrapper. Will call either client or server pool recalc callback
  * depending what pool \a pl is used.
  */
-int ldlm_pool_recalc(struct ldlm_pool *pl)
+static int ldlm_pool_recalc(struct ldlm_pool *pl)
 {
-       time_t recalc_interval_sec;
+       u32 recalc_interval_sec;
        int count;
 
-       recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+       recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec <= 0)
                goto recalc;
 
@@ -599,14 +403,14 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
                lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
                                    count);
        }
-       recalc_interval_sec = pl->pl_recalc_time - get_seconds() +
+       recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() +
                              pl->pl_recalc_period;
        if (recalc_interval_sec <= 0) {
                /* Prevent too frequent recalculation. */
-               CDEBUG(D_DLMTRACE, "Negative interval(%ld), "
-                      "too short period(%ld)",
+               CDEBUG(D_DLMTRACE,
+                      "Negative interval(%d), too short period(%lld)",
                       recalc_interval_sec,
-                      pl->pl_recalc_period);
+                      (s64)pl->pl_recalc_period);
                recalc_interval_sec = 1;
        }
 
@@ -618,8 +422,7 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
  * depending what pool pl is used. When nr == 0, just return the number of
  * freeable locks. Otherwise, return the number of canceled locks.
  */
-int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
-                    gfp_t gfp_mask)
+static int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
 {
        int cancel = 0;
 
@@ -638,27 +441,11 @@ int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
        }
        return cancel;
 }
-EXPORT_SYMBOL(ldlm_pool_shrink);
-
-/**
- * Pool setup wrapper. Will call either client or server pool recalc callback
- * depending what pool \a pl is used.
- *
- * Sets passed \a limit into pool \a pl.
- */
-int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
-{
-       if (pl->pl_ops->po_setup != NULL)
-               return pl->pl_ops->po_setup(pl, limit);
-       return 0;
-}
-EXPORT_SYMBOL(ldlm_pool_setup);
 
-#if defined(CONFIG_PROC_FS)
 static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
 {
-       int granted, grant_rate, cancel_rate, grant_step;
-       int grant_speed, grant_plan, lvf;
+       int granted, grant_rate, cancel_rate;
+       int grant_speed, lvf;
        struct ldlm_pool *pl = m->private;
        __u64 slv, clv;
        __u32 limit;
@@ -667,13 +454,11 @@ static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
        slv = pl->pl_server_lock_volume;
        clv = pl->pl_client_lock_volume;
        limit = ldlm_pool_get_limit(pl);
-       grant_plan = pl->pl_grant_plan;
        granted = atomic_read(&pl->pl_granted);
        grant_rate = atomic_read(&pl->pl_grant_rate);
        cancel_rate = atomic_read(&pl->pl_cancel_rate);
        grant_speed = grant_rate - cancel_rate;
        lvf = atomic_read(&pl->pl_lock_volume_factor);
-       grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
        spin_unlock(&pl->pl_lock);
 
        seq_printf(m, "LDLM pool state (%s):\n"
@@ -682,11 +467,6 @@ static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
                      "  LVF: %d\n",
                      pl->pl_name, slv, clv, lvf);
 
-       if (ns_is_server(ldlm_pl2ns(pl))) {
-               seq_printf(m, "  GSP: %d%%\n"
-                             "  GP:  %d\n",
-                             grant_step, grant_plan);
-       }
        seq_printf(m, "  GR:  %d\n  CR:  %d\n  GS:  %d\n"
                      "  G:   %d\n  L:   %d\n",
                      grant_rate, cancel_rate, grant_speed,
@@ -694,11 +474,15 @@ static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
 
        return 0;
 }
+
 LPROC_SEQ_FOPS_RO(lprocfs_pool_state);
 
-static int lprocfs_grant_speed_seq_show(struct seq_file *m, void *unused)
+static ssize_t grant_speed_show(struct kobject *kobj, struct attribute *attr,
+                               char *buf)
 {
-       struct ldlm_pool *pl = m->private;
+       struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
+                                           pl_kobj);
+
        int            grant_speed;
 
        spin_lock(&pl->pl_lock);
@@ -706,63 +490,109 @@ static int lprocfs_grant_speed_seq_show(struct seq_file *m, void *unused)
        grant_speed = atomic_read(&pl->pl_grant_rate) -
                        atomic_read(&pl->pl_cancel_rate);
        spin_unlock(&pl->pl_lock);
-       return lprocfs_rd_uint(m, &grant_speed);
+       return sprintf(buf, "%d\n", grant_speed);
 }
+LUSTRE_RO_ATTR(grant_speed);
 
-LDLM_POOL_PROC_READER_SEQ_SHOW(grant_plan, int);
-LPROC_SEQ_FOPS_RO(lprocfs_grant_plan);
+LDLM_POOL_SYSFS_READER_SHOW(grant_plan, int);
+LUSTRE_RO_ATTR(grant_plan);
 
-LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int);
-LDLM_POOL_PROC_WRITER(recalc_period, int);
-static ssize_t lprocfs_recalc_period_seq_write(struct file *file,
-                                              const char __user *buf,
-                                              size_t len, loff_t *off)
-{
-       struct seq_file *seq = file->private_data;
+LDLM_POOL_SYSFS_READER_SHOW(recalc_period, int);
+LDLM_POOL_SYSFS_WRITER_STORE(recalc_period, int);
+LUSTRE_RW_ATTR(recalc_period);
 
-       return lprocfs_wr_recalc_period(file, buf, len, seq->private);
-}
-LPROC_SEQ_FOPS(lprocfs_recalc_period);
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(server_lock_volume, u64);
+LUSTRE_RO_ATTR(server_lock_volume);
+
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(limit, atomic);
+LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(limit, atomic);
+LUSTRE_RW_ATTR(limit);
 
-LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, u64);
-LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, atomic);
-LPROC_SEQ_FOPS_RW_TYPE(ldlm_pool_rw, atomic);
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(granted, atomic);
+LUSTRE_RO_ATTR(granted);
 
-LPROC_SEQ_FOPS_RO(lprocfs_grant_speed);
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(cancel_rate, atomic);
+LUSTRE_RO_ATTR(cancel_rate);
+
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(grant_rate, atomic);
+LUSTRE_RO_ATTR(grant_rate);
+
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(lock_volume_factor, atomic);
+LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(lock_volume_factor, atomic);
+LUSTRE_RW_ATTR(lock_volume_factor);
 
 #define LDLM_POOL_ADD_VAR(name, var, ops)                      \
        do {                                                    \
                snprintf(var_name, MAX_STRING_SIZE, #name);     \
                pool_vars[0].data = var;                        \
                pool_vars[0].fops = ops;                        \
-               lprocfs_add_vars(pl->pl_proc_dir, pool_vars, NULL);\
+               ldebugfs_add_vars(pl->pl_debugfs_entry, pool_vars, NULL);\
        } while (0)
 
-static int ldlm_pool_proc_init(struct ldlm_pool *pl)
+/* These are for pools in /sys/fs/lustre/ldlm/namespaces/.../pool */
+static struct attribute *ldlm_pl_attrs[] = {
+       &lustre_attr_grant_speed.attr,
+       &lustre_attr_grant_plan.attr,
+       &lustre_attr_recalc_period.attr,
+       &lustre_attr_server_lock_volume.attr,
+       &lustre_attr_limit.attr,
+       &lustre_attr_granted.attr,
+       &lustre_attr_cancel_rate.attr,
+       &lustre_attr_grant_rate.attr,
+       &lustre_attr_lock_volume_factor.attr,
+       NULL,
+};
+
+static void ldlm_pl_release(struct kobject *kobj)
+{
+       struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
+                                           pl_kobj);
+       complete(&pl->pl_kobj_unregister);
+}
+
+static struct kobj_type ldlm_pl_ktype = {
+       .default_attrs  = ldlm_pl_attrs,
+       .sysfs_ops      = &lustre_sysfs_ops,
+       .release        = ldlm_pl_release,
+};
+
+static int ldlm_pool_sysfs_init(struct ldlm_pool *pl)
 {
        struct ldlm_namespace *ns = ldlm_pl2ns(pl);
-       struct proc_dir_entry *parent_ns_proc;
+       int err;
+
+       init_completion(&pl->pl_kobj_unregister);
+       err = kobject_init_and_add(&pl->pl_kobj, &ldlm_pl_ktype, &ns->ns_kobj,
+                                  "pool");
+
+       return err;
+}
+
+static int ldlm_pool_debugfs_init(struct ldlm_pool *pl)
+{
+       struct ldlm_namespace *ns = ldlm_pl2ns(pl);
+       struct dentry *debugfs_ns_parent;
        struct lprocfs_vars pool_vars[2];
        char *var_name = NULL;
        int rc = 0;
 
-       OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
+       var_name = kzalloc(MAX_STRING_SIZE + 1, GFP_NOFS);
        if (!var_name)
                return -ENOMEM;
 
-       parent_ns_proc = ns->ns_proc_dir_entry;
-       if (parent_ns_proc == NULL) {
-               CERROR("%s: proc entry is not initialized\n",
+       debugfs_ns_parent = ns->ns_debugfs_entry;
+       if (IS_ERR_OR_NULL(debugfs_ns_parent)) {
+               CERROR("%s: debugfs entry is not initialized\n",
                       ldlm_ns_name(ns));
                rc = -EINVAL;
                goto out_free_name;
        }
-       pl->pl_proc_dir = lprocfs_register("pool", parent_ns_proc,
-                                          NULL, NULL);
-       if (IS_ERR(pl->pl_proc_dir)) {
-               CERROR("LProcFS failed in ldlm-pool-init\n");
-               rc = PTR_ERR(pl->pl_proc_dir);
-               pl->pl_proc_dir = NULL;
+       pl->pl_debugfs_entry = ldebugfs_register("pool", debugfs_ns_parent,
+                                                NULL, NULL);
+       if (IS_ERR(pl->pl_debugfs_entry)) {
+               CERROR("LdebugFS failed in ldlm-pool-init\n");
+               rc = PTR_ERR(pl->pl_debugfs_entry);
+               pl->pl_debugfs_entry = NULL;
                goto out_free_name;
        }
 
@@ -770,20 +600,7 @@ static int ldlm_pool_proc_init(struct ldlm_pool *pl)
        memset(pool_vars, 0, sizeof(pool_vars));
        pool_vars[0].name = var_name;
 
-       LDLM_POOL_ADD_VAR("server_lock_volume", &pl->pl_server_lock_volume,
-                         &ldlm_pool_u64_fops);
-       LDLM_POOL_ADD_VAR("limit", &pl->pl_limit, &ldlm_pool_rw_atomic_fops);
-       LDLM_POOL_ADD_VAR("granted", &pl->pl_granted, &ldlm_pool_atomic_fops);
-       LDLM_POOL_ADD_VAR("grant_speed", pl, &lprocfs_grant_speed_fops);
-       LDLM_POOL_ADD_VAR("cancel_rate", &pl->pl_cancel_rate,
-                         &ldlm_pool_atomic_fops);
-       LDLM_POOL_ADD_VAR("grant_rate", &pl->pl_grant_rate,
-                         &ldlm_pool_atomic_fops);
-       LDLM_POOL_ADD_VAR("grant_plan", pl, &lprocfs_grant_plan_fops);
-       LDLM_POOL_ADD_VAR("recalc_period", pl, &lprocfs_recalc_period_fops);
-       LDLM_POOL_ADD_VAR("lock_volume_factor", &pl->pl_lock_volume_factor,
-                         &ldlm_pool_rw_atomic_fops);
-       LDLM_POOL_ADD_VAR("state", pl, &lprocfs_pool_state_fops);
+       LDLM_POOL_ADD_VAR(state, pl, &lprocfs_pool_state_fops);
 
        pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
                                           LDLM_POOL_FIRST_STAT, 0);
@@ -825,32 +642,31 @@ static int ldlm_pool_proc_init(struct ldlm_pool *pl)
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "recalc_timing", "sec");
-       rc = lprocfs_register_stats(pl->pl_proc_dir, "stats", pl->pl_stats);
+       rc = ldebugfs_register_stats(pl->pl_debugfs_entry, "stats",
+                                    pl->pl_stats);
 
 out_free_name:
-       OBD_FREE(var_name, MAX_STRING_SIZE + 1);
+       kfree(var_name);
        return rc;
 }
 
-static void ldlm_pool_proc_fini(struct ldlm_pool *pl)
+static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl)
+{
+       kobject_put(&pl->pl_kobj);
+       wait_for_completion(&pl->pl_kobj_unregister);
+}
+
+static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
 {
        if (pl->pl_stats != NULL) {
                lprocfs_free_stats(&pl->pl_stats);
                pl->pl_stats = NULL;
        }
-       if (pl->pl_proc_dir != NULL) {
-               lprocfs_remove(&pl->pl_proc_dir);
-               pl->pl_proc_dir = NULL;
+       if (pl->pl_debugfs_entry != NULL) {
+               ldebugfs_remove(&pl->pl_debugfs_entry);
+               pl->pl_debugfs_entry = NULL;
        }
 }
-#else /* !CONFIG_PROC_FS */
-static int ldlm_pool_proc_init(struct ldlm_pool *pl)
-{
-       return 0;
-}
-
-static void ldlm_pool_proc_fini(struct ldlm_pool *pl) {}
-#endif /* CONFIG_PROC_FS */
 
 int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                   int idx, ldlm_side_t client)
@@ -859,7 +675,7 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
 
        spin_lock_init(&pl->pl_lock);
        atomic_set(&pl->pl_granted, 0);
-       pl->pl_recalc_time = get_seconds();
+       pl->pl_recalc_time = ktime_get_seconds();
        atomic_set(&pl->pl_lock_volume_factor, 1);
 
        atomic_set(&pl->pl_grant_rate, 0);
@@ -869,19 +685,16 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
        snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
                 ldlm_ns_name(ns), idx);
 
-       if (client == LDLM_NAMESPACE_SERVER) {
-               pl->pl_ops = &ldlm_srv_pool_ops;
-               ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
-               pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
-               pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
-       } else {
-               ldlm_pool_set_limit(pl, 1);
-               pl->pl_server_lock_volume = 0;
-               pl->pl_ops = &ldlm_cli_pool_ops;
-               pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
-       }
+       ldlm_pool_set_limit(pl, 1);
+       pl->pl_server_lock_volume = 0;
+       pl->pl_ops = &ldlm_cli_pool_ops;
+       pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
        pl->pl_client_lock_volume = 0;
-       rc = ldlm_pool_proc_init(pl);
+       rc = ldlm_pool_debugfs_init(pl);
+       if (rc)
+               return rc;
+
+       rc = ldlm_pool_sysfs_init(pl);
        if (rc)
                return rc;
 
@@ -893,7 +706,8 @@ EXPORT_SYMBOL(ldlm_pool_init);
 
 void ldlm_pool_fini(struct ldlm_pool *pl)
 {
-       ldlm_pool_proc_fini(pl);
+       ldlm_pool_sysfs_fini(pl);
+       ldlm_pool_debugfs_fini(pl);
 
        /*
         * Pool should not be used after this point. We can't free it here as
@@ -927,8 +741,6 @@ void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
         * enqueue/cancel rpc. Also we do not want to run out of stack
         * with too long call paths.
         */
-       if (ns_is_server(ldlm_pl2ns(pl)))
-               ldlm_pool_recalc(pl);
 }
 EXPORT_SYMBOL(ldlm_pool_add);
 
@@ -948,9 +760,6 @@ void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
        atomic_inc(&pl->pl_cancel_rate);
 
        lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
-
-       if (ns_is_server(ldlm_pl2ns(pl)))
-               ldlm_pool_recalc(pl);
 }
 EXPORT_SYMBOL(ldlm_pool_del);
 
@@ -968,36 +777,6 @@ __u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
        spin_unlock(&pl->pl_lock);
        return slv;
 }
-EXPORT_SYMBOL(ldlm_pool_get_slv);
-
-/**
- * Sets passed \a slv to \a pl.
- *
- * \pre ->pl_lock is not locked.
- */
-void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
-{
-       spin_lock(&pl->pl_lock);
-       pl->pl_server_lock_volume = slv;
-       spin_unlock(&pl->pl_lock);
-}
-EXPORT_SYMBOL(ldlm_pool_set_slv);
-
-/**
- * Returns current \a pl CLV.
- *
- * \pre ->pl_lock is not locked.
- */
-__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
-{
-       __u64 slv;
-
-       spin_lock(&pl->pl_lock);
-       slv = pl->pl_client_lock_volume;
-       spin_unlock(&pl->pl_lock);
-       return slv;
-}
-EXPORT_SYMBOL(ldlm_pool_get_clv);
 
 /**
  * Sets passed \a clv to \a pl.
@@ -1010,25 +789,6 @@ void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
        pl->pl_client_lock_volume = clv;
        spin_unlock(&pl->pl_lock);
 }
-EXPORT_SYMBOL(ldlm_pool_set_clv);
-
-/**
- * Returns current \a pl limit.
- */
-__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
-{
-       return atomic_read(&pl->pl_limit);
-}
-EXPORT_SYMBOL(ldlm_pool_get_limit);
-
-/**
- * Sets passed \a limit to \a pl.
- */
-void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
-{
-       atomic_set(&pl->pl_limit, limit);
-}
-EXPORT_SYMBOL(ldlm_pool_set_limit);
 
 /**
  * Returns current LVF from \a pl.
@@ -1037,7 +797,6 @@ __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
 {
        return atomic_read(&pl->pl_lock_volume_factor);
 }
-EXPORT_SYMBOL(ldlm_pool_get_lvf);
 
 static int ldlm_pool_granted(struct ldlm_pool *pl)
 {
@@ -1150,20 +909,7 @@ static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
         * we only decrease the SLV in server pools shrinker, return
         * SHRINK_STOP to kernel to avoid needless loop. LU-1128
         */
-       return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
-}
-
-static unsigned long ldlm_pools_srv_count(struct shrinker *s,
-                                         struct shrink_control *sc)
-{
-       return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
-}
-
-static unsigned long ldlm_pools_srv_scan(struct shrinker *s,
-                                        struct shrink_control *sc)
-{
-       return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
-                              sc->gfp_mask);
+       return freed;
 }
 
 static unsigned long ldlm_pools_cli_count(struct shrinker *s,
@@ -1179,81 +925,13 @@ static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
                               sc->gfp_mask);
 }
 
-int ldlm_pools_recalc(ldlm_side_t client)
+static int ldlm_pools_recalc(ldlm_side_t client)
 {
-       __u32 nr_l = 0, nr_p = 0, l;
        struct ldlm_namespace *ns;
        struct ldlm_namespace *ns_old = NULL;
-       int nr, equal = 0;
+       int nr;
        int time = 50; /* seconds of sleep if no active namespaces */
 
-       /*
-        * No need to setup pool limit for client pools.
-        */
-       if (client == LDLM_NAMESPACE_SERVER) {
-               /*
-                * Check all modest namespaces first.
-                */
-               mutex_lock(ldlm_namespace_lock(client));
-               list_for_each_entry(ns, ldlm_namespace_list(client),
-                                       ns_list_chain) {
-                       if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
-                               continue;
-
-                       l = ldlm_pool_granted(&ns->ns_pool);
-                       if (l == 0)
-                               l = 1;
-
-                       /*
-                        * Set the modest pools limit equal to their avg granted
-                        * locks + ~6%.
-                        */
-                       l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
-                       ldlm_pool_setup(&ns->ns_pool, l);
-                       nr_l += l;
-                       nr_p++;
-               }
-
-               /*
-                * Make sure that modest namespaces did not eat more that 2/3
-                * of limit.
-                */
-               if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
-                       CWARN("\"Modest\" pools eat out 2/3 of server locks limit (%d of %lu). This means that you have too many clients for this amount of server RAM. Upgrade server!\n",
-                             nr_l, LDLM_POOL_HOST_L);
-                       equal = 1;
-               }
-
-               /*
-                * The rest is given to greedy namespaces.
-                */
-               list_for_each_entry(ns, ldlm_namespace_list(client),
-                                   ns_list_chain) {
-                       if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
-                               continue;
-
-                       if (equal) {
-                               /*
-                                * In the case 2/3 locks are eaten out by
-                                * modest pools, we re-setup equal limit
-                                * for _all_ pools.
-                                */
-                               l = LDLM_POOL_HOST_L /
-                                       ldlm_namespace_nr_read(client);
-                       } else {
-                               /*
-                                * All the rest of greedy pools will have
-                                * all locks in equal parts.
-                                */
-                               l = (LDLM_POOL_HOST_L - nr_l) /
-                                       (ldlm_namespace_nr_read(client) -
-                                        nr_p);
-                       }
-                       ldlm_pool_setup(&ns->ns_pool, l);
-               }
-               mutex_unlock(ldlm_namespace_lock(client));
-       }
-
        /*
         * Recalc at least ldlm_namespace_nr_read(client) namespaces.
         */
@@ -1327,12 +1005,11 @@ int ldlm_pools_recalc(ldlm_side_t client)
        }
        return time;
 }
-EXPORT_SYMBOL(ldlm_pools_recalc);
 
 static int ldlm_pools_thread_main(void *arg)
 {
        struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
-       int s_time, c_time;
+       int c_time;
 
        thread_set_flags(thread, SVC_RUNNING);
        wake_up(&thread->t_ctl_waitq);
@@ -1346,14 +1023,13 @@ static int ldlm_pools_thread_main(void *arg)
                /*
                 * Recal all pools on this tick.
                 */
-               s_time = ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
                c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);
 
                /*
                 * Wait until the next check time, or until we're
                 * stopped.
                 */
-               lwi = LWI_TIMEOUT(cfs_time_seconds(min(s_time, c_time)),
+               lwi = LWI_TIMEOUT(cfs_time_seconds(c_time),
                                  NULL, NULL);
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopping(thread) ||
@@ -1362,8 +1038,7 @@ static int ldlm_pools_thread_main(void *arg)
 
                if (thread_test_and_clear_flags(thread, SVC_STOPPING))
                        break;
-               else
-                       thread_test_and_clear_flags(thread, SVC_EVENT);
+               thread_test_and_clear_flags(thread, SVC_EVENT);
        }
 
        thread_set_flags(thread, SVC_STOPPED);
@@ -1383,8 +1058,8 @@ static int ldlm_pools_thread_start(void)
        if (ldlm_pools_thread != NULL)
                return -EALREADY;
 
-       OBD_ALLOC_PTR(ldlm_pools_thread);
-       if (ldlm_pools_thread == NULL)
+       ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS);
+       if (!ldlm_pools_thread)
                return -ENOMEM;
 
        init_completion(&ldlm_pools_comp);
@@ -1394,7 +1069,7 @@ static int ldlm_pools_thread_start(void)
                           "ldlm_poold");
        if (IS_ERR(task)) {
                CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
-               OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread));
+               kfree(ldlm_pools_thread);
                ldlm_pools_thread = NULL;
                return PTR_ERR(task);
        }
@@ -1417,16 +1092,10 @@ static void ldlm_pools_thread_stop(void)
         * in pools thread.
         */
        wait_for_completion(&ldlm_pools_comp);
-       OBD_FREE_PTR(ldlm_pools_thread);
+       kfree(ldlm_pools_thread);
        ldlm_pools_thread = NULL;
 }
 
-static struct shrinker ldlm_pools_srv_shrinker = {
-       .count_objects  = ldlm_pools_srv_count,
-       .scan_objects   = ldlm_pools_srv_scan,
-       .seeks          = DEFAULT_SEEKS,
-};
-
 static struct shrinker ldlm_pools_cli_shrinker = {
        .count_objects  = ldlm_pools_cli_count,
        .scan_objects   = ldlm_pools_cli_scan,
@@ -1438,18 +1107,18 @@ int ldlm_pools_init(void)
        int rc;
 
        rc = ldlm_pools_thread_start();
-       if (rc == 0) {
-               register_shrinker(&ldlm_pools_srv_shrinker);
+       if (rc == 0)
                register_shrinker(&ldlm_pools_cli_shrinker);
-       }
+
        return rc;
 }
 EXPORT_SYMBOL(ldlm_pools_init);
 
 void ldlm_pools_fini(void)
 {
-       unregister_shrinker(&ldlm_pools_srv_shrinker);
-       unregister_shrinker(&ldlm_pools_cli_shrinker);
+       if (ldlm_pools_thread)
+               unregister_shrinker(&ldlm_pools_cli_shrinker);
+
        ldlm_pools_thread_stop();
 }
 EXPORT_SYMBOL(ldlm_pools_fini);