These changes are the raw update to linux-4.4.6-rt14. Kernel sources
diff --git a/kernel/drivers/cpufreq/cpufreq_governor.c b/kernel/drivers/cpufreq/cpufreq_governor.c
index 1b44496..d994b0f 100644
--- a/kernel/drivers/cpufreq/cpufreq_governor.c
+++ b/kernel/drivers/cpufreq/cpufreq_governor.c
@@ -32,10 +32,10 @@ static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
 
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 {
-       struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+       struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-       struct cpufreq_policy *policy;
+       struct cpufreq_policy *policy = cdbs->shared->policy;
        unsigned int sampling_rate;
        unsigned int max_load = 0;
        unsigned int ignore_nice;
@@ -60,11 +60,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
                ignore_nice = cs_tuners->ignore_nice_load;
        }
 
-       policy = cdbs->cur_policy;
-
        /* Get Absolute Load */
        for_each_cpu(j, policy->cpus) {
-               struct cpu_dbs_common_info *j_cdbs;
+               struct cpu_dbs_info *j_cdbs;
                u64 cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;
                unsigned int load;
@@ -163,9 +161,9 @@ EXPORT_SYMBOL_GPL(dbs_check_cpu);
 static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
                unsigned int delay)
 {
-       struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+       struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
 
-       mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
+       mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay);
 }
 
 void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
@@ -173,10 +171,6 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
 {
        int i;
 
-       mutex_lock(&cpufreq_governor_lock);
-       if (!policy->governor_enabled)
-               goto out_unlock;
-
        if (!all_cpus) {
                /*
                 * Use raw_smp_processor_id() to avoid preemptible warnings.
@@ -190,42 +184,81 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
                for_each_cpu(i, policy->cpus)
                        __gov_queue_work(i, dbs_data, delay);
        }
-
-out_unlock:
-       mutex_unlock(&cpufreq_governor_lock);
 }
 EXPORT_SYMBOL_GPL(gov_queue_work);
 
 static inline void gov_cancel_work(struct dbs_data *dbs_data,
                struct cpufreq_policy *policy)
 {
-       struct cpu_dbs_common_info *cdbs;
+       struct cpu_dbs_info *cdbs;
        int i;
 
        for_each_cpu(i, policy->cpus) {
                cdbs = dbs_data->cdata->get_cpu_cdbs(i);
-               cancel_delayed_work_sync(&cdbs->work);
+               cancel_delayed_work_sync(&cdbs->dwork);
        }
 }
 
 /* Will return if we need to evaluate cpu load again or not */
-bool need_load_eval(struct cpu_dbs_common_info *cdbs,
-               unsigned int sampling_rate)
+static bool need_load_eval(struct cpu_common_dbs_info *shared,
+                          unsigned int sampling_rate)
 {
-       if (policy_is_shared(cdbs->cur_policy)) {
+       if (policy_is_shared(shared->policy)) {
                ktime_t time_now = ktime_get();
-               s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
+               s64 delta_us = ktime_us_delta(time_now, shared->time_stamp);
 
                /* Do nothing if we recently have sampled */
                if (delta_us < (s64)(sampling_rate / 2))
                        return false;
                else
-                       cdbs->time_stamp = time_now;
+                       shared->time_stamp = time_now;
        }
 
        return true;
 }
-EXPORT_SYMBOL_GPL(need_load_eval);
+
+static void dbs_timer(struct work_struct *work)
+{
+       struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info,
+                                                dwork.work);
+       struct cpu_common_dbs_info *shared = cdbs->shared;
+       struct cpufreq_policy *policy;
+       struct dbs_data *dbs_data;
+       unsigned int sampling_rate, delay;
+       bool modify_all = true;
+
+       mutex_lock(&shared->timer_mutex);
+
+       policy = shared->policy;
+
+       /*
+        * Governor might already be disabled and there is no point continuing
+        * with the work-handler.
+        */
+       if (!policy)
+               goto unlock;
+
+       dbs_data = policy->governor_data;
+
+       if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+               struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+
+               sampling_rate = cs_tuners->sampling_rate;
+       } else {
+               struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
+               sampling_rate = od_tuners->sampling_rate;
+       }
+
+       if (!need_load_eval(cdbs->shared, sampling_rate))
+               modify_all = false;
+
+       delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all);
+       gov_queue_work(dbs_data, policy, delay, modify_all);
+
+unlock:
+       mutex_unlock(&shared->timer_mutex);
+}
 
 static void set_sampling_rate(struct dbs_data *dbs_data,
                unsigned int sampling_rate)
@@ -239,211 +272,303 @@ static void set_sampling_rate(struct dbs_data *dbs_data,
        }
 }
 
-int cpufreq_governor_dbs(struct cpufreq_policy *policy,
-               struct common_dbs_data *cdata, unsigned int event)
+static int alloc_common_dbs_info(struct cpufreq_policy *policy,
+                                struct common_dbs_data *cdata)
 {
-       struct dbs_data *dbs_data;
-       struct od_cpu_dbs_info_s *od_dbs_info = NULL;
-       struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
-       struct od_ops *od_ops = NULL;
-       struct od_dbs_tuners *od_tuners = NULL;
-       struct cs_dbs_tuners *cs_tuners = NULL;
-       struct cpu_dbs_common_info *cpu_cdbs;
-       unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
-       int io_busy = 0;
-       int rc;
+       struct cpu_common_dbs_info *shared;
+       int j;
 
-       if (have_governor_per_policy())
-               dbs_data = policy->governor_data;
-       else
-               dbs_data = cdata->gdbs_data;
+       /* Allocate memory for the common information for policy->cpus */
+       shared = kzalloc(sizeof(*shared), GFP_KERNEL);
+       if (!shared)
+               return -ENOMEM;
 
-       WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));
+       /* Set shared for all CPUs, online+offline */
+       for_each_cpu(j, policy->related_cpus)
+               cdata->get_cpu_cdbs(j)->shared = shared;
 
-       switch (event) {
-       case CPUFREQ_GOV_POLICY_INIT:
-               if (have_governor_per_policy()) {
-                       WARN_ON(dbs_data);
-               } else if (dbs_data) {
-                       dbs_data->usage_count++;
-                       policy->governor_data = dbs_data;
-                       return 0;
-               }
+       return 0;
+}
 
-               dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
-               if (!dbs_data) {
-                       pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
-                       return -ENOMEM;
-               }
+static void free_common_dbs_info(struct cpufreq_policy *policy,
+                                struct common_dbs_data *cdata)
+{
+       struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
+       struct cpu_common_dbs_info *shared = cdbs->shared;
+       int j;
 
-               dbs_data->cdata = cdata;
-               dbs_data->usage_count = 1;
-               rc = cdata->init(dbs_data);
-               if (rc) {
-                       pr_err("%s: POLICY_INIT: init() failed\n", __func__);
-                       kfree(dbs_data);
-                       return rc;
-               }
+       for_each_cpu(j, policy->cpus)
+               cdata->get_cpu_cdbs(j)->shared = NULL;
 
-               if (!have_governor_per_policy())
-                       WARN_ON(cpufreq_get_global_kobject());
-
-               rc = sysfs_create_group(get_governor_parent_kobj(policy),
-                               get_sysfs_attr(dbs_data));
-               if (rc) {
-                       cdata->exit(dbs_data);
-                       kfree(dbs_data);
-                       return rc;
-               }
+       kfree(shared);
+}
+
+static int cpufreq_governor_init(struct cpufreq_policy *policy,
+                                struct dbs_data *dbs_data,
+                                struct common_dbs_data *cdata)
+{
+       unsigned int latency;
+       int ret;
+
+       /* State should be equivalent to EXIT */
+       if (policy->governor_data)
+               return -EBUSY;
+
+       if (dbs_data) {
+               if (WARN_ON(have_governor_per_policy()))
+                       return -EINVAL;
+
+               ret = alloc_common_dbs_info(policy, cdata);
+               if (ret)
+                       return ret;
 
+               dbs_data->usage_count++;
                policy->governor_data = dbs_data;
+               return 0;
+       }
+
+       dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
+       if (!dbs_data)
+               return -ENOMEM;
+
+       ret = alloc_common_dbs_info(policy, cdata);
+       if (ret)
+               goto free_dbs_data;
+
+       dbs_data->cdata = cdata;
+       dbs_data->usage_count = 1;
+
+       ret = cdata->init(dbs_data, !policy->governor->initialized);
+       if (ret)
+               goto free_common_dbs_info;
 
-               /* policy latency is in ns. Convert it to us first */
-               latency = policy->cpuinfo.transition_latency / 1000;
-               if (latency == 0)
-                       latency = 1;
+       /* policy latency is in ns. Convert it to us first */
+       latency = policy->cpuinfo.transition_latency / 1000;
+       if (latency == 0)
+               latency = 1;
 
-               /* Bring kernel and HW constraints together */
-               dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
-                               MIN_LATENCY_MULTIPLIER * latency);
-               set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
+       /* Bring kernel and HW constraints together */
+       dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
+                                         MIN_LATENCY_MULTIPLIER * latency);
+       set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
                                        latency * LATENCY_MULTIPLIER));
 
-               if ((cdata->governor == GOV_CONSERVATIVE) &&
-                               (!policy->governor->initialized)) {
-                       struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+       if (!have_governor_per_policy())
+               cdata->gdbs_data = dbs_data;
 
-                       cpufreq_register_notifier(cs_ops->notifier_block,
-                                       CPUFREQ_TRANSITION_NOTIFIER);
-               }
+       policy->governor_data = dbs_data;
 
-               if (!have_governor_per_policy())
-                       cdata->gdbs_data = dbs_data;
+       ret = sysfs_create_group(get_governor_parent_kobj(policy),
+                                get_sysfs_attr(dbs_data));
+       if (ret)
+               goto reset_gdbs_data;
 
-               return 0;
-       case CPUFREQ_GOV_POLICY_EXIT:
-               if (!--dbs_data->usage_count) {
-                       sysfs_remove_group(get_governor_parent_kobj(policy),
-                                       get_sysfs_attr(dbs_data));
+       return 0;
+
+reset_gdbs_data:
+       policy->governor_data = NULL;
+
+       if (!have_governor_per_policy())
+               cdata->gdbs_data = NULL;
+       cdata->exit(dbs_data, !policy->governor->initialized);
+free_common_dbs_info:
+       free_common_dbs_info(policy, cdata);
+free_dbs_data:
+       kfree(dbs_data);
+       return ret;
+}
+
+static int cpufreq_governor_exit(struct cpufreq_policy *policy,
+                                struct dbs_data *dbs_data)
+{
+       struct common_dbs_data *cdata = dbs_data->cdata;
+       struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
 
-                       if (!have_governor_per_policy())
-                               cpufreq_put_global_kobject();
+       /* State should be equivalent to INIT */
+       if (!cdbs->shared || cdbs->shared->policy)
+               return -EBUSY;
 
-                       if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
-                               (policy->governor->initialized == 1)) {
-                               struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+       if (!--dbs_data->usage_count) {
+               sysfs_remove_group(get_governor_parent_kobj(policy),
+                                  get_sysfs_attr(dbs_data));
 
-                               cpufreq_unregister_notifier(cs_ops->notifier_block,
-                                               CPUFREQ_TRANSITION_NOTIFIER);
-                       }
+               policy->governor_data = NULL;
 
-                       cdata->exit(dbs_data);
-                       kfree(dbs_data);
+               if (!have_governor_per_policy())
                        cdata->gdbs_data = NULL;
-               }
 
+               cdata->exit(dbs_data, policy->governor->initialized == 1);
+               kfree(dbs_data);
+       } else {
                policy->governor_data = NULL;
-               return 0;
        }
 
-       cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+       free_common_dbs_info(policy, cdata);
+       return 0;
+}
+
+static int cpufreq_governor_start(struct cpufreq_policy *policy,
+                                 struct dbs_data *dbs_data)
+{
+       struct common_dbs_data *cdata = dbs_data->cdata;
+       unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
+       struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+       struct cpu_common_dbs_info *shared = cdbs->shared;
+       int io_busy = 0;
+
+       if (!policy->cur)
+               return -EINVAL;
+
+       /* State should be equivalent to INIT */
+       if (!shared || shared->policy)
+               return -EBUSY;
+
+       if (cdata->governor == GOV_CONSERVATIVE) {
+               struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 
-       if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
-               cs_tuners = dbs_data->tuners;
-               cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
                sampling_rate = cs_tuners->sampling_rate;
                ignore_nice = cs_tuners->ignore_nice_load;
        } else {
-               od_tuners = dbs_data->tuners;
-               od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+               struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
                sampling_rate = od_tuners->sampling_rate;
                ignore_nice = od_tuners->ignore_nice_load;
-               od_ops = dbs_data->cdata->gov_ops;
                io_busy = od_tuners->io_is_busy;
        }
 
-       switch (event) {
-       case CPUFREQ_GOV_START:
-               if (!policy->cur)
-                       return -EINVAL;
+       shared->policy = policy;
+       shared->time_stamp = ktime_get();
+       mutex_init(&shared->timer_mutex);
 
-               mutex_lock(&dbs_data->mutex);
+       for_each_cpu(j, policy->cpus) {
+               struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
+               unsigned int prev_load;
 
-               for_each_cpu(j, policy->cpus) {
-                       struct cpu_dbs_common_info *j_cdbs =
-                               dbs_data->cdata->get_cpu_cdbs(j);
-                       unsigned int prev_load;
+               j_cdbs->prev_cpu_idle =
+                       get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
 
-                       j_cdbs->cpu = j;
-                       j_cdbs->cur_policy = policy;
-                       j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
-                                              &j_cdbs->prev_cpu_wall, io_busy);
+               prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
+                                           j_cdbs->prev_cpu_idle);
+               j_cdbs->prev_load = 100 * prev_load /
+                                   (unsigned int)j_cdbs->prev_cpu_wall;
 
-                       prev_load = (unsigned int)
-                               (j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle);
-                       j_cdbs->prev_load = 100 * prev_load /
-                                       (unsigned int) j_cdbs->prev_cpu_wall;
+               if (ignore_nice)
+                       j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
-                       if (ignore_nice)
-                               j_cdbs->prev_cpu_nice =
-                                       kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+               INIT_DEFERRABLE_WORK(&j_cdbs->dwork, dbs_timer);
+       }
 
-                       mutex_init(&j_cdbs->timer_mutex);
-                       INIT_DEFERRABLE_WORK(&j_cdbs->work,
-                                            dbs_data->cdata->gov_dbs_timer);
-               }
+       if (cdata->governor == GOV_CONSERVATIVE) {
+               struct cs_cpu_dbs_info_s *cs_dbs_info =
+                       cdata->get_cpu_dbs_info_s(cpu);
 
-               if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
-                       cs_dbs_info->down_skip = 0;
-                       cs_dbs_info->enable = 1;
-                       cs_dbs_info->requested_freq = policy->cur;
-               } else {
-                       od_dbs_info->rate_mult = 1;
-                       od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
-                       od_ops->powersave_bias_init_cpu(cpu);
-               }
+               cs_dbs_info->down_skip = 0;
+               cs_dbs_info->requested_freq = policy->cur;
+       } else {
+               struct od_ops *od_ops = cdata->gov_ops;
+               struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);
 
-               mutex_unlock(&dbs_data->mutex);
+               od_dbs_info->rate_mult = 1;
+               od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+               od_ops->powersave_bias_init_cpu(cpu);
+       }
 
-               /* Initiate timer time stamp */
-               cpu_cdbs->time_stamp = ktime_get();
+       gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
+                      true);
+       return 0;
+}
 
-               gov_queue_work(dbs_data, policy,
-                               delay_for_sampling_rate(sampling_rate), true);
-               break;
+static int cpufreq_governor_stop(struct cpufreq_policy *policy,
+                                struct dbs_data *dbs_data)
+{
+       struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(policy->cpu);
+       struct cpu_common_dbs_info *shared = cdbs->shared;
 
-       case CPUFREQ_GOV_STOP:
-               if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
-                       cs_dbs_info->enable = 0;
+       /* State should be equivalent to START */
+       if (!shared || !shared->policy)
+               return -EBUSY;
 
-               gov_cancel_work(dbs_data, policy);
+       /*
+        * Work-handler must see this updated, as it should not proceed any
+        * further after governor is disabled. And so timer_mutex is taken while
+        * updating this value.
+        */
+       mutex_lock(&shared->timer_mutex);
+       shared->policy = NULL;
+       mutex_unlock(&shared->timer_mutex);
 
-               mutex_lock(&dbs_data->mutex);
-               mutex_destroy(&cpu_cdbs->timer_mutex);
-               cpu_cdbs->cur_policy = NULL;
+       gov_cancel_work(dbs_data, policy);
 
-               mutex_unlock(&dbs_data->mutex);
+       mutex_destroy(&shared->timer_mutex);
+       return 0;
+}
 
-               break;
+static int cpufreq_governor_limits(struct cpufreq_policy *policy,
+                                  struct dbs_data *dbs_data)
+{
+       struct common_dbs_data *cdata = dbs_data->cdata;
+       unsigned int cpu = policy->cpu;
+       struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+
+       /* State should be equivalent to START */
+       if (!cdbs->shared || !cdbs->shared->policy)
+               return -EBUSY;
+
+       mutex_lock(&cdbs->shared->timer_mutex);
+       if (policy->max < cdbs->shared->policy->cur)
+               __cpufreq_driver_target(cdbs->shared->policy, policy->max,
+                                       CPUFREQ_RELATION_H);
+       else if (policy->min > cdbs->shared->policy->cur)
+               __cpufreq_driver_target(cdbs->shared->policy, policy->min,
+                                       CPUFREQ_RELATION_L);
+       dbs_check_cpu(dbs_data, cpu);
+       mutex_unlock(&cdbs->shared->timer_mutex);
+
+       return 0;
+}
+
+int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+                        struct common_dbs_data *cdata, unsigned int event)
+{
+       struct dbs_data *dbs_data;
+       int ret;
+
+       /* Lock governor to block concurrent initialization of governor */
+       mutex_lock(&cdata->mutex);
+
+       if (have_governor_per_policy())
+               dbs_data = policy->governor_data;
+       else
+               dbs_data = cdata->gdbs_data;
+
+       if (!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)) {
+               ret = -EINVAL;
+               goto unlock;
+       }
 
+       switch (event) {
+       case CPUFREQ_GOV_POLICY_INIT:
+               ret = cpufreq_governor_init(policy, dbs_data, cdata);
+               break;
+       case CPUFREQ_GOV_POLICY_EXIT:
+               ret = cpufreq_governor_exit(policy, dbs_data);
+               break;
+       case CPUFREQ_GOV_START:
+               ret = cpufreq_governor_start(policy, dbs_data);
+               break;
+       case CPUFREQ_GOV_STOP:
+               ret = cpufreq_governor_stop(policy, dbs_data);
+               break;
        case CPUFREQ_GOV_LIMITS:
-               mutex_lock(&dbs_data->mutex);
-               if (!cpu_cdbs->cur_policy) {
-                       mutex_unlock(&dbs_data->mutex);
-                       break;
-               }
-               mutex_lock(&cpu_cdbs->timer_mutex);
-               if (policy->max < cpu_cdbs->cur_policy->cur)
-                       __cpufreq_driver_target(cpu_cdbs->cur_policy,
-                                       policy->max, CPUFREQ_RELATION_H);
-               else if (policy->min > cpu_cdbs->cur_policy->cur)
-                       __cpufreq_driver_target(cpu_cdbs->cur_policy,
-                                       policy->min, CPUFREQ_RELATION_L);
-               dbs_check_cpu(dbs_data, cpu);
-               mutex_unlock(&cpu_cdbs->timer_mutex);
-               mutex_unlock(&dbs_data->mutex);
+               ret = cpufreq_governor_limits(policy, dbs_data);
                break;
+       default:
+               ret = -EINVAL;
        }
-       return 0;
+
+unlock:
+       mutex_unlock(&cdata->mutex);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
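
For context on how this exported entry point is reached: each dbs-style governor (ondemand, conservative) provides a thin ->governor() callback that forwards the CPUFREQ_GOV_* event to cpufreq_governor_dbs() together with its struct common_dbs_data, which the refactored code above then dispatches to the per-event helpers. Below is a minimal sketch modelled on mainline drivers/cpufreq/cpufreq_ondemand.c of this kernel era; od_dbs_cdata and the wrapper are taken from that file and are not part of this diff.

#include <linux/module.h>
#include "cpufreq_governor.h"

/*
 * Sketch only: mirrors cpufreq_ondemand.c, not part of the patch above.
 * od_dbs_cdata is the ondemand governor's struct common_dbs_data,
 * defined elsewhere in that file.
 */
static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	/* INIT/EXIT/START/STOP/LIMITS all funnel into the common code. */
	return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
}

static struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= od_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};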