These changes are the raw update of the kvmfornfv.git kernel sources to linux-4.4.6-rt14.
diff --git a/kernel/kernel/sched/debug.c b/kernel/kernel/sched/debug.c
index 34b0000..a2d69b8 100644
--- a/kernel/kernel/sched/debug.c
+++ b/kernel/kernel/sched/debug.c
@@ -68,13 +68,8 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 #define PN(F) \
        SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
 
-       if (!se) {
-               struct sched_avg *avg = &cpu_rq(cpu)->avg;
-               P(avg->runnable_avg_sum);
-               P(avg->avg_period);
+       if (!se)
                return;
-       }
-
 
        PN(se->exec_start);
        PN(se->vruntime);
@@ -93,12 +88,8 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 #endif
        P(se->load.weight);
 #ifdef CONFIG_SMP
-       P(se->avg.runnable_avg_sum);
-       P(se->avg.running_avg_sum);
-       P(se->avg.avg_period);
-       P(se->avg.load_avg_contrib);
-       P(se->avg.utilization_avg_contrib);
-       P(se->avg.decay_count);
+       P(se->avg.load_avg);
+       P(se->avg.util_avg);
 #endif
 #undef PN
 #undef P
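
Note: the per-entity stats above shrink because this update pulls in the flat
PELT rewrite that landed upstream around v4.3: the old
runnable_avg_sum/avg_period/decay_count bookkeeping is replaced by a single
load average and a single utilization average per entity. For orientation
only (this is an approximate sketch of the v4.4 layout in
include/linux/sched.h, not part of this diff):

        struct sched_avg {
                u64             last_update_time; /* ns timestamp of last PELT update */
                u64             load_sum;         /* decaying sum of load-weighted runnable time */
                u32             util_sum;         /* decaying sum of running time */
                u32             period_contrib;   /* partially elapsed 1024us period */
                unsigned long   load_avg;         /* ~load_sum / LOAD_AVG_MAX */
                unsigned long   util_avg;         /* ~util_sum / LOAD_AVG_MAX, 0..SCHED_CAPACITY_SCALE */
        };
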
@@ -132,15 +123,17 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
                p->prio);
 #ifdef CONFIG_SCHEDSTATS
        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
-               SPLIT_NS(p->se.vruntime),
+               SPLIT_NS(p->se.statistics.wait_sum),
                SPLIT_NS(p->se.sum_exec_runtime),
                SPLIT_NS(p->se.statistics.sum_sleep_runtime));
 #else
-       SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
-               0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
+       SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
+               0LL, 0L,
+               SPLIT_NS(p->se.sum_exec_runtime),
+               0LL, 0L);
 #endif
 #ifdef CONFIG_NUMA_BALANCING
-       SEQ_printf(m, " %d", task_node(p));
+       SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
 #endif
 #ifdef CONFIG_CGROUP_SCHED
        SEQ_printf(m, " %s", task_group_path(task_group(p)));
@@ -156,7 +149,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
        SEQ_printf(m,
        "\nrunnable tasks:\n"
        "            task   PID         tree-key  switches  prio"
-       "     exec-runtime         sum-exec        sum-sleep\n"
+       "     wait-time             sum-exec        sum-sleep\n"
        "------------------------------------------------------"
        "----------------------------------------------------\n");
 
@@ -212,26 +205,24 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SMP
-       SEQ_printf(m, "  .%-30s: %ld\n", "runnable_load_avg",
+       SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
+                       cfs_rq->avg.load_avg);
+       SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
                        cfs_rq->runnable_load_avg);
-       SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
-                       cfs_rq->blocked_load_avg);
-       SEQ_printf(m, "  .%-30s: %ld\n", "utilization_load_avg",
-                       cfs_rq->utilization_load_avg);
+       SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
+                       cfs_rq->avg.util_avg);
+       SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
+                       atomic_long_read(&cfs_rq->removed_load_avg));
+       SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
+                       atomic_long_read(&cfs_rq->removed_util_avg));
 #ifdef CONFIG_FAIR_GROUP_SCHED
-       SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
-                       cfs_rq->tg_load_contrib);
-       SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
-                       cfs_rq->tg_runnable_contrib);
+       SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
+                       cfs_rq->tg_load_avg_contrib);
        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
                        atomic_long_read(&cfs_rq->tg->load_avg));
-       SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
-                       atomic_read(&cfs_rq->tg->runnable_avg));
 #endif
 #endif
 #ifdef CONFIG_CFS_BANDWIDTH
-       SEQ_printf(m, "  .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
-                       cfs_rq->tg->cfs_bandwidth.timer_active);
        SEQ_printf(m, "  .%-30s: %d\n", "throttled",
                        cfs_rq->throttled);
        SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
@@ -520,11 +511,21 @@ __initcall(init_sched_debug_procfs);
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
 
 
+#ifdef CONFIG_NUMA_BALANCING
+void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
+               unsigned long tpf, unsigned long gsf, unsigned long gpf)
+{
+       SEQ_printf(m, "numa_faults node=%d ", node);
+       SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
+       SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
+}
+#endif
+
+
 static void sched_show_numa(struct task_struct *p, struct seq_file *m)
 {
 #ifdef CONFIG_NUMA_BALANCING
        struct mempolicy *pol;
-       int node, i;
 
        if (p->mm)
                P(mm->numa_scan_seq);
@@ -536,26 +537,12 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
        mpol_get(pol);
        task_unlock(p);
 
-       SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0));
-
-       for_each_online_node(node) {
-               for (i = 0; i < 2; i++) {
-                       unsigned long nr_faults = -1;
-                       int cpu_current, home_node;
-
-                       if (p->numa_faults)
-                               nr_faults = p->numa_faults[2*node + i];
-
-                       cpu_current = !i ? (task_node(p) == node) :
-                               (pol && node_isset(node, pol->v.nodes));
-
-                       home_node = (p->numa_preferred_nid == node);
-
-                       SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n",
-                               i, node, cpu_current, home_node, nr_faults);
-               }
-       }
-
+       P(numa_pages_migrated);
+       P(numa_preferred_nid);
+       P(total_numa_faults);
+       SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
+                       task_node(p), task_numa_group_id(p));
+       show_numa_stats(p, m);
        mpol_put(pol);
 #endif
 }
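
Note: show_numa_stats() is the kernel/sched/fair.c helper added by the same
upstream NUMA-balancing stats rework that introduced print_numa_stats()
above; it walks the online nodes and feeds in the per-node task and group
fault counts. Reconstructed roughly from that upstream change, so treat this
as a sketch rather than the verbatim source:

        void show_numa_stats(struct task_struct *p, struct seq_file *m)
        {
                int node;
                unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;

                for_each_online_node(node) {
                        if (p->numa_faults) {
                                tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
                                tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
                        }
                        if (p->numa_group) {
                                gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)];
                                gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
                        }
                        print_numa_stats(m, node, tsf, tpf, gsf, gpf);
                }
        }
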
@@ -585,6 +572,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
        nr_switches = p->nvcsw + p->nivcsw;
 
 #ifdef CONFIG_SCHEDSTATS
+       PN(se.statistics.sum_sleep_runtime);
        PN(se.statistics.wait_start);
        PN(se.statistics.sleep_start);
        PN(se.statistics.block_start);
@@ -642,12 +630,11 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
        P(se.load.weight);
 #ifdef CONFIG_SMP
-       P(se.avg.runnable_avg_sum);
-       P(se.avg.running_avg_sum);
-       P(se.avg.avg_period);
-       P(se.avg.load_avg_contrib);
-       P(se.avg.utilization_avg_contrib);
-       P(se.avg.decay_count);
+       P(se.avg.load_sum);
+       P(se.avg.util_sum);
+       P(se.avg.load_avg);
+       P(se.avg.util_avg);
+       P(se.avg.last_update_time);
 #endif
        P(policy);
        P(prio);
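
Note: after this update the new PELT fields are visible per task in
/proc/<pid>/sched via the P() macro ("%-45s:%21Ld" formatting). The values
below are invented for illustration; for an always-running task util_avg
approaches 1024 (SCHED_CAPACITY_SCALE):

        $ grep '^se\.avg' /proc/self/sched
        se.avg.load_sum                              :             47742848
        se.avg.util_sum                              :             46604381
        se.avg.load_avg                              :                 1022
        se.avg.util_avg                              :                  997
        se.avg.last_update_time                      :       52088801944178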