These changes are the raw update to linux-4.4.6-rt14.
kvmfornfv.git: kernel/arch/tile/kernel/process.c
index b403c2e..7d57693 100644
@@ -27,6 +27,7 @@
 #include <linux/kernel.h>
 #include <linux/tracehook.h>
 #include <linux/signal.h>
+#include <linux/delay.h>
 #include <linux/context_tracking.h>
 #include <asm/stack.h>
 #include <asm/switch_to.h>
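
The new <linux/delay.h> include is there for mdelay(), which the remote-backtrace polling loop added at the bottom of this file relies on.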
@@ -132,7 +133,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
                       (CALLEE_SAVED_REGS_COUNT - 2) * sizeof(unsigned long));
                callee_regs[0] = sp;   /* r30 = function */
                callee_regs[1] = arg;  /* r31 = arg */
-               childregs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
                p->thread.pc = (unsigned long) ret_from_kernel_thread;
                return 0;
        }
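
Note that the kernel-thread setup no longer stages an explicit ex1 (PL/ICS) value in childregs; only thread.pc is set, pointing the new thread at ret_from_kernel_thread.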
@@ -446,6 +446,11 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
        hardwall_switch_tasks(prev, next);
 #endif
 
+       /* Notify the simulator of task exit. */
+       if (unlikely(prev->state == TASK_DEAD))
+               __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |
+                            (prev->pid << _SIM_CONTROL_OPERATOR_BITS));
+
        /*
         * Switch kernel SP, PC, and callee-saved registers.
         * In the context of the new task, return the old task pointer
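
The simulator notification follows the usual tile SPR_SIM_CONTROL convention: a command code OR'd with an operand shifted into place by _SIM_CONTROL_OPERATOR_BITS. As a standalone sketch (the helper name is hypothetical, not part of the patch; the identifiers otherwise come from the hunk above):

    /* Hypothetical helper; same encoding as the hunk above. */
    static inline void sim_notify_task_exit(pid_t pid)
    {
            /* command code in the low bits, operand (here the pid) above them */
            __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |
                         (pid << _SIM_CONTROL_OPERATOR_BITS));
    }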
@@ -546,31 +551,141 @@ void exit_thread(void)
 #endif
 }
 
-void show_regs(struct pt_regs *regs)
+void tile_show_regs(struct pt_regs *regs)
 {
-       struct task_struct *tsk = validate_current();
        int i;
-
-       if (tsk != &corrupt_current)
-               show_regs_print_info(KERN_ERR);
 #ifdef __tilegx__
        for (i = 0; i < 17; i++)
-               pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
+               pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
                       i, regs->regs[i], i+18, regs->regs[i+18],
                       i+36, regs->regs[i+36]);
-       pr_err(" r17: " REGFMT " r35: " REGFMT " tp : " REGFMT "\n",
+       pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
               regs->regs[17], regs->regs[35], regs->tp);
-       pr_err(" sp : " REGFMT " lr : " REGFMT "\n", regs->sp, regs->lr);
+       pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
 #else
        for (i = 0; i < 13; i++)
-               pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
+               pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
+                      " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
                       i, regs->regs[i], i+14, regs->regs[i+14],
                       i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
-       pr_err(" r13: " REGFMT " tp : " REGFMT " sp : " REGFMT " lr : " REGFMT "\n",
+       pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
               regs->regs[13], regs->tp, regs->sp, regs->lr);
 #endif
-       pr_err(" pc : " REGFMT " ex1: %ld     faultnum: %ld\n",
-              regs->pc, regs->ex1, regs->faultnum);
+       pr_err(" pc : "REGFMT" ex1: %ld     faultnum: %ld flags:%s%s%s%s\n",
+              regs->pc, regs->ex1, regs->faultnum,
+              is_compat_task() ? " compat" : "",
+              (regs->flags & PT_FLAGS_DISABLE_IRQ) ? " noirq" : "",
+              !(regs->flags & PT_FLAGS_CALLER_SAVES) ? " nocallersave" : "",
+              (regs->flags & PT_FLAGS_RESTORE_REGS) ? " restoreregs" : "");
+}
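
Splitting the raw register dump into tile_show_regs() separates it from the header and backtrace handling; the one functional addition is that the pc line now decodes regs->flags and the compat status into a readable flags suffix (compat, noirq, nocallersave, restoreregs).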
+
+void show_regs(struct pt_regs *regs)
+{
+       struct KBacktraceIterator kbt;
+
+       show_regs_print_info(KERN_DEFAULT);
+       tile_show_regs(regs);
+
+       KBacktraceIterator_init(&kbt, NULL, regs);
+       tile_show_stack(&kbt);
+}
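
show_regs() is now a thin wrapper: header, registers, then a stack walk driven by a KBacktraceIterator seeded from the trapped registers. A caller wanting only the stack walk could use the same two calls (a sketch, reusing identifiers from this file):

    struct KBacktraceIterator kbt;

    /* Walk and print the kernel stack described by 'regs'. */
    KBacktraceIterator_init(&kbt, NULL, regs);
    tile_show_stack(&kbt);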
 
-       dump_stack_regs(regs);
+/* To ensure stack dumps on tiles occur one by one. */
+static DEFINE_SPINLOCK(backtrace_lock);
+/* To ensure no backtrace occurs before all of the stack dumps are done. */
+static atomic_t backtrace_cpus;
+/* The cpu mask to avoid reentrance. */
+static struct cpumask backtrace_mask;
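
Together these three statics implement a small handshake: the initiator seeds backtrace_cpus with the number of remote cpus and backtrace_mask with their ids; each NMI handler clears its own bit (so a stray second NMI bails out), serializes its output on backtrace_lock, and decrements backtrace_cpus; a later initiator that finds backtrace_cpus non-zero backs off.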
+
+void do_nmi_dump_stack(struct pt_regs *regs)
+{
+       int is_idle = is_idle_task(current) && !in_interrupt();
+       int cpu;
+
+       nmi_enter();
+       cpu = smp_processor_id();
+       if (WARN_ON_ONCE(!cpumask_test_and_clear_cpu(cpu, &backtrace_mask)))
+               goto done;
+
+       spin_lock(&backtrace_lock);
+       if (is_idle)
+               pr_info("CPU: %d idle\n", cpu);
+       else
+               show_regs(regs);
+       spin_unlock(&backtrace_lock);
+       atomic_dec(&backtrace_cpus);
+done:
+       nmi_exit();
+}
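
do_nmi_dump_stack() runs in NMI context, hence the nmi_enter()/nmi_exit() bracketing; the WARN_ON_ONCE on the test-and-clear makes a re-delivered NMI on the same cpu return early rather than deadlock on backtrace_lock.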
+
+#ifdef __tilegx__
+void arch_trigger_all_cpu_backtrace(bool self)
+{
+       struct cpumask mask;
+       HV_Coord tile;
+       unsigned int timeout;
+       int cpu;
+       int ongoing;
+       HV_NMI_Info info[NR_CPUS];
+
+       ongoing = atomic_cmpxchg(&backtrace_cpus, 0, num_online_cpus() - 1);
+       if (ongoing != 0) {
+               pr_err("Trying to do all-cpu backtrace.\n");
+               pr_err("But another all-cpu backtrace is ongoing (%d cpus left)\n",
+                      ongoing);
+               if (self) {
+                       pr_err("Reporting the stack on this cpu only.\n");
+                       dump_stack();
+               }
+               return;
+       }
+
+       cpumask_copy(&mask, cpu_online_mask);
+       cpumask_clear_cpu(smp_processor_id(), &mask);
+       cpumask_copy(&backtrace_mask, &mask);
+
+       /* Backtrace for myself first. */
+       if (self)
+               dump_stack();
+
+       /* Tentatively dump stack on remote tiles via NMI. */
+       timeout = 100;
+       while (!cpumask_empty(&mask) && timeout) {
+               for_each_cpu(cpu, &mask) {
+                       tile.x = cpu_x(cpu);
+                       tile.y = cpu_y(cpu);
+                       info[cpu] = hv_send_nmi(tile, TILE_NMI_DUMP_STACK, 0);
+                       if (info[cpu].result == HV_NMI_RESULT_OK)
+                               cpumask_clear_cpu(cpu, &mask);
+               }
+
+               mdelay(10);
+               timeout--;
+       }
+
+       /* Warn about cpus stuck in ICS and decrement their counts here. */
+       if (!cpumask_empty(&mask)) {
+               for_each_cpu(cpu, &mask) {
+                       switch (info[cpu].result) {
+                       case HV_NMI_RESULT_FAIL_ICS:
+                               pr_warn("Skipping stack dump of cpu %d in ICS at pc %#llx\n",
+                                       cpu, info[cpu].pc);
+                               break;
+                       case HV_NMI_RESULT_FAIL_HV:
+                               pr_warn("Skipping stack dump of cpu %d in hypervisor\n",
+                                       cpu);
+                               break;
+                       case HV_ENOSYS:
+                               pr_warn("Hypervisor too old to allow remote stack dumps.\n");
+                               goto skip_for_each;
+                       default:  /* should not happen */
+                               pr_warn("Skipping stack dump of cpu %d [%d,%#llx]\n",
+                                       cpu, info[cpu].result, info[cpu].pc);
+                               break;
+                       }
+               }
+skip_for_each:
+               atomic_sub(cpumask_weight(&mask), &backtrace_cpus);
+       }
 }
+#endif /* __tilegx__ */
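
For context, this is the hook that the generic trigger_all_cpu_backtrace() in <linux/nmi.h> dispatches to when an architecture defines arch_trigger_all_cpu_backtrace (the sysrq 'l' handler is the classic caller). A minimal call-site sketch, assuming the 4.4-era API where the generic wrapper returns false if no arch hook exists:

    #include <linux/nmi.h>

    /* Ask every online cpu to dump its stack, including this one. */
    if (!trigger_all_cpu_backtrace())
            dump_stack();   /* no arch hook: report this cpu only */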