Add the rt linux 4.1.3-rt3 as base
[kvmfornfv.git] / kernel / arch / tile / kernel / machine_kexec.c
diff --git a/kernel/arch/tile/kernel/machine_kexec.c b/kernel/arch/tile/kernel/machine_kexec.c
new file mode 100644 (file)
index 0000000..008aa2f
--- /dev/null
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * based on machine_kexec.c from other architectures in linux-2.6.18
+ */
+
+#include <linux/mm.h>
+#include <linux/kexec.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/errno.h>
+#include <linux/vmalloc.h>
+#include <linux/cpumask.h>
+#include <linux/kernel.h>
+#include <linux/elf.h>
+#include <linux/highmem.h>
+#include <linux/mmu_context.h>
+#include <linux/io.h>
+#include <linux/timex.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/tlbflush.h>
+#include <asm/homecache.h>
+#include <hv/hypervisor.h>
+
+
+/*
+ * This stuff is not in elf.h and is not in any other kernel include.
+ * This stuff is needed below in the little boot notes parser to
+ * extract the command line so we can pass it to the hypervisor.
+ */
+struct Elf32_Bhdr {
+	Elf32_Word b_signature;	/* must equal ELF_BOOT_MAGIC */
+	Elf32_Word b_size;	/* total size of the boot-notes block, in bytes */
+	Elf32_Half b_checksum;	/* chosen so ip_compute_csum() over the block yields 0 */
+	Elf32_Half b_records;	/* number of note records following this header */
+};
+#define ELF_BOOT_MAGIC         0x0E1FB007
+/* n_type value of the note record that carries the kernel command line. */
+#define EBN_COMMAND_LINE       0x00000004
+/* Round up to a 4-byte boundary (note descriptors are word-aligned). */
+#define roundupsz(X) (((X) + 3) & ~3)
+
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+
+
+/* Intentionally a no-op; see the comment in the body. */
+void machine_shutdown(void)
+{
+	/*
+	 * Normally we would stop all the other processors here, but
+	 * the check in machine_kexec_prepare below ensures we'll only
+	 * get this far if we've been booted with "nosmp" on the
+	 * command line or without CONFIG_SMP so there's nothing to do
+	 * here (for now).
+	 */
+}
+
+/*
+ * Crash-kexec entry point.  Unreachable in practice: crash-type
+ * images are rejected by machine_kexec_prepare below.
+ */
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+	/*
+	 * Cannot happen.  This type of kexec is disabled on this
+	 * architecture (and enforced in machine_kexec_prepare below).
+	 */
+}
+
+
+/*
+ * Validate that this image can be kexec'd on this configuration:
+ * only uniprocessor boots and only the default (non-crash) image
+ * type are supported.  Returns 0 on success, -ENOSYS otherwise.
+ */
+int machine_kexec_prepare(struct kimage *image)
+{
+	if (num_online_cpus() > 1) {
+		pr_warn("%s: detected attempt to kexec with num_online_cpus() > 1\n",
+			__func__);
+		return -ENOSYS;
+	}
+	if (image->type == KEXEC_TYPE_DEFAULT)
+		return 0;
+	pr_warn("%s: detected attempt to kexec with unsupported type: %d\n",
+		__func__, image->type);
+	return -ENOSYS;
+}
+
+/* Nothing to undo: machine_kexec_prepare allocates no resources. */
+void machine_kexec_cleanup(struct kimage *image)
+{
+	/*
+	 * We did nothing in machine_kexec_prepare,
+	 * so we have nothing to do here.
+	 */
+}
+
+/*
+ * If we can find elf boot notes on this page, return the command
+ * line.  Otherwise, silently return null.  Somewhat kludgy, but no
+ * good way to do this without significantly rearchitecting the
+ * architecture-independent kexec code.
+ */
+
+/*
+ * Scan one source page for Elf boot notes and return a pointer to the
+ * NUL-terminated command-line string inside the (still-mapped) page,
+ * or NULL if this page does not hold well-formed boot notes.  The
+ * caller owns the atomic mapping of 'pg' and must keep it alive for
+ * as long as it uses the returned pointer.
+ */
+static unsigned char *kexec_bn2cl(void *pg)
+{
+	struct Elf32_Bhdr *bhdrp;
+	Elf32_Nhdr *nhdrp;
+	unsigned char *desc;
+	unsigned char *command_line;
+	__sum16 csum;
+
+	bhdrp = (struct Elf32_Bhdr *) pg;
+
+	/*
+	 * This routine is invoked for every source page, so make
+	 * sure to quietly ignore every impossible page.
+	 */
+	if (bhdrp->b_signature != ELF_BOOT_MAGIC ||
+	    bhdrp->b_size > PAGE_SIZE)
+		return NULL;
+
+	/*
+	 * Quietly reject blocks too small to hold even one note
+	 * header, so the walk below never reads n_type past b_size.
+	 */
+	if (bhdrp->b_size < sizeof(*bhdrp) + sizeof(*nhdrp))
+		return NULL;
+
+	/*
+	 * If we get a checksum mismatch, warn with the checksum
+	 * so we can diagnose better.
+	 */
+	csum = ip_compute_csum(pg, bhdrp->b_size);
+	if (csum != 0) {
+		pr_warn("%s: bad checksum %#x (size %d)\n",
+			__func__, csum, bhdrp->b_size);
+		return NULL;
+	}
+
+	nhdrp = (Elf32_Nhdr *) (bhdrp + 1);
+
+	/*
+	 * Walk the note records until we find the command line.
+	 * NOTE(review): only n_descsz is skipped, so this assumes the
+	 * boot notes carry no name data (n_namesz == 0) — confirm
+	 * against the boot-notes producer.
+	 */
+	while (nhdrp->n_type != EBN_COMMAND_LINE) {
+
+		desc = (unsigned char *) (nhdrp + 1);
+		desc += roundupsz(nhdrp->n_descsz);
+
+		nhdrp = (Elf32_Nhdr *) desc;
+
+		/* still in bounds? */
+		if ((unsigned char *) (nhdrp + 1) >
+		    ((unsigned char *) pg) + bhdrp->b_size) {
+
+			pr_info("%s: out of bounds\n", __func__);
+			return NULL;
+		}
+	}
+
+	command_line = (unsigned char *) (nhdrp + 1);
+	desc = command_line;
+
+	/* Make sure the string is NUL-terminated within this page. */
+	while (*desc != '\0') {
+		desc++;
+		if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) {
+			pr_info("%s: ran off end of page\n", __func__);
+			return NULL;
+		}
+	}
+
+	return command_line;
+}
+
+/*
+ * Walk the kimage source pages looking for one that carries Elf boot
+ * notes, and hand its command line to the hypervisor.  If no page
+ * qualifies, set an empty command line instead.
+ */
+static void kexec_find_and_set_command_line(struct kimage *image)
+{
+	kimage_entry_t *ptr, entry;
+
+	unsigned char *command_line = 0;
+	unsigned char *r;
+	HV_Errno hverr;
+
+	/*
+	 * Iterate the kimage entry list: IND_DONE terminates it,
+	 * IND_INDIRECTION entries redirect to the next page of
+	 * entries, and IND_SOURCE entries point at payload pages.
+	 */
+	for (ptr = &image->head;
+	     (entry = *ptr) && !(entry & IND_DONE);
+	     ptr = (entry & IND_INDIRECTION) ?
+		     phys_to_virt((entry & PAGE_MASK)) : ptr + 1) {
+
+		if ((entry & IND_SOURCE)) {
+			void *va =
+				kmap_atomic_pfn(entry >> PAGE_SHIFT);
+			r = kexec_bn2cl(va);
+			if (r) {
+				/*
+				 * The string lives inside the page
+				 * mapped at 'va', so deliberately keep
+				 * that atomic mapping alive until after
+				 * hv_set_command_line() below.
+				 */
+				command_line = r;
+				break;
+			}
+			kunmap_atomic(va);
+		}
+	}
+
+	if (command_line != 0) {
+		pr_info("setting new command line to \"%s\"\n", command_line);
+
+		hverr = hv_set_command_line(
+			(HV_VirtAddr) command_line, strlen(command_line));
+		/* Drops the mapping taken in the loop above. */
+		kunmap_atomic(command_line);
+	} else {
+		pr_info("%s: no command line found; making empty\n", __func__);
+		hverr = hv_set_command_line((HV_VirtAddr) command_line, 0);
+	}
+	if (hverr)
+		pr_warn("%s: hv_set_command_line returned error: %d\n",
+			__func__, hverr);
+}
+
+/*
+ * The kexec code range-checks all its PAs, so to avoid having it run
+ * amok and allocate memory and then sequester it from every other
+ * controller, we force it to come from controller zero.  We also
+ * disable the oom-killer since if we do end up running out of memory,
+ * that almost certainly won't help.
+ */
+struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order)
+{
+	/*
+	 * Constrain all kexec allocations to controller zero and
+	 * forbid retries; the oom-killer would not help on this path.
+	 */
+	return alloc_pages_node(0, gfp_mask | __GFP_THISNODE | __GFP_NORETRY,
+				order);
+}
+
+/*
+ * Address range in which pa=va mapping is set in setup_quasi_va_is_pa().
+ * For tilepro, PAGE_OFFSET is used since this is the largest possbile value
+ * for tilepro, while for tilegx, we limit it to entire middle level page
+ * table which we assume has been allocated and is undoubtedly large enough.
+ */
+#ifndef __tilegx__
+/* tilepro: identity-map everything below PAGE_OFFSET (largest possible span). */
+#define	QUASI_VA_IS_PA_ADDR_RANGE PAGE_OFFSET
+#else
+/* tilegx: limit to one mid-level page table, assumed allocated and big enough. */
+#define	QUASI_VA_IS_PA_ADDR_RANGE PGDIR_SIZE
+#endif
+
+/*
+ * Install an identity (va == pa) huge-page mapping over the first
+ * QUASI_VA_IS_PA_ADDR_RANGE bytes; used by machine_kexec() just
+ * before jumping into the relocation code.
+ */
+static void setup_quasi_va_is_pa(void)
+{
+	HV_PTE pte;
+	unsigned long i;
+
+	/*
+	 * Flush our TLB to prevent conflicts between the previous contents
+	 * and the new stuff we're about to add.
+	 */
+	local_flush_tlb_all();
+
+	/*
+	 * setup VA is PA, at least up to QUASI_VA_IS_PA_ADDR_RANGE.
+	 * Note here we assume that level-1 page table is defined by
+	 * HPAGE_SIZE.
+	 */
+	pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
+	pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
+	for (i = 0; i < (QUASI_VA_IS_PA_ADDR_RANGE >> HPAGE_SHIFT); i++) {
+		unsigned long vaddr = i << HPAGE_SHIFT;
+		pgd_t *pgd = pgd_offset(current->mm, vaddr);
+		pud_t *pud = pud_offset(pgd, vaddr);
+		/* Huge-page entry lives at the pmd level. */
+		pte_t *ptep = (pte_t *) pmd_offset(pud, vaddr);
+		/* Identity mapping: huge page i maps the same physical range. */
+		unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
+
+		/* Only install entries for memory that actually exists. */
+		if (pfn_valid(pfn))
+			__set_pte(ptep, pfn_pte(pfn, pte));
+	}
+}
+
+
+/*
+ * Final kexec step (does not return): with all interrupts masked,
+ * push the command line to the hypervisor, make the control page
+ * locally cached and executable, copy in the relocation stub, set up
+ * the identity mapping, and jump into the stub.
+ */
+void machine_kexec(struct kimage *image)
+{
+	void *reboot_code_buffer;
+	pte_t *ptep;
+	void (*rnk)(unsigned long, void *, unsigned long)
+		__noreturn;
+
+	/* Mask all interrupts before starting to reboot. */
+	interrupt_mask_set_mask(~0ULL);
+
+	kexec_find_and_set_command_line(image);
+
+	/*
+	 * Adjust the home caching of the control page to be cached on
+	 * this cpu, and copy the assembly helper into the control
+	 * code page, which we map in the vmalloc area.
+	 */
+	homecache_change_page_home(image->control_code_page, 0,
+				   smp_processor_id());
+	reboot_code_buffer = page_address(image->control_code_page);
+	BUG_ON(reboot_code_buffer == NULL);
+	ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer);
+	/* Mark the control page executable so we can jump into it. */
+	__set_pte(ptep, pte_mkexec(*ptep));
+	memcpy(reboot_code_buffer, relocate_new_kernel,
+	       relocate_new_kernel_size);
+	/* Keep the icache coherent with the code we just copied. */
+	__flush_icache_range(
+		(unsigned long) reboot_code_buffer,
+		(unsigned long) reboot_code_buffer + relocate_new_kernel_size);
+
+	setup_quasi_va_is_pa();
+
+	/* now call it */
+	rnk = reboot_code_buffer;
+	(*rnk)(image->head, reboot_code_buffer, image->start);
+}