These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / arch / powerpc / mm / slb.c
index 6e450ca..515730e 100644 (file)
 #include <asm/udbg.h>
 #include <asm/code-patching.h>
 
+enum slb_index {
+       LINEAR_INDEX    = 0, /* Kernel linear map  (0xc000000000000000) */
+       VMALLOC_INDEX   = 1, /* Kernel virtual map (0xd000000000000000) */
+       KSTACK_INDEX    = 2, /* Kernel stack map */
+};
 
 extern void slb_allocate_realmode(unsigned long ea);
 extern void slb_allocate_user(unsigned long ea);
@@ -41,9 +46,9 @@ static void slb_allocate(unsigned long ea)
        (((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
 
 static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
-                                        unsigned long slot)
+                                        enum slb_index index)
 {
-       return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
+       return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
 }
 
 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
@@ -55,39 +60,39 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
 
 static inline void slb_shadow_update(unsigned long ea, int ssize,
                                     unsigned long flags,
-                                    unsigned long entry)
+                                    enum slb_index index)
 {
+       struct slb_shadow *p = get_slb_shadow();
+
        /*
         * Clear the ESID first so the entry is not valid while we are
         * updating it.  No write barriers are needed here, provided
         * we only update the current CPU's SLB shadow buffer.
         */
-       get_slb_shadow()->save_area[entry].esid = 0;
-       get_slb_shadow()->save_area[entry].vsid =
-                               cpu_to_be64(mk_vsid_data(ea, ssize, flags));
-       get_slb_shadow()->save_area[entry].esid =
-                               cpu_to_be64(mk_esid_data(ea, ssize, entry));
+       p->save_area[index].esid = 0;
+       p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags));
+       p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index));
 }
 
-static inline void slb_shadow_clear(unsigned long entry)
+static inline void slb_shadow_clear(enum slb_index index)
 {
-       get_slb_shadow()->save_area[entry].esid = 0;
+       get_slb_shadow()->save_area[index].esid = 0;
 }
 
 static inline void create_shadowed_slbe(unsigned long ea, int ssize,
                                        unsigned long flags,
-                                       unsigned long entry)
+                                       enum slb_index index)
 {
        /*
         * Updating the shadow buffer before writing the SLB ensures
         * we don't get a stale entry here if we get preempted by PHYP
         * between these two statements.
         */
-       slb_shadow_update(ea, ssize, flags, entry);
+       slb_shadow_update(ea, ssize, flags, index);
 
        asm volatile("slbmte  %0,%1" :
                     : "r" (mk_vsid_data(ea, ssize, flags)),
-                      "r" (mk_esid_data(ea, ssize, entry))
+                      "r" (mk_esid_data(ea, ssize, index))
                     : "memory" );
 }
 
@@ -103,16 +108,16 @@ static void __slb_flush_and_rebolt(void)
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
-       ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
+       ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX);
        if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
                ksp_esid_data &= ~SLB_ESID_V;
                ksp_vsid_data = 0;
-               slb_shadow_clear(2);
+               slb_shadow_clear(KSTACK_INDEX);
        } else {
                /* Update stack entry; others don't change */
-               slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
+               slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX);
                ksp_vsid_data =
-                       be64_to_cpu(get_slb_shadow()->save_area[2].vsid);
+                       be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid);
        }
 
        /* We need to do this all in asm, so we're sure we don't touch
@@ -151,7 +156,7 @@ void slb_vmalloc_update(void)
        unsigned long vflags;
 
        vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
-       slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
+       slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);
        slb_flush_and_rebolt();
 }
 
@@ -249,11 +254,24 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
 {
-       int insn = (*insn_addr & 0xffff0000) | immed;
+
+       /*
+        * This function patches either an li or a cmpldi instruction with
+        * a new immediate value. This relies on the fact that both li
+        * (which is actually addi) and cmpldi both take a 16-bit immediate
+        * value, and it is situated in the same location in the instruction,
+        * ie. bits 16-31 (Big endian bit order) or the lower 16 bits.
+        * The signedness of the immediate operand differs between the two
+        * instructions however this code is only ever patching a small value,
+        * much less than 1 << 15, so we can get away with it.
+        * To patch the value we read the existing instruction, clear the
+        * immediate value, and or in our new value, then write the instruction
+        * back.
+        */
+       unsigned int insn = (*insn_addr & 0xffff0000) | immed;
        patch_instruction(insn_addr, insn);
 }
 
-extern u32 slb_compare_rr_to_size[];
 extern u32 slb_miss_kernel_load_linear[];
 extern u32 slb_miss_kernel_load_io[];
 extern u32 slb_compare_rr_to_size[];
@@ -309,24 +327,23 @@ void slb_initialize(void)
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
-       /* Invalidate the entire SLB (even slot 0) & all the ERATS */
+       /* Invalidate the entire SLB (even entry 0) & all the ERATS */
        asm volatile("isync":::"memory");
        asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");
-       create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
-
-       create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
+       create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);
+       create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);
 
        /* For the boot cpu, we're running on the stack in init_thread_union,
         * which is in the first segment of the linear mapping, and also
         * get_paca()->kstack hasn't been initialized yet.
         * For secondary cpus, we need to bolt the kernel stack entry now.
         */
-       slb_shadow_clear(2);
+       slb_shadow_clear(KSTACK_INDEX);
        if (raw_smp_processor_id() != boot_cpuid &&
            (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
                create_shadowed_slbe(get_paca()->kstack,
-                                    mmu_kernel_ssize, lflags, 2);
+                                    mmu_kernel_ssize, lflags, KSTACK_INDEX);
 
        asm volatile("isync":::"memory");
 }