/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 *
 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 * PPC44x port. Copyright (C) 2011, IBM Corporation
 *		Author: Suzuki Poulose <suzuki@in.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>
/*
 * We store the saved ksp_limit in the unused part
 * of the STACK_FRAME_OVERHEAD
 */
#ifndef CONFIG_PREEMPT_RT_FULL
_GLOBAL(call_do_softirq)
	lwz	r10,THREAD+KSP_LIMIT(r2)	/* save the current ksp_limit */
	addi	r11,r3,THREAD_INFO_GAP
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)	/* switch to the softirq stack */
	stw	r11,THREAD+KSP_LIMIT(r2)	/* set ksp_limit for the new stack */
	stw	r10,THREAD+KSP_LIMIT(r2)	/* restore the saved ksp_limit */
/*
 * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
 */
_GLOBAL(call_do_irq)
	lwz	r10,THREAD+KSP_LIMIT(r2)	/* save the current ksp_limit */
	addi	r11,r4,THREAD_INFO_GAP
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)	/* switch to the irq stack */
	stw	r11,THREAD+KSP_LIMIT(r2)	/* set ksp_limit for the new stack */
	stw	r10,THREAD+KSP_LIMIT(r2)	/* restore the saved ksp_limit */
/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
_GLOBAL(mulhdu)
1:	beqlr	cr1		/* all done if high part of A is 0 */
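/*
 * Illustrative C equivalent (an assumption for clarity, not part of
 * the original source): the high half of the 128-bit product of two
 * u64 values, which the asm builds from 32x32->64 partial products.
 *
 *	u64 mulhdu(u64 a, u64 b)
 *	{
 *		return (u64)(((unsigned __int128)a * b) >> 64);
 *	}
 */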
/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)
/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
_GLOBAL(reloc_got2)
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 *   r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r5,CPU_SPEC_SETUP(r4)
#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on 750fx CPU. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu).
 */
_GLOBAL(low_choose_750fx_pll)
	/* If switching to PLL1, disable HID0:BTIC */

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1		/* Read the current HID1 value */
	rlwinm	r5,r3,16,15,15		/* Build a HID1:PS bit from the parameter */
	rlwinm	r4,r4,0,16,14		/* Clear out HID1:PS from the value read
					 * (could have used rlwimi here instead) */

	/* Store new HID1 image */
	CURRENT_THREAD_INFO(r6, r1)
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)
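/*
 * C-level sketch of the HID1:PS update above (an assumption for
 * clarity): PS is the 0x00010000 bit of HID1, selected by the low
 * bit of the pll argument:
 *
 *	hid1 = (mfspr(SPRN_HID1) & ~0x00010000) | ((pll & 1) << 16);
 */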
	/* If switching to PLL0, enable HID0:BTIC */
_GLOBAL(low_choose_7447a_dfs)

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
/*
 * Take the complement of the given mask, AND it onto the MSR,
 * then "or" some values on.
 *	_nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
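/*
 * C-level sketch (an assumption, for clarity only):
 *
 *	mtmsr((mfmsr() & ~nmask) | value_to_or);
 */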
/*
 * Do an IO access in real mode
 */

/*
 * Do an IO access in real mode
 */
#endif /* CONFIG_40x */
/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#elif defined(CONFIG_FSL_BOOKE)
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	rlwinm	r3,r3,16,16,31
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
#endif /* CONFIG_8xx/4xx */
/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
_KPROBE(flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbst's to get to ram */
	addi	r6,r6,L1_CACHE_BYTES
	/* Flash invalidate on 44x because we are passed kmapped addresses and
	   this doesn't work for userspace pages due to the virtually tagged
	   icache. */
	sync				/* additional sync needed on g4 */
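/*
 * C-level sketch of the flush sequence (an assumption for clarity;
 * dcbst/sync/icbi/isync are the real hardware primitives):
 *
 *	for (p = start & ~(L1_CACHE_BYTES - 1); p < stop; p += L1_CACHE_BYTES)
 *		dcbst(p);		// push dirty data lines to memory
 *	sync();				// wait for the dcbst's to complete
 *	for (p = start & ~(L1_CACHE_BYTES - 1); p < stop; p += L1_CACHE_BYTES)
 *		icbi(p);		// invalidate stale icache lines
 *	sync(); isync();		// serialize before running new code
 */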
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbst's to get to ram */
/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbf's to get to ram */
/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbi's to get to ram */
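/*
 * Summary sketch of the three range primitives above (an assumption
 * for clarity): each walks the range one cache line at a time and
 * differs only in the cache op it issues:
 *
 *	clean_dcache_range()      -> dcbst  (write back, keep line valid)
 *	flush_dcache_range()      -> dcbf   (write back and invalidate)
 *	invalidate_dcache_range() -> dcbi   (discard without writing back)
 */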
/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT	/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
/* We don't flush the icache on 44x. Those have a virtual icache
 * and we don't have access to the virtual address here (it's
 * not the page vaddr but where it's mapped in user space). The
 * flushing of the icache on these is handled elsewhere, when
 * a change in the address space occurs, before returning to
 * user space.
 */
BEGIN_MMU_FTR_SECTION
	blr
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
#endif /* CONFIG_44x */
	addi	r6,r6,L1_CACHE_BYTES
/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr					/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26			/* clear DR */
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	addi	r6,r6,L1_CACHE_BYTES
	mtmsr	r10				/* restore DR */
#endif /* CONFIG_BOOKE */
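/*
 * C-level sketch of the real-mode trick above (an assumption for
 * clarity):
 *
 *	msr = mfmsr();
 *	mtmsr(msr & ~MSR_DR);	// data translation off: phys addr works
 *	... dcbst/icbi loops as in __flush_dcache_icache ...
 *	mtmsr(msr);		// restore data translation
 */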
/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
 */
_GLOBAL(clear_pages)
	li	r0,PAGE_SIZE/L1_CACHE_BYTES
	addi	r3,r3,L1_CACHE_BYTES
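/*
 * Sketch (an assumption for clarity): dcbz zeroes an entire cache
 * line without first reading it from memory, so clearing is one op
 * per line rather than a stream of stores:
 *
 *	for (i = 0; i < (PAGE_SIZE << order) / L1_CACHE_BYTES; i++)
 *		dcbz(page + i * L1_CACHE_BYTES);
 */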
/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES		\
#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	addi	r11,r11,L1_CACHE_BYTES
#else /* MAX_COPY_PREFETCH == 1 */
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
#if L1_CACHE_BYTES >= 32
#if L1_CACHE_BYTES >= 64
#if L1_CACHE_BYTES >= 128
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
/*
 * Atomically clear or set bits in a word:
 *	void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 *	void atomic_set_mask(atomic_t mask, atomic_t *addr)
 */
_GLOBAL(atomic_clear_mask)
_GLOBAL(atomic_set_mask)
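/*
 * C-level sketch of the lwarx/stwcx. retry loop both use, with
 * hypothetical helpers standing in for the instructions (an
 * assumption for clarity, not the literal code):
 *
 *	do {
 *		old = load_reserved(addr);		// lwarx
 *		new = old & ~mask;			// set_mask: old | mask
 *	} while (!store_conditional(addr, new));	// stwcx. fails if the
 *							// reservation was lost
 */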
/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 *
 * R3/R4 holds the 64-bit value (R3 = MSW, R4 = LSW),
 * R5 holds the shift count, and the result is returned in R3/R4.
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
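/*
 * C-level sketch of the branch-free two-word scheme used below (an
 * assumption for clarity), shown for __ashrdi3: srw/slw yield 0 for
 * counts of 32-63 and sraw sign-fills, exactly as the per-line
 * comments note:
 *
 *	t1  = (count > 31) ? 0 : msw << (32 - count);
 *	t2  = (count < 32) ? 0 : (s32)msw >> (count - 32);
 *	lsw = ((count > 31) ? 0 : lsw >> count) | t1 | t2;
 *	msw = (s32)msw >> ((count > 31) ? 31 : count);
 */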
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32	# r6 = 32 - count
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
_GLOBAL(__ashldi3)
	subfic	r6,r5,32	# r6 = 32 - count
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr
_GLOBAL(__lshrdi3)
	subfic	r6,r5,32	# r6 = 32 - count
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
/*
 * 64-bit comparison: __cmpdi2(s64 a, s64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */

/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
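/*
 * C-level sketch of both (an assumption for clarity):
 *
 *	int __cmpdi2(s64 a, s64 b)
 *	{
 *		return (a < b) ? 0 : ((a == b) ? 1 : 2);
 *	}
 *
 * __ucmpdi2 is identical but with u64 operands.
 */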
	rlwimi	r9,r4,24,16,23
	rlwimi	r10,r3,24,16,23
_GLOBAL(start_secondary_resume)
	CURRENT_THREAD_INFO(r1, r1)
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r3,0
	stw	r3,0(r1)	/* Zero the stack frame pointer */
#endif /* CONFIG_SMP */
/*
 * This routine is just here to keep GCC happy - sigh...
 */

/*
 * Must be relocatable PIC code callable as a C function.
 */
	.globl relocate_new_kernel
relocate_new_kernel:
	/* r4 = reboot_code_buffer */
	/* r5 = start_address */
#ifdef CONFIG_FSL_BOOKE
#define ENTRY_MAPPING_KEXEC_SETUP
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_KEXEC_SETUP
#elif defined(CONFIG_44x)

	/* Save our parameters */
#ifdef CONFIG_PPC_47x
	/* Check for 47x cores */
	cmplwi	cr0,r3,PVR_476@h
	cmplwi	cr0,r3,PVR_476_ISS@h
#endif /* CONFIG_PPC_47x */
/*
 * Code for setting up 1:1 mapping for PPC440x for KEXEC
 *
 * We cannot switch off the MMU on PPC44x. So we:
 * 1) Invalidate all the mappings except the one we are running from.
 * 2) Create a tmp mapping for our code in the other address space (TS) and
 *    jump to it. Invalidate the entry we started in.
 * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in the original TS.
 * 4) Jump to the 1:1 mapping in the original TS.
 * 5) Invalidate the tmp mapping.
 *
 * - Based on the kexec support code for FSL BookE
 */
/*
 * Load the PID with kernel PID (0).
 * Also load our MSR_IS and TID to MMUCR for TLB search.
 */
	oris	r3,r3,PPC44x_MMUCR_STS@h

/*
 * Invalidate all the TLB entries except the current entry
 * where we are running from.
 */
	bl	0f			/* Find our address */
0:	mflr	r5			/* Make it accessible */
	tlbsx	r23,0,r5		/* Find entry we are in */
	li	r4,0			/* Start at TLB entry 0 */
	li	r3,0			/* Set PAGEID inval value */
1:	cmpw	r23,r4			/* Is this our entry? */
	beq	skip			/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID	/* If not, inval the entry */
skip:
	addi	r4,r4,1			/* Increment */
	cmpwi	r4,64			/* Are we done? */
	bne	1b			/* If not, repeat */
	/* Create a temp mapping and jump to it */
	andi.	r6, r23, 1		/* Find the index to use */
	addi	r24, r6, 1		/* r24 will contain 1 or 2 */

	mfmsr	r9			/* get the MSR */
	rlwinm	r5, r9, 27, 31, 31	/* Extract the MSR[IS] */
	xori	r7, r5, 1		/* Use the other address space */

	/* Read the current mapping entries */
	tlbre	r3, r23, PPC44x_TLB_PAGEID
	tlbre	r4, r23, PPC44x_TLB_XLAT
	tlbre	r5, r23, PPC44x_TLB_ATTRIB

	/* Save our current XLAT entry */

	/* Extract the TLB PageSize */
	li	r10, 1			/* r10 will hold PageSize */
	rlwinm	r11, r3, 0, 24, 27	/* bits 24-27 */

	/* XXX: As of now we use 256M, 4K pages */
	cmpwi	r11, PPC44x_TLB_256M
	rotlwi	r10, r10, 28		/* r10 = 256M */
	cmpwi	r11, PPC44x_TLB_4K
	rotlwi	r10, r10, 12		/* r10 = 4K */
	rotlwi	r10, r10, 10		/* r10 = 1K */
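/*
 * Sketch of the size decode (an assumption for clarity): the 44x
 * SIZE field encodes the page size as 4^SIZE KiB, so
 *
 *	pagesize = 1 << (2 * SIZE + 10);	// 0 -> 1K, 1 -> 4K, 9 -> 256M
 *
 * which is what the rotlwi sequences above build in r10 from the
 * initial value 1.
 */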
	/*
	 * Write out the tmp 1:1 mapping for this code in the other
	 * address space. Fixup: EPN = RPN, TS = other address space.
	 */
	insrwi	r3, r7, 1, 23		/* Bit 23 is TS for PAGEID field */

	/* Write out the tmp mapping entries */
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	tlbwe	r4, r24, PPC44x_TLB_XLAT
	tlbwe	r5, r24, PPC44x_TLB_ATTRIB

	subi	r11, r10, 1		/* PageOffset Mask = PageSize - 1 */
	not	r10, r11		/* Mask for PageNum */
	/* Switch to other address space in MSR */
	insrwi	r9, r7, 1, 26		/* Set MSR[IS] = r7 */

	addi	r8, r8, (2f-1b)		/* Find the target offset */

	/* Jump to the tmp mapping */

	/* Invalidate the entry we were executing from */
	tlbwe	r3, r23, PPC44x_TLB_PAGEID

	/* attribute fields. rwx for SUPERVISOR mode */
	ori	r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
	/* Create 1:1 mapping in 256M pages */
	xori	r7, r7, 1		/* Revert back to the original TS */

	li	r8, 0			/* PageNumber */
	li	r6, 3			/* TLB Index, start at 3 */

	rotlwi	r3, r8, 28		/* Create EPN (bits 0-3) */
	mr	r4, r3			/* RPN = EPN */
	ori	r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M)	/* SIZE = 256M, Valid */
	insrwi	r3, r7, 1, 23		/* Set TS from r7 */

	tlbwe	r3, r6, PPC44x_TLB_PAGEID	/* PageID field : EPN, V, SIZE */
	tlbwe	r4, r6, PPC44x_TLB_XLAT		/* Address translation : RPN */
	tlbwe	r5, r6, PPC44x_TLB_ATTRIB	/* Attributes */

	addi	r8, r8, 1		/* Increment PN */
	addi	r6, r6, 1		/* Increment TLB Index */
	cmpwi	r8, 8			/* Are we done? */
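/*
 * C-level sketch of the loop above, with a hypothetical
 * write_tlb_entry() helper (an assumption for clarity):
 *
 *	for (pn = 0, idx = 3; pn < 8; pn++, idx++) {	// 8 x 256M = 0-2GiB
 *		epn = pn << 28;
 *		write_tlb_entry(idx, epn | VALID | SIZE_256M | ts,	// word 0
 *				epn,		// RPN = EPN: the 1:1 part
 *				attrib);	// rwx for supervisor
 *	}
 */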
	/* Jump to the new 1:1 mapping */
	insrwi	r9, r7, 1, 26		/* Set MSR[IS] = r7 */

	and	r8, r8, r11		/* Get our offset within the page */

	and	r5, r25, r10		/* Get our target PageNum */
	or	r8, r8, r5		/* Target jump address */
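/*
 * Sketch (an assumption for clarity): the physical jump target is the
 * page number from the saved XLAT entry plus our offset in the page:
 *
 *	target = (xlat & pagenum_mask) | (addr & pageoffset_mask);
 */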
	/* Invalidate the tmp entry we used */
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
#ifdef CONFIG_PPC_47x

	/* 1:1 mapping for 47x */

	/*
	 * Load the kernel pid (0) to PID and also to MMUCR[TID].
	 * Also set the MSR IS->MMUCR STS.
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3		/* Set PID */
	mfmsr	r4			/* Get MSR */
	andi.	r4, r4, MSR_IS@l	/* TS=1? */
	beq	1f			/* If not, leave STS=0 */
	oris	r3, r3, PPC47x_MMUCR_STS@h /* Set STS=1 */
1:	mtspr	SPRN_MMUCR, r3		/* Put MMUCR */
	/* Find the entry we are running from */
	tlbre	r24, r23, 0		/* TLB Word 0 */
	tlbre	r25, r23, 1		/* TLB Word 1 */
	tlbre	r26, r23, 2		/* TLB Word 2 */
	/*
	 * Invalidate all the TLB entries by writing to 256 RPNs (r4)
	 * of 4K page size in all 4 ways (0-3 in r3).
	 * This invalidates the entire UTLB, including the entry we are
	 * running from. However, the shadow TLB entries let us continue
	 * executing until we flush them (rfi/isync).
	 */
	addis	r3, 0, 0x8000		/* specify the way */
	addi	r4, 0, 0		/* TLB Word0 = (EPN=0, VALID = 0) */

	/* Align the loop to speed things up (from head_44x.S) */

	addis	r3, r3, 0x2000		/* Increment the way */
	addis	r4, r4, 0x100		/* Increment the EPN */
	/* Create the entries in the other address space */
	rlwinm	r7, r5, 27, 31, 31	/* Get the TS (Bit 26) from MSR */
	xori	r7, r7, 1		/* r7 = !TS */

	insrwi	r24, r7, 1, 21		/* Change the TS in the saved TLB word 0 */

	/*
	 * Write out the TLB entries for the tmp mapping.
	 * Use way '0' so that we can easily invalidate it later.
	 */
	lis	r3, 0x8000		/* Way '0' */

	/* Update the MSR to the new TS */
	insrwi	r5, r7, 1, 26

	addi	r6, r6, (2f-1b)
	/*
	 * Now we are in the tmp address space.
	 * Create a 1:1 mapping for 0-2GiB in the original TS.
	 */
	li	r4, 0			/* TLB Word 0 */
	li	r5, 0			/* TLB Word 1 */
	ori	r6, r6, PPC47x_TLB2_S_RWX	/* TLB Word 2 */

	li	r8, 0			/* PageIndex */

	xori	r7, r7, 1		/* Revert back to the original TS */

	rotlwi	r5, r8, 28		/* RPN = PageIndex * 256M */
	/* ERPN = 0 as we don't use memory above 2G */

	mr	r4, r5			/* EPN = RPN */
	ori	r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
	insrwi	r4, r7, 1, 21		/* Insert the TS into Word 0 */

	tlbwe	r4, r3, 0		/* Write out the entries */

	cmpwi	r8, 8			/* Have we completed? */
	/* Make sure we complete the TLB write up */

	/*
	 * Prepare to jump to the 1:1 mapping.
	 * 1) Extract the page size of the tmp mapping:
	 *    DSIZ = TLB_Word0[22:27]
	 * 2) Calculate the physical address of the address we are
	 *    about to jump to.
	 */
	rlwinm	r10, r24, 0, 22, 27

	cmpwi	r10, PPC47x_TLB0_4K
	li	r10, 0x1000		/* r10 = 4k */

	/* Defaults to 256M */

	addi	r4, r4, (2f-1b)		/* virtual address of 2f */

	subi	r11, r10, 1		/* offsetmask = PageSize - 1 */
	not	r10, r11		/* PageMask = ~offsetmask */

	and	r5, r25, r10		/* Physical page */
	and	r6, r4, r11		/* Offset within the current page */

	or	r5, r5, r6		/* Physical address for 2f */
	/* Switch the TS in MSR to the original one */
	insrwi	r8, r7, 1, 26

	/* Invalidate the tmp mapping */
	lis	r3, 0x8000		/* Way '0' */

	clrrwi	r24, r24, 12		/* Clear the valid bit */

	/* Make sure we complete the TLB write and flush the shadow TLB */
	/* Restore the parameters */
	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */
	ori	r8, r8, MSR_RI|MSR_ME
	addi	r8, r4, 1f - relocate_new_kernel

	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
	li	r6, 0		/* checksum */

0:	/* top, read another word for the indirection page */

	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31	/* IND_DESTINATION (1<<0) */

	rlwinm	r8, r0, 0, 0, 19	/* clear kexec flags, page align */

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30	/* IND_INDIRECTION (1<<1) */

	rlwinm	r3, r0, 0, 0, 19	/* clear kexec flags, page align */

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29	/* IND_DONE (1<<2) */

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28	/* IND_SOURCE (1<<3) */

	rlwinm	r9, r0, 0, 0, 19	/* clear kexec flags, page align */

	li	r7, PAGE_SIZE / 4

	lwzu	r0, 4(r9)	/* do the copy */
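/*
 * C-level sketch of the indirection-page walk above (an assumption
 * for clarity; the IND_* flags live in include/linux/kexec.h):
 *
 *	for (entry = page_list; ; entry++) {
 *		word = *entry;
 *		if (word & IND_DESTINATION)
 *			dst = word & PAGE_MASK;		// where to copy next
 *		else if (word & IND_INDIRECTION)
 *			entry = (unsigned long *)(word & PAGE_MASK) - 1;
 *		else if (word & IND_DONE)
 *			break;
 *		else if (word & IND_SOURCE) {
 *			copy_page(dst, word & PAGE_MASK);
 *			dst += PAGE_SIZE;
 *		}
 *	}
 */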
	/* To be certain of avoiding problems with self-modifying code,
	 * execute a serializing instruction here.
	 */
	mfspr	r3, SPRN_PIR	/* current core we are running on */
	mr	r4, r5		/* load physical address of chunk called */

	/* jump to the entry point, usually the setup routine */
relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel