/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>
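
/*
 * When CONFIG_MIPS_ELF_APPENDED_DTB is set, __appended_dtb below reserves
 * 1MB of space in the kernel image into which a device tree blob can be
 * appended or patched after the build, for platforms whose bootloader
 * cannot hand over a DTB itself.
 */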

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
const char __section(.appended_dtb) __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Despite its name, this variable is used even if we don't have PCI.
 */
unsigned int PCI_DMA_BUS_IS_PHYS;

EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);

/*
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86 style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);
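
/*
 * Note: the -1 initializer makes any port I/O issued before a platform
 * calls set_io_port_base() hit an obviously bogus address, so a missing
 * initialization faults early instead of silently touching random memory.
 */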

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };

static void *detect_magic __initdata = detect_memory_region;
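
/*
 * detect_magic holds its initializer (the address of detect_memory_region)
 * as a recognizable pattern in RAM; detect_memory_region() below looks for
 * copies of that pattern at power-of-two offsets to detect address aliasing.
 */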

void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
        int x = boot_mem_map.nr_map;
        int i;

        /* Sanity check */
        if (start + size < start) {
                pr_warn("Trying to add an invalid memory region, skipped\n");
                return;
        }

        /*
         * Try to merge with existing entry, if any.
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                struct boot_mem_map_entry *entry = boot_mem_map.map + i;
                unsigned long top;

                if (entry->type != type)
                        continue;

                if (start + size < entry->addr)
                        continue;       /* no overlap */

                if (entry->addr + entry->size < start)
                        continue;       /* no overlap */

                top = max(entry->addr + entry->size, start + size);
                entry->addr = min(entry->addr, start);
                entry->size = top - entry->addr;

                return;
        }

        if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
                pr_err("Ooops! Too many entries in the memory map!\n");
                return;
        }

        boot_mem_map.map[x].addr = start;
        boot_mem_map.map[x].size = size;
        boot_mem_map.map[x].type = type;
        boot_mem_map.nr_map++;
}
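
/*
 * Probe the RAM size by doubling: if memory wraps around (aliases) at some
 * power-of-two size between sz_min and sz_max, the magic pattern reappears
 * at that offset and the loop stops there; otherwise sz_max is assumed.
 */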
void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
        void *dm = &detect_magic;
        phys_addr_t size;

        for (size = sz_min; size < sz_max; size <<= 1) {
                if (!memcmp(dm, dm + size, sizeof(detect_magic)))
                        break;
        }

        pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
                ((unsigned long long) size) / SZ_1M,
                (unsigned long long) start,
                ((unsigned long long) sz_min) / SZ_1M,
                ((unsigned long long) sz_max) / SZ_1M);

        add_memory_region(start, size, BOOT_MEM_RAM);
}

static void __init print_memory_map(void)
{
        int i;
        const int field = 2 * sizeof(unsigned long);

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
                       field, (unsigned long long) boot_mem_map.map[i].size,
                       field, (unsigned long long) boot_mem_map.map[i].addr);

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                        printk(KERN_CONT "(usable)\n");
                        break;
                case BOOT_MEM_INIT_RAM:
                        printk(KERN_CONT "(usable after init)\n");
                        break;
                case BOOT_MEM_ROM_DATA:
                        printk(KERN_CONT "(ROM data)\n");
                        break;
                case BOOT_MEM_RESERVED:
                        printk(KERN_CONT "(reserved)\n");
                        break;
                default:
                        printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
                        break;
                }
        }
}

#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
        unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
        /* Guess if the sign extension was forgotten by bootloader */
        if (start < XKPHYS)
                start = (int)start;
#endif
        initrd_start = start;
        initrd_end += start;
        return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
        initrd_end += memparse(p, &p);
        return 0;
}
early_param("rd_size", rd_size_early);
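
/*
 * rd_start seeds both initrd_start and initrd_end, and rd_size then adds
 * the image size, so [initrd_start, initrd_end) brackets the ramdisk no
 * matter in which order the two options are parsed.
 */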

/* it returns the next free pfn after initrd */
static unsigned long __init init_initrd(void)
{
        unsigned long end;

        /*
         * Board specific code or command line parser should have
         * already set up initrd_start and initrd_end. In these cases
         * perform sanity checks and use them if all looks good.
         */
        if (!initrd_start || initrd_end <= initrd_start)
                goto disable;

        if (initrd_start & ~PAGE_MASK) {
                pr_err("initrd start must be page aligned\n");
                goto disable;
        }
        if (initrd_start < PAGE_OFFSET) {
                pr_err("initrd start < PAGE_OFFSET\n");
                goto disable;
        }

        /*
         * Sanitize initrd addresses. For example firmware
         * can't guess if they need to pass them through
         * 64-bits values if the kernel has been built in pure
         * 32-bit. We also need to switch from KSEG0 to XKPHYS
         * addresses now, so the code can safely use __pa().
         */
        end = __pa(initrd_end);
        initrd_end = (unsigned long)__va(end);
        initrd_start = (unsigned long)__va(__pa(initrd_start));

        ROOT_DEV = Root_RAM0;
        return PFN_UP(end);
disable:
        initrd_start = 0;
        initrd_end = 0;
        return 0;
}

static void __init finalize_initrd(void)
{
        unsigned long size = initrd_end - initrd_start;

        if (size == 0) {
                printk(KERN_INFO "Initrd not found or empty");
                goto disable;
        }
        if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
                printk(KERN_ERR "Initrd extends beyond end of memory");
                goto disable;
        }

        reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
        initrd_below_start_ok = 1;

        pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
                initrd_start, size);
        return;
disable:
        printk(KERN_CONT " - disabling initrd\n");
        initrd_start = 0;
        initrd_end = 0;
}

#else  /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
        return 0;
}

#define finalize_initrd()       do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
        init_initrd();
        finalize_initrd();
}

#else  /* !CONFIG_SGI_IP27 */

static void __init bootmem_init(void)
{
        unsigned long reserved_end;
        unsigned long mapstart = ~0UL;
        unsigned long bootmap_size;
        int i;

        /*
         * Sanity check any INITRD first. We don't take it into account
         * for bootmem setup initially, rely on the end-of-kernel-code
         * as our memory range starting point. Once bootmem is inited we
         * will reserve the area used for the initrd.
         */
        init_initrd();
        reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

        /*
         * max_low_pfn is not a number of pages. The number of pages
         * of the system is given by 'max_low_pfn - min_low_pfn'.
         */
        min_low_pfn = ~0UL;
        max_low_pfn = 0;

        /*
         * Find the highest page frame number we have available.
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end;

                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
                        continue;

                start = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr
                                + boot_mem_map.map[i].size);

                if (end > max_low_pfn)
                        max_low_pfn = end;
                if (start < min_low_pfn)
                        min_low_pfn = start;
                if (end <= reserved_end)
                        continue;
#ifdef CONFIG_BLK_DEV_INITRD
                /* Skip zones before initrd and initrd itself */
                if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
                        continue;
#endif
                if (start >= mapstart)
                        continue;
                mapstart = max(reserved_end, start);
        }
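
        /*
         * At this point min_low_pfn/max_low_pfn bound all BOOT_MEM_RAM
         * regions and mapstart is the lowest usable PFN above the kernel
         * image (and initrd, if any) where the bootmem bitmap can live.
         */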
        if (min_low_pfn >= max_low_pfn)
                panic("Incorrect memory mapping !!!");
        if (min_low_pfn > ARCH_PFN_OFFSET) {
                pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
                        (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
                        min_low_pfn - ARCH_PFN_OFFSET);
        } else if (min_low_pfn < ARCH_PFN_OFFSET) {
                pr_info("%lu free pages won't be used\n",
                        ARCH_PFN_OFFSET - min_low_pfn);
        }
        min_low_pfn = ARCH_PFN_OFFSET;

        /*
         * Determine low and high memory ranges
         */
        max_pfn = max_low_pfn;
        if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
                highstart_pfn = PFN_DOWN(HIGHMEM_START);
                highend_pfn = max_low_pfn;
#endif
                max_low_pfn = PFN_DOWN(HIGHMEM_START);
        }

#ifdef CONFIG_BLK_DEV_INITRD
        /*
         * mapstart should be after initrd_end
         */
        if (initrd_end)
                mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
#endif

        /*
         * Initialize the boot-time allocator with low memory only.
         */
        bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
                                         min_low_pfn, max_low_pfn);
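
        /*
         * The bootmem bitmap itself lives at PFN 'mapstart'; it has to be
         * reserved again below (see "Reserve the bootmap memory") because
         * the free_bootmem() loop frees whole regions, including this one.
         */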

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end;

                start = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr
                                + boot_mem_map.map[i].size);

                if (start <= min_low_pfn)
                        start = min_low_pfn;
                if (start >= end)
                        continue;

#ifndef CONFIG_HIGHMEM
                if (end > max_low_pfn)
                        end = max_low_pfn;

                /*
                 * ... finally, is the area going away?
                 */
                if (end <= start)
                        continue;
#endif

                memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
        }
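
        /*
         * Note: both allocators are live from here on: memblock has just
         * been told about low RAM for the generic zone setup, while early
         * boot allocations are still served by bootmem, populated below.
         */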

        /*
         * Register fully available low RAM pages with the bootmem allocator.
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end, size;

                start = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr
                                + boot_mem_map.map[i].size);

                /*
                 * Reserve usable memory.
                 */
                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                        break;
                case BOOT_MEM_INIT_RAM:
                        memory_present(0, start, end);
                        continue;
                default:
                        /* Not usable memory */
                        continue;
                }

                /*
                 * We round the start address of usable memory up and the
                 * end of the usable range down.
                 */
                if (start >= max_low_pfn)
                        continue;
                if (start < reserved_end)
                        start = reserved_end;
                if (end > max_low_pfn)
                        end = max_low_pfn;

                /*
                 * ... finally, is the area going away?
                 */
                if (end <= start)
                        continue;
                size = end - start;

                /* Register lowmem ranges */
                free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
                memory_present(0, start, end);
        }

        /*
         * Reserve the bootmap memory.
         */
        reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

        /*
         * Reserve initrd memory if needed.
         */
        finalize_initrd();
}

#endif  /* CONFIG_SGI_IP27 */

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *       This was rather impractical because it meant plat_mem_setup had to
 *       get away without any kind of memory allocator. To keep old code from
 *       breaking, plat_setup was just renamed to plat_mem_setup and a second
 *       platform initialization hook for anything else was introduced.
 */

static int usermem __initdata;

static int __init early_parse_mem(char *p)
{
        phys_addr_t start, size;

        /*
         * If a user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                boot_mem_map.nr_map = 0;
                usermem = 1;
        }
        start = 0;
        size = memparse(p, &p);
        if (*p == '@')
                start = memparse(p + 1, &p);

        add_memory_region(start, size, BOOT_MEM_RAM);
        return 0;
}
early_param("mem", early_parse_mem);
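
/*
 * Example: "mem=64M@0x10000000" throws away the firmware-provided map on
 * its first use and registers 64MB of RAM at physical 0x10000000; any
 * further "mem=" options append additional regions.
 */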

#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
        int i;

        setup_elfcorehdr = memparse(p, &p);

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start = boot_mem_map.map[i].addr;
                unsigned long end = (boot_mem_map.map[i].addr +
                                     boot_mem_map.map[i].size);
                if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
                        /*
                         * Reserve from the elf core header to the end of
                         * the memory segment, that should all be kdump
                         * reserved memory.
                         */
                        setup_elfcorehdr_size = end - setup_elfcorehdr;
                        break;
                }
        }
        /*
         * If we don't find it in the memory map, then we shouldn't
         * have to worry about it, as the new kernel won't use it.
         */
        return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif

static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
        phys_addr_t size;
        int i;

        size = end - mem;
        if (!size)
                return;

        /* Make sure it is in the boot_mem_map */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                if (mem >= boot_mem_map.map[i].addr &&
                    mem < (boot_mem_map.map[i].addr +
                           boot_mem_map.map[i].size))
                        return;         /* already in the map */
        }
        add_memory_region(mem, size, type);
}

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long long total;

        total = max_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

static void __init mips_parse_crashkernel(void)
{
        unsigned long long total_mem;
        unsigned long long crash_size, crash_base;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret != 0 || crash_size <= 0)
                return;

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
        int ret;

        ret = request_resource(res, &crashk_res);
        if (!ret)
                pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
                        (unsigned long)((crashk_res.end -
                                         crashk_res.start + 1) >> 20),
                        (unsigned long)(crashk_res.start >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */

#define USE_PROM_CMDLINE        IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
#define USE_DTB_CMDLINE         IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
#define EXTEND_WITH_PROM        IS_ENABLED(CONFIG_MIPS_CMDLINE_EXTEND)
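
/*
 * Command line precedence, as wired up in arch_mem_init() below: with
 * CONFIG_CMDLINE_OVERRIDE the built-in line wins outright; otherwise the
 * bootloader (arcs_cmdline) or DTB line seeds boot_command_line, the
 * bootloader line may be appended with CMDLINE_EXTEND, and a built-in
 * line is appended last when CONFIG_CMDLINE_BOOL is set.
 */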

static void __init arch_mem_init(char **cmdline_p)
{
        struct memblock_region *reg;
        extern void plat_mem_setup(void);

        /* call board setup routine */
        plat_mem_setup();

        /*
         * Make sure all kernel memory is in the maps. The "UP" and
         * "DOWN" are opposite for initdata since if it crosses over
         * into another memory section you don't want that to be
         * freed when the initdata is freed.
         */
        arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
                         PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
                         BOOT_MEM_RAM);
        arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
                         PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
                         BOOT_MEM_INIT_RAM);

        pr_info("Determined physical RAM map:\n");
        print_memory_map();

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
        if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
            (USE_DTB_CMDLINE && !boot_command_line[0]))
                strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);

        if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
                strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
                strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
        }

#if defined(CONFIG_CMDLINE_BOOL)
        if (builtin_cmdline[0]) {
                strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
                strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
        }
#endif
#endif
        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

        *cmdline_p = command_line;

        parse_early_param();

        if (usermem) {
                pr_info("User-defined physical RAM map:\n");
                print_memory_map();
        }

        bootmem_init();
#ifdef CONFIG_PROC_VMCORE
        if (setup_elfcorehdr && setup_elfcorehdr_size) {
                printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
                       setup_elfcorehdr, setup_elfcorehdr_size);
                reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
                                BOOTMEM_DEFAULT);
        }
#endif

        mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
        if (crashk_res.start != crashk_res.end)
                reserve_bootmem(crashk_res.start,
                                crashk_res.end - crashk_res.start + 1,
                                BOOTMEM_DEFAULT);
#endif
        device_tree_init();
        sparse_init();
        plat_swiotlb_setup();
        paging_init();

        dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
        /* Tell bootmem about cma reserved memblock section */
        for_each_memblock(reserved, reg)
                if (reg->size != 0)
                        reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);

        reserve_bootmem_region(__pa_symbol(&__nosave_begin),
                        __pa_symbol(&__nosave_end)); /* Reserve for hibernation */
}

static void __init resource_init(void)
{
        int i;

        if (UNCAC_BASE != IO_BASE)
                return;

        code_resource.start = __pa_symbol(&_text);
        code_resource.end = __pa_symbol(&_etext) - 1;
        data_resource.start = __pa_symbol(&_etext);
        data_resource.end = __pa_symbol(&_edata) - 1;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                struct resource *res;
                unsigned long start, end;

                start = boot_mem_map.map[i].addr;
                end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
                if (start >= HIGHMEM_START)
                        continue;
                if (end >= HIGHMEM_START)
                        end = HIGHMEM_START - 1;

                res = alloc_bootmem(sizeof(struct resource));
                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_INIT_RAM:
                case BOOT_MEM_ROM_DATA:
                        res->name = "System RAM";
                        break;
                case BOOT_MEM_RESERVED:
                default:
                        res->name = "reserved";
                }

                res->start = start;
                res->end = end;

                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                request_resource(&iomem_resource, res);

                /*
                 * We don't know which RAM region contains kernel data,
                 * so we try it repeatedly and let the resource manager
                 * test it.
                 */
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
                request_crashkernel(res);
        }
}

#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
        int i, possible = num_possible_cpus();

        if (possible > nr_cpu_ids)
                possible = nr_cpu_ids;

        for (i = 0; i < possible; i++)
                set_cpu_possible(i, true);
        for (; i < NR_CPUS; i++)
                set_cpu_possible(i, false);

        nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif
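
/*
 * setup_arch() is the architecture entry point called early from
 * start_kernel(): it probes the CPU, runs firmware and platform setup,
 * builds the memory and resource maps and initializes the CPU caches.
 */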
void __init setup_arch(char **cmdline_p)
{
        cpu_probe();
        prom_init();

        setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
        setup_early_printk();
#endif
        cpu_report();
        check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        arch_mem_init(cmdline_p);

        resource_init();
        plat_smp_setup();
        prefill_possible_map();

        cpu_cache_init();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
        struct dentry *d;

        d = debugfs_create_dir("mips", NULL);
        if (!d)
                return -ENOMEM;
        mips_debugfs_dir = d;
        return 0;
}
arch_initcall(debugfs_mips);
#endif