#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/bootmem.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
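
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * kmap() may sleep, so it is valid only in process context.  A hypothetical
 * caller copying out of a possibly-highmem page might look like:
 *
 *	char *vaddr = kmap(page);
 *	memcpy(buf, vaddr + offset, len);
 *	kunmap(page);
 *
 * where 'page', 'buf', 'offset' and 'len' are assumed to be supplied by
 * the caller.  Note kunmap() takes the struct page, not the address.
 */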

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	pte_t pte = mk_pte(page, prot);
	unsigned long vaddr;
	int idx, type;

	/* preempt_disable_nort() is a no-op on PREEMPT_RT_FULL kernels */
	preempt_disable_nort();
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	/* each CPU owns a private window of KM_TYPE_NR fixmap slots */
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#ifdef CONFIG_PREEMPT_RT_FULL
	/* shadow the pte so the RT scheduler can restore it on task switch */
	current->kmap_pte[type] = pte;
#endif
	set_pte(kmap_pte - idx, pte);
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(kmap_atomic);
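
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * per the comment above, the section between map and unmap must be short
 * and must not sleep, since the mapping slot is per-CPU and pagefaults
 * are disabled.  A hypothetical page-clearing helper, assuming 'page' is
 * supplied by the caller:
 *
 *	void *vaddr = kmap_atomic(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(vaddr);
 *
 * Note that kunmap_atomic() takes the returned kernel virtual address,
 * whereas kunmap() takes the struct page.
 */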

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
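
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * kmap_atomic_pfn() suits physical ranges that lack a struct page, e.g.
 * a hypothetical device frame at 'pfn':
 *
 *	void *vaddr = kmap_atomic_pfn(pfn);
 *	// touch at most one page starting at vaddr, without sleeping
 *	kunmap_atomic(vaddr);
 */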

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	/*
	 * Fixmap virtual addresses descend as the index grows, so
	 * FIX_KMAP_END is the lower bound and FIX_KMAP_BEGIN the upper.
	 */
	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is a bad idea also, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
#ifdef CONFIG_PREEMPT_RT_FULL
		current->kmap_pte[type] = __pte(0);
#endif
		kpte_clear_flush(kmap_pte - idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
	preempt_enable_nort();
}
EXPORT_SYMBOL(__kunmap_atomic);

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/*
	 * Explicitly reset zone->managed_pages because
	 * set_highmem_pages_init() is invoked before free_all_bootmem().
	 */
	reset_all_zones_managed_pages();
	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
}