/*
 * kernel/arch/x86/mm/highmem_32.c (from kvmfornfv.git)
 *
 * Based on RT Linux 4.1.3-rt3.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/bootmem.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);
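
/*
 * Illustrative usage sketch, not part of this file: kmap() may sleep, so
 * it is only valid in process context.  Touching a (possibly highmem)
 * page typically looks roughly like this, where "page" stands for a
 * struct page the caller already holds:
 *
 *	char *vaddr = kmap(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 */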

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed and because no global TLB invalidation
 * is required (the kmap code must flush the TLB globally whenever the
 * kmap pool wraps).
 *
 * However, while holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	pte_t pte = mk_pte(page, prot);
	unsigned long vaddr;
	int idx, type;

	preempt_disable_nort();
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	/* Each CPU owns KM_TYPE_NR fixmap slots; grab the next free one. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#ifdef CONFIG_PREEMPT_RT_FULL
	/*
	 * On RT the task may be preempted while holding the atomic kmap;
	 * record the pte per task so the mapping can be restored on a
	 * context switch.
	 */
	current->kmap_pte[type] = pte;
#endif
	set_pte(kmap_pte-idx, pte);
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(kmap_atomic);
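
/*
 * Illustrative usage sketch, not part of this file: atomic kmaps may not
 * sleep while held and nest in LIFO order.  Copying between two caller
 * provided pages ("src_page" and "dst_page" are placeholders) looks
 * roughly like this:
 *
 *	void *src = kmap_atomic(src_page);
 *	void *dst = kmap_atomic(dst_page);
 *	memcpy(dst, src, PAGE_SIZE);
 *	kunmap_atomic(dst);
 *	kunmap_atomic(src);
 *
 * kunmap_atomic() takes the virtual address returned by kmap_atomic(),
 * and the mappings must be released in reverse order of creation.
 */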

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
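
/*
 * Illustrative usage sketch, not part of this file: for a page frame with
 * no struct page (e.g. some reserved or device memory), map by page frame
 * number instead; "pfn" stands for a caller-supplied frame number:
 *
 *	void *vaddr = kmap_atomic_pfn(pfn);
 *	... access the page through vaddr ...
 *	kunmap_atomic(vaddr);
 */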

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
#ifdef CONFIG_PREEMPT_RT_FULL
		current->kmap_pte[type] = __pte(0);
#endif
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
	preempt_enable_nort();
}
EXPORT_SYMBOL(__kunmap_atomic);

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
EXPORT_SYMBOL(kmap_atomic_to_page);
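
/*
 * Illustrative usage sketch, not part of this file: recover the struct
 * page backing an address obtained from kmap_atomic(); "page" stands for
 * the page that was originally mapped, so p ends up equal to page:
 *
 *	void *vaddr = kmap_atomic(page);
 *	struct page *p = kmap_atomic_to_page(vaddr);
 *	kunmap_atomic(vaddr);
 */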

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
	 * is invoked before free_all_bootmem()
	 */
	reset_all_zones_managed_pages();
	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
				zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
				 zone_end_pfn);
	}
}