These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / arch / arm / mm / highmem.c
1 /*
2  * arch/arm/mm/highmem.c -- ARM highmem support
3  *
4  * Author:      Nicolas Pitre
5  * Created:     september 8, 2008
6  * Copyright:   Marvell Semiconductors Inc.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12
13 #include <linux/module.h>
14 #include <linux/highmem.h>
15 #include <linux/interrupt.h>
16 #include <asm/fixmap.h>
17 #include <asm/cacheflush.h>
18 #include <asm/tlbflush.h>
19 #include "mm.h"
20
21 static inline void set_fixmap_pte(int idx, pte_t pte)
22 {
23         unsigned long vaddr = __fix_to_virt(idx);
24         pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
25
26         set_pte_ext(ptep, pte, 0);
27         local_flush_tlb_kernel_page(vaddr);
28 }
29
30 static inline pte_t get_fixmap_pte(unsigned long vaddr)
31 {
32         pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
33
34         return *ptep;
35 }
36
37 static unsigned int fixmap_idx(int type)
38 {
39         return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
40 }
41
/*
 * Map @page into the kernel address space; may sleep.
 * Lowmem pages already have a permanent mapping, so they resolve via
 * page_address(); highmem pages go through kmap_high().
 */
void *kmap(struct page *page)
{
	might_sleep();
	return PageHighMem(page) ? kmap_high(page) : page_address(page);
}
EXPORT_SYMBOL(kmap);
50
/*
 * Release a mapping created by kmap().  Must not be called from
 * interrupt context; only highmem pages actually need unmapping.
 */
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (PageHighMem(page))
		kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
59
/*
 * Map @page for short-lived atomic access and return its kernel address.
 *
 * Lowmem pages are returned directly via page_address().  Highmem pages
 * claim a per-CPU fixmap slot (unless an existing pkmap mapping can be
 * reused via kmap_high_get()).  Paired with __kunmap_atomic().
 */
void *kmap_atomic(struct page *page)
{
	pte_t pte = mk_pte(page, kmap_prot);
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;
	int type;

	/* _nort: disables preemption only on !PREEMPT_RT_FULL configs;
	 * on RT the mapping migrates with the task via switch_kmaps(). */
	preempt_disable_nort();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non VIVT, so force the
	 * dedicated kmap usage for better debugging purposes in that case.
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
		kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	type = kmap_atomic_idx_push();

	idx = fixmap_idx(type);
	vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so the contained TLB flush ensures the TLB is updated
	 * with the new mapping.
	 */
#ifdef CONFIG_PREEMPT_RT_FULL
	/* Shadow copy for switch_kmaps(); NOTE(review): recorded before
	 * set_fixmap_pte() — presumably so a context switch never finds a
	 * live fixmap entry missing from kmap_pte[]; confirm ordering. */
	current->kmap_pte[type] = pte;
#endif
	set_fixmap_pte(idx, pte);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
110
/*
 * Undo kmap_atomic().  @kvaddr is one of:
 *  - a fixmap address from kmap_atomic(): tear down the per-CPU slot;
 *  - a pkmap address handed out by kmap_high_get(): drop its refcount;
 *  - a lowmem address: nothing to unmap.
 */
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = fixmap_idx(type);

		/* VIVT caches tag by virtual address: flush before this
		 * virtual address is reused for a different page. */
		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_PREEMPT_RT_FULL
		/* Keep the per-task shadow copy in sync for switch_kmaps(). */
		current->kmap_pte[type] = __pte(0);
#endif
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(idx));
#else
		(void) idx;  /* to kill a warning */
#endif
		set_fixmap_pte(idx, __pte(0));
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
	preempt_enable_nort();
}
EXPORT_SYMBOL(__kunmap_atomic);
140
/*
 * Like kmap_atomic(), but the page is identified by pfn rather than
 * struct page.  Lowmem pfns resolve directly via page_address();
 * highmem pfns are mapped into a per-CPU fixmap slot.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	pte_t pte = pfn_pte(pfn, kmap_prot);
	unsigned long vaddr;
	int idx, type;
	struct page *page = pfn_to_page(pfn);

	preempt_disable_nort();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = fixmap_idx(type);
	vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* The slot must have been cleared by the previous kunmap_atomic(). */
	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
#ifdef CONFIG_PREEMPT_RT_FULL
	/* Keep the per-task shadow copy in sync for switch_kmaps(). */
	current->kmap_pte[type] = pte;
#endif
	set_fixmap_pte(idx, pte);

	return (void *)vaddr;
}
#ifdef CONFIG_PREEMPT_RT_FULL
/*
 * switch_kmaps - migrate kmap_atomic fixmap slots across a task switch
 * @prev_p: task being scheduled out
 * @next_p: task being scheduled in
 *
 * On PREEMPT_RT a task may be preempted while holding kmap_atomic
 * mappings, so those mappings must follow the task rather than the CPU:
 * every slot @prev_p had pushed is torn down, then the ptes recorded in
 * @next_p->kmap_pte[] are re-installed into this CPU's fixmap slots.
 *
 * Consistency fix: use "#ifdef CONFIG_PREEMPT_RT_FULL" to match the
 * other conditionals in this file ("#if defined" was the odd one out).
 */
void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
{
	int i;

	/*
	 * Clear @prev_p's kmap_atomic mappings
	 */
	for (i = 0; i < prev_p->kmap_idx; i++) {
		int idx = fixmap_idx(i);

		set_fixmap_pte(idx, __pte(0));
	}
	/*
	 * Restore @next_p's kmap_atomic mappings
	 */
	for (i = 0; i < next_p->kmap_idx; i++) {
		int idx = fixmap_idx(i);

		/* Slots already cleared by __kunmap_atomic() stay empty. */
		if (!pte_none(next_p->kmap_pte[i]))
			set_fixmap_pte(idx, next_p->kmap_pte[i]);
	}
}
#endif