 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:	Nicolas Pitre
 * Created:	September 8, 2008
 * Copyright:	Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
13 #include <linux/module.h>
14 #include <linux/highmem.h>
15 #include <linux/interrupt.h>
16 #include <asm/fixmap.h>
17 #include <asm/cacheflush.h>
18 #include <asm/tlbflush.h>
21 static inline void set_fixmap_pte(int idx, pte_t pte)
23 unsigned long vaddr = __fix_to_virt(idx);
24 pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
26 set_pte_ext(ptep, pte, 0);
27 local_flush_tlb_kernel_page(vaddr);
30 static inline pte_t get_fixmap_pte(unsigned long vaddr)
32 pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
37 static unsigned int fixmap_idx(int type)
39 return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
/*
 * Map a (possibly highmem) page into the kernel's address space.
 * Lowmem pages already have a permanent mapping and are returned
 * directly; highmem pages go through the pkmap pool via kmap_high(),
 * which may sleep — hence this must not be called in atomic context.
 */
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);
/*
 * Release a mapping obtained with kmap().  Lowmem pages need no
 * teardown; highmem pages are handed back to the pkmap pool via
 * kunmap_high(), which may wake sleepers — hence the check that we are
 * not in interrupt context.
 */
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
60 void *kmap_atomic(struct page *page)
62 pte_t pte = mk_pte(page, kmap_prot);
68 preempt_disable_nort();
70 if (!PageHighMem(page))
71 return page_address(page);
73 #ifdef CONFIG_DEBUG_HIGHMEM
75 * There is no cache coherency issue when non VIVT, so force the
76 * dedicated kmap usage for better debugging purposes in that case.
82 kmap = kmap_high_get(page);
86 type = kmap_atomic_idx_push();
88 idx = fixmap_idx(type);
89 vaddr = __fix_to_virt(idx);
90 #ifdef CONFIG_DEBUG_HIGHMEM
92 * With debugging enabled, kunmap_atomic forces that entry to 0.
93 * Make sure it was indeed properly unmapped.
95 BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
98 * When debugging is off, kunmap_atomic leaves the previous mapping
99 * in place, so the contained TLB flush ensures the TLB is updated
100 * with the new mapping.
102 #ifdef CONFIG_PREEMPT_RT_FULL
103 current->kmap_pte[type] = pte;
105 set_fixmap_pte(idx, pte);
107 return (void *)vaddr;
109 EXPORT_SYMBOL(kmap_atomic);
111 void __kunmap_atomic(void *kvaddr)
113 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
116 if (kvaddr >= (void *)FIXADDR_START) {
117 type = kmap_atomic_idx();
118 idx = fixmap_idx(type);
121 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
122 #ifdef CONFIG_PREEMPT_RT_FULL
123 current->kmap_pte[type] = __pte(0);
125 #ifdef CONFIG_DEBUG_HIGHMEM
126 BUG_ON(vaddr != __fix_to_virt(idx));
128 (void) idx; /* to kill a warning */
130 set_fixmap_pte(idx, __pte(0));
131 kmap_atomic_idx_pop();
132 } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
133 /* this address was obtained through kmap_high_get() */
134 kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
137 preempt_enable_nort();
139 EXPORT_SYMBOL(__kunmap_atomic);
141 void *kmap_atomic_pfn(unsigned long pfn)
143 pte_t pte = pfn_pte(pfn, kmap_prot);
146 struct page *page = pfn_to_page(pfn);
148 preempt_disable_nort();
150 if (!PageHighMem(page))
151 return page_address(page);
153 type = kmap_atomic_idx_push();
154 idx = fixmap_idx(type);
155 vaddr = __fix_to_virt(idx);
156 #ifdef CONFIG_DEBUG_HIGHMEM
157 BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
159 #ifdef CONFIG_PREEMPT_RT_FULL
160 current->kmap_pte[type] = pte;
162 set_fixmap_pte(idx, pte);
164 return (void *)vaddr;
166 #if defined CONFIG_PREEMPT_RT_FULL
167 void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
172 * Clear @prev's kmap_atomic mappings
174 for (i = 0; i < prev_p->kmap_idx; i++) {
175 int idx = fixmap_idx(i);
177 set_fixmap_pte(idx, __pte(0));
180 * Restore @next_p's kmap_atomic mappings
182 for (i = 0; i < next_p->kmap_idx; i++) {
183 int idx = fixmap_idx(i);
185 if (!pte_none(next_p->kmap_pte[i]))
186 set_fixmap_pte(idx, next_p->kmap_pte[i]);