[kvmfornfv.git] / kernel / arch / arm / include / asm / switch_to.h
#ifndef __ASM_ARM_SWITCH_TO_H
#define __ASM_ARM_SWITCH_TO_H

#include <linux/thread_info.h>

#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
#else
static inline void
switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
#endif

/*
 * For v7 SMP cores running a preemptible kernel we may be pre-empted
 * during a TLB maintenance operation, so execute an inner-shareable dsb
 * to ensure that the maintenance completes in case we migrate to another
 * CPU.
 */
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
#define __complete_pending_tlbi()	dsb(ish)
#else
#define __complete_pending_tlbi()
#endif

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	__complete_pending_tlbi();					\
	switch_kmaps(prev, next);					\
	last = __switch_to(prev, task_thread_info(prev), task_thread_info(next));	\
} while (0)

#endif /* __ASM_ARM_SWITCH_TO_H */
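
For context, a minimal sketch of how a caller drives the three-argument switch_to() macro defined above. This is not part of the header: the real call site is context_switch() in kernel/sched/core.c, whose runqueue locking and finish_task_switch() handling are elided here, and example_switch_out() is a hypothetical wrapper used only for illustration.

/*
 * Illustrative sketch, not kernel source: shows the calling convention of
 * the three-argument switch_to() macro.  example_switch_out() is a made-up
 * name; the real scheduler does this work inside context_switch().
 */
#include <linux/sched.h>
#include <asm/switch_to.h>

static struct task_struct *example_switch_out(struct task_struct *prev,
					      struct task_struct *next)
{
	struct task_struct *last;

	/*
	 * Switch register state and stack over to `next'.  When `prev' is
	 * eventually scheduled back in, execution resumes after the macro
	 * and `last' names the task that was running immediately before us.
	 */
	switch_to(prev, next, last);
	barrier();

	return last;
}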