1 #ifndef __LINUX_PREEMPT_H
2 #define __LINUX_PREEMPT_H
5 * include/linux/preempt.h - macros for accessing and manipulating
6 * preempt_count (used for kernel preemption, interrupt count, etc.)
9 #include <linux/linkage.h>
10 #include <linux/list.h>
/*
13 * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
14 * the other bits -- can't include that header due to inclusion hell.
 */
/* Need-resched flag folded into preempt_count; arch handling is in <asm/preempt.h>. */
16 #define PREEMPT_NEED_RESCHED 0x80000000
18 #include <asm/preempt.h>
20 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
/* Debug/tracer builds route count changes through out-of-line implementations. */
21 extern void preempt_count_add(int val);
22 extern void preempt_count_sub(int val);
/* Drop the count, then ask the arch helper whether a reschedule is due. */
23 #define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
/*
 * NOTE(review): the #else separating the plain inlined variants below (and
 * the matching #endif) appear elided in this view -- verify against the
 * full header before editing.
 */
25 #define preempt_count_add(val) __preempt_count_add(val)
26 #define preempt_count_sub(val) __preempt_count_sub(val)
27 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
/* Convenience inc/dec wrappers around the add/sub primitives above. */
30 #define __preempt_count_inc() __preempt_count_add(1)
31 #define __preempt_count_dec() __preempt_count_sub(1)
33 #define preempt_count_inc() preempt_count_add(1)
34 #define preempt_count_dec() preempt_count_sub(1)
36 #ifdef CONFIG_PREEMPT_LAZY
/*
 * Lazy-preemption counter; stored per-task in thread_info (see
 * preempt_lazy_count() below), with add/sub/inc/dec helpers.
 */
37 #define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
38 #define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
39 #define inc_preempt_lazy_count() add_preempt_lazy_count(1)
40 #define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
41 #define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
/*
 * !CONFIG_PREEMPT_LAZY: all lazy helpers compile away; the count reads as 0.
 * NOTE(review): the #else (and trailing #endif) for this branch appear
 * elided in this view -- verify against the full header before editing.
 */
43 #define add_preempt_lazy_count(val) do { } while (0)
44 #define sub_preempt_lazy_count(val) do { } while (0)
45 #define inc_preempt_lazy_count() do { } while (0)
46 #define dec_preempt_lazy_count() do { } while (0)
47 #define preempt_lazy_count() (0)
50 #ifdef CONFIG_PREEMPT_COUNT
/*
 * preempt_disable()/preempt_lazy_disable() bump their respective counts;
 * sched_preempt_enable_no_resched() drops the count without rescheduling.
 * NOTE(review): the do { ... } while (0) bodies and barrier() lines of
 * these multi-line macros appear elided in this view (each '\'-continued
 * define is incomplete) -- verify against the full header before editing.
 */
52 #define preempt_disable() \
54 preempt_count_inc(); \
58 #define preempt_lazy_disable() \
60 inc_preempt_lazy_count(); \
64 #define sched_preempt_enable_no_resched() \
67 preempt_count_dec(); \
70 #ifdef CONFIG_PREEMPT_RT_BASE
/* On RT, "enable without resched" must genuinely not reschedule. */
71 # define preempt_enable_no_resched() sched_preempt_enable_no_resched()
72 # define preempt_check_resched_rt() preempt_check_resched()
/*
 * !CONFIG_PREEMPT_RT_BASE variants.
 * NOTE(review): the #else separating the two branches (and the closing
 * #endif) appear elided in this view -- verify against the full header.
 */
74 # define preempt_enable_no_resched() preempt_enable()
/*
 * Fixed: dropped the stray trailing ';' from the macro body.  It expanded
 * preempt_check_resched_rt() into two statements, which breaks call sites
 * such as `if (c) preempt_check_resched_rt(); else ...`, and it was
 * inconsistent with the !CONFIG_PREEMPT_COUNT definition of the same
 * macro further down (`barrier()` with no semicolon).
 */
75 # define preempt_check_resched_rt() barrier()
/*
 * CONFIG_PREEMPT: preempt_enable() drops the count and calls
 * __preempt_schedule() if the dec-and-test says a resched is due;
 * preempt_check_resched() reschedules when should_resched() is true;
 * preempt_lazy_enable() drops the lazy count and then checks.  The second
 * preempt_enable()/preempt_check_resched() pair below is the !CONFIG_PREEMPT
 * variant (count drop only, no reschedule).
 * NOTE(review): the surrounding #ifdef CONFIG_PREEMPT/#else/#endif lines and
 * the do { ... } while (0) / barrier() scaffolding of these multi-line
 * macros appear elided in this view -- verify before editing.
 */
79 #define preempt_enable() \
82 if (unlikely(preempt_count_dec_and_test())) \
83 __preempt_schedule(); \
86 #define preempt_check_resched() \
88 if (should_resched()) \
89 __preempt_schedule(); \
92 #define preempt_lazy_enable() \
94 dec_preempt_lazy_count(); \
96 preempt_check_resched(); \
100 #define preempt_enable() \
103 preempt_count_dec(); \
105 #define preempt_check_resched() do { } while (0)
/*
 * _notrace variants use the raw __preempt_count helpers so they are safe
 * inside the tracer itself (no tracing hooks on the count update).
 * NOTE(review): do { ... } while (0) bodies of these '\'-continued macros
 * appear elided in this view -- verify before editing.
 */
108 #define preempt_disable_notrace() \
110 __preempt_count_inc(); \
114 #define preempt_enable_no_resched_notrace() \
117 __preempt_count_dec(); \
120 #ifdef CONFIG_PREEMPT
/* Without context tracking, the context-aware reschedule is the plain one. */
122 #ifndef CONFIG_CONTEXT_TRACKING
123 #define __preempt_schedule_context() __preempt_schedule()
/*
 * CONFIG_PREEMPT: notrace enable reschedules via the context-tracking-aware
 * entry point when the count drops to zero; the second definition below is
 * the !CONFIG_PREEMPT variant (count drop only).
 * NOTE(review): the #endif for CONFIG_CONTEXT_TRACKING and the #else/#endif
 * for CONFIG_PREEMPT appear elided in this view -- verify before editing.
 */
126 #define preempt_enable_notrace() \
129 if (unlikely(__preempt_count_dec_and_test())) \
130 __preempt_schedule_context(); \
133 #define preempt_enable_notrace() \
136 __preempt_count_dec(); \
140 #else /* !CONFIG_PREEMPT_COUNT */
/*
143 * Even if we don't have any preemption, we need preempt disable/enable
144 * to be barriers, so that we don't have things like get_user/put_user
145 * that can cause faults and scheduling migrate into our preempt-protected
 * region.  (NOTE(review): comment delimiters restored; the tail of the
 * original sentence appears elided in this view.)
 */
148 #define preempt_disable() barrier()
149 #define sched_preempt_enable_no_resched() barrier()
150 #define preempt_enable_no_resched() barrier()
151 #define preempt_enable() barrier()
152 #define preempt_check_resched() do { } while (0)
154 #define preempt_disable_notrace() barrier()
155 #define preempt_enable_no_resched_notrace() barrier()
156 #define preempt_enable_notrace() barrier()
157 #define preempt_check_resched_rt() barrier()
159 #endif /* CONFIG_PREEMPT_COUNT */
/*
163 * Modules have no business playing preemption tricks.
 * NOTE(review): these #undefs are normally guarded by #ifdef MODULE; that
 * guard (and its #endif) appear elided in this view -- verify.
 */
165 #undef sched_preempt_enable_no_resched
166 #undef preempt_enable_no_resched
167 #undef preempt_enable_no_resched_notrace
168 #undef preempt_check_resched
/*
 * preempt_set_need_resched(): unconditionally mark a resched as needed in
 * preempt_count.  preempt_fold_need_resched(): propagate the TIF flag
 * (tif_need_resched_now()) into preempt_count only when it is set.
 * NOTE(review): the do { ... } while (0) wrappers of these '\'-continued
 * macros appear elided in this view -- verify before editing.
 */
171 #define preempt_set_need_resched() \
173 set_preempt_need_resched(); \
175 #define preempt_fold_need_resched() \
177 if (tif_need_resched_now()) \
178 set_preempt_need_resched(); \
181 #ifdef CONFIG_PREEMPT_RT_FULL
/*
 * RT: the _rt helpers really disable/enable preemption, while the _nort
 * ("not on RT") helpers degrade to compiler barriers.
 */
182 # define preempt_disable_rt() preempt_disable()
183 # define preempt_enable_rt() preempt_enable()
184 # define preempt_disable_nort() barrier()
185 # define preempt_enable_nort() barrier()
/*
 * migrate_disable/enable: out-of-line on SMP, compiler barriers on UP.
 * NOTE(review): the `# ifdef CONFIG_SMP` opening this inner conditional
 * appears elided in this view (its # else/# endif are visible below) --
 * verify before editing.
 */
187 extern void migrate_disable(void);
188 extern void migrate_enable(void);
189 # else /* CONFIG_SMP */
190 # define migrate_disable() barrier()
191 # define migrate_enable() barrier()
192 # endif /* CONFIG_SMP */
/*
 * !CONFIG_PREEMPT_RT_FULL: _rt helpers are barriers, _nort helpers map to
 * the real preempt_disable/enable, and migration control falls back to
 * plain preemption control.
 * NOTE(review): the #else for !CONFIG_PREEMPT_RT_FULL (and its #endif)
 * appear elided in this view -- verify before editing.
 */
194 # define preempt_disable_rt() barrier()
195 # define preempt_enable_rt() barrier()
196 # define preempt_disable_nort() preempt_disable()
197 # define preempt_enable_nort() preempt_enable()
198 # define migrate_disable() preempt_disable()
199 # define migrate_enable() preempt_enable()
202 #ifdef CONFIG_PREEMPT_NOTIFIERS
204 struct preempt_notifier;
/**
207 * preempt_ops - notifiers called when a task is preempted and rescheduled
208 * @sched_in: we're about to be rescheduled:
209 * notifier: struct preempt_notifier for the task being scheduled
210 * cpu: cpu we're scheduled on
211 * @sched_out: we've just been preempted
212 * notifier: struct preempt_notifier for the task being preempted
213 * next: the task that's kicking us out
215 * Please note that sched_in and out are called under different
216 * contexts. sched_out is called with rq lock held and irq disabled
217 * while sched_in is called without rq lock and irq enabled. This
218 * difference is intentional and depended upon by its users.
 */
/*
 * NOTE(review): the `struct preempt_ops {` opener and its closing `};`
 * around the two callback members below appear elided in this view --
 * verify against the full header before editing.
 */
221 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
222 void (*sched_out)(struct preempt_notifier *notifier,
223 struct task_struct *next);
/**
227 * preempt_notifier - key for installing preemption notifiers
228 * @link: internal use
229 * @ops: defines the notifier functions to be called
231 * Usually used in conjunction with container_of().
 */
233 struct preempt_notifier {
234 struct hlist_node link;
235 struct preempt_ops *ops;
/* NOTE(review): the closing `};` of struct preempt_notifier appears elided. */
238 void preempt_notifier_register(struct preempt_notifier *notifier);
239 void preempt_notifier_unregister(struct preempt_notifier *notifier);
/*
 * preempt_notifier_init - initialize a notifier before registration.
 * Fixed: "¬ifier" was a mis-encoded "&notifier" -- the "&not" prefix of
 * the HTML-ish sequence had been collapsed into U+00AC (NOT SIGN), which
 * is not valid C.  Restored the address-of expression.
 * NOTE(review): the function's braces and the `notifier->ops = ops;`
 * assignment appear elided in this view -- verify against the full header.
 */
241 static inline void preempt_notifier_init(struct preempt_notifier *notifier,
242 struct preempt_ops *ops)
244 INIT_HLIST_NODE(&notifier->link);
250 #endif /* __LINUX_PREEMPT_H */