#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK: 0x000000ff
 *         SOFTIRQ_MASK: 0x0000ff00
 *         HARDIRQ_MASK: 0x000f0000
 *             NMI_MASK: 0x00100000
 * PREEMPT_NEED_RESCHED: 0x80000000
 */
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 4
#define NMI_BITS 1

#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x) ((1UL << (x))-1)

#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
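
/*
 * Worked example (not part of the API): a preempt_count of 0x00010102
 * decodes, using the masks above, as hardirq count 1, softirq count 1
 * and preemption depth 2 -- a hardware interrupt that arrived during a
 * softirq while preemption was disabled twice:
 *
 *	(0x00010102 & HARDIRQ_MASK) >> HARDIRQ_SHIFT == 1
 *	(0x00010102 & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT == 1
 *	(0x00010102 & PREEMPT_MASK) >> PREEMPT_SHIFT == 2
 */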

#ifndef CONFIG_PREEMPT_RT_FULL
# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
#else
# define SOFTIRQ_DISABLE_OFFSET (0)
#endif

/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED 0x80000000

/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

#ifndef CONFIG_PREEMPT_RT_FULL
# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
#else
# define softirq_count() (0UL)
extern int in_serving_softirq(void);
#endif

/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()             - hardirq context: servicing a hardware interrupt
 * in_softirq()         - currently processing a softirq, or bh disabled
 * in_interrupt()       - hardirq, softirq or NMI context
 * in_serving_softirq() - currently processing a softirq
 */
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())

/*
 * Are we in NMI context?
 */
#define in_nmi() (preempt_count() & NMI_MASK)
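
/*
 * A minimal usage sketch (my_dev and its members are hypothetical, not
 * part of this file): branch on the current context before doing
 * anything that might sleep.
 *
 *	static void my_driver_work(struct my_dev *dev)
 *	{
 *		if (in_interrupt()) {	// hardirq, softirq or NMI
 *			queue_work(system_wq, &dev->work); // defer, cannot sleep
 *			return;
 *		}
 *		mutex_lock(&dev->lock);	// process context, sleeping is fine
 *		...
 *		mutex_unlock(&dev->lock);
 *	}
 */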

/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET 0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#if !defined(CONFIG_PREEMPT_RT_FULL)
#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
#else
#define PREEMPT_LOCK_OFFSET 0
#endif

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * Work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
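
/*
 * Worked arithmetic (assuming CONFIG_PREEMPT_COUNT and a kernel without
 * PREEMPT_RT_FULL): SOFTIRQ_DISABLE_OFFSET is 2 * 0x100 = 0x200 and
 * PREEMPT_LOCK_OFFSET is 0x1, so a spin_lock_bh() section raises
 * preempt_count by SOFTIRQ_LOCK_OFFSET = 0x201.
 */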

/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic() (preempt_count() != 0)
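
/*
 * Illustration of the warning above (my_lock is a hypothetical
 * spinlock): without CONFIG_PREEMPT_COUNT, spin_lock() leaves
 * preempt_count untouched, so in_atomic() misses the held lock.
 *
 *	spin_lock(&my_lock);	// we are atomic now...
 *	in_atomic();		// ...yet this still evaluates to 0
 *	spin_unlock(&my_lock);
 */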

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val) __preempt_count_add(val)
#define preempt_count_sub(val) __preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_LAZY
#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
#else
#define add_preempt_lazy_count(val) do { } while (0)
#define sub_preempt_lazy_count(val) do { } while (0)
#define inc_preempt_lazy_count() do { } while (0)
#define dec_preempt_lazy_count() do { } while (0)
#define preempt_lazy_count() (0)
#endif

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_lazy_disable() \
do { \
	inc_preempt_lazy_count(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
# define preempt_check_resched_rt() preempt_check_resched()
#else
# define preempt_enable_no_resched() preempt_enable()
# define preempt_check_resched_rt() barrier()
#endif

#define preemptible() (preempt_count() == 0 && !irqs_disabled())
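
/*
 * Typical usage sketch (my_counter and my_other_counter are
 * hypothetical per-CPU variables): preemption is disabled around the
 * accesses so the task cannot be migrated to another CPU in between.
 *
 *	preempt_disable();
 *	this_cpu_inc(my_counter);
 *	this_cpu_inc(my_other_counter);
 *	preempt_enable();
 *
 * Each this_cpu_inc() is preempt-safe on its own; the explicit pair
 * matters when several per-CPU operations must hit the same CPU.
 */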

#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)

#define preempt_lazy_enable() \
do { \
	dec_preempt_lazy_count(); \
	barrier(); \
	preempt_check_resched(); \
} while (0)

#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPT */

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
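
/*
 * Sketch of the reordering hazard (uptr, val and my_var are
 * hypothetical): if preempt_disable() expanded to nothing, the
 * compiler could sink the faulting access into the protected region.
 *
 *	get_user(val, uptr);		// may fault and schedule
 *	preempt_disable();		// just barrier() here
 *	this_cpu_write(my_var, val);
 *	preempt_enable();
 */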

#define preempt_disable() barrier()
#define sched_preempt_enable_no_resched() barrier()
#define preempt_enable_no_resched() barrier()
#define preempt_enable() barrier()
#define preempt_check_resched() do { } while (0)

#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
#define preempt_check_resched_rt() barrier()
#define preemptible() 0

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched_now()) \
		set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_RT_FULL
# define preempt_disable_rt() preempt_disable()
# define preempt_enable_rt() preempt_enable()
# define preempt_disable_nort() barrier()
# define preempt_enable_nort() barrier()
# ifdef CONFIG_SMP
extern void migrate_disable(void);
extern void migrate_enable(void);
# else /* CONFIG_SMP */
# define migrate_disable() barrier()
# define migrate_enable() barrier()
# endif /* CONFIG_SMP */
#else
# define preempt_disable_rt() barrier()
# define preempt_enable_rt() barrier()
# define preempt_disable_nort() preempt_disable()
# define preempt_enable_nort() preempt_enable()
# define migrate_disable() preempt_disable()
# define migrate_enable() preempt_enable()
#endif
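
/*
 * Usage sketch (my_state is a hypothetical per-CPU structure): on
 * PREEMPT_RT_FULL, migrate_disable() pins the task to its current CPU
 * while leaving it preemptible, so a this_cpu_ptr() result stays valid
 * across the section; serialization against other tasks on the same
 * CPU still needs a lock.
 *
 *	migrate_disable();
 *	p = this_cpu_ptr(&my_state);	// CPU cannot change until...
 *	spin_lock(&p->lock);		// ...migrate_enable() below
 *	p->count++;
 *	spin_unlock(&p->lock);
 *	migrate_enable();
 *
 * On !RT the pair simply maps to preempt_disable()/preempt_enable().
 */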

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
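
/*
 * Registration sketch (my_vcpu and friends are hypothetical, in the
 * spirit of the KVM user of this API): embed a preempt_notifier and
 * recover the container in the callbacks.
 *
 *	struct my_vcpu {
 *		struct preempt_notifier pn;
 *		...
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *n, int cpu)
 *	{
 *		struct my_vcpu *vcpu = container_of(n, struct my_vcpu, pn);
 *		// reload per-CPU state for vcpu here
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *n,
 *				 struct task_struct *next)
 *	{
 *		struct my_vcpu *vcpu = container_of(n, struct my_vcpu, pn);
 *		// save state; called with rq lock held, irqs disabled
 *	}
 *
 *	static struct preempt_ops my_preempt_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	preempt_notifier_init(&vcpu->pn, &my_preempt_ops);
 *	preempt_notifier_register(&vcpu->pn);	// affects current task only
 */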

#endif /* CONFIG_PREEMPT_NOTIFIERS */

#endif /* __LINUX_PREEMPT_H */