#ifndef _LINUX_LOCALLOCK_H
#define _LINUX_LOCALLOCK_H

#include <linux/percpu.h>
#include <linux/spinlock.h>

#ifdef CONFIG_PREEMPT_RT_BASE

#ifdef CONFIG_DEBUG_SPINLOCK
# define LL_WARN(cond)		WARN_ON(cond)
#else
# define LL_WARN(cond)		do { } while (0)
#endif

/*
 * per cpu lock based substitute for local_irq_*()
 */
struct local_irq_lock {
	spinlock_t		lock;
	struct task_struct	*owner;
	int			nestcnt;
	unsigned long		flags;
};

#define DEFINE_LOCAL_IRQ_LOCK(lvar)					\
	DEFINE_PER_CPU(struct local_irq_lock, lvar) = {			\
		.lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }

#define DECLARE_LOCAL_IRQ_LOCK(lvar)					\
	DECLARE_PER_CPU(struct local_irq_lock, lvar)

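/*
 * Usage sketch (illustrative only; the names 'ex_lock' and 'ex_data'
 * are hypothetical, not part of this API):
 *
 *	DEFINE_LOCAL_IRQ_LOCK(ex_lock);
 *	static DEFINE_PER_CPU(int, ex_data);
 *
 * A header shared by several translation units would instead carry
 * DECLARE_LOCAL_IRQ_LOCK(ex_lock) and leave the DEFINE_ to one .c file.
 */
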
#define local_irq_lock_init(lvar)					\
	do {								\
		int __cpu;						\
		for_each_possible_cpu(__cpu)				\
			spin_lock_init(&per_cpu(lvar, __cpu).lock);	\
	} while (0)

/*
 * spin_lock|trylock|unlock_local flavour that does not migrate disable,
 * used for __local_lock|trylock|unlock where get_local_var/put_local_var
 * already takes care of the migrate_disable/enable.
 * For !CONFIG_PREEMPT_RT_FULL these map to the normal spin_* calls.
 */
#ifdef CONFIG_PREEMPT_RT_FULL
# define spin_lock_local(lock)		rt_spin_lock__no_mg(lock)
# define spin_trylock_local(lock)	rt_spin_trylock__no_mg(lock)
# define spin_unlock_local(lock)	rt_spin_unlock__no_mg(lock)
#else
# define spin_lock_local(lock)		spin_lock(lock)
# define spin_trylock_local(lock)	spin_trylock(lock)
# define spin_unlock_local(lock)	spin_unlock(lock)
#endif

static inline void __local_lock(struct local_irq_lock *lv)
{
	if (lv->owner != current) {
		spin_lock_local(&lv->lock);
		LL_WARN(lv->owner);
		LL_WARN(lv->nestcnt);
		lv->owner = current;
	}
	lv->nestcnt++;
}

#define local_lock(lvar)					\
	do { __local_lock(&get_local_var(lvar)); } while (0)

#define local_lock_on(lvar, cpu)				\
	do { __local_lock(&per_cpu(lvar, cpu)); } while (0)

static inline int __local_trylock(struct local_irq_lock *lv)
{
	if (lv->owner != current && spin_trylock_local(&lv->lock)) {
		LL_WARN(lv->owner);
		LL_WARN(lv->nestcnt);
		lv->owner = current;
		lv->nestcnt = 1;
		return 1;
	}
	return 0;
}

#define local_trylock(lvar)						\
	({								\
		int __locked;						\
		__locked = __local_trylock(&get_local_var(lvar));	\
		if (!__locked)						\
			put_local_var(lvar);				\
		__locked;						\
	})

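/*
 * Usage sketch (illustrative only; 'ex_lock' is hypothetical):
 * local_trylock() returns non-zero only if the lock was taken, and it
 * already drops the migrate-disable again on failure, so the unlock
 * belongs inside the success branch:
 *
 *	if (local_trylock(ex_lock)) {
 *		... critical section ...
 *		local_unlock(ex_lock);
 *	}
 */
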
static inline void __local_unlock(struct local_irq_lock *lv)
{
	LL_WARN(lv->nestcnt == 0);
	LL_WARN(lv->owner != current);
	if (--lv->nestcnt)
		return;

	lv->owner = NULL;
	spin_unlock_local(&lv->lock);
}

#define local_unlock(lvar)					\
	do {							\
		__local_unlock(this_cpu_ptr(&lvar));		\
		put_local_var(lvar);				\
	} while (0)

#define local_unlock_on(lvar, cpu)				\
	do { __local_unlock(&per_cpu(lvar, cpu)); } while (0)

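/*
 * Usage sketch (illustrative only; 'ex_lock' and 'ex_data' are
 * hypothetical): a plain local_lock()/local_unlock() pair serializes
 * access to this CPU's data against other tasks on the same CPU:
 *
 *	local_lock(ex_lock);
 *	this_cpu_inc(ex_data);
 *	local_unlock(ex_lock);
 */
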
static inline void __local_lock_irq(struct local_irq_lock *lv)
{
	spin_lock_irqsave(&lv->lock, lv->flags);
	LL_WARN(lv->owner);
	LL_WARN(lv->nestcnt);
	lv->owner = current;
	lv->nestcnt = 1;
}

#define local_lock_irq(lvar)						\
	do { __local_lock_irq(&get_local_var(lvar)); } while (0)

#define local_lock_irq_on(lvar, cpu)					\
	do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)

static inline void __local_unlock_irq(struct local_irq_lock *lv)
{
	LL_WARN(!lv->nestcnt);
	LL_WARN(lv->owner != current);
	lv->owner = NULL;
	lv->nestcnt = 0;
	spin_unlock_irq(&lv->lock);
}

#define local_unlock_irq(lvar)						\
	do {								\
		__local_unlock_irq(this_cpu_ptr(&lvar));		\
		put_local_var(lvar);					\
	} while (0)

#define local_unlock_irq_on(lvar, cpu)					\
	do {								\
		__local_unlock_irq(&per_cpu(lvar, cpu));		\
	} while (0)

static inline int __local_lock_irqsave(struct local_irq_lock *lv)
{
	if (lv->owner != current) {
		__local_lock_irq(lv);
		return 0;
	} else {
		lv->nestcnt++;
		return 1;
	}
}

#define local_lock_irqsave(lvar, _flags)				\
	do {								\
		if (__local_lock_irqsave(&get_local_var(lvar)))		\
			put_local_var(lvar);				\
		_flags = __this_cpu_read(lvar.flags);			\
	} while (0)

#define local_lock_irqsave_on(lvar, _flags, cpu)			\
	do {								\
		__local_lock_irqsave(&per_cpu(lvar, cpu));		\
		_flags = per_cpu(lvar, cpu).flags;			\
	} while (0)

static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
					    unsigned long flags)
{
	LL_WARN(!lv->nestcnt);
	LL_WARN(lv->owner != current);
	if (--lv->nestcnt)
		return 0;

	lv->owner = NULL;
	spin_unlock_irqrestore(&lv->lock, lv->flags);
	return 1;
}

#define local_unlock_irqrestore(lvar, flags)				\
	do {								\
		if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \
			put_local_var(lvar);				\
	} while (0)

#define local_unlock_irqrestore_on(lvar, flags, cpu)			\
	do {								\
		__local_unlock_irqrestore(&per_cpu(lvar, cpu), flags);	\
	} while (0)

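/*
 * Usage sketch (illustrative only; 'ex_lock' is hypothetical): the
 * irqsave flavour mirrors local_irq_save()/local_irq_restore() and
 * hands the saved flags word back to the caller:
 *
 *	unsigned long flags;
 *
 *	local_lock_irqsave(ex_lock, flags);
 *	... critical section ...
 *	local_unlock_irqrestore(ex_lock, flags);
 */
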
#define local_spin_trylock_irq(lvar, lock)				\
	({								\
		int __locked;						\
		local_lock_irq(lvar);					\
		__locked = spin_trylock(lock);				\
		if (!__locked)						\
			local_unlock_irq(lvar);				\
		__locked;						\
	})

#define local_spin_lock_irq(lvar, lock)					\
	do {								\
		local_lock_irq(lvar);					\
		spin_lock(lock);					\
	} while (0)

#define local_spin_unlock_irq(lvar, lock)				\
	do {								\
		spin_unlock(lock);					\
		local_unlock_irq(lvar);					\
	} while (0)

#define local_spin_lock_irqsave(lvar, lock, flags)			\
	do {								\
		local_lock_irqsave(lvar, flags);			\
		spin_lock(lock);					\
	} while (0)

#define local_spin_unlock_irqrestore(lvar, lock, flags)			\
	do {								\
		spin_unlock(lock);					\
		local_unlock_irqrestore(lvar, flags);			\
	} while (0)

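/*
 * Usage sketch (illustrative only; 'ex_lock', 'ex_spin' and 'flags'
 * are hypothetical): the local_spin_* helpers take a regular spinlock
 * under the protection of a local lock, so lock and local lock are
 * always acquired and released in the same order:
 *
 *	local_spin_lock_irqsave(ex_lock, &ex_spin, flags);
 *	... critical section ...
 *	local_spin_unlock_irqrestore(ex_lock, &ex_spin, flags);
 */
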
#define get_locked_var(lvar, var)					\
	(*({								\
		local_lock(lvar);					\
		this_cpu_ptr(&var);					\
	}))

#define put_locked_var(lvar, var)	local_unlock(lvar)

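/*
 * Usage sketch (illustrative only; 'ex_lock', 'ex_var' and
 * 'struct ex_struct' are hypothetical): get_locked_var() evaluates to
 * this CPU's instance of the variable with the local lock already held:
 *
 *	struct ex_struct *p = &get_locked_var(ex_lock, ex_var);
 *	... use p ...
 *	put_locked_var(ex_lock, ex_var);
 */
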
#define local_lock_cpu(lvar)						\
	({								\
		local_lock(lvar);					\
		smp_processor_id();					\
	})

#define local_unlock_cpu(lvar)			local_unlock(lvar)

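/*
 * Usage sketch (illustrative only; 'ex_lock' is hypothetical):
 * local_lock_cpu() returns the id of the CPU it locked on, usable
 * until the matching local_unlock_cpu():
 *
 *	int cpu = local_lock_cpu(ex_lock);
 *	... per-cpu work on 'cpu' ...
 *	local_unlock_cpu(ex_lock);
 */
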
#else /* PREEMPT_RT_BASE */

#define DEFINE_LOCAL_IRQ_LOCK(lvar)		__typeof__(const int) lvar
#define DECLARE_LOCAL_IRQ_LOCK(lvar)		extern __typeof__(const int) lvar

static inline void local_irq_lock_init(int lvar) { }

#define local_lock(lvar)			preempt_disable()
#define local_unlock(lvar)			preempt_enable()
#define local_lock_irq(lvar)			local_irq_disable()
#define local_unlock_irq(lvar)			local_irq_enable()
#define local_lock_irqsave(lvar, flags)		local_irq_save(flags)
#define local_unlock_irqrestore(lvar, flags)	local_irq_restore(flags)

#define local_spin_trylock_irq(lvar, lock)	spin_trylock_irq(lock)
#define local_spin_lock_irq(lvar, lock)		spin_lock_irq(lock)
#define local_spin_unlock_irq(lvar, lock)	spin_unlock_irq(lock)
#define local_spin_lock_irqsave(lvar, lock, flags)	\
	spin_lock_irqsave(lock, flags)
#define local_spin_unlock_irqrestore(lvar, lock, flags)	\
	spin_unlock_irqrestore(lock, flags)

#define get_locked_var(lvar, var)		get_cpu_var(var)
#define put_locked_var(lvar, var)		put_cpu_var(var)

#define local_lock_cpu(lvar)			get_cpu()
#define local_unlock_cpu(lvar)			put_cpu()

#endif /* PREEMPT_RT_BASE */

#endif /* _LINUX_LOCALLOCK_H */