1 #ifndef __LINUX_SPINLOCK_RT_H
2 #define __LINUX_SPINLOCK_RT_H
4 #ifndef __LINUX_SPINLOCK_H
5 #error Do not include directly. Use spinlock.h
11 __rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
/*
 * spin_lock_init() - runtime initialization of an RT "spinlock".
 *
 * The static lock_class_key gives every invocation site its own lockdep
 * class; #slock stringifies the lock expression for lockdep reporting.
 * The do/while(0) wrapper is required: the body is multiple statements
 * and must behave as one statement after `if (cond)` etc.
 */
#define spin_lock_init(slock)				\
do {							\
	static struct lock_class_key __key;		\
							\
	rt_mutex_init(&(slock)->lock);			\
	__rt_spin_lock_init(slock, #slock, &__key);	\
} while (0)
/* Core lock/unlock primitives backed by an rt_mutex (sleeping lock on RT). */
extern void __lockfunc rt_spin_lock(spinlock_t *lock);
extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
/* Trylock variants — presumably nonzero on success, matching the generic
 * spin_trylock() convention (see spin_trylock below); confirm in the .c. */
extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
/* Backend for atomic_dec_and_lock() on RT (aliased at the bottom of file). */
extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
/*
 * lockdep-less calls, for derived types like rwlock
 * (for trylock they can use rt_mutex_trylock() directly):
 */
/* Raw entry points operating on the underlying rt_mutex, bypassing lockdep. */
extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
/*
 * spin_lock() on RT may sleep (it is an rt_mutex underneath).  Migration
 * is disabled for the duration of the critical section so that per-CPU
 * assumptions made by the caller stay valid; spin_unlock() re-enables it.
 */
#define spin_lock(lock)				\
	do {					\
		migrate_disable();		\
		rt_spin_lock(lock);		\
	} while (0)
/*
 * BH variant: softirq processing is pushed away first, then the lock is
 * taken like plain spin_lock().  Undone in reverse by spin_unlock_bh().
 */
#define spin_lock_bh(lock)			\
	do {					\
		local_bh_disable();		\
		migrate_disable();		\
		rt_spin_lock(lock);		\
	} while (0)
/* On RT the irq variant does not hard-disable interrupts: plain lock. */
#define spin_lock_irq(lock) spin_lock(lock)

/* Sparse annotation wrapper: __cond_lock() tells sparse the lock may be
 * acquired when the trylock succeeds. */
#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
/*
 * spin_trylock() - returns nonzero when the lock was acquired.
 *
 * Uses a GCC statement expression so the macro yields a value.  Migration
 * is disabled up front (as spin_lock() would) and re-enabled again on
 * failure, so a successful trylock leaves the same state as spin_lock().
 */
#define spin_trylock(lock)			\
({						\
	int __locked;				\
	migrate_disable();			\
	__locked = spin_do_trylock(lock);	\
	if (!__locked)				\
		migrate_enable();		\
	__locked;				\
})
/*
 * Nested-lock variants.  With lockdep enabled the subclass is forwarded
 * to rt_spin_lock_nested(); without lockdep the subclass is meaningless
 * and the variants collapse onto the plain lock operations.
 */
#ifdef CONFIG_LOCKDEP
# define spin_lock_nested(lock, subclass)		\
	do {						\
		migrate_disable();			\
		rt_spin_lock_nested(lock, subclass);	\
	} while (0)

# define spin_lock_bh_nested(lock, subclass)		\
	do {						\
		local_bh_disable();			\
		migrate_disable();			\
		rt_spin_lock_nested(lock, subclass);	\
	} while (0)

/* irqs stay enabled on RT; flags is zeroed only to satisfy callers that
 * later pass it to spin_unlock_irqrestore(). */
# define spin_lock_irqsave_nested(lock, flags, subclass) \
	do {						 \
		typecheck(unsigned long, flags);	 \
		flags = 0;				 \
		migrate_disable();			 \
		rt_spin_lock_nested(lock, subclass);	 \
	} while (0)
#else
# define spin_lock_nested(lock, subclass)	spin_lock(lock)
# define spin_lock_bh_nested(lock, subclass)	spin_lock_bh(lock)

# define spin_lock_irqsave_nested(lock, flags, subclass) \
	do {						 \
		typecheck(unsigned long, flags);	 \
		flags = 0;				 \
		spin_lock(lock);			 \
	} while (0)
#endif
/*
 * On RT interrupts are left enabled; @flags carries no state and is set
 * to 0 purely so the caller's spin_unlock_irqrestore(lock, flags) pairs
 * up cleanly.  typecheck() keeps the argument honest.
 */
#define spin_lock_irqsave(lock, flags)		\
	do {					\
		typecheck(unsigned long, flags);\
		flags = 0;			\
		spin_lock(lock);		\
	} while (0)
106 static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
108 unsigned long flags = 0;
109 #ifdef CONFIG_TRACE_IRQFLAGS
110 flags = rt_spin_lock_trace_flags(lock);
112 spin_lock(lock); /* lock_local */
/* FIXME: we need rt_spin_lock_nest_lock */
/* Until then: ignore the nest_lock argument and use lockdep subclass 0. */
#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
/*
 * Release the lock, then re-enable migration — mirror image of
 * spin_lock() above; the order keeps migration pinned for the whole
 * critical section.
 */
#define spin_unlock(lock)			\
	do {					\
		rt_spin_unlock(lock);		\
		migrate_enable();		\
	} while (0)
/*
 * Undo spin_lock_bh() in exact reverse order: drop the lock, re-enable
 * migration, then allow softirq processing again.
 */
#define spin_unlock_bh(lock)			\
	do {					\
		rt_spin_unlock(lock);		\
		migrate_enable();		\
		local_bh_enable();		\
	} while (0)
/* irqs were never hard-disabled on RT, so this is a plain unlock. */
#define spin_unlock_irq(lock) spin_unlock(lock)
/*
 * @flags carries no real irq state on RT (spin_lock_irqsave() stored 0);
 * it is typechecked and then deliberately ignored.
 */
#define spin_unlock_irqrestore(lock, flags)	\
	do {					\
		typecheck(unsigned long, flags);\
		(void) flags;			\
		spin_unlock(lock);		\
	} while (0)
/* Remaining trylock variants; the BH form carries the sparse
 * __cond_lock() annotation like spin_do_trylock() above. */
#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock))
#define spin_trylock_irq(lock) spin_trylock(lock)
/* Delegates flag handling to the rt_spin_* backend (note: &flags). */
#define spin_trylock_irqsave(lock, flags) \
	rt_spin_trylock_irqsave(lock, &(flags))
/* Wait (possibly sleeping, on RT) until @lock is observed unlocked. */
#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
/*
 * Contention query: only meaningful when the lock carries a break_lock
 * field (CONFIG_GENERIC_LOCKBREAK); otherwise report "not contended"
 * while still evaluating @lock once to keep -Wunused quiet.
 */
#ifdef CONFIG_GENERIC_LOCKBREAK
# define spin_is_contended(lock)	((lock)->break_lock)
#else
# define spin_is_contended(lock)	(((void)(lock), 0))
#endif
156 static inline int spin_can_lock(spinlock_t *lock)
158 return !rt_mutex_is_locked(&lock->lock);
161 static inline int spin_is_locked(spinlock_t *lock)
163 return rt_mutex_is_locked(&lock->lock);
166 static inline void assert_spin_locked(spinlock_t *lock)
168 BUG_ON(!spin_is_locked(lock));
/* On RT, route atomic_dec_and_lock() to the rt_mutex-based backend
 * declared near the top of this header. */
#define atomic_dec_and_lock(atomic, lock) \
	atomic_dec_and_spin_lock(atomic, lock)