[kvmfornfv.git] kernel/include/linux/spinlock_rt.h (from the raw update to linux-4.4.6-rt14)
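/*
 * PREEMPT_RT substitution of the spinlock API: here spinlock_t wraps an
 * rt_mutex, so "spinlocks" become priority-inheriting sleeping locks and
 * must not be taken from hard interrupt context.  This header is pulled
 * in via linux/spinlock.h, never directly.
 */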
#ifndef __LINUX_SPINLOCK_RT_H
#define __LINUX_SPINLOCK_RT_H

#ifndef __LINUX_SPINLOCK_H
#error Do not include directly. Use spinlock.h
#endif

#include <linux/bug.h>

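/*
 * Initialization: spin_lock_init() sets up the embedded rtmutex, while
 * __rt_spin_lock_init() registers the lock's name and lockdep class key.
 */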
extern void
__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);

#define spin_lock_init(slock)                           \
do {                                                    \
        static struct lock_class_key __key;             \
                                                        \
        rt_mutex_init(&(slock)->lock);                  \
        __rt_spin_lock_init(slock, #slock, &__key);     \
} while (0)

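/*
 * "__no_mg" = no migrate disable: these lock/unlock without the
 * migrate_disable()/migrate_enable() bookkeeping that the regular
 * entry points perform.
 */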
void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock);
void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock);
int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock);

extern void __lockfunc rt_spin_lock(spinlock_t *lock);
extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);

/*
 * lockdep-less calls, for derived types like rwlock
 * (for trylock they can use rt_mutex_trylock() directly).
 */
extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);

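/*
 * Map the generic spinlock API onto the rtmutex-based primitives above.
 */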
#define spin_lock(lock)                 rt_spin_lock(lock)

#define spin_lock_bh(lock)                      \
        do {                                    \
                local_bh_disable();             \
                rt_spin_lock(lock);             \
        } while (0)

#define spin_lock_irq(lock)             spin_lock(lock)

#define spin_do_trylock(lock)           __cond_lock(lock, rt_spin_trylock(lock))

#define spin_trylock(lock)                      \
({                                              \
        int __locked;                           \
        __locked = spin_do_trylock(lock);       \
        __locked;                               \
})

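/*
 * With lockdep enabled the _nested variants feed the subclass to
 * rt_spin_lock_nested(); without lockdep they collapse into the plain
 * lock operations.
 */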
#ifdef CONFIG_LOCKDEP
# define spin_lock_nested(lock, subclass)               \
        do {                                            \
                rt_spin_lock_nested(lock, subclass);    \
        } while (0)

# define spin_lock_bh_nested(lock, subclass)            \
        do {                                            \
                local_bh_disable();                     \
                rt_spin_lock_nested(lock, subclass);    \
        } while (0)

# define spin_lock_irqsave_nested(lock, flags, subclass) \
        do {                                             \
                typecheck(unsigned long, flags);         \
                flags = 0;                               \
                rt_spin_lock_nested(lock, subclass);     \
        } while (0)
#else
# define spin_lock_nested(lock, subclass)       spin_lock(lock)
# define spin_lock_bh_nested(lock, subclass)    spin_lock_bh(lock)

# define spin_lock_irqsave_nested(lock, flags, subclass) \
        do {                                             \
                typecheck(unsigned long, flags);         \
                flags = 0;                               \
                spin_lock(lock);                         \
        } while (0)
#endif

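/*
 * No hardware interrupts are disabled here: an rtmutex-based "spinlock"
 * may sleep, so it is never taken from hard interrupt context.  flags is
 * zeroed only to keep the generic API (and typecheck()) happy, and
 * spin_unlock_irqrestore() below ignores it.
 */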
#define spin_lock_irqsave(lock, flags)                   \
        do {                                             \
                typecheck(unsigned long, flags);         \
                flags = 0;                               \
                spin_lock(lock);                         \
        } while (0)

static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
{
        unsigned long flags = 0;
#ifdef CONFIG_TRACE_IRQFLAGS
        flags = rt_spin_lock_trace_flags(lock);
#else
        spin_lock(lock); /* lock_local */
#endif
        return flags;
}

/* FIXME: we need rt_spin_lock_nest_lock */
#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)

#define spin_unlock(lock)                       rt_spin_unlock(lock)

#define spin_unlock_bh(lock)                            \
        do {                                            \
                rt_spin_unlock(lock);                   \
                local_bh_enable();                      \
        } while (0)

#define spin_unlock_irq(lock)           spin_unlock(lock)

#define spin_unlock_irqrestore(lock, flags)             \
        do {                                            \
                typecheck(unsigned long, flags);        \
                (void) flags;                           \
                spin_unlock(lock);                      \
        } while (0)

#define spin_trylock_bh(lock)   __cond_lock(lock, rt_spin_trylock_bh(lock))
#define spin_trylock_irq(lock)  spin_trylock(lock)

#define spin_trylock_irqsave(lock, flags)       \
        rt_spin_trylock_irqsave(lock, &(flags))

#define spin_unlock_wait(lock)          rt_spin_unlock_wait(lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
# define spin_is_contended(lock)        ((lock)->break_lock)
#else
# define spin_is_contended(lock)        (((void)(lock), 0))
#endif

static inline int spin_can_lock(spinlock_t *lock)
{
        return !rt_mutex_is_locked(&lock->lock);
}

static inline int spin_is_locked(spinlock_t *lock)
{
        return rt_mutex_is_locked(&lock->lock);
}

static inline void assert_spin_locked(spinlock_t *lock)
{
        BUG_ON(!spin_is_locked(lock));
}

#define atomic_dec_and_lock(atomic, lock) \
        atomic_dec_and_spin_lock(atomic, lock)

#endif
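
A minimal caller sketch (the demo_* names are illustrative, not part of the header): code written against the generic spinlock API builds unchanged on top of these definitions, but on PREEMPT_RT the lock may sleep, so it must never be taken from hard interrupt context.

#include <linux/spinlock.h>

static spinlock_t demo_lock;
static unsigned int demo_count;

static void demo_setup(void)
{
        spin_lock_init(&demo_lock);     /* rt_mutex_init() + lockdep key */
}

static void demo_bump(void)
{
        unsigned long flags;

        /* On RT this may sleep; flags stays 0 and is ignored on restore. */
        spin_lock_irqsave(&demo_lock, flags);
        demo_count++;
        spin_unlock_irqrestore(&demo_lock, flags);
}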