/*
 * kvmfornfv.git: kernel/include/linux/spinlock_rt.h
 * Upgrade to 4.4.50-rt62
 */
1 #ifndef __LINUX_SPINLOCK_RT_H
2 #define __LINUX_SPINLOCK_RT_H
3
4 #ifndef __LINUX_SPINLOCK_H
5 #error Do not include directly. Use spinlock.h
6 #endif
7
8 #include <linux/bug.h>
9
/*
 * Register an RT spinlock with the lock-debugging machinery.
 * @name is the stringified lock expression (see spin_lock_init()),
 * @key is the static lockdep class key for the init site.
 */
extern void
__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);

/*
 * RT replacement for spin_lock_init(): a spinlock_t wraps an rt_mutex,
 * so initialize that first, then hand the lock to __rt_spin_lock_init().
 * The static __key gives each init site its own lockdep class.
 */
#define spin_lock_init(slock)				\
do {							\
	static struct lock_class_key __key;		\
							\
	rt_mutex_init(&(slock)->lock);			\
	__rt_spin_lock_init(slock, #slock, &__key);	\
} while (0)
20
/*
 * Core RT spinlock entry points.  On PREEMPT_RT a spinlock_t is backed
 * by an rt_mutex, so these are real function calls (and may sleep)
 * rather than the mainline spinning implementations.
 *
 * NOTE(review): the __no_mg suffix presumably means "no migrate
 * disable/enable handling" -- confirm against kernel/locking/rtmutex.c
 * in this tree.
 */
void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock);
void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock);
int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock);

extern void __lockfunc rt_spin_lock(spinlock_t *lock);
extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
extern int __lockfunc rt_spin_unlock_no_deboost(spinlock_t *lock);
extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
35
/*
 * lockdep-less calls, for derived types like rwlock
 * (for trylock they can use rt_mutex_trylock() directly).
 * These operate on the underlying rt_mutex and bypass the lockdep
 * annotations done by the spinlock_t variants above.
 */
extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
44
/* On RT, taking a spinlock may sleep (rt_mutex underneath). */
#define spin_lock(lock)			rt_spin_lock(lock)

/* BH variant: softirq processing is disabled across the lock. */
#define spin_lock_bh(lock)			\
	do {					\
		local_bh_disable();		\
		rt_spin_lock(lock);		\
	} while (0)

/*
 * On RT spin_lock_irq() does NOT disable hardware interrupts: a
 * sleeping lock cannot be taken with irqs off, so it is plain
 * spin_lock().
 */
#define spin_lock_irq(lock)		spin_lock(lock)

/* __cond_lock() keeps sparse's context tracking informed. */
#define spin_do_trylock(lock)		__cond_lock(lock, rt_spin_trylock(lock))

/* Returns nonzero if the lock was acquired. */
#define spin_trylock(lock)			\
({						\
	int __locked;				\
	__locked = spin_do_trylock(lock);	\
	__locked;				\
})
63
/*
 * Nested-lock variants.  With lockdep enabled they feed the subclass
 * into rt_spin_lock_nested(); without lockdep the subclass is ignored
 * and they collapse onto the plain lock operations.
 */
#ifdef CONFIG_LOCKDEP
# define spin_lock_nested(lock, subclass)		\
	do {						\
		rt_spin_lock_nested(lock, subclass);	\
	} while (0)

/* was "#define": use "# define" like the other conditional defines */
# define spin_lock_bh_nested(lock, subclass)		\
	do {						\
		local_bh_disable();			\
		rt_spin_lock_nested(lock, subclass);	\
	} while (0)

/*
 * IRQs are not actually disabled on RT; @flags is forced to 0 so the
 * matching spin_unlock_irqrestore() consumes a defined value.
 */
# define spin_lock_irqsave_nested(lock, flags, subclass) \
	do {						 \
		typecheck(unsigned long, flags);	 \
		flags = 0;				 \
		rt_spin_lock_nested(lock, subclass);	 \
	} while (0)
#else
# define spin_lock_nested(lock, subclass)	spin_lock(lock)
# define spin_lock_bh_nested(lock, subclass)	spin_lock_bh(lock)

# define spin_lock_irqsave_nested(lock, flags, subclass) \
	do {						 \
		typecheck(unsigned long, flags);	 \
		flags = 0;				 \
		spin_lock(lock);			 \
	} while (0)
#endif
93
/*
 * On RT interrupts stay enabled; @flags carries no real state and is
 * zeroed only so callers' irqsave/irqrestore pairing stays well defined.
 */
#define spin_lock_irqsave(lock, flags)			 \
	do {						 \
		typecheck(unsigned long, flags);	 \
		flags = 0;				 \
		spin_lock(lock);			 \
	} while (0)
100
101 static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
102 {
103         unsigned long flags = 0;
104 #ifdef CONFIG_TRACE_IRQFLAGS
105         flags = rt_spin_lock_trace_flags(lock);
106 #else
107         spin_lock(lock); /* lock_local */
108 #endif
109         return flags;
110 }
111
/* FIXME: we need rt_spin_lock_nest_lock */
#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)

#define spin_unlock(lock)			rt_spin_unlock(lock)
/*
 * Unlock without priority deboosting; returns whatever
 * rt_spin_unlock_no_deboost() reports.
 */
#define spin_unlock_no_deboost(lock)		rt_spin_unlock_no_deboost(lock)

/* Release the lock, then re-enable softirq processing. */
#define spin_unlock_bh(lock)				\
	do {						\
		rt_spin_unlock(lock);			\
		local_bh_enable();			\
	} while (0)

/* IRQs were never disabled on RT, so this is a plain unlock. */
#define spin_unlock_irq(lock)		spin_unlock(lock)

/* @flags is the dummy value produced by spin_lock_irqsave(). */
#define spin_unlock_irqrestore(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		(void) flags;				\
		spin_unlock(lock);			\
	} while (0)

#define spin_trylock_bh(lock)	__cond_lock(lock, rt_spin_trylock_bh(lock))
#define spin_trylock_irq(lock)	spin_trylock(lock)

/* Writes the (pseudo) flags through a pointer on success. */
#define spin_trylock_irqsave(lock, flags)	\
	rt_spin_trylock_irqsave(lock, &(flags))

#define spin_unlock_wait(lock)		rt_spin_unlock_wait(lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
# define spin_is_contended(lock)	((lock)->break_lock)
#else
/* No break_lock field available: report "not contended". */
# define spin_is_contended(lock)	(((void)(lock), 0))
#endif
146
147 static inline int spin_can_lock(spinlock_t *lock)
148 {
149         return !rt_mutex_is_locked(&lock->lock);
150 }
151
152 static inline int spin_is_locked(spinlock_t *lock)
153 {
154         return rt_mutex_is_locked(&lock->lock);
155 }
156
157 static inline void assert_spin_locked(spinlock_t *lock)
158 {
159         BUG_ON(!spin_is_locked(lock));
160 }
161
/*
 * RT mapping of atomic_dec_and_lock() onto the out-of-line
 * atomic_dec_and_spin_lock() helper declared above.  NOTE(review):
 * presumably keeps the usual contract (returns nonzero, with the lock
 * held, iff the counter dropped to zero) -- confirm in lib/ of this
 * tree.
 */
#define atomic_dec_and_lock(atomic, lock) \
	atomic_dec_and_spin_lock(atomic, lock)

#endif