Add the RT Linux 4.1.3-rt3 kernel as base
[kvmfornfv.git] / kernel / include / linux / spinlock_rt.h
1 #ifndef __LINUX_SPINLOCK_RT_H
2 #define __LINUX_SPINLOCK_RT_H
3
4 #ifndef __LINUX_SPINLOCK_H
5 #error Do not include directly. Use spinlock.h
6 #endif
7
8 #include <linux/bug.h>
9
/*
 * On PREEMPT_RT a spinlock_t embeds an rtmutex; this hooks the lock up
 * to lockdep with the given name and class key after the rtmutex has
 * been initialized.
 */
extern void
__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);

/*
 * Initialize an RT spinlock: set up the embedded rtmutex, then register
 * the lock with a per-call-site static lockdep class key under the
 * stringified variable name.
 */
#define spin_lock_init(slock)                           \
do {                                                    \
        static struct lock_class_key __key;             \
                                                        \
        rt_mutex_init(&(slock)->lock);                  \
        __rt_spin_lock_init(slock, #slock, &__key);     \
} while (0)
20
/* Lock/unlock primitives backing the spin_*() macros defined below. */
extern void __lockfunc rt_spin_lock(spinlock_t *lock);
extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
/*
 * Trylock variants: non-zero result means the lock was acquired
 * (see how spin_trylock() below tests the return value).
 */
extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
30
/*
 * lockdep-less calls, for derived types like rwlock
 * (for trylock they can use rt_mutex_trylock() directly).
 */
extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
38
/*
 * Acquire an RT spinlock: pin the task to its current CPU via
 * migrate_disable(), then take the rtmutex-backed lock.
 * spin_unlock() undoes both in reverse order.
 */
#define spin_lock(lock)                         \
        do {                                    \
                migrate_disable();              \
                rt_spin_lock(lock);             \
        } while (0)
44
/* As spin_lock(), but additionally disables bottom halves first. */
#define spin_lock_bh(lock)                      \
        do {                                    \
                local_bh_disable();             \
                migrate_disable();              \
                rt_spin_lock(lock);             \
        } while (0)

/*
 * On RT the irq variant does not disable interrupts; it maps
 * straight to spin_lock().
 */
#define spin_lock_irq(lock)             spin_lock(lock)

/* Trylock core; __cond_lock() is the sparse acquire annotation wrapper. */
#define spin_do_trylock(lock)           __cond_lock(lock, rt_spin_trylock(lock))
55
/*
 * Try to acquire the lock.  Migration is disabled up front and
 * re-enabled on failure, so on success the caller holds the lock with
 * migration disabled — matching the spin_lock()/spin_unlock() pairing.
 * Evaluates to non-zero iff the lock was taken.
 */
#define spin_trylock(lock)                      \
({                                              \
        int __locked;                           \
        migrate_disable();                      \
        __locked = spin_do_trylock(lock);       \
        if (!__locked)                          \
                migrate_enable();               \
        __locked;                               \
})
65
#ifdef CONFIG_LOCKDEP
/*
 * Nested-lock variants for lockdep: identical to the plain versions,
 * but forward the lockdep subclass to rt_spin_lock_nested().
 */
# define spin_lock_nested(lock, subclass)               \
        do {                                            \
                migrate_disable();                      \
                rt_spin_lock_nested(lock, subclass);    \
        } while (0)

# define spin_lock_bh_nested(lock, subclass)            \
        do {                                            \
                local_bh_disable();                     \
                migrate_disable();                      \
                rt_spin_lock_nested(lock, subclass);    \
        } while (0)

/*
 * On RT interrupts stay enabled; flags is only type-checked and
 * zeroed so spin_unlock_irqrestore() has a defined value to consume.
 */
# define spin_lock_irqsave_nested(lock, flags, subclass) \
        do {                                             \
                typecheck(unsigned long, flags);         \
                flags = 0;                               \
                migrate_disable();                       \
                rt_spin_lock_nested(lock, subclass);     \
        } while (0)
#else
/* Without lockdep the subclass is meaningless; use the plain locks. */
# define spin_lock_nested(lock, subclass)       spin_lock(lock)
# define spin_lock_bh_nested(lock, subclass)    spin_lock_bh(lock)

# define spin_lock_irqsave_nested(lock, flags, subclass) \
        do {                                             \
                typecheck(unsigned long, flags);         \
                flags = 0;                               \
                spin_lock(lock);                         \
        } while (0)
#endif
98
/*
 * On RT spin_lock_irqsave() does NOT disable interrupts; flags is
 * type-checked and set to 0 purely so the companion
 * spin_unlock_irqrestore() has a defined value to consume.
 */
#define spin_lock_irqsave(lock, flags)                   \
        do {                                             \
                typecheck(unsigned long, flags);         \
                flags = 0;                               \
                spin_lock(lock);                         \
        } while (0)
105
106 static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
107 {
108         unsigned long flags = 0;
109 #ifdef CONFIG_TRACE_IRQFLAGS
110         flags = rt_spin_lock_trace_flags(lock);
111 #else
112         spin_lock(lock); /* lock_local */
113 #endif
114         return flags;
115 }
116
/* FIXME: we need rt_spin_lock_nest_lock */
/* The nest_lock argument is ignored until a real implementation exists. */
#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
119
/*
 * Release an RT spinlock: drop the rtmutex-backed lock, then re-enable
 * migration — the exact reverse of spin_lock().
 */
#define spin_unlock(lock)                               \
        do {                                            \
                rt_spin_unlock(lock);                   \
                migrate_enable();                       \
        } while (0)

/* Reverse of spin_lock_bh(): unlock, re-enable migration, then BHs. */
#define spin_unlock_bh(lock)                            \
        do {                                            \
                rt_spin_unlock(lock);                   \
                migrate_enable();                       \
                local_bh_enable();                      \
        } while (0)
132
#define spin_unlock_irq(lock)           spin_unlock(lock)

/*
 * flags was never used to disable interrupts on RT (see
 * spin_lock_irqsave()); type-check it and throw it away.
 */
#define spin_unlock_irqrestore(lock, flags)             \
        do {                                            \
                typecheck(unsigned long, flags);        \
                (void) flags;                           \
                spin_unlock(lock);                      \
        } while (0)

#define spin_trylock_bh(lock)   __cond_lock(lock, rt_spin_trylock_bh(lock))
#define spin_trylock_irq(lock)  spin_trylock(lock)

/*
 * NOTE(review): unlike spin_trylock(), no migrate_disable() here —
 * presumably handled inside rt_spin_trylock_irqsave(); confirm against
 * its definition.
 */
#define spin_trylock_irqsave(lock, flags)       \
        rt_spin_trylock_irqsave(lock, &(flags))

#define spin_unlock_wait(lock)          rt_spin_unlock_wait(lock)
149
#ifdef CONFIG_GENERIC_LOCKBREAK
# define spin_is_contended(lock)        ((lock)->break_lock)
#else
/* No break_lock field available: report "not contended", evaluate lock once. */
# define spin_is_contended(lock)        (((void)(lock), 0))
#endif
155
156 static inline int spin_can_lock(spinlock_t *lock)
157 {
158         return !rt_mutex_is_locked(&lock->lock);
159 }
160
161 static inline int spin_is_locked(spinlock_t *lock)
162 {
163         return rt_mutex_is_locked(&lock->lock);
164 }
165
166 static inline void assert_spin_locked(spinlock_t *lock)
167 {
168         BUG_ON(!spin_is_locked(lock));
169 }
170
/*
 * On RT, atomic_dec_and_lock() maps to the sleeping-spinlock-aware
 * helper declared above.
 */
#define atomic_dec_and_lock(atomic, lock) \
        atomic_dec_and_spin_lock(atomic, lock)
173
174 #endif