Add the rt linux 4.1.3-rt3 as base
[kvmfornfv.git] / kernel / include / linux / locallock.h
1 #ifndef _LINUX_LOCALLOCK_H
2 #define _LINUX_LOCALLOCK_H
3
4 #include <linux/percpu.h>
5 #include <linux/spinlock.h>
6
7 #ifdef CONFIG_PREEMPT_RT_BASE
8
9 #ifdef CONFIG_DEBUG_SPINLOCK
10 # define LL_WARN(cond)  WARN_ON(cond)
11 #else
12 # define LL_WARN(cond)  do { } while (0)
13 #endif
14
15 /*
16  * per cpu lock based substitute for local_irq_*()
17  */
18 struct local_irq_lock {
19         spinlock_t              lock;    /* the per-CPU lock itself */
20         struct task_struct      *owner;  /* task currently holding the lock */
21         int                     nestcnt; /* recursion depth of the owner */
22         unsigned long           flags;   /* irq flags saved by *_irqsave */
23 };
24
/* Define/declare a per-CPU local_irq_lock; only the spinlock needs a
 * static initializer, the remaining fields start out zeroed. */
25 #define DEFINE_LOCAL_IRQ_LOCK(lvar)                                     \
26         DEFINE_PER_CPU(struct local_irq_lock, lvar) = {                 \
27                 .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
28
29 #define DECLARE_LOCAL_IRQ_LOCK(lvar)                                    \
30         DECLARE_PER_CPU(struct local_irq_lock, lvar)
31
/* Runtime (re)initialization of every CPU's instance of @lvar; needed in
 * addition to DEFINE_LOCAL_IRQ_LOCK() when spinlock debugging wants the
 * locks registered per CPU. */
32 #define local_irq_lock_init(lvar)                                       \
33         do {                                                            \
34                 int __cpu;                                              \
35                 for_each_possible_cpu(__cpu)                            \
36                         spin_lock_init(&per_cpu(lvar, __cpu).lock);     \
37         } while (0)
38
39 /*
40  * spin_lock|trylock|unlock_local flavour that does not migrate disable
41  * used for __local_lock|trylock|unlock where get_local_var/put_local_var
42  * already takes care of the migrate_disable/enable
43  * for !CONFIG_PREEMPT_RT_FULL map to the normal spin_* calls.
44  */
45 #ifdef CONFIG_PREEMPT_RT_FULL
46 # define spin_lock_local(lock)                  rt_spin_lock(lock)
47 # define spin_trylock_local(lock)               rt_spin_trylock(lock)
48 # define spin_unlock_local(lock)                rt_spin_unlock(lock)
49 #else
50 # define spin_lock_local(lock)                  spin_lock(lock)
51 # define spin_trylock_local(lock)               spin_trylock(lock)
52 # define spin_unlock_local(lock)                spin_unlock(lock)
53 #endif
54
/*
 * Acquire @lv for the current task. Recursion-aware: the spinlock is
 * only taken on the outermost acquisition; if current already owns the
 * lock, only the nesting count is bumped.
 */
55 static inline void __local_lock(struct local_irq_lock *lv)
56 {
57         if (lv->owner != current) {
58                 spin_lock_local(&lv->lock);
                /* Fresh acquisition: lock must not have had owner/nesting */
59                 LL_WARN(lv->owner);
60                 LL_WARN(lv->nestcnt);
61                 lv->owner = current;
62         }
63         lv->nestcnt++;
64 }
65
/* get_local_var() handles the migrate_disable() pairing (see put in
 * local_unlock()). */
66 #define local_lock(lvar)                                        \
67         do { __local_lock(&get_local_var(lvar)); } while (0)
68
/*
 * Try to acquire @lv for the current task. Returns 1 on success, 0 if
 * the lock could not be taken. Note: unlike __local_lock(), a recursive
 * attempt by the current owner fails (returns 0) instead of nesting.
 */
69 static inline int __local_trylock(struct local_irq_lock *lv)
70 {
71         if (lv->owner != current && spin_trylock_local(&lv->lock)) {
72                 LL_WARN(lv->owner);
73                 LL_WARN(lv->nestcnt);
74                 lv->owner = current;
75                 lv->nestcnt = 1;
76                 return 1;
77         }
78         return 0;
79 }
80
/* On failure the get_local_var() reference is dropped again, so the
 * caller only holds it while the lock is actually held. */
81 #define local_trylock(lvar)                                             \
82         ({                                                              \
83                 int __locked;                                           \
84                 __locked = __local_trylock(&get_local_var(lvar));       \
85                 if (!__locked)                                          \
86                         put_local_var(lvar);                            \
87                 __locked;                                               \
88         })
89
/*
 * Release one nesting level of @lv; the spinlock is only dropped when
 * the nesting count reaches zero.
 */
90 static inline void __local_unlock(struct local_irq_lock *lv)
91 {
92         LL_WARN(lv->nestcnt == 0);
93         LL_WARN(lv->owner != current);
94         if (--lv->nestcnt)
95                 return;
96
97         lv->owner = NULL;
98         spin_unlock_local(&lv->lock);
99 }
100
/* Pairs with local_lock(): put_local_var() undoes get_local_var(). */
101 #define local_unlock(lvar)                                      \
102         do {                                                    \
103                 __local_unlock(this_cpu_ptr(&lvar));            \
104                 put_local_var(lvar);                            \
105         } while (0)
106
/*
 * Acquire @lv with interrupts disabled; the saved irq flags are stashed
 * in lv->flags for the matching unlock. Outermost acquisition only:
 * nestcnt is set to 1, not incremented.
 */
107 static inline void __local_lock_irq(struct local_irq_lock *lv)
108 {
109         spin_lock_irqsave(&lv->lock, lv->flags);
110         LL_WARN(lv->owner);
111         LL_WARN(lv->nestcnt);
112         lv->owner = current;
113         lv->nestcnt = 1;
114 }
115
116 #define local_lock_irq(lvar)                                            \
117         do { __local_lock_irq(&get_local_var(lvar)); } while (0)
118
/* Cross-CPU variant: operates on @cpu's instance, no migrate_disable. */
119 #define local_lock_irq_on(lvar, cpu)                                    \
120         do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
121
/*
 * Release @lv and re-enable interrupts unconditionally (spin_unlock_irq,
 * not a flags restore). The nesting count is reset to zero outright, so
 * this drops the lock regardless of nesting depth.
 */
122 static inline void __local_unlock_irq(struct local_irq_lock *lv)
123 {
124         LL_WARN(!lv->nestcnt);
125         LL_WARN(lv->owner != current);
126         lv->owner = NULL;
127         lv->nestcnt = 0;
128         spin_unlock_irq(&lv->lock);
129 }
130
131 #define local_unlock_irq(lvar)                                          \
132         do {                                                            \
133                 __local_unlock_irq(this_cpu_ptr(&lvar));                \
134                 put_local_var(lvar);                                    \
135         } while (0)
136
/* Cross-CPU variant: no put_local_var, matching local_lock_irq_on(). */
137 #define local_unlock_irq_on(lvar, cpu)                                  \
138         do {                                                            \
139                 __local_unlock_irq(&per_cpu(lvar, cpu));                \
140         } while (0)
141
/*
 * Nesting-aware irqsave acquisition. Returns 0 when the lock was freshly
 * taken (irq flags saved into lv->flags), 1 when current already owned it
 * and only the nesting count was bumped — in which case the caller must
 * drop the extra get_local_var() reference (see local_lock_irqsave()).
 */
142 static inline int __local_lock_irqsave(struct local_irq_lock *lv)
143 {
144         if (lv->owner != current) {
145                 __local_lock_irq(lv);
146                 return 0;
147         } else {
148                 lv->nestcnt++;
149                 return 1;
150         }
151 }
152
/* The caller's _flags is filled from the flags saved at outermost lock
 * time, not from a fresh local_irq_save(). */
153 #define local_lock_irqsave(lvar, _flags)                                \
154         do {                                                            \
155                 if (__local_lock_irqsave(&get_local_var(lvar)))         \
156                         put_local_var(lvar);                            \
157                 _flags = __this_cpu_read(lvar.flags);                   \
158         } while (0)
159
/* Cross-CPU variant: the nested-return value is ignored since there is
 * no get_local_var() reference to drop. */
160 #define local_lock_irqsave_on(lvar, _flags, cpu)                        \
161         do {                                                            \
162                 __local_lock_irqsave(&per_cpu(lvar, cpu));              \
163                 _flags = per_cpu(lvar, cpu).flags;                      \
164         } while (0)
165
/*
 * Release one nesting level of @lv; on the final release the irq flags
 * saved in lv->flags at lock time are restored. Note that the @flags
 * argument is deliberately ignored — lv->flags is authoritative.
 * Returns 1 when the lock was actually dropped, 0 while still nested,
 * so the wrapper macro knows whether to drop the get_local_var() ref.
 */
166 static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
167                                             unsigned long flags)
168 {
169         LL_WARN(!lv->nestcnt);
170         LL_WARN(lv->owner != current);
171         if (--lv->nestcnt)
172                 return 0;
173
174         lv->owner = NULL;
175         spin_unlock_irqrestore(&lv->lock, lv->flags);
176         return 1;
177 }
178
179 #define local_unlock_irqrestore(lvar, flags)                            \
180         do {                                                            \
181                 if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \
182                         put_local_var(lvar);                            \
183         } while (0)
184
/* Cross-CPU variant, matching local_lock_irqsave_on(). */
185 #define local_unlock_irqrestore_on(lvar, flags, cpu)                    \
186         do {                                                            \
187                 __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags);  \
188         } while (0)
189
/*
 * Combinators that take the local lock (irq flavour) together with an
 * embedded spinlock @lock; lock/unlock ordering is: local lock outside,
 * spinlock inside.
 */
190 #define local_spin_trylock_irq(lvar, lock)                              \
191         ({                                                              \
192                 int __locked;                                           \
193                 local_lock_irq(lvar);                                   \
194                 __locked = spin_trylock(lock);                          \
195                 if (!__locked)                                          \
196                         local_unlock_irq(lvar);                         \
197                 __locked;                                               \
198         })
199
200 #define local_spin_lock_irq(lvar, lock)                                 \
201         do {                                                            \
202                 local_lock_irq(lvar);                                   \
203                 spin_lock(lock);                                        \
204         } while (0)
205
206 #define local_spin_unlock_irq(lvar, lock)                               \
207         do {                                                            \
208                 spin_unlock(lock);                                      \
209                 local_unlock_irq(lvar);                                 \
210         } while (0)
211
212 #define local_spin_lock_irqsave(lvar, lock, flags)                      \
213         do {                                                            \
214                 local_lock_irqsave(lvar, flags);                        \
215                 spin_lock(lock);                                        \
216         } while (0)
217
218 #define local_spin_unlock_irqrestore(lvar, lock, flags)                 \
219         do {                                                            \
220                 spin_unlock(lock);                                      \
221                 local_unlock_irqrestore(lvar, flags);                   \
222         } while (0)
223
224 #define get_locked_var(lvar, var)                                       \
225         (*({                                                            \
226                 local_lock(lvar);                                       \
227                 this_cpu_ptr(&var);                                     \
228         }))
229
230 #define put_locked_var(lvar, var)       local_unlock(lvar);
231
/*
 * Take the local lock and evaluate to the CPU id it was taken on; the
 * id stays meaningful because the lock holds off migration. Paired with
 * local_unlock_cpu().
 */
232 #define local_lock_cpu(lvar)                                            \
233         ({                                                              \
234                 local_lock(lvar);                                       \
235                 smp_processor_id();                                     \
236         })
237
238 #define local_unlock_cpu(lvar)                  local_unlock(lvar)
239
240 #else /* PREEMPT_RT_BASE */
241
/* On !RT the lock variable is a dummy const int — never touched, only
 * named — and the operations map to plain preempt/irq disabling. */
242 #define DEFINE_LOCAL_IRQ_LOCK(lvar)             __typeof__(const int) lvar
243 #define DECLARE_LOCAL_IRQ_LOCK(lvar)            extern __typeof__(const int) lvar
244
245 static inline void local_irq_lock_init(int lvar) { }
246
247 #define local_lock(lvar)                        preempt_disable()
248 #define local_unlock(lvar)                      preempt_enable()
249 #define local_lock_irq(lvar)                    local_irq_disable()
250 #define local_unlock_irq(lvar)                  local_irq_enable()
251 #define local_lock_irqsave(lvar, flags)         local_irq_save(flags)
252 #define local_unlock_irqrestore(lvar, flags)    local_irq_restore(flags)
253
/* The combinators collapse to the ordinary spinlock irq variants. */
254 #define local_spin_trylock_irq(lvar, lock)      spin_trylock_irq(lock)
255 #define local_spin_lock_irq(lvar, lock)         spin_lock_irq(lock)
256 #define local_spin_unlock_irq(lvar, lock)       spin_unlock_irq(lock)
257 #define local_spin_lock_irqsave(lvar, lock, flags)      \
258         spin_lock_irqsave(lock, flags)
259 #define local_spin_unlock_irqrestore(lvar, lock, flags) \
260         spin_unlock_irqrestore(lock, flags)
261
262 #define get_locked_var(lvar, var)               get_cpu_var(var)
263 #define put_locked_var(lvar, var)               put_cpu_var(var)
264
265 #define local_lock_cpu(lvar)                    get_cpu()
266 #define local_unlock_cpu(lvar)                  put_cpu()
267
268 #endif /* PREEMPT_RT_BASE */
269
270 #endif /* _LINUX_LOCALLOCK_H */