Upgrade to 4.4.50-rt62
kvmfornfv.git: kernel/include/linux/locallock.h
#ifndef _LINUX_LOCALLOCK_H
#define _LINUX_LOCALLOCK_H

#include <linux/percpu.h>
#include <linux/spinlock.h>

#ifdef CONFIG_PREEMPT_RT_BASE

#ifdef CONFIG_DEBUG_SPINLOCK
# define LL_WARN(cond)	WARN_ON(cond)
#else
# define LL_WARN(cond)	do { } while (0)
#endif

/*
 * Per-CPU lock based substitute for local_irq_*(): instead of disabling
 * interrupts, serialize against other users of the same per-CPU data
 * with a per-CPU spinlock.
 */
struct local_irq_lock {
	spinlock_t		lock;
	struct task_struct	*owner;		/* current owner, for recursion checks */
	int			nestcnt;	/* recursive acquisition depth */
	unsigned long		flags;		/* irq state saved by the _irq/_irqsave ops */
};

#define DEFINE_LOCAL_IRQ_LOCK(lvar)					\
	DEFINE_PER_CPU(struct local_irq_lock, lvar) = {			\
		.lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }

#define DECLARE_LOCAL_IRQ_LOCK(lvar)					\
	DECLARE_PER_CPU(struct local_irq_lock, lvar)

#define local_irq_lock_init(lvar)					\
	do {								\
		int __cpu;						\
		for_each_possible_cpu(__cpu)				\
			spin_lock_init(&per_cpu(lvar, __cpu).lock);	\
	} while (0)
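
/*
 * Usage sketch (illustrative only; "foo_lock", "foo_data" and "struct foo"
 * are made-up names, not part of this API): a subsystem defines the lock
 * next to the per-CPU data it protects and initializes it at init time.
 *
 *	static DEFINE_LOCAL_IRQ_LOCK(foo_lock);
 *	static DEFINE_PER_CPU(struct foo, foo_data);
 *
 *	static int __init foo_init(void)
 *	{
 *		local_irq_lock_init(foo_lock);
 *		return 0;
 *	}
 *
 * DECLARE_LOCAL_IRQ_LOCK(foo_lock) goes into a header when other
 * translation units take the same lock.
 */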

/*
 * spin_lock|trylock|unlock_local flavours that do not migrate-disable.
 * They are used by __local_lock|trylock|unlock, where the surrounding
 * get_local_var()/put_local_var() already takes care of
 * migrate_disable()/migrate_enable(). Without CONFIG_PREEMPT_RT_FULL
 * they map to the normal spin_* calls.
 */
#ifdef CONFIG_PREEMPT_RT_FULL
# define spin_lock_local(lock)			rt_spin_lock__no_mg(lock)
# define spin_trylock_local(lock)		rt_spin_trylock__no_mg(lock)
# define spin_unlock_local(lock)		rt_spin_unlock__no_mg(lock)
#else
# define spin_lock_local(lock)			spin_lock(lock)
# define spin_trylock_local(lock)		spin_trylock(lock)
# define spin_unlock_local(lock)		spin_unlock(lock)
#endif

static inline void __local_lock(struct local_irq_lock *lv)
{
	if (lv->owner != current) {
		spin_lock_local(&lv->lock);
		LL_WARN(lv->owner);
		LL_WARN(lv->nestcnt);
		lv->owner = current;
	}
	/* Recursive acquisition by the owner only bumps the nest count */
	lv->nestcnt++;
}

#define local_lock(lvar)					\
	do { __local_lock(&get_local_var(lvar)); } while (0)

#define local_lock_on(lvar, cpu)				\
	do { __local_lock(&per_cpu(lvar, cpu)); } while (0)

static inline int __local_trylock(struct local_irq_lock *lv)
{
	if (lv->owner != current && spin_trylock_local(&lv->lock)) {
		LL_WARN(lv->owner);
		LL_WARN(lv->nestcnt);
		lv->owner = current;
		lv->nestcnt = 1;
		return 1;
	}
	return 0;
}

#define local_trylock(lvar)						\
	({								\
		int __locked;						\
		__locked = __local_trylock(&get_local_var(lvar));	\
		if (!__locked)						\
			put_local_var(lvar);				\
		__locked;						\
	})
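
/*
 * Sketch of the trylock pattern (hypothetical "foo_lock"): on failure the
 * macro has already dropped the per-CPU reference, so only the success
 * path needs a matching local_unlock().
 *
 *	if (local_trylock(foo_lock)) {
 *		do_something_locked();
 *		local_unlock(foo_lock);
 *	}
 */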

static inline void __local_unlock(struct local_irq_lock *lv)
{
	LL_WARN(lv->nestcnt == 0);
	LL_WARN(lv->owner != current);
	if (--lv->nestcnt)
		return;

	lv->owner = NULL;
	spin_unlock_local(&lv->lock);
}

#define local_unlock(lvar)					\
	do {							\
		__local_unlock(this_cpu_ptr(&lvar));		\
		put_local_var(lvar);				\
	} while (0)

#define local_unlock_on(lvar, cpu)				\
	do { __local_unlock(&per_cpu(lvar, cpu)); } while (0)
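
/*
 * Basic lock/unlock sketch (hypothetical names, "count" is a made-up
 * member): local_lock() migrate-disables via get_local_var() and takes
 * the per-CPU lock, so the section stays preemptible but the data stays
 * consistent.
 *
 *	local_lock(foo_lock);
 *	this_cpu_ptr(&foo_data)->count++;
 *	local_unlock(foo_lock);
 */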

static inline void __local_lock_irq(struct local_irq_lock *lv)
{
	spin_lock_irqsave(&lv->lock, lv->flags);
	LL_WARN(lv->owner);
	LL_WARN(lv->nestcnt);
	lv->owner = current;
	lv->nestcnt = 1;
}

#define local_lock_irq(lvar)						\
	do { __local_lock_irq(&get_local_var(lvar)); } while (0)

#define local_lock_irq_on(lvar, cpu)					\
	do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)

static inline void __local_unlock_irq(struct local_irq_lock *lv)
{
	LL_WARN(!lv->nestcnt);
	LL_WARN(lv->owner != current);
	lv->owner = NULL;
	lv->nestcnt = 0;
	spin_unlock_irq(&lv->lock);
}

#define local_unlock_irq(lvar)						\
	do {								\
		__local_unlock_irq(this_cpu_ptr(&lvar));		\
		put_local_var(lvar);					\
	} while (0)

#define local_unlock_irq_on(lvar, cpu)					\
	do {								\
		__local_unlock_irq(&per_cpu(lvar, cpu));		\
	} while (0)

static inline int __local_lock_irqsave(struct local_irq_lock *lv)
{
	if (lv->owner != current) {
		__local_lock_irq(lv);
		return 0;
	} else {
		/*
		 * Recursive acquisition: irqs are already off and the
		 * flags were saved by the outermost lock. Return 1 so
		 * the caller can drop the extra per-CPU reference.
		 */
		lv->nestcnt++;
		return 1;
	}
}

#define local_lock_irqsave(lvar, _flags)				\
	do {								\
		if (__local_lock_irqsave(&get_local_var(lvar)))		\
			put_local_var(lvar);				\
		_flags = __this_cpu_read(lvar.flags);			\
	} while (0)

#define local_lock_irqsave_on(lvar, _flags, cpu)			\
	do {								\
		__local_lock_irqsave(&per_cpu(lvar, cpu));		\
		_flags = per_cpu(lvar, cpu).flags;			\
	} while (0)

static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
					    unsigned long flags)
{
	LL_WARN(!lv->nestcnt);
	LL_WARN(lv->owner != current);
	if (--lv->nestcnt)
		return 0;

	lv->owner = NULL;
	/*
	 * The irq state restored is the one stashed in lv->flags by
	 * __local_lock_irq(); the flags argument is not used here.
	 */
	spin_unlock_irqrestore(&lv->lock, lv->flags);
	return 1;
}

#define local_unlock_irqrestore(lvar, flags)				\
	do {								\
		if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \
			put_local_var(lvar);				\
	} while (0)

#define local_unlock_irqrestore_on(lvar, flags, cpu)			\
	do {								\
		__local_unlock_irqrestore(&per_cpu(lvar, cpu), flags);	\
	} while (0)
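
/*
 * Irqsave/irqrestore sketch (hypothetical names), mirroring the classic
 * local_irq_save()/local_irq_restore() pattern; flags is filled from the
 * per-CPU lock's saved state:
 *
 *	unsigned long flags;
 *
 *	local_lock_irqsave(foo_lock, flags);
 *	this_cpu_ptr(&foo_data)->count++;
 *	local_unlock_irqrestore(foo_lock, flags);
 */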

#define local_spin_trylock_irq(lvar, lock)				\
	({								\
		int __locked;						\
		local_lock_irq(lvar);					\
		__locked = spin_trylock(lock);				\
		if (!__locked)						\
			local_unlock_irq(lvar);				\
		__locked;						\
	})

#define local_spin_lock_irq(lvar, lock)					\
	do {								\
		local_lock_irq(lvar);					\
		spin_lock(lock);					\
	} while (0)

#define local_spin_unlock_irq(lvar, lock)				\
	do {								\
		spin_unlock(lock);					\
		local_unlock_irq(lvar);					\
	} while (0)

#define local_spin_lock_irqsave(lvar, lock, flags)			\
	do {								\
		local_lock_irqsave(lvar, flags);			\
		spin_lock(lock);					\
	} while (0)

#define local_spin_unlock_irqrestore(lvar, lock, flags)			\
	do {								\
		spin_unlock(lock);					\
		local_unlock_irqrestore(lvar, flags);			\
	} while (0)
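
/*
 * Combined form, sketched with hypothetical names ("foo_global" and
 * "foo_pending" are made up): take the local lock first, then a regular
 * spinlock nested inside it, e.g. a global list lock reached from
 * per-CPU context. Unlock happens in reverse order.
 *
 *	local_spin_lock_irq(foo_lock, &foo_global.lock);
 *	list_splice_init(this_cpu_ptr(&foo_pending), &foo_global.list);
 *	local_spin_unlock_irq(foo_lock, &foo_global.lock);
 */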

#define get_locked_var(lvar, var)					\
	(*({								\
		local_lock(lvar);					\
		this_cpu_ptr(&var);					\
	}))

#define put_locked_var(lvar, var)	local_unlock(lvar)

#define local_lock_cpu(lvar)						\
	({								\
		local_lock(lvar);					\
		smp_processor_id();					\
	})

#define local_unlock_cpu(lvar)			local_unlock(lvar)
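
/*
 * get/put_locked_var sketch (hypothetical names): a drop-in for
 * get_cpu_var()/put_cpu_var() that takes the associated local lock
 * instead of merely disabling preemption.
 *
 *	struct foo *f = &get_locked_var(foo_lock, foo_data);
 *
 *	f->count++;
 *	put_locked_var(foo_lock, foo_data);
 */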

#else /* PREEMPT_RT_BASE */

#define DEFINE_LOCAL_IRQ_LOCK(lvar)		__typeof__(const int) lvar
#define DECLARE_LOCAL_IRQ_LOCK(lvar)		extern __typeof__(const int) lvar

static inline void local_irq_lock_init(int lvar) { }

#define local_lock(lvar)			preempt_disable()
#define local_unlock(lvar)			preempt_enable()
#define local_lock_irq(lvar)			local_irq_disable()
#define local_unlock_irq(lvar)			local_irq_enable()
#define local_lock_irqsave(lvar, flags)		local_irq_save(flags)
#define local_unlock_irqrestore(lvar, flags)	local_irq_restore(flags)

#define local_spin_trylock_irq(lvar, lock)	spin_trylock_irq(lock)
#define local_spin_lock_irq(lvar, lock)		spin_lock_irq(lock)
#define local_spin_unlock_irq(lvar, lock)	spin_unlock_irq(lock)
#define local_spin_lock_irqsave(lvar, lock, flags)	\
	spin_lock_irqsave(lock, flags)
#define local_spin_unlock_irqrestore(lvar, lock, flags)	\
	spin_unlock_irqrestore(lock, flags)

#define get_locked_var(lvar, var)		get_cpu_var(var)
#define put_locked_var(lvar, var)		put_cpu_var(var)

#define local_lock_cpu(lvar)			get_cpu()
#define local_unlock_cpu(lvar)			put_cpu()

#endif

#endif