Add the rt linux 4.1.3-rt3 as base
kvmfornfv.git / kernel/arch/arm/include/asm/cmpxchg.h
#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        extern void __bad_xchg(volatile void *, int);
        unsigned long ret;
#ifdef swp_is_buggy
        unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
        unsigned int tmp;
#endif

        smp_mb();
        prefetchw((const void *)ptr);

        switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
        case 1:
                asm volatile("@ __xchg1\n"
                "1:     ldrexb  %0, [%3]\n"
                "       strexb  %1, %2, [%3]\n"
                "       teq     %1, #0\n"
                "       bne     1b"
                        : "=&r" (ret), "=&r" (tmp)
                        : "r" (x), "r" (ptr)
                        : "memory", "cc");
                break;
        case 4:
                asm volatile("@ __xchg4\n"
                "1:     ldrex   %0, [%3]\n"
                "       strex   %1, %2, [%3]\n"
                "       teq     %1, #0\n"
                "       bne     1b"
                        : "=&r" (ret), "=&r" (tmp)
                        : "r" (x), "r" (ptr)
                        : "memory", "cc");
                break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
        case 1:
                raw_local_irq_save(flags);
                ret = *(volatile unsigned char *)ptr;
                *(volatile unsigned char *)ptr = x;
                raw_local_irq_restore(flags);
                break;

        case 4:
                raw_local_irq_save(flags);
                ret = *(volatile unsigned long *)ptr;
                *(volatile unsigned long *)ptr = x;
                raw_local_irq_restore(flags);
                break;
#else
        case 1:
                asm volatile("@ __xchg1\n"
                "       swpb    %0, %1, [%2]"
                        : "=&r" (ret)
                        : "r" (x), "r" (ptr)
                        : "memory", "cc");
                break;
        case 4:
                asm volatile("@ __xchg4\n"
                "       swp     %0, %1, [%2]"
                        : "=&r" (ret)
                        : "r" (x), "r" (ptr)
                        : "memory", "cc");
                break;
#endif
        default:
                __bad_xchg(ptr, size), ret = 0;
                break;
        }
        smp_mb();

        return ret;
}

#define xchg(ptr,x) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

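/*
 * Illustrative sketch (not part of the original header): xchg() atomically
 * stores a new value and returns whatever was there before, which makes it a
 * natural fit for "take and clear" style handoffs.  The helper name below is
 * hypothetical and only demonstrates the usage pattern.
 */
static inline unsigned long example_take_pending(unsigned long *pending)
{
        /* Atomically read the old bits and clear the word in one step. */
        return xchg(pending, 0UL);
}
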
#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)                                               \
        ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
                        (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#else   /* min ARCH >= ARMv6 */

#define __HAVE_ARCH_CMPXCHG 1

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long oldval, res;

        prefetchw((const void *)ptr);

        switch (size) {
#ifndef CONFIG_CPU_V6   /* min ARCH >= ARMv6K */
        case 1:
                do {
                        asm volatile("@ __cmpxchg1\n"
                        "       ldrexb  %1, [%2]\n"
                        "       mov     %0, #0\n"
                        "       teq     %1, %3\n"
                        "       strexbeq %0, %4, [%2]\n"
                                : "=&r" (res), "=&r" (oldval)
                                : "r" (ptr), "Ir" (old), "r" (new)
                                : "memory", "cc");
                } while (res);
                break;
        case 2:
                do {
                        asm volatile("@ __cmpxchg2\n"
                        "       ldrexh  %1, [%2]\n"
                        "       mov     %0, #0\n"
                        "       teq     %1, %3\n"
                        "       strexheq %0, %4, [%2]\n"
                                : "=&r" (res), "=&r" (oldval)
                                : "r" (ptr), "Ir" (old), "r" (new)
                                : "memory", "cc");
                } while (res);
                break;
#endif
        case 4:
                do {
                        asm volatile("@ __cmpxchg4\n"
                        "       ldrex   %1, [%2]\n"
                        "       mov     %0, #0\n"
                        "       teq     %1, %3\n"
                        "       strexeq %0, %4, [%2]\n"
                                : "=&r" (res), "=&r" (oldval)
                                : "r" (ptr), "Ir" (old), "r" (new)
                                : "memory", "cc");
                } while (res);
                break;
        default:
                __bad_cmpxchg(ptr, size);
                oldval = 0;
        }

        return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
                                         unsigned long new, int size)
{
        unsigned long ret;

        smp_mb();
        ret = __cmpxchg(ptr, old, new, size);
        smp_mb();

        return ret;
}

#define cmpxchg(ptr,o,n)                                                \
        ((__typeof__(*(ptr)))__cmpxchg_mb((ptr),                        \
                                          (unsigned long)(o),           \
                                          (unsigned long)(n),           \
                                          sizeof(*(ptr))))

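/*
 * Illustrative sketch (not part of the original header): cmpxchg() is almost
 * always used inside a retry loop that re-reads the location until the
 * compare succeeds.  The helper below, which atomically raises a 32-bit
 * high-water mark, is hypothetical and only shows the pattern.
 */
static inline void example_update_max(unsigned int *max, unsigned int val)
{
        unsigned int old;

        do {
                old = *max;
                if (old >= val)
                        return;
                /* cmpxchg() returns the value it found; retry on contention. */
        } while (cmpxchg(max, old, val) != old);
}
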
static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
        unsigned long ret;

        switch (size) {
#ifdef CONFIG_CPU_V6    /* min ARCH == ARMv6 */
        case 1:
        case 2:
                ret = __cmpxchg_local_generic(ptr, old, new, size);
                break;
#endif
        default:
                ret = __cmpxchg(ptr, old, new, size);
        }

        return ret;
}

static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
                                             unsigned long long old,
                                             unsigned long long new)
{
        unsigned long long oldval;
        unsigned long res;

        prefetchw(ptr);

        __asm__ __volatile__(
"1:     ldrexd          %1, %H1, [%3]\n"
"       teq             %1, %4\n"
"       teqeq           %H1, %H4\n"
"       bne             2f\n"
"       strexd          %0, %5, %H5, [%3]\n"
"       teq             %0, #0\n"
"       bne             1b\n"
"2:"
        : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
        : "r" (ptr), "r" (old), "r" (new)
        : "cc");

        return oldval;
}

static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
                                                unsigned long long old,
                                                unsigned long long new)
{
        unsigned long long ret;

        smp_mb();
        ret = __cmpxchg64(ptr, old, new);
        smp_mb();

        return ret;
}

#define cmpxchg_local(ptr,o,n)                                          \
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr),                     \
                                       (unsigned long)(o),              \
                                       (unsigned long)(n),              \
                                       sizeof(*(ptr))))

#define cmpxchg64(ptr, o, n)                                            \
        ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),                      \
                                        (unsigned long long)(o),        \
                                        (unsigned long long)(n)))

#define cmpxchg64_relaxed(ptr, o, n)                                    \
        ((__typeof__(*(ptr)))__cmpxchg64((ptr),                         \
                                        (unsigned long long)(o),        \
                                        (unsigned long long)(n)))

#define cmpxchg64_local(ptr, o, n)      cmpxchg64_relaxed((ptr), (o), (n))

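/*
 * Illustrative sketch (not part of the original header): cmpxchg64() follows
 * the same compare-and-swap pattern for 64-bit values, e.g. bumping a 64-bit
 * sequence counter without taking a lock.  The helper name is hypothetical.
 */
static inline unsigned long long example_seq_advance(unsigned long long *seq)
{
        unsigned long long old, new;

        do {
                old = *seq;
                new = old + 1;
                /* Retry if another CPU updated the counter in between. */
        } while (cmpxchg64(seq, old, new) != old);

        return new;
}
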
#endif  /* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASM_ARM_CMPXCHG_H */