#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();
	prefetchw((const void *)ptr);

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;
	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}

#define xchg(ptr, x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

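/*
 * Usage sketch (illustrative only; the 'owner' variable and
 * do_owner_work() below are hypothetical): xchg() atomically stores
 * the new value and returns the previous contents, so a zero return
 * here means we observed the unowned state and took ownership.
 *
 *	static unsigned long owner;
 *
 *	unsigned long prev = xchg(&owner, 1UL);
 *	if (prev == 0)
 *		do_owner_work();
 */
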
#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#else	/* min ARCH >= ARMv6 */

#define __HAVE_ARCH_CMPXCHG 1

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	prefetchw((const void *)ptr);

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq	%0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
					  (unsigned long)(o),		\
					  (unsigned long)(n),		\
					  sizeof(*(ptr))))

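/*
 * Usage sketch (illustrative only; 'counter' and the increment are
 * hypothetical): the usual compare-and-swap retry loop. cmpxchg()
 * returns the value it observed at *ptr, which equals 'old' only if
 * the swap succeeded, so the loop retries until no other CPU raced us.
 *
 *	static unsigned int counter;
 *	unsigned int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */
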
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long oldval;
	unsigned long res;

	prefetchw(ptr);

	__asm__ __volatile__(
"1:	ldrexd		%1, %H1, [%3]\n"
"	teq		%1, %4\n"
"	teqeq		%H1, %H4\n"
"	bne		2f\n"
"	strexd		%0, %5, %H5, [%3]\n"
"	teq		%0, #0\n"
"	bne		1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
	: "r" (ptr), "r" (old), "r" (new)
	: "cc");

	return oldval;
}

static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
					     (unsigned long)(o),	\
					     (unsigned long)(n),	\
					     sizeof(*(ptr))))

#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					(unsigned long long)(o),	\
					(unsigned long long)(n)))

#define cmpxchg64_relaxed(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					(unsigned long long)(o),	\
					(unsigned long long)(n)))

#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))

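/*
 * Usage sketch (illustrative only; 'seq' and slow_path() are
 * hypothetical): a single 64-bit compare-and-swap attempt. The 64-bit
 * variants mirror cmpxchg() but operate on naturally aligned 64-bit
 * values via ldrexd/strexd; cmpxchg64() is fully ordered, while
 * cmpxchg64_relaxed() and cmpxchg64_local() omit the barriers.
 *
 *	static unsigned long long seq;
 *
 *	unsigned long long cur = seq;
 *	if (cmpxchg64(&seq, cur, cur + 1) != cur)
 *		slow_path();
 */
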
#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASM_ARM_CMPXCHG_H */