/*
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in SMP environment.
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#define __ATOMIC_NO_BARRIER	"\n"

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC_OR	"lao"
#define __ATOMIC_AND	"lan"
#define __ATOMIC_ADD	"laa"
#define __ATOMIC_BARRIER "bcr	14,0\n"

#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)	\
({								\
	int old_val;						\
								\
	typecheck(atomic_t *, ptr);				\
	asm volatile(						\
		__barrier					\
		op_string "	%0,%2,%1\n"			\
		__barrier					\
		: "=d" (old_val), "+Q" ((ptr)->counter)		\
		: "d" (op_val)					\
		: "cc", "memory");				\
	old_val;						\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OR	"or"
#define __ATOMIC_AND	"nr"
#define __ATOMIC_ADD	"ar"
#define __ATOMIC_BARRIER "\n"

#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)	\
({								\
	int old_val, new_val;					\
								\
	typecheck(atomic_t *, ptr);				\
	asm volatile(						\
		"	l	%0,%2\n"			\
		"0:	lr	%1,%0\n"			\
		op_string "	%1,%3\n"			\
		"	cs	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val)					\
		: "cc", "memory");				\
	old_val;						\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
}

static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"asi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
}

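/*
 * Illustrative usage (variable name hypothetical): both calls below are
 * atomic; on z196 the first takes the "asi" fast path because the
 * addend is a small compile-time constant:
 *
 *	atomic_add(42, &example_counter);		// no return value
 *	new = atomic_add_return(42, &example_counter);	// new == old + 42,
 *							// full barrier
 */
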
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

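/*
 * Example usage (illustrative only, all names hypothetical): a simple
 * reference counter built from the helpers above.
 *
 *	static atomic_t example_refcnt = ATOMIC_INIT(1);
 *
 *	static void example_get(void)
 *	{
 *		atomic_inc(&example_refcnt);
 *	}
 *
 *	static void example_put(void)
 *	{
 *		if (atomic_dec_and_test(&example_refcnt))
 *			example_free();	// hypothetical release function
 *	}
 */
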
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
}

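/*
 * Example usage (illustrative, flag value hypothetical): the mask
 * helpers implement atomic bitwise AND/OR on the whole counter:
 *
 *	#define EXAMPLE_FLAG_BUSY	0x01
 *
 *	atomic_set_mask(EXAMPLE_FLAG_BUSY, &state);	// set the bit
 *	atomic_clear_mask(EXAMPLE_FLAG_BUSY, &state);	// clear the bit
 */
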
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

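/*
 * Example usage (illustrative, name hypothetical): __atomic_add_unless()
 * returns the old value, so the "increment unless zero" idiom, which
 * takes a reference only while the object is still live, becomes:
 *
 *	static int example_get_if_live(atomic_t *refcnt)
 *	{
 *		return __atomic_add_unless(refcnt, 1, 0) != 0;
 *	}
 */
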
#undef __ATOMIC_LOOP

#define ATOMIC64_INIT(i)	{ (i) }

#define __ATOMIC64_NO_BARRIER	"\n"

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR	"laog"
#define __ATOMIC64_AND	"lang"
#define __ATOMIC64_ADD	"laag"
#define __ATOMIC64_BARRIER "bcr	14,0\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)	\
({								\
	long long old_val;					\
								\
	typecheck(atomic64_t *, ptr);				\
	asm volatile(						\
		__barrier					\
		op_string "	%0,%2,%1\n"			\
		__barrier					\
		: "=d" (old_val), "+Q" ((ptr)->counter)		\
		: "d" (op_val)					\
		: "cc", "memory");				\
	old_val;						\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC64_OR	"ogr"
#define __ATOMIC64_AND	"ngr"
#define __ATOMIC64_ADD	"agr"
#define __ATOMIC64_BARRIER "\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)	\
({								\
	long long old_val, new_val;				\
								\
	typecheck(atomic64_t *, ptr);				\
	asm volatile(						\
		"	lg	%0,%2\n"			\
		"0:	lgr	%1,%0\n"			\
		op_string "	%1,%3\n"			\
		"	csg	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val)					\
		: "cc", "memory");				\
	old_val;						\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline long long atomic64_read(const atomic64_t *v)
{
	long long c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
}

static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"agsi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

#undef __ATOMIC64_LOOP

static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + i);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

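/*
 * Example usage (illustrative, names hypothetical): treating the
 * counter as a pool of credits; atomic64_dec_if_positive() returns the
 * decremented value, or -1 without touching the counter when no credit
 * is left:
 *
 *	static atomic64_t example_credits = ATOMIC64_INIT(16);
 *
 *	static int example_take_credit(void)
 *	{
 *		return atomic64_dec_if_positive(&example_credits) >= 0;
 *	}
 */
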
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#endif /* __ARCH_S390_ATOMIC__ */