/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */
#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>
/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) ((v)->counter = (i))
/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	__insn_fetchadd4((void *)&v->counter, i);
}
static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}
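
/*
 * __atomic_add_unless() adds @a to @v unless @v was @u, and returns the
 * value @v held before the attempted update; the generic atomic_add_unless()
 * wrapper in <linux/atomic.h> turns that old value into a "did it happen"
 * test by comparing it against @u.
 */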
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}
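
/*
 * Illustrative sketch only, not part of this header: callers typically use
 * the atomic_add_unless() form for "take a reference unless the count has
 * already dropped to zero" logic, e.g.:
 *
 *	if (atomic_add_unless(&obj->refcnt, 1, 0))
 *		... obj is safely referenced ...
 *
 * "obj" and "refcnt" are hypothetical names used only for illustration.
 */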
/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	((v)->counter)
#define atomic64_set(v, i)	((v)->counter = (i))
static inline void atomic64_add(long i, atomic64_t *v)
{
	__insn_fetchadd((void *)&v->counter, i);
}
static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}
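
/*
 * Unlike __atomic_add_unless() above, atomic64_add_unless() returns
 * non-zero when the add was actually performed (i.e. @v was not @u),
 * which is what the atomic64_inc_not_zero() macro below relies on.
 */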
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
/* Define this to indicate that cmpxchg is an efficient operation. */
#define __HAVE_ARCH_CMPXCHG
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */