/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)  ((v)->counter)
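
/*
 * Editorial note: in this kernel version atomic_read() is a plain load,
 * without the READ_ONCE() wrapper later kernels use; it guarantees only
 * that the access itself is single-copy atomic, not any ordering.
 */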

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) (((v)->counter) = (i))

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned int temp;                                              \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %0, [%1]        \n"                             \
        "       " #asm_op " %0, %0, %2  \n"                             \
        "       scond   %0, [%1]        \n"                             \
        "       bnz     1b              \n"                             \
        : "=&r"(temp)   /* Early clobber, to prevent reg reuse */       \
        : "r"(&v->counter), "ir"(i)                                     \
        : "cc");                                                        \
}

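/*
 * For illustration (editorial sketch, not part of the original file):
 * ATOMIC_OP(add, +=, add) expands to roughly the following. LLOCK loads
 * the counter and arms a reservation; SCOND stores only if nothing else
 * wrote the location in between, otherwise BNZ loops back and retries:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int temp;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %0, [%1]	\n"
 *		"	add     %0, %0, %2	\n"
 *		"	scond   %0, [%1]	\n"
 *		"	bnz     1b		\n"
 *		: "=&r"(temp)
 *		: "r"(&v->counter), "ir"(i)
 *		: "cc");
 *	}
 */
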
#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned int temp;                                              \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
         * LLOCK/SCOND themselves don't provide any such semantics      \
         */                                                             \
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %0, [%1]        \n"                             \
        "       " #asm_op " %0, %0, %2  \n"                             \
        "       scond   %0, [%1]        \n"                             \
        "       bnz     1b              \n"                             \
        : "=&r"(temp)                                                   \
        : "r"(&v->counter), "ir"(i)                                     \
        : "cc");                                                        \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        return temp;                                                    \
}
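
/*
 * Editorial note: the value returned is whatever the successful SCOND
 * iteration computed in "temp", and the smp_mb() on each side makes the
 * *_return variants full barriers, as the kernel's atomic API requires.
 * Plain atomic_##op() above deliberately has no such ordering guarantee.
 */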

#else   /* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

/* Violating the atomic_xxx API locking protocol in UP, for optimization's sake */
#define atomic_set(v, i) (((v)->counter) = (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
        /*
         * Independent of hardware support, all of the atomic_xxx() APIs need
         * to follow the same locking rules to make sure that a "hardware"
         * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
         * sequence.
         *
         * Thus atomic_set(), despite being a single insn (and seemingly
         * atomic), still requires the locking.
         */
        unsigned long flags;

        atomic_ops_lock(flags);
        v->counter = i;
        atomic_ops_unlock(flags);
}

#endif

/*
 * Non-hardware-assisted atomic R-M-W:
 * locking falls back to IRQ disabling alone (UP) or spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        atomic_ops_lock(flags);                                         \
        v->counter c_op i;                                              \
        atomic_ops_unlock(flags);                                       \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned long flags;                                            \
        unsigned long temp;                                             \
                                                                        \
        /*                                                              \
         * spin lock/unlock provides the needed smp_mb() before/after   \
         */                                                             \
        atomic_ops_lock(flags);                                         \
        temp = v->counter;                                              \
        temp c_op i;                                                    \
        v->counter = temp;                                              \
        atomic_ops_unlock(flags);                                       \
                                                                        \
        return temp;                                                    \
}
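
/*
 * Editorial note (per arch/arc/include/asm/smp.h in this tree):
 * atomic_ops_lock()/atomic_ops_unlock() reduce to local_irq_save()/
 * local_irq_restore() on UP, and on SMP additionally take a global
 * spinlock; that lock/unlock pair is what supplies the smp_mb()
 * semantics mentioned in the comment above.
 */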

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)
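
/*
 * The instantiations above generate atomic_add(), atomic_add_return(),
 * atomic_sub(), atomic_sub_return() and atomic_and(); "and" gets no
 * *_return variant since only ATOMIC_OP is used for it.
 */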

#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)                                    \
({                                                                      \
        int c, old;                                                     \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
         * LLOCK/SCOND themselves don't provide any such semantics      \
         */                                                             \
        smp_mb();                                                       \
                                                                        \
        c = atomic_read(v);                                             \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
                c = old;                                                \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        c;                                                              \
})
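
/*
 * Illustrative use (editorial, not from the original file): take a
 * reference only while the count is still non-zero, in the style of
 * kref_get_unless_zero():
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) != 0)
 *		... got a reference, obj won't be freed under us ...
 */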

#define atomic_inc_not_zero(v)          atomic_add_unless((v), 1, 0)

#define atomic_inc(v)                   atomic_add(1, v)
#define atomic_dec(v)                   atomic_sub(1, v)

#define atomic_inc_and_test(v)          (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)            atomic_add_return(1, (v))
#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)       (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)       (atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)                  { (i) }
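
/*
 * Example (editorial sketch): static initialization together with the
 * derived helpers above, in a typical refcounting pattern:
 *
 *	static atomic_t users = ATOMIC_INIT(0);
 *
 *	atomic_inc(&users);
 *	...
 *	if (atomic_dec_and_test(&users))
 *		... last user gone, tear down ...
 */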

#include <asm-generic/atomic64.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_ARC_ATOMIC_H */