/* MN10300 Atomic counter operations
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <asm/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#ifndef CONFIG_SMP
#include <asm-generic/atomic.h>
#else

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)  (ACCESS_ONCE((v)->counter))

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v, i) (((v)->counter) = (i))
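
/*
 * Example (illustrative only, not part of the original header):
 * declaring and priming a counter with the accessors above.  The
 * variable and function names are hypothetical.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 5);
 *	if (atomic_read(&nr_users) > 0)
 *		do_something();
 */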

#define ATOMIC_OP(op)                                                   \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        int retval, status;                                             \
                                                                        \
        asm volatile(                                                   \
                "1:     mov     %4,(_AAR,%3)    \n"                     \
                "       mov     (_ADR,%3),%1    \n"                     \
                "       " #op " %5,%1           \n"                     \
                "       mov     %1,(_ADR,%3)    \n"                     \
                "       mov     (_ADR,%3),%0    \n"     /* flush */     \
                "       mov     (_ASR,%3),%0    \n"                     \
                "       or      %0,%0           \n"                     \
                "       bne     1b              \n"                     \
                : "=&r"(status), "=&r"(retval), "=m"(v->counter)        \
                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)   \
                : "memory", "cc");                                      \
}
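
/*
 * A reading of the asm sequence above (added commentary): the SMP
 * implementation drives the atomic-operation registers mapped at
 * ATOMIC_OPS_BASE_ADDR.  The operand address is posted to _AAR, the
 * old value is fetched and the updated value written back through
 * _ADR, and _ASR is then read as a status word.  A non-zero status
 * means the store did not take effect atomically, so "bne 1b"
 * retries the whole read-modify-write.
 */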

#define ATOMIC_OP_RETURN(op)                                            \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        int retval, status;                                             \
                                                                        \
        asm volatile(                                                   \
                "1:     mov     %4,(_AAR,%3)    \n"                     \
                "       mov     (_ADR,%3),%1    \n"                     \
                "       " #op " %5,%1           \n"                     \
                "       mov     %1,(_ADR,%3)    \n"                     \
                "       mov     (_ADR,%3),%0    \n"     /* flush */     \
                "       mov     (_ASR,%3),%0    \n"                     \
                "       or      %0,%0           \n"                     \
                "       bne     1b              \n"                     \
                : "=&r"(status), "=&r"(retval), "=m"(v->counter)        \
                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)   \
                : "memory", "cc");                                      \
        return retval;                                                  \
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
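
/*
 * For reference (added commentary), the two ATOMIC_OPS() expansions
 * above generate the following helpers, signatures only:
 *
 *	static inline void atomic_add(int i, atomic_t *v);
 *	static inline int atomic_add_return(int i, atomic_t *v);
 *	static inline void atomic_sub(int i, atomic_t *v);
 *	static inline int atomic_sub_return(int i, atomic_t *v);
 */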

/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true if the result is
 * negative, or false when the result is greater than or equal to
 * zero.
 */
static inline int atomic_add_negative(int i, atomic_t *v)
{
        return atomic_add_return(i, v) < 0;
}

static inline void atomic_inc(atomic_t *v)
{
        atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
        atomic_sub_return(1, v);
}

#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_inc_return(v)            atomic_add_return(1, (v))

#define atomic_sub_and_test(i, v)       (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) == 0)
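
/*
 * Example (illustrative only, not part of the original header): the
 * usual reference-count pattern built on the helpers above.  The
 * structure and function names are hypothetical.
 *
 *	struct session { atomic_t refcnt; };
 *
 *	static void session_put(struct session *s)
 *	{
 *		if (atomic_dec_and_test(&s->refcnt))
 *			kfree(s);
 *	}
 */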

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to @v...
 * @u: ...unless @v is equal to @u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
#define __atomic_add_unless(v, a, u)                            \
({                                                              \
        int c, old;                                             \
        c = atomic_read(v);                                     \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
                c = old;                                        \
        c;                                                      \
})

#define atomic_xchg(ptr, v)             (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)     (cmpxchg(&((v)->counter), (old), (new)))
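
/*
 * atomic_cmpxchg() is what makes compare-and-swap retry loops such as
 * __atomic_add_unless() above work.  A minimal sketch of the pattern
 * (illustrative only; v is a hypothetical atomic_t pointer): retry
 * until no other CPU changed the counter between the read and the swap.
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(v);
 *		new = old + 1;
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */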

/**
 * atomic_clear_mask - Atomically clear bits in memory
 * @mask: Mask of the bits to be cleared
 * @addr: pointer to word in memory
 *
 * Atomically clears the bits set in @mask from the memory word specified.
 */
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
        int status;

        asm volatile(
                "1:     mov     %3,(_AAR,%2)    \n"
                "       mov     (_ADR,%2),%0    \n"
                "       and     %4,%0           \n"
                "       mov     %0,(_ADR,%2)    \n"
                "       mov     (_ADR,%2),%0    \n"     /* flush */
                "       mov     (_ASR,%2),%0    \n"
                "       or      %0,%0           \n"
                "       bne     1b              \n"
                : "=&r"(status), "=m"(*addr)
                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
                : "memory", "cc");
#else
        unsigned long flags;

        mask = ~mask;
        flags = arch_local_cli_save();
        *addr &= mask;
        arch_local_irq_restore(flags);
#endif
}

/**
 * atomic_set_mask - Atomically set bits in memory
 * @mask: Mask of the bits to be set
 * @addr: pointer to word in memory
 *
 * Atomically sets the bits set in @mask in the memory word specified.
 */
static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
        int status;

        asm volatile(
                "1:     mov     %3,(_AAR,%2)    \n"
                "       mov     (_ADR,%2),%0    \n"
                "       or      %4,%0           \n"
                "       mov     %0,(_ADR,%2)    \n"
                "       mov     (_ADR,%2),%0    \n"     /* flush */
                "       mov     (_ASR,%2),%0    \n"
                "       or      %0,%0           \n"
                "       bne     1b              \n"
                : "=&r"(status), "=m"(*addr)
                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
                : "memory", "cc");
#else
        unsigned long flags;

        flags = arch_local_cli_save();
        *addr |= mask;
        arch_local_irq_restore(flags);
#endif
}
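
/*
 * Example (illustrative only, not part of the original header):
 * flipping flag bits in a shared word with the two mask helpers.  The
 * flag value and variable name are hypothetical.
 *
 *	static unsigned long unit_flags;
 *
 *	atomic_set_mask(0x01UL, &unit_flags);	  sets bit 0
 *	atomic_clear_mask(0x01UL, &unit_flags);	  clears bit 0 again
 */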

#endif /* __KERNEL__ */
#endif /* CONFIG_SMP */
#endif /* _ASM_ATOMIC_H */