kernel/arch/sparc/lib/atomic32.c
/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>

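/*
 * SPARC v8 (32-bit) has no compare-and-swap instruction (CAS arrived
 * with the v9 ISA), so atomic_t operations are emulated with plain
 * loads and stores serialized by a small hashed array of spinlocks.
 * Address bits of the atomic variable (shifted right by 8) select one
 * of ATOMIC_HASH_SIZE locks: unrelated atomics usually hash to
 * different locks, while every access to the same variable always
 * takes the same one.
 */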
#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

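/*
 * On UP there is no other CPU to race with: spin_lock_irqsave() on a
 * single dummy lock reduces to disabling local interrupts, which is
 * all the serialization needed, while keeping the code paths
 * identical to the SMP build.
 */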
static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE	1
#define ATOMIC_HASH(a)		(&dummy)

#endif /* SMP */

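/*
 * Generate atomic_<op>_return(): take the hash lock for 'v', apply
 * the compound assignment 'cop' to the counter, and return the new
 * value.  Only the 'add' flavour is instantiated below; the sparc32
 * header (asm/atomic_32.h) derives atomic_sub_return() from it by
 * negating the operand.
 */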
#define ATOMIC_OP(op, cop)						\
int atomic_##op##_return(int i, atomic_t *v)				\
{									\
	int ret;							\
	unsigned long flags;						\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
									\
	ret = (v->counter cop i);					\
									\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
	return ret;							\
}									\
EXPORT_SYMBOL(atomic_##op##_return);

ATOMIC_OP(add, +=)

#undef ATOMIC_OP

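/*
 * Atomically replace v->counter with 'new' and return the value it
 * held before.
 */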
int atomic_xchg(atomic_t *v, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	v->counter = new;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_xchg);

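/*
 * Atomically set v->counter to 'new', but only if it currently holds
 * 'old'.  The value actually observed is returned either way, so the
 * caller can tell whether the exchange happened (ret == old) and
 * retry if it did not.
 */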
int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;

	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_cmpxchg);

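/*
 * Add 'a' to v->counter unless it currently equals 'u', returning the
 * old value.  This is the backend for atomic_add_unless() and, via
 * atomic_add_unless(v, 1, 0), for atomic_inc_not_zero().
 */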
int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(__atomic_add_unless);

/*
 * atomic_set() cannot be a plain store here: the lock-based
 * read-modify-write sequences above read the counter and write it
 * back some cycles later, so an unlocked store could slip in between
 * and be silently overwritten.  Taking the same hash lock makes the
 * store serialize with them.
 */
void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	v->counter = i;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(atomic_set);

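/*
 * Backing helpers for the sparc32 bitops: callers such as
 * test_and_set_bit() in the sparc32 bitops header pass the address of
 * the word containing the bit plus a single-bit mask.  The return
 * value is the old state of the masked bit, non-zero iff it was
 * already set.
 */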
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___set_bit);

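/* As above, but clear the masked bit instead of setting it. */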
unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___clear_bit);

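/* As above, but toggle (XOR) the masked bit. */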
unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___change_bit);

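/*
 * 32-bit compare-and-exchange on an arbitrary u32, backing the 4-byte
 * case of the generic cmpxchg() macro on sparc32.  It hashes into the
 * same lock array, so it serializes correctly against the atomic_t
 * operations above when applied to the same address.
 */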
unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);

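/*
 * Unconditional 32-bit exchange: store 'new' and return the previous
 * contents.  This is the lock-based counterpart of __cmpxchg_u32()
 * used for 4-byte xchg() operands.
 */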
unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	prev = *ptr;
	*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__xchg_u32);