These changes are the raw update of the kvmfornfv.git kernel sources to linux-4.4.6-rt14.
diff --git a/kernel/arch/arm/include/asm/atomic.h b/kernel/arch/arm/include/asm/atomic.h
index e22c119..9e10c45 100644
--- a/kernel/arch/arm/include/asm/atomic.h
+++ b/kernel/arch/arm/include/asm/atomic.h
@@ -27,8 +27,8 @@
  * strex/ldrex monitor on some implementations. The reason we can use it for
  * atomic_set() is the clrex or dummy strex done on every exception return.
  */
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic_set(v,i)        (((v)->counter) = (i))
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic_set(v,i)        WRITE_ONCE(((v)->counter), (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
 
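The first hunk retires ACCESS_ONCE() and the bare assignment in favour of READ_ONCE()/WRITE_ONCE(), which force a single, untorn access that the compiler may not fuse, tear, or cache across calls. A minimal sketch of the guarantee, with hypothetical names:

    /* Hypothetical illustration: what the *_ONCE() macros buy over
     * plain accesses to a shared location. */
    static int shared;

    static void producer(void)
    {
            WRITE_ONCE(shared, 1);          /* one untorn store */
    }

    static int consumer(void)
    {
            /* re-read from memory on every call, never cached in a
             * register across an enclosing loop */
            return READ_ONCE(shared);
    }
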
@@ -57,12 +57,11 @@ static inline void atomic_##op(int i, atomic_t *v)                  \
 }                                                                      \
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
-static inline int atomic_##op##_return(int i, atomic_t *v)             \
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)     \
 {                                                                      \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
-       smp_mb();                                                       \
        prefetchw(&v->counter);                                         \
                                                                        \
        __asm__ __volatile__("@ atomic_" #op "_return\n"                \
@@ -75,17 +74,17 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
        : "r" (&v->counter), "Ir" (i)                                   \
        : "cc");                                                        \
                                                                        \
-       smp_mb();                                                       \
-                                                                       \
        return result;                                                  \
 }
 
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+#define atomic_add_return_relaxed      atomic_add_return_relaxed
+#define atomic_sub_return_relaxed      atomic_sub_return_relaxed
+
+static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 {
        int oldval;
        unsigned long res;
 
-       smp_mb();
        prefetchw(&ptr->counter);
 
        do {
@@ -99,10 +98,9 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
                    : "cc");
        } while (res);
 
-       smp_mb();
-
        return oldval;
 }
+#define atomic_cmpxchg_relaxed         atomic_cmpxchg_relaxed
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
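The pattern running through the rest of the patch starts in these hunks: the leading and trailing smp_mb() calls are dropped and the functions are renamed to their _relaxed forms. The fully ordered names do not go away; include/linux/atomic.h rebuilds them by bracketing the relaxed primitive with barriers, roughly (trimmed from the generic header):

    /* When an architecture supplies only the _relaxed form, the
     * generic header synthesizes the fully ordered one. */
    #define __atomic_op_fence(op, args...)                          \
    ({                                                              \
            typeof(op##_relaxed(args)) __ret;                       \
            smp_mb__before_atomic();                                \
            __ret = op##_relaxed(args);                             \
            smp_mb__after_atomic();                                 \
            __ret;                                                  \
    })

    #ifndef atomic_add_return
    #define atomic_add_return(...)                                  \
            __atomic_op_fence(atomic_add_return, __VA_ARGS__)
    #endif

The odd-looking self-definitions such as #define atomic_add_return_relaxed atomic_add_return_relaxed are how the generic header detects, via #ifndef, that this architecture provides its own relaxed variant.
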
@@ -194,6 +192,13 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(andnot, &= ~, bic)
+ATOMIC_OP(or,  |=, orr)
+ATOMIC_OP(xor, ^=, eor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
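The new ATOMIC_OP() invocations generate the void-returning bitwise operations atomic_and(), atomic_andnot(), atomic_or() and atomic_xor(); the preceding #define atomic_andnot atomic_andnot again tells the generic header not to emit a fallback. Roughly what ATOMIC_OP(or, |=, orr) expands to on ARMv6+, a plain ldrex/strex retry loop with no implied ordering:

    static inline void atomic_or(int i, atomic_t *v)
    {
            unsigned long tmp;
            int result;

            prefetchw(&v->counter);
            __asm__ __volatile__("@ atomic_or\n"
    "1:     ldrex   %0, [%3]\n"
    "       orr     %0, %0, %4\n"
    "       strex   %1, %0, [%3]\n"
    "       teq     %1, #0\n"           /* store failed? */
    "       bne     1b"                 /* then retry */
            : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
            : "r" (&v->counter), "Ir" (i)
            : "cc");
    }
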
@@ -205,8 +210,8 @@ ATOMIC_OPS(sub, -=, sub)
 
 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)    (atomic_add_return(1, v))
-#define atomic_dec_return(v)    (atomic_sub_return(1, v))
+#define atomic_inc_return_relaxed(v)    (atomic_add_return_relaxed(1, v))
+#define atomic_dec_return_relaxed(v)    (atomic_sub_return_relaxed(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
 
 #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
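Only the relaxed forms of inc_return/dec_return are defined here; the ordered atomic_inc_return() and atomic_dec_return() now come from the generic fence wrapper shown above, so callers lose nothing. A hypothetical refcount sketch of when each spelling is appropriate:

    #include <linux/atomic.h>
    #include <linux/slab.h>

    struct obj {                            /* hypothetical object */
            atomic_t refs;
    };

    static void obj_put(struct obj *o)
    {
            /* fully ordered: the final put must not be reordered
             * against earlier accesses to *o */
            if (atomic_dec_return(&o->refs) == 0)
                    kfree(o);
    }

    static void obj_stat_hit(atomic_t *hits)
    {
            /* pure statistics: no ordering requirement, the cheaper
             * relaxed form suffices */
            atomic_inc_return_relaxed(hits);
    }
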
@@ -290,12 +295,12 @@ static inline void atomic64_##op(long long i, atomic64_t *v)              \
 }                                                                      \
 
 #define ATOMIC64_OP_RETURN(op, op1, op2)                               \
-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+static inline long long                                                        \
+atomic64_##op##_return_relaxed(long long i, atomic64_t *v)             \
 {                                                                      \
        long long result;                                               \
        unsigned long tmp;                                              \
                                                                        \
-       smp_mb();                                                       \
        prefetchw(&v->counter);                                         \
                                                                        \
        __asm__ __volatile__("@ atomic64_" #op "_return\n"              \
@@ -309,8 +314,6 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
        : "r" (&v->counter), "r" (i)                                    \
        : "cc");                                                        \
                                                                        \
-       smp_mb();                                                       \
-                                                                       \
        return result;                                                  \
 }
 
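The 64-bit ops get the same treatment; on 32-bit ARM they are built from ldrexd/strexd pairs, so dropping two smp_mb() calls per operation is a real saving. Besides the fully ordered form, the generic header also derives acquire and release variants from the relaxed primitive, roughly:

    /* Acquire orders the atomic before all later accesses, release
     * after all earlier ones; both build on the relaxed primitive. */
    #define __atomic_op_acquire(op, args...)                        \
    ({                                                              \
            typeof(op##_relaxed(args)) __ret = op##_relaxed(args);  \
            smp_mb__after_atomic();                                 \
            __ret;                                                  \
    })

    #define __atomic_op_release(op, args...)                        \
    ({                                                              \
            smp_mb__before_atomic();                                \
            op##_relaxed(args);                                     \
    })
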
@@ -321,17 +324,26 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
 ATOMIC64_OPS(add, adds, adc)
 ATOMIC64_OPS(sub, subs, sbc)
 
+#define atomic64_add_return_relaxed    atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed    atomic64_sub_return_relaxed
+
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC64_OP(and, and, and)
+ATOMIC64_OP(andnot, bic, bic)
+ATOMIC64_OP(or,  orr, orr)
+ATOMIC64_OP(xor, eor, eor)
+
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
-                                       long long new)
+static inline long long
+atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
 {
        long long oldval;
        unsigned long res;
 
-       smp_mb();
        prefetchw(&ptr->counter);
 
        do {
@@ -346,17 +358,15 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
                : "cc");
        } while (res);
 
-       smp_mb();
-
        return oldval;
 }
+#define atomic64_cmpxchg_relaxed       atomic64_cmpxchg_relaxed
 
-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
 {
        long long result;
        unsigned long tmp;
 
-       smp_mb();
        prefetchw(&ptr->counter);
 
        __asm__ __volatile__("@ atomic64_xchg\n"
@@ -368,10 +378,9 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
        : "r" (&ptr->counter), "r" (new)
        : "cc");
 
-       smp_mb();
-
        return result;
 }
+#define atomic64_xchg_relaxed          atomic64_xchg_relaxed
 
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
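atomic64_cmpxchg_relaxed() and atomic64_xchg_relaxed() complete the pattern. A relaxed cmpxchg is the natural fit for retry loops where only the final value matters; a hypothetical lock-free running-maximum sketch:

    #include <linux/atomic.h>

    /* Hypothetical helper: keep *max at the largest value seen.
     * Correctness rests solely on the atomicity of the compare and
     * exchange, not on any ordering against other memory accesses. */
    static void atomic64_track_max(atomic64_t *max, long long val)
    {
            long long cur = atomic64_read(max);

            while (cur < val) {
                    long long old = atomic64_cmpxchg_relaxed(max, cur, val);

                    if (old == cur)
                            break;          /* we installed val */
                    cur = old;              /* lost a race, retry */
            }
    }
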
@@ -433,11 +442,11 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 
 #define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v)                        atomic64_add(1LL, (v))
-#define atomic64_inc_return(v)         atomic64_add_return(1LL, (v))
+#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v))
 #define atomic64_inc_and_test(v)       (atomic64_inc_return(v) == 0)
 #define atomic64_sub_and_test(a, v)    (atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v)                        atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v)         atomic64_sub_return(1LL, (v))
+#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v))
 #define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
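
The derived predicates (atomic64_inc_and_test(), atomic64_dec_and_test()) still spell out the ordered *_return names, which the generic header reassembles from the relaxed primitives above. The net effect for a caller, as a sketch:

    #include <linux/atomic.h>

    static atomic64_t ctr = ATOMIC64_INIT(0);  /* hypothetical counter */

    static void ordering_demo(void)
    {
            /* ordered spelling: wrapped by include/linux/atomic.h in
             * smp_mb() on either side of the ldrexd/strexd loop */
            long long a = atomic64_inc_return(&ctr);

            /* relaxed spelling: the bare loop defined in this file */
            long long b = atomic64_inc_return_relaxed(&ctr);

            (void)a;
            (void)b;
    }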