These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
diff --git a/kernel/arch/xtensa/include/asm/atomic.h b/kernel/arch/xtensa/include/asm/atomic.h
index 00b7d46..fd8017c 100644
--- a/kernel/arch/xtensa/include/asm/atomic.h
+++ b/kernel/arch/xtensa/include/asm/atomic.h
@@ -29,7 +29,7 @@
  *
  * Locking interrupts looks like this:
  *
- *    rsil a15, LOCKLEVEL
+ *    rsil a15, TOPLEVEL
  *    <code>
  *    wsr  a15, PS
  *    rsync
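
The comment above documents how these atomics protect themselves on cores
without compare-and-swap: rsil saves the old PS into a15 and raises
PS.INTLEVEL, and wsr/rsync restore it afterwards. The patch raises the level
to TOPLEVEL instead of LOCKLEVEL, so the read-modify-write also masks the
higher interrupt levels that LOCKLEVEL can leave open (e.g. with fake-NMI
support). A minimal sketch of that save/restore pattern, assuming TOPLEVEL
and __stringify() are in scope as they are for this header; the helper names
are illustrative only, not part of the kernel API:

    #include <linux/stringify.h>

    /* Illustrative sketch of the pattern described in the comment above. */
    static inline unsigned long example_atomic_irq_save(void)
    {
            unsigned long flags;

            /* rsil writes the old PS to the register and raises PS.INTLEVEL */
            __asm__ __volatile__("rsil %0, " __stringify(TOPLEVEL)
                                 : "=a" (flags) : : "memory");
            return flags;
    }

    static inline void example_atomic_irq_restore(unsigned long flags)
    {
            /* restore the saved PS and wait for it to take effect */
            __asm__ __volatile__("wsr %0, ps\n"
                                 "rsync"
                                 : : "a" (flags) : "memory");
    }
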
@@ -47,7 +47,7 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)         ACCESS_ONCE((v)->counter)
+#define atomic_read(v)         READ_ONCE((v)->counter)
 
 /**
  * atomic_set - set atomic variable
@@ -56,7 +56,7 @@
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v,i)                ((v)->counter = (i))
+#define atomic_set(v,i)                WRITE_ONCE((v)->counter, (i))
 
 #if XCHAL_HAVE_S32C1I
 #define ATOMIC_OP(op)                                                  \
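
The two hunks above switch atomic_read() and atomic_set() from ACCESS_ONCE()
and a plain assignment to READ_ONCE()/WRITE_ONCE(). For a scalar field such
as atomic_t.counter these amount to single volatile accesses, so the compiler
cannot tear, merge, cache, or re-issue them. A rough sketch of that guarantee
(illustrative macros, not the kernel's real definitions):

    /* Rough user-visible semantics of READ_ONCE/WRITE_ONCE on a scalar
     * field such as atomic_t.counter; illustrative only. */
    #define example_read_once(x)      (*(const volatile typeof(x) *)&(x))
    #define example_write_once(x, v)  (*(volatile typeof(x) *)&(x) = (v))
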
@@ -106,7 +106,7 @@ static inline void atomic_##op(int i, atomic_t * v)                 \
        unsigned int vval;                                              \
                                                                        \
        __asm__ __volatile__(                                           \
-                       "       rsil    a15, "__stringify(LOCKLEVEL)"\n"\
+                       "       rsil    a15, "__stringify(TOPLEVEL)"\n"\
                        "       l32i    %0, %2, 0\n"                    \
                        "       " #op " %0, %0, %1\n"                   \
                        "       s32i    %0, %2, 0\n"                    \
@@ -124,7 +124,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v)         \
        unsigned int vval;                                              \
                                                                        \
        __asm__ __volatile__(                                           \
-                       "       rsil    a15,"__stringify(LOCKLEVEL)"\n" \
+                       "       rsil    a15,"__stringify(TOPLEVEL)"\n"  \
                        "       l32i    %0, %2, 0\n"                    \
                        "       " #op " %0, %0, %1\n"                   \
                        "       s32i    %0, %2, 0\n"                    \
@@ -145,6 +145,10 @@ static inline int atomic_##op##_return(int i, atomic_t * v)                \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
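
The new ATOMIC_OP(and), ATOMIC_OP(or) and ATOMIC_OP(xor) lines generate
atomic_and(), atomic_or() and atomic_xor() (without *_return variants). They
take over the jobs of atomic_clear_mask() and atomic_set_mask(), which this
patch removes below; a sketch of how callers convert ('mask' and 'v' are
hypothetical placeholders):

    static inline void example_set_bits(unsigned int mask, atomic_t *v)
    {
            atomic_or(mask, v);      /* previously atomic_set_mask(mask, v)   */
    }

    static inline void example_clear_bits(unsigned int mask, atomic_t *v)
    {
            atomic_and(~mask, v);    /* previously atomic_clear_mask(mask, v) */
    }
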
@@ -250,75 +254,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
        return c;
 }
 
-
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-       unsigned long tmp;
-       int result;
-
-       __asm__ __volatile__(
-                       "1:     l32i    %1, %3, 0\n"
-                       "       wsr     %1, scompare1\n"
-                       "       and     %0, %1, %2\n"
-                       "       s32c1i  %0, %3, 0\n"
-                       "       bne     %0, %1, 1b\n"
-                       : "=&a" (result), "=&a" (tmp)
-                       : "a" (~mask), "a" (v)
-                       : "memory"
-                       );
-#else
-       unsigned int all_f = -1;
-       unsigned int vval;
-
-       __asm__ __volatile__(
-                       "       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-                       "       l32i    %0, %2, 0\n"
-                       "       xor     %1, %4, %3\n"
-                       "       and     %0, %0, %4\n"
-                       "       s32i    %0, %2, 0\n"
-                       "       wsr     a15, ps\n"
-                       "       rsync\n"
-                       : "=&a" (vval), "=a" (mask)
-                       : "a" (v), "a" (all_f), "1" (mask)
-                       : "a15", "memory"
-                       );
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-       unsigned long tmp;
-       int result;
-
-       __asm__ __volatile__(
-                       "1:     l32i    %1, %3, 0\n"
-                       "       wsr     %1, scompare1\n"
-                       "       or      %0, %1, %2\n"
-                       "       s32c1i  %0, %3, 0\n"
-                       "       bne     %0, %1, 1b\n"
-                       : "=&a" (result), "=&a" (tmp)
-                       : "a" (mask), "a" (v)
-                       : "memory"
-                       );
-#else
-       unsigned int vval;
-
-       __asm__ __volatile__(
-                       "       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-                       "       l32i    %0, %2, 0\n"
-                       "       or      %0, %0, %1\n"
-                       "       s32i    %0, %2, 0\n"
-                       "       wsr     a15, ps\n"
-                       "       rsync\n"
-                       : "=&a" (vval)
-                       : "a" (mask), "a" (v)
-                       : "a15", "memory"
-                       );
-#endif
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _XTENSA_ATOMIC_H */
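
The removed atomic_clear_mask()/atomic_set_mask() S32C1I paths were
compare-and-swap retry loops: load the old value, stash it in SCOMPARE1,
compute the new value, attempt s32c1i, and retry on a mismatch. The same
shape, expressed with the kernel's atomic_cmpxchg(), looks roughly like this
(illustrative only; the generated atomic_and()/atomic_or() above replace it):

    static inline void example_clear_mask(unsigned int mask, atomic_t *v)
    {
            int old, new;

            do {
                    old = atomic_read(v);      /* snapshot the current value */
                    new = old & ~mask;         /* compute the desired value  */
            } while (atomic_cmpxchg(v, old, new) != old);  /* retry if raced */
    }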