Kernel bump from 4.1.3-rt to 4.1.7-rt.
[kvmfornfv.git] / kernel/arch/arc/include/asm/bitops.h
index 624a9d0..dae03e6 100644
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <asm/barrier.h>
+#ifndef CONFIG_ARC_HAS_LLSC
+#include <asm/smp.h>
+#endif
 
-/*
- * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
- * The Kconfig glue ensures that in SMP, this is only set if the container
- * SoC/platform has cross-core coherent LLOCK/SCOND
- */
 #if defined(CONFIG_ARC_HAS_LLSC)
 
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned int temp;
-
-       m += nr >> 5;
-
-       /*
-        * ARC ISA micro-optimization:
-        *
-        * Instructions dealing with bitpos only consider lower 5 bits (0-31)
-        * e.g (x << 33) is handled like (x << 1) by ASL instruction
-        *  (mem pointer still needs adjustment to point to next word)
-        *
-        * Hence the masking to clamp @nr arg can be elided in general.
-        *
-        * However if @nr is a constant (above assumed it in a register),
-        * and greater than 31, gcc can optimize away (x << 33) to 0,
-        * as overflow, given the 32-bit ISA. Thus masking needs to be done
-        * for constant @nr, but no code is generated due to const prop.
-        */
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       __asm__ __volatile__(
-       "1:     llock   %0, [%1]        \n"
-       "       bset    %0, %0, %2      \n"
-       "       scond   %0, [%1]        \n"
-       "       bnz     1b      \n"
-       : "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
-}
-
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned int temp;
-
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       __asm__ __volatile__(
-       "1:     llock   %0, [%1]        \n"
-       "       bclr    %0, %0, %2      \n"
-       "       scond   %0, [%1]        \n"
-       "       bnz     1b      \n"
-       : "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned int temp;
-
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
+/*
+ * Hardware assisted Atomic-R-M-W
+ */
 
-       __asm__ __volatile__(
-       "1:     llock   %0, [%1]        \n"
-       "       bxor    %0, %0, %2      \n"
-       "       scond   %0, [%1]        \n"
-       "       bnz     1b              \n"
-       : "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
+#define BIT_OP(op, c_op, asm_op)                                       \
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                      \
+       unsigned int temp;                                              \
+                                                                       \
+       m += nr >> 5;                                                   \
+                                                                       \
+       /*                                                              \
+        * ARC ISA micro-optimization:                                  \
+        *                                                              \
+        * Instructions dealing with bitpos only consider lower 5 bits  \
+        * e.g (x << 33) is handled like (x << 1) by ASL instruction    \
+        *  (mem pointer still needs adjustment to point to next word)  \
+        *                                                              \
+        * Hence the masking to clamp @nr arg can be elided in general. \
+        *                                                              \
+        * However if @nr is a constant (above assumed in a register),  \
+        * and greater than 31, gcc can optimize away (x << 33) to 0,   \
+        * as overflow, given the 32-bit ISA. Thus masking needs to be  \
+        * done for const @nr, but no code is generated due to gcc      \
+        * const prop.                                                  \
+        */                                                             \
+       nr &= 0x1f;                                                     \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       "1:     llock       %0, [%1]            \n"                     \
+       "       " #asm_op " %0, %0, %2  \n"                             \
+       "       scond       %0, [%1]            \n"                     \
+       "       bnz         1b                  \n"                     \
+       : "=&r"(temp)   /* Early clobber, to prevent reg reuse */       \
+       : "r"(m),       /* Not "m": llock only supports reg direct addr mode */ \
+         "ir"(nr)                                                      \
+       : "cc");                                                        \
 }
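
For readability, here is roughly what BIT_OP(set, |, bset) expands to on this LLSC path (a hand-formatted sketch, not literal preprocessor output; the clear/change variants differ only in using the bclr/bxor opcode):

    static inline void set_bit(unsigned long nr, volatile unsigned long *m)
    {
            unsigned int temp;

            m += nr >> 5;   /* step to the 32-bit word holding bit @nr */
            nr &= 0x1f;     /* clamp bit position to 0..31 within that word */

            __asm__ __volatile__(
            "1:     llock   %0, [%1]        \n"     /* load word, take reservation */
            "       bset    %0, %0, %2      \n"     /* set bit @nr in the copy */
            "       scond   %0, [%1]        \n"     /* store iff reservation still held */
            "       bnz     1b              \n"     /* reservation lost: retry */
            : "=&r"(temp)
            : "r"(m), "ir"(nr)
            : "cc");
    }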
 
 /*
@@ -108,91 +74,37 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *m)
  * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
  * and the old value of the bit is returned
  */
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old, temp;
-
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       /*
-        * Explicit full memory barrier needed before/after as
-        * LLOCK/SCOND themselves don't provide any such semantics
-        */
-       smp_mb();
-
-       __asm__ __volatile__(
-       "1:     llock   %0, [%2]        \n"
-       "       bset    %1, %0, %3      \n"
-       "       scond   %1, [%2]        \n"
-       "       bnz     1b              \n"
-       : "=&r"(old), "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
-
-       smp_mb();
-
-       return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned int old, temp;
-
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "1:     llock   %0, [%2]        \n"
-       "       bclr    %1, %0, %3      \n"
-       "       scond   %1, [%2]        \n"
-       "       bnz     1b              \n"
-       : "=&r"(old), "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
-
-       smp_mb();
-
-       return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned int old, temp;
-
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "1:     llock   %0, [%2]        \n"
-       "       bxor    %1, %0, %3      \n"
-       "       scond   %1, [%2]        \n"
-       "       bnz     1b              \n"
-       : "=&r"(old), "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
-
-       smp_mb();
-
-       return (old & (1 << nr)) != 0;
+#define TEST_N_BIT_OP(op, c_op, asm_op)                                        \
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                      \
+       unsigned long old, temp;                                        \
+                                                                       \
+       m += nr >> 5;                                                   \
+                                                                       \
+       nr &= 0x1f;                                                     \
+                                                                       \
+       /*                                                              \
+        * Explicit full memory barrier needed before/after as          \
+        * LLOCK/SCOND themselves don't provide any such semantics      \
+        */                                                             \
+       smp_mb();                                                       \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       "1:     llock       %0, [%2]    \n"                             \
+       "       " #asm_op " %1, %0, %3  \n"                             \
+       "       scond       %1, [%2]    \n"                             \
+       "       bnz         1b          \n"                             \
+       : "=&r"(old), "=&r"(temp)                                       \
+       : "r"(m), "ir"(nr)                                              \
+       : "cc");                                                        \
+                                                                       \
+       smp_mb();                                                       \
+                                                                       \
+       return (old & (1 << nr)) != 0;                                  \
 }
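
A minimal usage sketch (the flag word and bit number below are hypothetical, not part of this patch): the return value reports the bit's prior state, which is what lets callers build try-lock style constructs on top of test_and_set_bit():

    static unsigned long dev_flags;         /* hypothetical flag word */
    #define DEV_BUSY        0               /* hypothetical bit number */

    if (test_and_set_bit(DEV_BUSY, &dev_flags))
            return -EBUSY;                  /* bit was already set: device in use */
    /* bit was clear and is now set; released later via clear_bit(DEV_BUSY, &dev_flags) */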
 
 #else  /* !CONFIG_ARC_HAS_LLSC */
 
-#include <asm/smp.h>
-
 /*
  * Non hardware assisted Atomic-R-M-W
  * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
@@ -209,111 +121,37 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
  *             at compile time)
  */
 
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       temp = *m;
-       *m = temp | (1UL << nr);
-
-       bitops_unlock(flags);
+#define BIT_OP(op, c_op, asm_op)                                       \
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                      \
+       unsigned long temp, flags;                                      \
+       m += nr >> 5;                                                   \
+                                                                       \
+       /*                                                              \
+        * spin lock/unlock provide the needed smp_mb() before/after    \
+        */                                                             \
+       bitops_lock(flags);                                             \
+                                                                       \
+       temp = *m;                                                      \
+       *m = temp c_op (1UL << (nr & 0x1f));                            \
+                                                                       \
+       bitops_unlock(flags);                                           \
 }
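
bitops_lock()/bitops_unlock() come from asm/smp.h (included at the top of the file when !CONFIG_ARC_HAS_LLSC). The sketch below only illustrates the locking model the comment describes, not the exact ARC definitions: plain IRQ disabling on UP, a single spinlock taken with IRQs off on SMP:

    /* Illustrative model only; the real macros live in arch/arc/include/asm/smp.h */
    #ifdef CONFIG_SMP
    extern arch_spinlock_t smp_bitops_lock;         /* assumed lock name */
    #define bitops_lock(flags)      do {                            \
            local_irq_save(flags);                                  \
            arch_spin_lock(&smp_bitops_lock);                       \
    } while (0)
    #define bitops_unlock(flags)    do {                            \
            arch_spin_unlock(&smp_bitops_lock);                     \
            local_irq_restore(flags);                               \
    } while (0)
    #else
    #define bitops_lock(flags)      local_irq_save(flags)
    #define bitops_unlock(flags)    local_irq_restore(flags)
    #endif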
 
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       temp = *m;
-       *m = temp & ~(1UL << nr);
-
-       bitops_unlock(flags);
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       temp = *m;
-       *m = temp ^ (1UL << nr);
-
-       bitops_unlock(flags);
-}
-
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       /*
-        * spin lock/unlock provide the needed smp_mb() before/after
-        */
-       bitops_lock(flags);
-
-       old = *m;
-       *m = old | (1 << nr);
-
-       bitops_unlock(flags);
-
-       return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       old = *m;
-       *m = old & ~(1 << nr);
-
-       bitops_unlock(flags);
-
-       return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       old = *m;
-       *m = old ^ (1 << nr);
-
-       bitops_unlock(flags);
-
-       return (old & (1 << nr)) != 0;
+#define TEST_N_BIT_OP(op, c_op, asm_op)                                        \
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                      \
+       unsigned long old, flags;                                       \
+       m += nr >> 5;                                                   \
+                                                                       \
+       bitops_lock(flags);                                             \
+                                                                       \
+       old = *m;                                                       \
+       *m = old c_op (1UL << (nr & 0x1f));                             \
+                                                                       \
+       bitops_unlock(flags);                                           \
+                                                                       \
+       return (old & (1UL << (nr & 0x1f))) != 0;                       \
 }
 
 #endif /* CONFIG_ARC_HAS_LLSC */
@@ -322,86 +160,45 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
  * Non atomic variants
  **************************************/
 
-static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       temp = *m;
-       *m = temp | (1UL << nr);
+#define __BIT_OP(op, c_op, asm_op)                                     \
+static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)   \
+{                                                                      \
+       unsigned long temp;                                             \
+       m += nr >> 5;                                                   \
+                                                                       \
+       temp = *m;                                                      \
+       *m = temp c_op (1UL << (nr & 0x1f));                            \
 }
 
-static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       temp = *m;
-       *m = temp & ~(1UL << nr);
+#define __TEST_N_BIT_OP(op, c_op, asm_op)                              \
+static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                      \
+       unsigned long old;                                              \
+       m += nr >> 5;                                                   \
+                                                                       \
+       old = *m;                                                       \
+       *m = old c_op (1UL << (nr & 0x1f));                             \
+                                                                       \
+       return (old & (1UL << (nr & 0x1f))) != 0;                       \
 }
 
-static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       temp = *m;
-       *m = temp ^ (1UL << nr);
-}
-
-static inline int
-__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       old = *m;
-       *m = old | (1 << nr);
-
-       return (old & (1 << nr)) != 0;
-}
-
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       old = *m;
-       *m = old & ~(1 << nr);
-
-       return (old & (1 << nr)) != 0;
-}
-
-static inline int
-__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       old = *m;
-       *m = old ^ (1 << nr);
-
-       return (old & (1 << nr)) != 0;
-}
+#define BIT_OPS(op, c_op, asm_op)                                      \
+                                                                       \
+       /* set_bit(), clear_bit(), change_bit() */                      \
+       BIT_OP(op, c_op, asm_op)                                        \
+                                                                       \
+       /* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
+       TEST_N_BIT_OP(op, c_op, asm_op)                                 \
+                                                                       \
+       /* __set_bit(), __clear_bit(), __change_bit() */                \
+       __BIT_OP(op, c_op, asm_op)                                      \
+                                                                       \
+       /* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
+       __TEST_N_BIT_OP(op, c_op, asm_op)
+
+BIT_OPS(set, |, bset)
+BIT_OPS(clear, & ~, bclr)
+BIT_OPS(change, ^, bxor)
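
Each BIT_OPS() invocation above thus stamps out four functions from one op/c_op/asm_op triple; for example, BIT_OPS(set, |, bset) roughly produces the following static inlines (signatures only, bodies as defined by the macros above):

    static inline void set_bit(unsigned long nr, volatile unsigned long *m);             /* atomic */
    static inline int  test_and_set_bit(unsigned long nr, volatile unsigned long *m);    /* atomic, returns old bit */
    static inline void __set_bit(unsigned long nr, volatile unsigned long *m);           /* non-atomic */
    static inline int  __test_and_set_bit(unsigned long nr, volatile unsigned long *m);  /* non-atomic, returns old bit */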
 
 /*
  * This routine doesn't need to be atomic.
@@ -413,10 +210,7 @@ test_bit(unsigned int nr, const volatile unsigned long *addr)
 
        addr += nr >> 5;
 
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       mask = 1 << nr;
+       mask = 1UL << (nr & 0x1f);
 
        return ((mask & *addr) != 0);
 }
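
The word/bit split used throughout (nr >> 5 selects the 32-bit word, nr & 0x1f the bit inside it) can be sanity-checked with a small worked example; the numbers are illustrative:

    unsigned long map[2] = { 0, 0 };

    /* nr = 40: word index 40 >> 5 = 1, bit index 40 & 0x1f = 8 */
    __set_bit(40, map);             /* map[1] becomes 1UL << 8 == 0x100 */
    /* test_bit(40, map) now returns nonzero; test_bit(8, map) looks at bit 8 of map[0] and is 0 */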