Add the rt linux 4.1.3-rt3 as base
diff --git a/kernel/include/asm-generic/atomic.h b/kernel/include/asm-generic/atomic.h
new file mode 100644 (file)
index 0000000..1973ad2
--- /dev/null
+++ b/kernel/include/asm-generic/atomic.h
@@ -0,0 +1,187 @@
+/*
+ * Generic C implementation of atomic counter operations. Usable on
+ * UP systems only. Do not include in machine independent code.
+ *
+ * Originally implemented for MN10300.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_ATOMIC_H
+#define __ASM_GENERIC_ATOMIC_H
+
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+/*
+ * atomic_$op() - $op integer to atomic variable
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does not strictly guarantee a memory barrier;
+ * use smp_mb__{before,after}_atomic() where ordering is required.
+ */
+
+/*
+ * atomic_$op_return() - $op integer to atomic variable and returns the result
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does imply a full memory barrier.
+ */
+
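Note the split above: the plain atomic_$op() helpers give no ordering guarantee, while the _return forms act as full barriers. A minimal, purely illustrative sketch of how a caller pairs the non-returning form with the barrier helpers from asm/barrier.h (publish_item() and its arguments are hypothetical, not part of this header):

static inline void publish_item(int *ready, atomic_t *pending)
{
        *ready = 1;                     /* plain store that must become visible first */
        smp_mb__before_atomic();        /* order the store before the unordered RMW   */
        atomic_inc(pending);            /* no barrier implied by atomic_inc() itself  */
}

With atomic_inc_return(pending) instead, the explicit barrier call would be unnecessary, since the _return variant implies a full barrier.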
+#ifdef CONFIG_SMP
+
+/* we can build all atomic primitives from cmpxchg */
+
+#define ATOMIC_OP(op, c_op)                                            \
+static inline void atomic_##op(int i, atomic_t *v)                     \
+{                                                                      \
+       int c, old;                                                     \
+                                                                       \
+       c = v->counter;                                                 \
+       while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)          \
+               c = old;                                                \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op)                                     \
+static inline int atomic_##op##_return(int i, atomic_t *v)             \
+{                                                                      \
+       int c, old;                                                     \
+                                                                       \
+       c = v->counter;                                                 \
+       while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)          \
+               c = old;                                                \
+                                                                       \
+       return c c_op i;                                                \
+}
+
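For reference, the SMP-side ATOMIC_OP_RETURN(add, +) instantiation used further down for atomic_add_return() expands, whitespace aside, to the classic cmpxchg() retry loop shown here:

static inline int atomic_add_return(int i, atomic_t *v)
{
        int c, old;

        c = v->counter;
        /* retry until no other CPU changed the counter between the read and the cmpxchg */
        while ((old = cmpxchg(&v->counter, c, c + i)) != c)
                c = old;

        return c + i;
}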
+#else
+
+#include <linux/irqflags.h>
+
+#define ATOMIC_OP(op, c_op)                                            \
+static inline void atomic_##op(int i, atomic_t *v)                     \
+{                                                                      \
+       unsigned long flags;                                            \
+                                                                       \
+       raw_local_irq_save(flags);                                      \
+       v->counter = v->counter c_op i;                                 \
+       raw_local_irq_restore(flags);                                   \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op)                                     \
+static inline int atomic_##op##_return(int i, atomic_t *v)             \
+{                                                                      \
+       unsigned long flags;                                            \
+       int ret;                                                        \
+                                                                       \
+       raw_local_irq_save(flags);                                      \
+       ret = (v->counter = v->counter c_op i);                         \
+       raw_local_irq_restore(flags);                                   \
+                                                                       \
+       return ret;                                                     \
+}
+
+#endif /* CONFIG_SMP */
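On a UP kernel the only way v->counter can change underneath these helpers is an interrupt (or the preemption it enables) on the same CPU, so masking local interrupts around the plain read-modify-write is already atomic. The UP-side ATOMIC_OP(and, &) instantiation used for atomic_and() below therefore expands to roughly:

static inline void atomic_and(int i, atomic_t *v)
{
        unsigned long flags;

        raw_local_irq_save(flags);      /* no interrupt can touch v->counter now */
        v->counter = v->counter & i;
        raw_local_irq_restore(flags);
}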
+
+#ifndef atomic_add_return
+ATOMIC_OP_RETURN(add, +)
+#endif
+
+#ifndef atomic_sub_return
+ATOMIC_OP_RETURN(sub, -)
+#endif
+
+#ifndef atomic_clear_mask
+ATOMIC_OP(and, &)
+#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
+#endif
+
+#ifndef atomic_set_mask
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+ATOMIC_OP(or, |)
+#define atomic_set_mask(i, v)  atomic_or((i), (v))
+#endif
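The two mask helpers are thin wrappers over the bitwise ops generated just above: setting bits goes through atomic_or(), clearing bits through atomic_and() with the mask inverted. A small usage sketch (toggle_bit2() and its flags counter are hypothetical):

static void toggle_bit2(void)
{
        static atomic_t flags = ATOMIC_INIT(0);

        atomic_set_mask(0x4, &flags);   /* expands to atomic_or(0x4, &flags)   */
        atomic_clear_mask(0x4, &flags); /* expands to atomic_and(~0x4, &flags) */
}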
+
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc..
+ */
+
+#define ATOMIC_INIT(i) { (i) }
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+#ifndef atomic_read
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#endif
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+#include <linux/irqflags.h>
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+       return atomic_add_return(i, v) < 0;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+       atomic_add_return(i, v);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+       atomic_sub_return(i, v);
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+       atomic_add_return(1, v);
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+       atomic_sub_return(1, v);
+}
+
+#define atomic_dec_return(v)           atomic_sub_return(1, (v))
+#define atomic_inc_return(v)           atomic_add_return(1, (v))
+
+#define atomic_sub_and_test(i, v)      (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v)         (atomic_dec_return(v) == 0)
+#define atomic_inc_and_test(v)         (atomic_inc_return(v) == 0)
+
+#define atomic_xchg(ptr, v)            (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new)    (cmpxchg(&((v)->counter), (old), (new)))
+
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+               c = old;
+       return c;
+}
+
+#endif /* __ASM_GENERIC_ATOMIC_H */
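__atomic_add_unless() returns the value it observed before any add, which lets callers detect whether the add actually happened; <linux/atomic.h> builds atomic_add_unless() and atomic_inc_not_zero() on top of it in exactly that way. A usage sketch under that assumption (try_get_ref() is a hypothetical helper, not part of this header):

static inline int try_get_ref(atomic_t *refcount)
{
        /* add 1 unless the count is already 0; seeing 0 means the object is dead */
        return __atomic_add_unless(refcount, 1, 0) != 0;
}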