Add RT Linux 4.1.3-rt3 as the base kernel
[kvmfornfv.git] kernel/include/linux/lockref.h
diff --git a/kernel/include/linux/lockref.h b/kernel/include/linux/lockref.h
new file mode 100644 (file)
index 0000000..b10b122
--- /dev/null
@@ -0,0 +1,51 @@
+#ifndef __LINUX_LOCKREF_H
+#define __LINUX_LOCKREF_H
+
+/*
+ * Locked reference counts.
+ *
+ * These are different from just plain atomic refcounts in that they
+ * are atomic with respect to the spinlock that goes with them.  In
+ * particular, there can be implementations that don't actually get
+ * the spinlock for the common decrement/increment operations, but they
+ * still have to check that the operation is done semantically as if
+ * the spinlock had been taken (using a cmpxchg operation that covers
+ * both the lock and the count word, or using memory transactions, for
+ * example).
+ */
+
+#include <linux/spinlock.h>
+#include <generated/bounds.h>
+
+#define USE_CMPXCHG_LOCKREF \
+       (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
+        IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
+
+struct lockref {
+       union {
+#if USE_CMPXCHG_LOCKREF
+               aligned_u64 lock_count;
+#endif
+               struct {
+                       spinlock_t lock;
+                       int count;
+               };
+       };
+};
+
+extern void lockref_get(struct lockref *);
+extern int lockref_put_return(struct lockref *);
+extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_get_or_lock(struct lockref *);
+extern int lockref_put_or_lock(struct lockref *);
+
+extern void lockref_mark_dead(struct lockref *);
+extern int lockref_get_not_dead(struct lockref *);
+
+/* Must be called under spinlock for reliable results */
+static inline int __lockref_is_dead(const struct lockref *l)
+{
+       return ((int)l->count < 0);
+}
+
+#endif /* __LINUX_LOCKREF_H */
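
The header comment describes the lockless fast path only in prose. Below is a simplified sketch of that idea, assuming the usual 64-bit cmpxchg over the combined lock/count word; the function name lockref_inc_fastpath_sketch is made up for illustration, and this is not the actual lib/lockref.c implementation, which retries in a loop and covers more cases:

#include <linux/lockref.h>

/*
 * Simplified illustration only: snapshot the combined {lock, count}
 * word, and only if the lock half is observed unlocked, try to
 * install the incremented count with a single 64-bit cmpxchg, so the
 * update is atomic with respect to the spinlock without taking it.
 */
static inline int lockref_inc_fastpath_sketch(struct lockref *lockref)
{
#if USE_CMPXCHG_LOCKREF
	struct lockref old, new;

	old.lock_count = READ_ONCE(lockref->lock_count);
	if (!spin_is_locked(&old.lock)) {
		new = old;
		new.count++;
		/* One atomic op covers both the lock and the count word. */
		if (cmpxchg64(&lockref->lock_count, old.lock_count,
			      new.lock_count) == old.lock_count)
			return 1;	/* count bumped without the lock */
	}
#endif
	return 0;	/* fast path not taken; caller falls back to the lock */
}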
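
As a usage sketch of the declared API, assuming the usual lib/lockref.c return conventions (lockref_get_not_dead() returns non-zero if it took a reference; lockref_put_or_lock() returns non-zero if it dropped the count, or zero with the spinlock held when this was the last reference); struct foo, foo_get() and foo_put() are hypothetical names used only for this example:

#include <linux/lockref.h>
#include <linux/slab.h>

/* Hypothetical refcounted object embedding a lockref. */
struct foo {
	struct lockref ref;
	/* ... payload ... */
};

/* Take a reference unless the object has already been marked dead. */
static int foo_get(struct foo *f)
{
	return lockref_get_not_dead(&f->ref);
}

/* Drop a reference; tear the object down when the last one goes away. */
static void foo_put(struct foo *f)
{
	/* Fast path: drop the count, ideally without touching the spinlock. */
	if (lockref_put_or_lock(&f->ref))
		return;

	/*
	 * Slow path: we now hold f->ref.lock and ours was the last
	 * reference.  Mark the lockref dead so concurrent
	 * lockref_get_not_dead() callers fail, then free the object.
	 */
	lockref_mark_dead(&f->ref);
	spin_unlock(&f->ref.lock);
	kfree(f);
}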