Upgrade to 4.4.50-rt62
kvmfornfv.git: kernel/include/asm-generic/qspinlock.h
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
 * @lock : Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
#ifndef queued_spin_unlock_wait
extern void queued_spin_unlock_wait(struct qspinlock *lock);
#endif
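
/*
 * Usage sketch (illustrative only, not part of the original header; 'obj'
 * is a hypothetical structure embedding a qspinlock): spin until whoever
 * holds the lock right now has dropped it, without taking the lock
 * ourselves.
 *
 *        queued_spin_unlock_wait(&obj->lock);
 */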

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
        /*
         * See queued_spin_unlock_wait().
         *
         * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
         * isn't immediately observable.
         */
        return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code to avoid lock stealing by the lockref
 *      code and change things underneath the lock. This also allows some
 *      optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
        return !atomic_read(&lock.val);
}
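
/*
 * Illustration (a sketch under stated assumptions, not from the original
 * header; try_lockref_style_fastpath() is made up for the example): this
 * helper takes the lock by value, so callers such as the lockref code can
 * test a snapshot of the word without touching the lock again.  Any nonzero
 * val reads as "not unlocked", including a queued waiter whose locked byte
 * happens to be clear.
 *
 *        struct qspinlock snap = READ_ONCE(*lock);
 *
 *        if (queued_spin_value_unlocked(snap))
 *                try_lockref_style_fastpath();
 */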

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
        return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
        if (!atomic_read(&lock->val) &&
           (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
                return 1;
        return 0;
}
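
/*
 * Usage sketch (hypothetical caller; 'obj' and do_work_locked() are made up
 * for the example): the trylock is one read of ->val plus one cmpxchg, so
 * it never queues and never spins.
 *
 *        if (queued_spin_trylock(&obj->lock)) {
 *                do_work_locked(obj);
 *                queued_spin_unlock(&obj->lock);
 *        }
 */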

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
        u32 val;

        val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
        if (likely(val == 0))
                return;
        queued_spin_lock_slowpath(lock, val);
}
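
/*
 * Calling-pattern sketch (illustrative only; 'obj' is hypothetical): the
 * cmpxchg above is the uncontended fast path; any nonzero val at that point
 * means the lock is held or has waiters, and the CPU drops into
 * queued_spin_lock_slowpath() to spin and/or queue.
 *
 *        queued_spin_lock(&obj->lock);
 *        ... critical section ...
 *        queued_spin_unlock(&obj->lock);
 */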

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
        /*
         * smp_mb__before_atomic() in order to guarantee release semantics
         */
        smp_mb__before_atomic();
        atomic_sub(_Q_LOCKED_VAL, &lock->val);
}
#endif
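
/*
 * Note (a sketch modelled on the x86 override; treat it as an assumption
 * rather than part of this file): an architecture whose stores can provide
 * release semantics directly may supply a cheaper unlock before including
 * this header, clearing just the locked byte:
 *
 *        #define queued_spin_unlock queued_spin_unlock
 *        static inline void queued_spin_unlock(struct qspinlock *lock)
 *        {
 *                smp_store_release((u8 *)lock, 0);
 *        }
 */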

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
        return false;
}
#endif
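
/*
 * Override sketch (loosely modelled on the x86 implementation; the details
 * are an assumption here): a virtualized architecture can replace this stub
 * so that guests without paravirt qspinlock support fall back to a simple
 * test-and-set loop rather than queueing behind a possibly preempted vCPU:
 *
 *        static inline bool virt_spin_lock(struct qspinlock *lock)
 *        {
 *                if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 *                        return false;
 *                do {
 *                        while (atomic_read(&lock->val) != 0)
 *                                cpu_relax();
 *                } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 *                return true;
 *        }
 */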

/*
 * Initializer
 */
#define __ARCH_SPIN_LOCK_UNLOCKED       { ATOMIC_INIT(0) }

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)          queued_spin_is_locked(l)
#define arch_spin_is_contended(l)       queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)     queued_spin_value_unlocked(l)
#define arch_spin_lock(l)               queued_spin_lock(l)
#define arch_spin_trylock(l)            queued_spin_trylock(l)
#define arch_spin_unlock(l)             queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f)      queued_spin_lock(l)
#define arch_spin_unlock_wait(l)        queued_spin_unlock_wait(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */
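
/*
 * Consumption sketch (illustrative; the exact file layout is an assumption):
 * an architecture opts in by providing an asm/qspinlock.h that defines any
 * overrides (queued_spin_unlock, virt_spin_lock, ...) and then includes
 * this generic header, at which point the arch_spin_*() macros above become
 * its spinlock implementation.
 *
 *        (arch/<arch>/include/asm/qspinlock.h)
 *        #include <asm-generic/qspinlock.h>
 */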