/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */
#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>
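/*
 * Lock initializers: set up the debug fields (magic, owner, owner_cpu)
 * and, under CONFIG_DEBUG_LOCK_ALLOC, register the lock class with lockdep.
 * Callers normally reach these through the spin_lock_init()/rwlock_init()
 * wrappers when spinlock debugging is configured in.
 */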
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
			  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__raw_spin_lock_init);
#ifndef CONFIG_PREEMPT_RT_FULL
void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);
#endif
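/*
 * spin_dump() reports the failing check together with the lock's magic
 * value, its recorded owner task and owner CPU, and a stack trace.
 */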
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = NULL;

	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
		owner = lock->owner;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
		lock, lock->magic,
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		lock->owner_cpu);
	dump_stack();
}
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	spin_dump(lock, msg);
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
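/*
 * Pre-acquire sanity checks: the magic value must be intact, and neither
 * the current task nor the current CPU may already own the lock (either
 * one would be a self-deadlock).
 */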
static inline void debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
						lock, "cpu recursion");
}
static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
						lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
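/*
 * Slow path taken when the initial trylock in do_raw_spin_lock() fails:
 * keep retrying with a short delay for roughly one second
 * (loops_per_jiffy * HZ delay iterations), then report a suspected lockup
 * and fall through to the plain arch lock.
 */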
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;

	for (i = 0; i < loops; i++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * The trylock above was causing a livelock. Give the lower level arch
	 * specific lock code a chance to acquire the lock. We have already
	 * printed a warning/backtrace at this point. The non-debug arch
	 * specific code might actually succeed in acquiring the lock. If it is
	 * not successful, the end-result is the same - there is no forward
	 * progress.
	 */
	arch_spin_lock(&lock->raw_lock);
}
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}
int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}
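/*
 * rwlock debugging. This whole block is compiled out when
 * CONFIG_PREEMPT_RT_FULL is set, since the RT patch set provides its own
 * rwlock implementation.
 */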
#ifndef CONFIG_PREEMPT_RT_FULL
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}
#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
#if 0	/* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;

	for (i = 0; i < loops; i++) {
		if (arch_read_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
			"%s/%d, %p\n",
		raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}
#endif
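/*
 * Read-side entry points only verify the magic value: a rwlock can be
 * held by multiple readers at once, so there is no single owner to track.
 */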
void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}
int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}
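/*
 * The write side is exclusive, so it mirrors the spinlock owner tracking:
 * record the owning task and CPU on acquire, check and clear them on
 * release.
 */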
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
						lock, "cpu recursion");
}
static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
						lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
#if 0	/* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;

	for (i = 0; i < loops; i++) {
		if (arch_write_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
			"%s/%d, %p\n",
		raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}
#endif
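/*
 * With the watchdog above disabled, do_raw_write_lock() goes straight to
 * the arch lock after the pre-acquire checks instead of spinning on
 * trylock with a timeout.
 */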
void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}
int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}

#endif