 * Real-Time Preemption Support
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * historic credit for proving that Linux spinlocks can be implemented via
 * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
 * and others) who prototyped it on 2.4 and did lots of comparative
 * research and analysis; TimeSys, for proving that you can implement a
 * fully preemptible kernel via the use of IRQ threading and mutexes;
 * Bill Huey for persuasively arguing on lkml that the mutex model is the
 * right one; and to MontaVista, who ported pmutexes to 2.6.
 *
 * This code is a from-scratch implementation and is not based on pmutexes,
 * but the idea of converting spinlocks to mutexes is used here too.
 *
 * lock debugging, locking tree, deadlock detection:
 *
 *  Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
 *  Released under the General Public License (GPL).
 *
 * Includes portions of the generic R/W semaphore implementation from:
 *
 *  Copyright (c) 2001 David Howells (dhowells@redhat.com).
 *  - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 *  - Derived also from comments by Linus
 *
 * Pending ownership of locks and ownership stealing:
 *
 *  Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
 *
 *  (also by Steven Rostedt)
 *  - Converted single pi_lock to individual task locks.
 *
 * By Esben Nielsen:
 *    Doing priority inheritance with help of the scheduler.
 *
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  - major rework based on Esben Nielsen's initial patch
 *  - replaced thread_info references by task_struct refs
 *  - removed task->pending_owner dependency
 *  - BKL drop/reacquire for semaphore style locks to avoid deadlocks
 *    in the scheduler return path as discussed with Steven Rostedt
 *
 *  Copyright (C) 2006, Kihon Technologies Inc.
 *    Steven Rostedt <rostedt@goodmis.org>
 *  - debugged and patched Thomas Gleixner's rework.
 *  - added back the cmpxchg to the rework.
 *  - turned atomic require back on for SMP.
 */

#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/plist.h>
#include <linux/futex.h>
#include <linux/hrtimer.h>

#include "rtmutex_common.h"

/*
 * struct mutex functions
 */
void __mutex_do_init(struct mutex *mutex, const char *name,
		     struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
	lockdep_init_map(&mutex->dep_map, name, key, 0);
#endif
	mutex->lock.save_state = 0;
}
EXPORT_SYMBOL(__mutex_do_init);

void __lockfunc _mutex_lock(struct mutex *lock)
{
	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	rt_mutex_lock(&lock->lock);
}
EXPORT_SYMBOL(_mutex_lock);

int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	ret = rt_mutex_lock_interruptible(&lock->lock);
	if (ret)
		mutex_release(&lock->dep_map, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(_mutex_lock_interruptible);
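
/*
 * Illustrative, hypothetical usage sketch: callers of
 * mutex_lock_interruptible(), which the RT mutex headers are expected to
 * map onto _mutex_lock_interruptible() above, must check the return value,
 * since the sleep on the underlying rt_mutex can be interrupted by a
 * signal. The example_dev type and example_set_cfg() helper are invented
 * for illustration only; the block is compile-guarded.
 */
#if 0
static int example_set_cfg(struct example_dev *dev, u32 val)
{
	int ret;

	ret = mutex_lock_interruptible(&dev->cfg_lock);
	if (ret)
		return ret;	/* typically -EINTR: signal arrived, lock not taken */

	dev->cfg = val;
	mutex_unlock(&dev->cfg_lock);
	return 0;
}
#endif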

int __lockfunc _mutex_lock_killable(struct mutex *lock)
{
	int ret;

	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	ret = rt_mutex_lock_killable(&lock->lock);
	if (ret)
		mutex_release(&lock->dep_map, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(_mutex_lock_killable);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
{
	mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
	rt_mutex_lock(&lock->lock);
}
EXPORT_SYMBOL(_mutex_lock_nested);

void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
	rt_mutex_lock(&lock->lock);
}
EXPORT_SYMBOL(_mutex_lock_nest_lock);

int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
{
	int ret;

	mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
	ret = rt_mutex_lock_interruptible(&lock->lock);
	if (ret)
		mutex_release(&lock->dep_map, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(_mutex_lock_interruptible_nested);

int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
{
	int ret;

	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	ret = rt_mutex_lock_killable(&lock->lock);
	if (ret)
		mutex_release(&lock->dep_map, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(_mutex_lock_killable_nested);
#endif

int __lockfunc _mutex_trylock(struct mutex *lock)
{
	int ret = rt_mutex_trylock(&lock->lock);

	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(_mutex_trylock);

void __lockfunc _mutex_unlock(struct mutex *lock)
{
	mutex_release(&lock->dep_map, 1, _RET_IP_);
	rt_mutex_unlock(&lock->lock);
}
EXPORT_SYMBOL(_mutex_unlock);

/*
 * rwlock_t functions
 */
int __lockfunc rt_write_trylock(rwlock_t *rwlock)
{
	int ret;

	ret = rt_mutex_trylock(&rwlock->lock);
	if (ret)
		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(rt_write_trylock);

int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
{
	int ret;

	/* interrupts are not actually disabled here, so there are no flags to save */
	*flags = 0;
	ret = rt_write_trylock(rwlock);
	return ret;
}
EXPORT_SYMBOL(rt_write_trylock_irqsave);

int __lockfunc rt_read_trylock(rwlock_t *rwlock)
{
	struct rt_mutex *lock = &rwlock->lock;
	int ret = 1;

	/*
	 * recursive read locks succeed when current owns the lock,
	 * but not when read_depth == 0 which means that the lock is
	 * write locked.
	 */
	if (rt_mutex_owner(lock) != current) {
		ret = rt_mutex_trylock(lock);
		if (ret)
			rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
	} else if (!rwlock->read_depth) {
		ret = 0;
	}

	if (ret)
		rwlock->read_depth++;
	return ret;
}
EXPORT_SYMBOL(rt_read_trylock);

void __lockfunc rt_write_lock(rwlock_t *rwlock)
{
	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
	__rt_spin_lock(&rwlock->lock);
}
EXPORT_SYMBOL(rt_write_lock);

void __lockfunc rt_read_lock(rwlock_t *rwlock)
{
	struct rt_mutex *lock = &rwlock->lock;

	/*
	 * recursive read locks succeed when current owns the lock
	 */
	if (rt_mutex_owner(lock) != current) {
		rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
		__rt_spin_lock(lock);
	}
	rwlock->read_depth++;
}
EXPORT_SYMBOL(rt_read_lock);

void __lockfunc rt_write_unlock(rwlock_t *rwlock)
{
	/* NOTE: we always pass in '1' for nested, for simplicity */
	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
	__rt_spin_unlock(&rwlock->lock);
}
EXPORT_SYMBOL(rt_write_unlock);

void __lockfunc rt_read_unlock(rwlock_t *rwlock)
{
	/* Release the lock only when read_depth is down to 0 */
	if (--rwlock->read_depth == 0) {
		rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
		__rt_spin_unlock(&rwlock->lock);
	}
}
EXPORT_SYMBOL(rt_read_unlock);
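
/*
 * Illustrative, hypothetical usage sketch: with this implementation an
 * rwlock_t is a single rt_mutex plus a per-owner read_depth counter, so
 * the task that already holds the lock for reading may take it for
 * reading again, and rt_read_unlock() only releases the rt_mutex once
 * read_depth drops back to zero. The lock and function names below are
 * invented, and the usual mapping of read_lock()/read_unlock() onto
 * rt_read_lock()/rt_read_unlock() is assumed; the block is compile-guarded.
 */
#if 0
static DEFINE_RWLOCK(example_lock);

static void example_recursive_read(void)
{
	read_lock(&example_lock);	/* takes the rt_mutex, read_depth = 1 */
	read_lock(&example_lock);	/* owner recursion: only read_depth++ */
	read_unlock(&example_lock);	/* read_depth 2 -> 1, lock still held */
	read_unlock(&example_lock);	/* read_depth 1 -> 0, rt_mutex released */
}
#endif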

unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
{
	rt_write_lock(rwlock);
	/* interrupts stay enabled, so there are no flags to hand back */
	return 0;
}
EXPORT_SYMBOL(rt_write_lock_irqsave);

unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
{
	rt_read_lock(rwlock);
	return 0;
}
EXPORT_SYMBOL(rt_read_lock_irqsave);

void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
	lockdep_init_map(&rwlock->dep_map, name, key, 0);
#endif
	rwlock->lock.save_state = 1;
	rwlock->read_depth = 0;
}
EXPORT_SYMBOL(__rt_rwlock_init);

/*
 * rw_semaphores
 */
void rt_up_write(struct rw_semaphore *rwsem)
{
	rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
	rt_mutex_unlock(&rwsem->lock);
}
EXPORT_SYMBOL(rt_up_write);

void __rt_up_read(struct rw_semaphore *rwsem)
{
	if (--rwsem->read_depth == 0)
		rt_mutex_unlock(&rwsem->lock);
}

void rt_up_read(struct rw_semaphore *rwsem)
{
	rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
	__rt_up_read(rwsem);
}
EXPORT_SYMBOL(rt_up_read);

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void rt_downgrade_write(struct rw_semaphore *rwsem)
{
	BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
	rwsem->read_depth = 1;
}
EXPORT_SYMBOL(rt_downgrade_write);
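
/*
 * Illustrative, hypothetical usage sketch: downgrade_write() keeps the
 * usual rwsem contract (held for write before the call, held for read
 * afterwards, released with up_read()). In this implementation the caller
 * simply remains the rt_mutex owner with read_depth = 1, so no other
 * reader task gets in until the lock is finally dropped. The names below
 * are invented and the usual mapping of the rwsem API onto the rt_*
 * functions is assumed; the block is compile-guarded.
 */
#if 0
static DECLARE_RWSEM(example_sem);

static void example_publish(void)
{
	down_write(&example_sem);
	/* ... update the protected data ... */
	downgrade_write(&example_sem);	/* keep the lock, but only for reading */
	/* ... keep using the data under the read side ... */
	up_read(&example_sem);		/* read_depth 1 -> 0, rt_mutex released */
}
#endif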

int rt_down_write_trylock(struct rw_semaphore *rwsem)
{
	int ret = rt_mutex_trylock(&rwsem->lock);

	if (ret)
		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(rt_down_write_trylock);

void rt_down_write(struct rw_semaphore *rwsem)
{
	rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
	rt_mutex_lock(&rwsem->lock);
}
EXPORT_SYMBOL(rt_down_write);

void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
{
	rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
	rt_mutex_lock(&rwsem->lock);
}
EXPORT_SYMBOL(rt_down_write_nested);

void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
			       struct lockdep_map *nest)
{
	rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_);
	rt_mutex_lock(&rwsem->lock);
}
EXPORT_SYMBOL(rt_down_write_nested_lock);

int rt__down_read_trylock(struct rw_semaphore *rwsem)
{
	struct rt_mutex *lock = &rwsem->lock;
	int ret = 1;

	/*
	 * recursive read locks succeed when current owns the rwsem,
	 * but not when read_depth == 0 which means that the rwsem is
	 * write locked.
	 */
	if (rt_mutex_owner(lock) != current)
		ret = rt_mutex_trylock(&rwsem->lock);
	else if (!rwsem->read_depth)
		ret = 0;

	if (ret)
		rwsem->read_depth++;
	return ret;
}

int rt_down_read_trylock(struct rw_semaphore *rwsem)
{
	int ret;

	ret = rt__down_read_trylock(rwsem);
	if (ret)
		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(rt_down_read_trylock);

void rt__down_read(struct rw_semaphore *rwsem)
{
	struct rt_mutex *lock = &rwsem->lock;

	if (rt_mutex_owner(lock) != current)
		rt_mutex_lock(&rwsem->lock);
	rwsem->read_depth++;
}
EXPORT_SYMBOL(rt__down_read);

static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
{
	rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
	rt__down_read(rwsem);
}

void rt_down_read(struct rw_semaphore *rwsem)
{
	__rt_down_read(rwsem, 0);
}
EXPORT_SYMBOL(rt_down_read);

void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
{
	__rt_down_read(rwsem, subclass);
}
EXPORT_SYMBOL(rt_down_read_nested);

void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
		     struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
	lockdep_init_map(&rwsem->dep_map, name, key, 0);
#endif
	rwsem->read_depth = 0;
	rwsem->lock.save_state = 0;
}
EXPORT_SYMBOL(__rt_rwsem_init);

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
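
/*
 * Illustrative, hypothetical usage sketch: the typical caller is a
 * reference-count drop where the final put must run its teardown under
 * the mutex, so it cannot race with a lookup that takes a new reference
 * under that same mutex. The example_obj layout and helper below are
 * invented for illustration; the block is compile-guarded.
 */
#if 0
static void example_put(struct example_obj *obj)
{
	if (!atomic_dec_and_mutex_lock(&obj->refcount, &obj->parent->list_lock))
		return;				/* not the last reference */

	/* last reference: unlink and free while holding the parent's mutex */
	list_del(&obj->node);
	mutex_unlock(&obj->parent->list_lock);
	kfree(obj);
}
#endif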