/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 * Adaptive Spinlocks:
 * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
 *				    and Peter Morreale,
 * Adaptive Spinlocks simplification:
 * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
 *
 * See Documentation/locking/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>
#include <linux/ww_mutex.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *			is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * while ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
 * we need to set bit 0 before looking at the lock, and the owner may be
 * NULL in this small window, hence this can be a transitional state.
 *
 * (**) There is a small window when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
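/*
 * For illustration (not from the original source): with a task_struct
 * at, say, 0xffff880012345600, "held with waiters" is stored as
 *
 *	lock->owner == (struct task_struct *)(0xffff880012345600 | 1);
 *
 * i.e. the owner pointer with RT_MUTEX_HAS_WAITERS (bit 0) set, which
 * is why a fast path cmpxchg against a plain task pointer fails as soon
 * as a waiter is queued.
 */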
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	if (rt_mutex_has_waiters(lock))
		return;

	/*
	 * The rbtree has no waiters enqueued, now make sure that the
	 * lock->owner still has the waiters bit set, otherwise the
	 * following can happen:
	 *
	 * CPU 0	CPU 1		CPU2
	 * l->owner=T1
	 *		rt_mutex_lock(l)
	 *		lock(l->lock)
	 *		l->owner = T1 | HAS_WAITERS;
	 *		enqueue(T2)
	 *		boost()
	 *		  unlock(l->lock)
	 *		block()
	 *
	 *				rt_mutex_lock(l)
	 *				lock(l->lock)
	 *				l->owner = T1 | HAS_WAITERS;
	 *				enqueue(T3)
	 *				boost()
	 *				  unlock(l->lock)
	 *				block()
	 *		signal(->T2)	signal(->T3)
	 *		lock(l->lock)
	 *		dequeue(T2)
	 *		deboost()
	 *		  unlock(l->lock)
	 *				lock(l->lock)
	 *				dequeue(T3)
	 *				 ==> wait list is empty
	 *				deboost()
	 *				 unlock(l->lock)
	 *		lock(l->lock)
	 *		fixup_rt_mutex_waiters()
	 *		  if (wait_list_empty(l)) {
	 *		    owner = l->owner & ~HAS_WAITERS;
	 *		    l->owner = owner
	 *		  }
	 *		unlock(l->lock)
	 *
	 *			lock(l->lock)
	 * rt_mutex_unlock(l)	fixup_rt_mutex_waiters()
	 *			  if (wait_list_empty(l)) {
	 *			    owner = l->owner & ~HAS_WAITERS;
	 *			    cmpxchg(l->owner, T1, NULL)
	 *			     ===> Success (l->owner = NULL)
	 *
	 *			    l->owner = owner
	 *			     ==> l->owner = T1
	 *			  }
	 *
	 * With the check for the waiter bit in place T3 on CPU2 will not
	 * overwrite. All tasks fiddling with the waiters bit are
	 * serialized by l->lock, so nothing else can modify the waiters
	 * bit. If the bit is set then nothing can change l->owner either
	 * so the simple RMW is safe. The cmpxchg() will simply fail if it
	 * happens in the middle of the RMW because the waiters bit is
	 * still set.
	 */
	owner = READ_ONCE(*p);
	if (owner & RT_MUTEX_HAS_WAITERS)
		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}

static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
{
	return waiter && waiter != PI_WAKEUP_INPROGRESS &&
		waiter != PI_REQUEUE_INPROGRESS;
}
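/*
 * For illustration (hedged, not from the original source):
 * PI_WAKEUP_INPROGRESS and PI_REQUEUE_INPROGRESS are assumed to be
 * small non-pointer sentinel constants stored in task->pi_blocked_on
 * during futex requeue-PI handling. They must never be dereferenced,
 * which is why every dereference of pi_blocked_on is gated by
 * rt_mutex_real_waiter().
 */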
/*
 * We can speed up the acquire/release, if there's no debugging state to be
 * set up.
 */
#ifndef CONFIG_DEBUG_RT_MUTEXES
# define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)
# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)

/*
 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
 * all future threads that attempt to [Rmw] the lock to the slowpath. As such
 * relaxed semantics suffice.
 */
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg_relaxed(p, owner,
				 owner | RT_MUTEX_HAS_WAITERS) != owner);
}

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
					unsigned long flags)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	/*
	 * If a new waiter comes in between the unlock and the cmpxchg
	 * we have two situations:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 * cmpxchg(p, owner, 0) == owner
	 *					mark_rt_mutex_waiters(lock);
	 *					acquire(lock);
	 * or:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					mark_rt_mutex_waiters(lock);
	 *
	 * cmpxchg(p, owner, 0) != owner
	 *					enqueue_waiter();
	 *					unlock(wait_lock);
	 * lock(wait_lock);
	 * wake waiter();
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					acquire(lock);
	 */
	return rt_mutex_cmpxchg_release(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg_relaxed(l,c,n)	(0)
# define rt_mutex_cmpxchg_acquire(l,c,n)	(0)
# define rt_mutex_cmpxchg_release(l,c,n)	(0)

static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
					unsigned long flags)
	__releases(lock->wait_lock)
{
	lock->owner = NULL;
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	return true;
}
#endif

static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
		     struct rt_mutex_waiter *right)
{
	if (left->prio < right->prio)
		return 1;

	/*
	 * If both waiters have dl_prio(), we check the deadlines of the
	 * associated tasks.
	 * If left waiter has a dl_prio(), and we didn't return 1 above,
	 * then right waiter has a dl_prio() too.
	 */
	if (dl_prio(left->prio))
		return dl_time_before(left->task->dl.deadline,
				      right->task->dl.deadline);

	return 0;
}
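/*
 * For illustration (not from the original source): two SCHED_DEADLINE
 * waiters share the same ->prio, so the prio test above cannot order
 * them. With left->task->dl.deadline at 100us and right->task->dl.deadline
 * at 200us (absolute times), dl_time_before() ranks the 100us waiter as
 * "less", i.e. higher priority: earliest deadline first.
 */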
static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &lock->waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		lock->waiters_leftmost = &waiter->tree_entry;

	rb_link_node(&waiter->tree_entry, parent, link);
	rb_insert_color(&waiter->tree_entry, &lock->waiters);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->tree_entry))
		return;

	if (lock->waiters_leftmost == &waiter->tree_entry)
		lock->waiters_leftmost = rb_next(&waiter->tree_entry);

	rb_erase(&waiter->tree_entry, &lock->waiters);
	RB_CLEAR_NODE(&waiter->tree_entry);
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &task->pi_waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		task->pi_waiters_leftmost = &waiter->pi_tree_entry;

	rb_link_node(&waiter->pi_tree_entry, parent, link);
	rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
		return;

	if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
		task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);

	rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

/*
 * Calculate task priority from the waiter tree priority
 *
 * Return task->normal_prio when the waiter tree is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->prio,
		   task->normal_prio);
}

struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return NULL;

	return task_top_pi_waiter(task)->task;
}

/*
 * Called by sched_setscheduler() to get the priority which will be
 * effective after the change.
 */
int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
{
	if (!task_has_pi_waiters(task))
		return newprio;

	if (task_top_pi_waiter(task)->task->prio <= newprio)
		return task_top_pi_waiter(task)->task->prio;
	return newprio;
}

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio || dl_prio(prio))
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Deadlock detection is conditional:
 *
 * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
 * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
 *
 * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
 * conducted independent of the detect argument.
 *
 * If the waiter argument is NULL this indicates the deboost path and
 * deadlock detection is disabled independent of the detect argument
 * and the config settings.
 */
static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
					  enum rtmutex_chainwalk chwalk)
{
	/*
	 * This is just a wrapper function for the following call,
	 * because debug_rt_mutex_detect_deadlock() smells like a magic
	 * debug feature and I wanted to keep the cond function in the
	 * main source file along with the comments instead of having
	 * two of the same in the headers.
	 */
	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}

static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
{
	if (waiter->savestate)
		wake_up_lock_sleeper(waiter->task);
	else
		wake_up_process(waiter->task);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;
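/*
 * Editorial note (hedged): this limit is typically exposed at runtime
 * as the kernel.max_lock_depth sysctl, so an administrator can adjust
 * it, e.g.:
 *
 *	sysctl -w kernel.max_lock_depth=2048
 */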
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
	return rt_mutex_real_waiter(p->pi_blocked_on) ?
		p->pi_blocked_on->lock : NULL;
}

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed
 * @chwalk:	do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just got its priority adjusted, and
 *		is waiting on a mutex)
 * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
 *		we dropped its pi_lock. Is never dereferenced, only used for
 *		comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		its priority to the mutex owner (can be NULL in the case
 *		depicted above or if the top waiter has gone away and we are
 *		actually deboosting the owner)
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 *
 * Chain walk basics and protection scope
 *
 * [R] refcount on task
 * [P] task->pi_lock held
 * [L] rtmutex->wait_lock held
 *
 * Step	Description				Protected by
 *	function arguments:
 *	@task					[R]
 *	@orig_lock if != NULL			@top_task is blocked on it
 *	@next_lock				Unprotected. Cannot be
 *						dereferenced. Only used for
 *						comparison.
 *	@orig_waiter if != NULL			@top_task is blocked on it
 *	@top_task				current, or in case of proxy
 *						locking protected by calling
 *						code
 *
 *	again:
 *	  loop_sanity_check();
 *	retry:
 * [1]	  lock(task->pi_lock);			[R] acquire [P]
 * [2]	  waiter = task->pi_blocked_on;		[P]
 * [3]	  check_exit_conditions_1();		[P]
 * [4]	  lock = waiter->lock;			[P]
 * [5]	  if (!try_lock(lock->wait_lock)) {	[P] try to acquire [L]
 *	    unlock(task->pi_lock);		release [P]
 *	    goto retry;
 *	  }
 * [6]	  check_exit_conditions_2();		[P] + [L]
 * [7]	  requeue_lock_waiter(lock, waiter);	[P] + [L]
 * [8]	  unlock(task->pi_lock);		release [P]
 *	  put_task_struct(task);		release [R]
 * [9]	  check_exit_conditions_3();		[L]
 * [10]	  task = owner(lock);			[L]
 *	  get_task_struct(task);		[L] acquire [R]
 *	  lock(task->pi_lock);			[L] acquire [P]
 * [11]	  requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
 * [12]	  check_exit_conditions_4();		[P] + [L]
 * [13]	  unlock(task->pi_lock);		release [P]
 *	  unlock(lock->wait_lock);		release [L]
 *	  goto again;
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      enum rtmutex_chainwalk chwalk,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex *next_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	struct rt_mutex_waiter *prerequeue_top_waiter;
	int ret = 0, depth = 0;
	struct rt_mutex *lock;
	bool detect_deadlock;
	bool requeue = true;

	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold a
	 * maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	/*
	 * We limit the lock chain length for each invocation.
	 */
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return -EDEADLK;
	}

	/*
	 * We are fully preemptible here and only hold the refcount on
	 * @task. So everything can have changed under us since the
	 * caller or our own code below (goto retry/again) dropped all
	 * locks.
	 */
 retry:
	/*
	 * [1] Task cannot go away as we did a get_task() before !
	 */
	raw_spin_lock_irq(&task->pi_lock);

	/*
	 * [2] Get the waiter on which @task is blocked on.
	 */
	waiter = task->pi_blocked_on;

	/*
	 * [3] check_exit_conditions_1() protected by task->pi_lock.
	 */

	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!rt_mutex_real_waiter(waiter))
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * We dropped all locks after taking a refcount on @task, so
	 * the task might have moved on in the lock chain or even left
	 * the chain completely and blocks now on an unrelated lock or
	 * on @orig_lock.
	 *
	 * We stored the lock on which @task was blocked in @next_lock,
	 * so we can detect the chain change.
	 */
	if (next_lock != waiter->lock)
		goto out_unlock_pi;

	/*
	 * Drop out, when the task has no waiters. Note,
	 * top_waiter can be NULL, when we are in the deboosting
	 * mode!
	 */
	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;
		/*
		 * If deadlock detection is off, we stop here if we
		 * are not the top pi waiter of the task. If deadlock
		 * detection is enabled we continue, but stop the
		 * requeueing in the chain walk.
		 */
		if (top_waiter != task_top_pi_waiter(task)) {
			if (!detect_deadlock)
				goto out_unlock_pi;
			else
				requeue = false;
		}
	}

	/*
	 * If the waiter priority is the same as the task priority
	 * then there is no further priority adjustment necessary. If
	 * deadlock detection is off, we stop the chain walk. If it's
	 * enabled we continue, but stop the requeueing in the chain
	 * walk.
	 */
	if (waiter->prio == task->prio) {
		if (!detect_deadlock)
			goto out_unlock_pi;
		else
			requeue = false;
	}

	/*
	 * [4] Get the next lock
	 */
	lock = waiter->lock;
	/*
	 * [5] We need to trylock here as we are holding task->pi_lock,
	 * which is the reverse lock order versus the other rtmutex
	 * operations.
	 */
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irq(&task->pi_lock);
		cpu_relax();
		goto retry;
	}

	/*
	 * [6] check_exit_conditions_2() protected by task->pi_lock and
	 * lock->wait_lock.
	 *
	 * Deadlock detection. If the lock is the same as the original
	 * lock which caused us to walk the lock chain or if the
	 * current lock is owned by the task which initiated the chain
	 * walk, we detected a deadlock.
	 */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = -EDEADLK;
		goto out_unlock_pi;
	}

	/*
	 * If we just follow the lock chain for deadlock detection, no
	 * need to do all the requeue operations. To avoid a truckload
	 * of conditionals around the various places below, just do the
	 * minimum chain walk checks.
	 */
	if (!requeue) {
		/*
		 * No requeue[7] here. Just release @task [8]
		 */
		raw_spin_unlock(&task->pi_lock);
		put_task_struct(task);

		/*
		 * [9] check_exit_conditions_3 protected by lock->wait_lock.
		 * If there is no owner of the lock, end of chain.
		 */
		if (!rt_mutex_owner(lock)) {
			raw_spin_unlock_irq(&lock->wait_lock);
			return 0;
		}

		/* [10] Grab the next task, i.e. owner of @lock */
		task = rt_mutex_owner(lock);
		get_task_struct(task);
		raw_spin_lock(&task->pi_lock);

		/*
		 * No requeue [11] here. We just do deadlock detection.
		 *
		 * [12] Store whether owner is blocked
		 * itself. Decision is made after dropping the locks
		 */
		next_lock = task_blocked_on_lock(task);
		/*
		 * Get the top waiter for the next iteration
		 */
		top_waiter = rt_mutex_top_waiter(lock);

		/* [13] Drop locks */
		raw_spin_unlock(&task->pi_lock);
		raw_spin_unlock_irq(&lock->wait_lock);

		/* If owner is not blocked, end of chain. */
		if (!next_lock)
			goto out_put_task;
		goto again;
	}

	/*
	 * Store the current top waiter before doing the requeue
	 * operation on @lock. We need it for the boost/deboost
	 * decision below.
	 */
	prerequeue_top_waiter = rt_mutex_top_waiter(lock);

	/* [7] Requeue the waiter in the lock waiter tree. */
	rt_mutex_dequeue(lock, waiter);
	waiter->prio = task->prio;
	rt_mutex_enqueue(lock, waiter);

	/* [8] Release the task */
	raw_spin_unlock(&task->pi_lock);
	put_task_struct(task);

	/*
	 * [9] check_exit_conditions_3 protected by lock->wait_lock.
	 *
	 * We must abort the chain walk if there is no lock owner even
	 * in the dead lock detection case, as we have nothing to
	 * follow here. This is the end of the chain we are walking.
	 */
	if (!rt_mutex_owner(lock)) {
		struct rt_mutex_waiter *lock_top_waiter;

		/*
		 * If the requeue [7] above changed the top waiter,
		 * then we need to wake the new top waiter up to try
		 * to get the lock.
		 */
		lock_top_waiter = rt_mutex_top_waiter(lock);
		if (prerequeue_top_waiter != lock_top_waiter)
			rt_mutex_wake_waiter(lock_top_waiter);
		raw_spin_unlock_irq(&lock->wait_lock);
		return 0;
	}

	/* [10] Grab the next task, i.e. the owner of @lock */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock(&task->pi_lock);

	/* [11] requeue the pi waiters if necessary */
	if (waiter == rt_mutex_top_waiter(lock)) {
		/*
		 * The waiter became the new top (highest priority)
		 * waiter on the lock. Replace the previous top waiter
		 * in the owner task's pi_waiters tree with this waiter
		 * and adjust the priority of the owner.
		 */
		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
		rt_mutex_enqueue_pi(task, waiter);
		__rt_mutex_adjust_prio(task);

	} else if (prerequeue_top_waiter == waiter) {
		/*
		 * The waiter was the top waiter on the lock, but is
		 * no longer the top priority waiter. Replace waiter in
		 * the owner task's pi_waiters tree with the new top
		 * (highest priority) waiter and adjust the priority
		 * of the owner.
		 * The new top waiter is stored in @waiter so that
		 * @waiter == @top_waiter evaluates to true below and
		 * we continue to deboost the rest of the chain.
		 */
		rt_mutex_dequeue_pi(task, waiter);
		waiter = rt_mutex_top_waiter(lock);
		rt_mutex_enqueue_pi(task, waiter);
		__rt_mutex_adjust_prio(task);
	} else {
		/*
		 * Nothing changed. No need to do any priority
		 * adjustment.
		 */
	}

	/*
	 * [12] check_exit_conditions_4() protected by task->pi_lock
	 * and lock->wait_lock. The actual decisions are made after we
	 * dropped the locks.
	 *
	 * Check whether the task which owns the current lock is pi
	 * blocked itself. If yes we store a pointer to the lock for
	 * the lock chain change detection above. After we dropped
	 * task->pi_lock next_lock cannot be dereferenced anymore.
	 */
	next_lock = task_blocked_on_lock(task);
	/*
	 * Store the top waiter of @lock for the end of chain walk
	 * decision below.
	 */
	top_waiter = rt_mutex_top_waiter(lock);

	/* [13] Drop the locks */
	raw_spin_unlock(&task->pi_lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	/*
	 * Make the actual exit decisions [12], based on the stored
	 * values.
	 *
	 * We reached the end of the lock chain. Stop right here. No
	 * point to go back just to figure that out.
	 */
	if (!next_lock)
		goto out_put_task;

	/*
	 * If the current waiter is not the top waiter on the lock,
	 * then we can stop the chain walk here if we are not in full
	 * deadlock detection mode.
	 */
	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irq(&task->pi_lock);
 out_put_task:
	put_task_struct(task);

	return ret;
}
#define STEAL_NORMAL  0
#define STEAL_LATERAL 1

/*
 * Note that RT tasks are excluded from lateral-steals to prevent the
 * introduction of an unbounded latency.
 */
static inline int lock_is_stealable(struct task_struct *task,
				    struct task_struct *pendowner, int mode)
{
	if (mode == STEAL_NORMAL || rt_task(task)) {
		if (task->prio >= pendowner->prio)
			return 0;
	} else if (task->prio > pendowner->prio)
		return 0;
	return 1;
}
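/*
 * For illustration (not from the original source): with STEAL_LATERAL a
 * non-RT task may take the lock when its priority equals that of the
 * queued top waiter (task->prio == pendowner->prio), while STEAL_NORMAL,
 * and any RT task, requires a strictly higher priority (numerically
 * lower ->prio). Excluding RT tasks from lateral steals keeps their
 * wakeup latencies bounded.
 */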
/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 *
 * @lock:   The lock to be acquired.
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait tree if the
 *	    callsite called task_blocked_on_lock(), otherwise NULL
 */
static int __try_to_take_rt_mutex(struct rt_mutex *lock,
				  struct task_struct *task,
				  struct rt_mutex_waiter *waiter, int mode)
{
	/*
	 * Before testing whether we can acquire @lock, we set the
	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
	 * other tasks which try to modify @lock into the slow path
	 * and they serialize on @lock->wait_lock.
	 *
	 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
	 * as explained at the top of this file if and only if:
	 *
	 * - There is a lock owner. The caller must fixup the
	 *   transient state if it does a trylock or leaves the lock
	 *   function due to a signal or timeout.
	 *
	 * - @task acquires the lock and there are no other
	 *   waiters. This is undone in rt_mutex_set_owner(@task) at
	 *   the end of this function.
	 */
	mark_rt_mutex_waiters(lock);

	/*
	 * If @lock has an owner, give up.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * If @waiter != NULL, @task has already enqueued the waiter
	 * into @lock waiter tree. If @waiter == NULL then this is a
	 * trylock attempt.
	 */
	if (waiter) {
		/*
		 * If waiter is not the highest priority waiter of
		 * @lock, give up.
		 */
		if (waiter != rt_mutex_top_waiter(lock)) {
			/* XXX lock_is_stealable() ? */
			return 0;
		}

		/*
		 * We can acquire the lock. Remove the waiter from the
		 * lock waiters tree.
		 */
		rt_mutex_dequeue(lock, waiter);
	} else {
		/*
		 * If the lock has waiters already we check whether @task is
		 * eligible to take over the lock.
		 *
		 * If there are no other waiters, @task can acquire
		 * the lock. @task->pi_blocked_on is NULL, so it does
		 * not need to be dequeued.
		 */
		if (rt_mutex_has_waiters(lock)) {
			struct task_struct *pown = rt_mutex_top_waiter(lock)->task;

			if (task != pown && !lock_is_stealable(task, pown, mode))
				return 0;
			/*
			 * The current top waiter stays enqueued. We
			 * don't have to change anything in the lock
			 * waiters order.
			 */
		} else {
			/*
			 * No waiters. Take the lock without the
			 * pi_lock dance. @task->pi_blocked_on is NULL
			 * and we have no waiters to enqueue in @task
			 * pi waiters tree.
			 */
			goto takeit;
		}
	}

	/*
	 * Clear @task->pi_blocked_on. Requires protection by
	 * @task->pi_lock. Redundant operation for the @waiter == NULL
	 * case, but conditionals are more expensive than a redundant
	 * store.
	 */
	raw_spin_lock(&task->pi_lock);
	task->pi_blocked_on = NULL;
	/*
	 * Finish the lock acquisition. @task is the new owner. If
	 * other waiters exist we have to insert the highest priority
	 * waiter into @task->pi_waiters tree.
	 */
	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
	raw_spin_unlock(&task->pi_lock);

takeit:
	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	/*
	 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
	 * are still waiters or clears it.
	 */
	rt_mutex_set_owner(lock, task);

	rt_mutex_deadlock_account_lock(lock, task);

	return 1;
}
#ifdef CONFIG_PREEMPT_RT_FULL
/*
 * preemptible spin_lock functions:
 */
static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
					 void (*slowfn)(struct rt_mutex *lock,
							bool mg_off),
					 bool do_mig_dis)
{
	might_sleep_no_state_check();

	if (do_mig_dis)
		migrate_disable();

	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		rt_mutex_deadlock_account_lock(lock, current);
	else
		slowfn(lock, do_mig_dis);
}

static inline int rt_spin_lock_fastunlock(struct rt_mutex *lock,
					  int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
		rt_mutex_deadlock_account_unlock(current);
		return 0;
	}
	return slowfn(lock);
}
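/*
 * For illustration (not from the original source): both fast paths hinge
 * on a single cmpxchg of lock->owner between NULL and current. Because a
 * waiter first sets RT_MUTEX_HAS_WAITERS in lock->owner (see
 * mark_rt_mutex_waiters()), the cmpxchg fails on a contended lock and
 * the caller drops into the slow path.
 */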
#ifdef CONFIG_SMP
/*
 * Note that owner is a speculative pointer and dereferencing relies
 * on rcu_read_lock() and the check against the lock owner.
 */
static int adaptive_wait(struct rt_mutex *lock,
			 struct task_struct *owner)
{
	int res = 0;

	rcu_read_lock();
	for (;;) {
		if (owner != rt_mutex_owner(lock))
			break;
		/*
		 * Ensure that owner->on_cpu is dereferenced _after_
		 * checking the above to be valid.
		 */
		barrier();
		if (!owner->on_cpu) {
			res = 1;
			break;
		}
		cpu_relax();
	}
	rcu_read_unlock();
	return res;
}
#else
static int adaptive_wait(struct rt_mutex *lock,
			 struct task_struct *orig_owner)
{
	return 1;
}
#endif
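/*
 * For illustration (not from the original source): adaptive_wait()
 * busy-waits while the lock owner is running on another CPU. It returns
 * 0 when the owner changes (the lock may be free, so retry the
 * acquisition immediately) and 1 when the owner is off-CPU (spinning
 * cannot help, so go to sleep). On UP the stub always returns 1, since
 * spinning can never make progress there.
 */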
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   enum rtmutex_chainwalk chwalk);

/*
 * Slow path lock function spin_lock style: this variant is very
 * careful not to miss any non-lock wakeups.
 *
 * We store the current state under p->pi_lock in p->saved_state and
 * the try_to_wake_up() code handles this accordingly.
 */
static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock,
						   bool mg_off)
{
	struct task_struct *lock_owner, *self = current;
	struct rt_mutex_waiter waiter, *top_waiter;
	unsigned long flags;
	int ret;

	rt_mutex_init_waiter(&waiter, true);

	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		return;
	}

	BUG_ON(rt_mutex_owner(lock) == self);

	/*
	 * We save whatever state the task is in and we'll restore it
	 * after acquiring the lock taking real wakeups into account
	 * as well. We are serialized via pi_lock against wakeups. See
	 * try_to_wake_up().
	 */
	raw_spin_lock(&self->pi_lock);
	self->saved_state = self->state;
	__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
	raw_spin_unlock(&self->pi_lock);

	ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
	BUG_ON(ret);

	for (;;) {
		/* Try to acquire the lock again. */
		if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
			break;

		top_waiter = rt_mutex_top_waiter(lock);
		lock_owner = rt_mutex_owner(lock);

		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

		debug_rt_mutex_print_deadlock(&waiter);

		if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) {
			if (mg_off)
				migrate_enable();
			schedule();
			if (mg_off)
				migrate_disable();
		}

		raw_spin_lock_irqsave(&lock->wait_lock, flags);

		raw_spin_lock(&self->pi_lock);
		__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock(&self->pi_lock);
	}

	/*
	 * Restore the task state to current->saved_state. We set it
	 * to the original state above and the try_to_wake_up() code
	 * has possibly updated it when a real (non-rtmutex) wakeup
	 * happened while we were blocked. Clear saved_state so
	 * try_to_wake_up() does not get confused.
	 */
	raw_spin_lock(&self->pi_lock);
	__set_current_state_no_track(self->saved_state);
	self->saved_state = TASK_RUNNING;
	raw_spin_unlock(&self->pi_lock);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up:
	 */
	fixup_rt_mutex_waiters(lock);

	BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
	BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	debug_rt_mutex_free_waiter(&waiter);
}
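/*
 * For illustration (not from the original source): if a task that was in
 * TASK_INTERRUPTIBLE hits a contended spinlock_t, the slow path above
 * blocks it in TASK_UNINTERRUPTIBLE while saved_state remembers
 * TASK_INTERRUPTIBLE. A real (non-rtmutex) wakeup in that window is
 * recorded in saved_state by try_to_wake_up() and re-applied once the
 * lock is acquired, so the wakeup is not lost.
 */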
static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
				    struct wake_q_head *wake_sleeper_q,
				    struct rt_mutex *lock);

/*
 * Slow path to release a rt_mutex spin_lock style
 */
static int noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
{
	unsigned long flags;
	WAKE_Q(wake_q);
	WAKE_Q(wake_sleeper_q);

	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		return 0;
	}

	mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	wake_up_q(&wake_q);
	wake_up_q_sleeper(&wake_sleeper_q);

	/* Undo pi boosting when necessary */
	rt_mutex_adjust_prio(current);
	return 0;
}

static int noinline __sched rt_spin_lock_slowunlock_no_deboost(struct rt_mutex *lock)
{
	unsigned long flags;
	WAKE_Q(wake_q);
	WAKE_Q(wake_sleeper_q);

	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		return 0;
	}

	mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	wake_up_q(&wake_q);
	wake_up_q_sleeper(&wake_sleeper_q);
	return 1;
}
void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
{
	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, false);
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
}
EXPORT_SYMBOL(rt_spin_lock__no_mg);

void __lockfunc rt_spin_lock(spinlock_t *lock)
{
	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
}
EXPORT_SYMBOL(rt_spin_lock);

void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
{
	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, true);
}
EXPORT_SYMBOL(__rt_spin_lock);

void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
{
	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, false);
}
EXPORT_SYMBOL(__rt_spin_lock__no_mg);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
}
EXPORT_SYMBOL(rt_spin_lock_nested);
#endif

void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock)
{
	/* NOTE: we always pass in '1' for nested, for simplicity */
	spin_release(&lock->dep_map, 1, _RET_IP_);
	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
}
EXPORT_SYMBOL(rt_spin_unlock__no_mg);

void __lockfunc rt_spin_unlock(spinlock_t *lock)
{
	/* NOTE: we always pass in '1' for nested, for simplicity */
	spin_release(&lock->dep_map, 1, _RET_IP_);
	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
	migrate_enable();
}
EXPORT_SYMBOL(rt_spin_unlock);

int __lockfunc rt_spin_unlock_no_deboost(spinlock_t *lock)
{
	int ret;

	/* NOTE: we always pass in '1' for nested, for simplicity */
	spin_release(&lock->dep_map, 1, _RET_IP_);
	ret = rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_no_deboost);
	migrate_enable();
	return ret;
}

void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
{
	rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
}
EXPORT_SYMBOL(__rt_spin_unlock);

/*
 * Wait for the lock to get unlocked: instead of polling for an unlock
 * (like raw spinlocks do), we lock and unlock, to force the kernel to
 * schedule if there's contention:
 */
void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
{
	spin_lock(lock);
	spin_unlock(lock);
}
EXPORT_SYMBOL(rt_spin_unlock_wait);

int __lockfunc __rt_spin_trylock(struct rt_mutex *lock)
{
	return rt_mutex_trylock(lock);
}

int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
{
	int ret;

	ret = rt_mutex_trylock(&lock->lock);
	if (ret)
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(rt_spin_trylock__no_mg);

int __lockfunc rt_spin_trylock(spinlock_t *lock)
{
	int ret;

	migrate_disable();
	ret = rt_mutex_trylock(&lock->lock);
	if (ret)
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	else
		migrate_enable();
	return ret;
}
EXPORT_SYMBOL(rt_spin_trylock);

int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
{
	int ret;

	local_bh_disable();
	ret = rt_mutex_trylock(&lock->lock);
	if (ret) {
		migrate_disable();
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	} else
		local_bh_enable();
	return ret;
}
EXPORT_SYMBOL(rt_spin_trylock_bh);

int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
{
	int ret;

	*flags = 0;
	ret = rt_mutex_trylock(&lock->lock);
	if (ret) {
		migrate_disable();
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}
	return ret;
}
EXPORT_SYMBOL(rt_spin_trylock_irqsave);

int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;
	rt_spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	rt_spin_unlock(lock);
	return 0;
}
EXPORT_SYMBOL(atomic_dec_and_spin_lock);
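/*
 * For illustration (not from the original source): the canonical use of
 * atomic_dec_and_spin_lock() is refcounted teardown, where the lock is
 * only taken for the final put (obj is a hypothetical structure):
 *
 *	if (atomic_dec_and_spin_lock(&obj->refcnt, &obj->lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj->lock);
 *		kfree(obj);
 *	}
 */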
void
__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
}
EXPORT_SYMBOL(__rt_spin_lock_init);

#endif /* PREEMPT_RT_FULL */

#ifdef CONFIG_PREEMPT_RT_FULL
static inline int __sched
__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
#else
static inline int __sched
__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
{
	BUG();
	return 0;
}
#endif

static inline int
try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
		     struct rt_mutex_waiter *waiter)
{
	return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
}
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held and interrupts disabled
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   enum rtmutex_chainwalk chwalk)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	struct rt_mutex *next_lock;
	int chain_walk = 0, res;

	/*
	 * Early deadlock detection. We really don't want the task to
	 * enqueue on itself just to untangle the mess later. It's not
	 * only an optimization. We drop the locks, so another waiter
	 * can come in before the chain walk detects the deadlock. So
	 * the other will detect the deadlock and return -EDEADLOCK,
	 * which is wrong, as the other waiter is not in a deadlock
	 * situation.
	 */
	if (owner == task)
		return -EDEADLK;

	raw_spin_lock(&task->pi_lock);

	/*
	 * In the case of futex requeue PI, this will be a proxy
	 * lock. The task will wake unaware that it is enqueued on
	 * this lock. Avoid blocking on two locks and corrupting
	 * pi_blocked_on via the PI_WAKEUP_INPROGRESS
	 * flag. futex_wait_requeue_pi() sets this when it wakes up
	 * before requeue (due to a signal or timeout). Do not enqueue
	 * the task if PI_WAKEUP_INPROGRESS is set.
	 */
	if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
		raw_spin_unlock(&task->pi_lock);
		return -EAGAIN;
	}

	BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));

	__rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	waiter->prio = task->prio;

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	rt_mutex_enqueue(lock, waiter);

	task->pi_blocked_on = waiter;

	raw_spin_unlock(&task->pi_lock);

	if (!owner)
		return 0;

	raw_spin_lock(&owner->pi_lock);
	if (waiter == rt_mutex_top_waiter(lock)) {
		rt_mutex_dequeue_pi(owner, top_waiter);
		rt_mutex_enqueue_pi(owner, waiter);

		__rt_mutex_adjust_prio(owner);
		if (rt_mutex_real_waiter(owner->pi_blocked_on))
			chain_walk = 1;
	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
		chain_walk = 1;
	}

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock(&owner->pi_lock);
	/*
	 * Even if full deadlock detection is on, if the owner is not
	 * blocked itself, we can avoid finding this out in the chain
	 * walk.
	 */
	if (!chain_walk || !next_lock)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
					 next_lock, waiter, task);

	raw_spin_lock_irq(&lock->wait_lock);

	return res;
}
/*
 * Remove the top waiter from the current task's pi waiter tree and
 * queue it up.
 *
 * Called with lock->wait_lock held and interrupts disabled.
 */
static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
				    struct wake_q_head *wake_sleeper_q,
				    struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;

	raw_spin_lock(&current->pi_lock);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	rt_mutex_dequeue_pi(current, waiter);

	/*
	 * As we are waking up the top waiter, and the waiter stays
	 * queued on the lock until it gets the lock, this lock
	 * obviously has waiters. Just set the bit here and this has
	 * the added benefit of forcing all new tasks into the
	 * slow path making sure no task of lower priority than
	 * the top waiter can steal this lock.
	 */
	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

	raw_spin_unlock(&current->pi_lock);

	if (waiter->savestate)
		wake_q_add(wake_sleeper_q, waiter->task);
	else
		wake_q_add(wake_q, waiter->task);
}
/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and interrupts disabled. The
 * caller must have just failed to take the lock via try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex *next_lock = NULL;

	raw_spin_lock(&current->pi_lock);
	rt_mutex_dequeue(lock, waiter);
	current->pi_blocked_on = NULL;
	raw_spin_unlock(&current->pi_lock);

	/*
	 * Only update priority if the waiter was the highest priority
	 * waiter of the lock and there is an owner to update.
	 */
	if (!owner || !is_top_waiter)
		return;

	raw_spin_lock(&owner->pi_lock);

	rt_mutex_dequeue_pi(owner, waiter);

	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));

	__rt_mutex_adjust_prio(owner);

	/* Store the lock on which owner is blocked or NULL */
	if (rt_mutex_real_waiter(owner->pi_blocked_on))
		next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock(&owner->pi_lock);

	/*
	 * Don't walk the chain, if the owner task is not blocked
	 * itself.
	 */
	if (!next_lock)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
				   next_lock, NULL, current);

	raw_spin_lock_irq(&lock->wait_lock);
}
/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!rt_mutex_real_waiter(waiter) || (waiter->prio == task->prio &&
			!dl_prio(task->prio))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}
/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:	 the rt_mutex to take
 * @state:	 the state the task should block in (TASK_INTERRUPTIBLE
 *		 or TASK_UNINTERRUPTIBLE)
 * @timeout:	 the pre-initialized and started timer, or NULL for none
 * @waiter:	 the pre-initialized rt_mutex_waiter
 * @ww_ctx:	 the wound/wait acquire context, or NULL
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter,
		    struct ww_acquire_ctx *ww_ctx)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		if (ww_ctx && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				break;
		}

		raw_spin_unlock_irq(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule();

		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(state);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}
static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
				     struct rt_mutex_waiter *w)
{
	/*
	 * If the result is not -EDEADLOCK or the caller requested
	 * deadlock detection, nothing to do here.
	 */
	if (res != -EDEADLOCK || detect_deadlock)
		return;

	/*
	 * Yell loudly and stop the task right here.
	 */
	rt_mutex_print_deadlock(w);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}
#ifdef CONFIG_PREEMPT_RT_FULL
static void ww_mutex_account_lock(struct rt_mutex *lock,
				  struct ww_acquire_ctx *ww_ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
	struct rt_mutex_waiter *waiter, *n;

	/*
	 * This branch gets optimized out for the common case,
	 * and is only important for ww_mutex_lock.
	 */
	ww_mutex_lock_acquired(ww, ww_ctx);
	ww->ctx = ww_ctx;

	/*
	 * Give any possible sleeping processes the chance to wake up,
	 * so they can recheck if they have to back off.
	 */
	rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters,
					     tree_entry) {
		/* XXX debug rt mutex waiter wakeup */

		BUG_ON(waiter->lock != lock);
		rt_mutex_wake_waiter(waiter);
	}
}

#else

static void ww_mutex_account_lock(struct rt_mutex *lock,
				  struct ww_acquire_ctx *ww_ctx)
{
	BUG();
}
#endif
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  enum rtmutex_chainwalk chwalk,
		  struct ww_acquire_ctx *ww_ctx)
{
	struct rt_mutex_waiter waiter;
	unsigned long flags;
	int ret = 0;

	rt_mutex_init_waiter(&waiter, false);

	/*
	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
	 * be called in early boot if the cmpxchg() fast path is disabled
	 * (debug, no architecture support). In this case we will acquire the
	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
	 * enable interrupts in that early boot case. So we need to use the
	 * irqsave/restore variants.
	 */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		if (ww_ctx)
			ww_mutex_account_lock(lock, ww_ctx);
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);

	if (likely(!ret))
		/* sleep on the mutex */
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
					  ww_ctx);
	else if (ww_ctx) {
		/* ww_mutex received EDEADLK, let it become EALREADY */
		ret = __mutex_lock_check_stamp(lock, ww_ctx);
		BUG_ON(!ret);
	}

	if (unlikely(ret)) {
		__set_current_state(TASK_RUNNING);
		if (rt_mutex_has_waiters(lock))
			remove_waiter(lock, &waiter);
		/* ww_mutex wants to report EDEADLK/EALREADY, let it */
		if (!ww_ctx)
			rt_mutex_handle_deadlock(ret, chwalk, &waiter);
	} else if (ww_ctx) {
		ww_mutex_account_lock(lock, ww_ctx);
	}

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
/*
 * Slow path try-lock function:
 */
static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	unsigned long flags;
	int ret;

	/*
	 * If the lock already has an owner we fail to get the lock.
	 * This can be done without taking the @lock->wait_lock as
	 * it is only being read, and this is a trylock anyway.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * The mutex has currently no owner. Lock the wait lock and try to
	 * acquire the lock. We use irqsave here to support early boot calls.
	 */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	ret = try_to_take_rt_mutex(lock, current, NULL);

	/*
	 * try_to_take_rt_mutex() sets the lock waiters bit
	 * unconditionally. Clean this up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return ret;
}
/*
 * Slow path to release a rt-mutex.
 * Return whether the current task needs to undo a potential priority boosting.
 */
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
					struct wake_q_head *wake_q,
					struct wake_q_head *wake_sleeper_q)
{
	unsigned long flags;

	/* irqsave required to support early boot calls */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	/*
	 * We must be careful here if the fast path is enabled. If we
	 * have no waiters queued we cannot set owner to NULL here
	 * because of:
	 *
	 * foo->lock->owner = NULL;
	 *			rtmutex_lock(foo->lock); <- fast path
	 *			free = atomic_dec_and_test(foo->refcnt);
	 *			rtmutex_unlock(foo->lock); <- fast path
	 *			if (free)
	 *				kfree(foo);
	 * raw_spin_unlock(foo->lock->wait_lock);
	 *
	 * So for the fastpath enabled kernel:
	 *
	 * Nothing can set the waiters bit as long as we hold
	 * lock->wait_lock. So we do the following sequence:
	 *
	 *	owner = rt_mutex_owner(lock);
	 *	clear_rt_mutex_waiters(lock);
	 *	raw_spin_unlock(&lock->wait_lock);
	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
	 *		return;
	 *	goto retry;
	 *
	 * The fastpath disabled variant is simple as all access to
	 * lock->owner is serialized by lock->wait_lock:
	 *
	 *	lock->owner = NULL;
	 *	raw_spin_unlock(&lock->wait_lock);
	 */
	while (!rt_mutex_has_waiters(lock)) {
		/* Drops lock->wait_lock ! */
		if (unlock_rt_mutex_safe(lock, flags) == true)
			return false;
		/* Relock the rtmutex and try again */
		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}

	/*
	 * The wakeup next waiter path does not suffer from the above
	 * race. See the comments there.
	 *
	 * Queue the next waiter for wakeup once we release the wait_lock.
	 */
	mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* check PI boosting */
	return true;
}
/*
 * debug aware fast / slowpath lock,trylock,unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  struct ww_acquire_ctx *ww_ctx,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				enum rtmutex_chainwalk chwalk,
				struct ww_acquire_ctx *ww_ctx))
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	}
	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK,
		      ww_ctx);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout,
			enum rtmutex_chainwalk chwalk,
			struct ww_acquire_ctx *ww_ctx,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      enum rtmutex_chainwalk chwalk,
				      struct ww_acquire_ctx *ww_ctx))
{
	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	}
	return slowfn(lock, state, timeout, chwalk, ww_ctx);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    bool (*slowfn)(struct rt_mutex *lock,
				   struct wake_q_head *wqh,
				   struct wake_q_head *wq_sleeper))
{
	WAKE_Q(wake_q);
	WAKE_Q(wake_sleeper_q);

	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
		rt_mutex_deadlock_account_unlock(current);
	} else {
		bool deboost = slowfn(lock, &wake_q, &wake_sleeper_q);

		wake_up_q(&wake_q);
		wake_up_q_sleeper(&wake_sleeper_q);

		/* Undo pi boosting if necessary: */
		if (deboost)
			rt_mutex_adjust_prio(current);
	}
}
/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/*
 * Futex variant with full deadlock detection.
 */
int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
			      struct hrtimer_sleeper *timeout)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       RT_MUTEX_FULL_CHAINWALK, NULL,
				       rt_mutex_slowlock);
}

/**
 * rt_mutex_lock_killable - lock a rt_mutex killable
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a fatal signal
 */
int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_KILLABLE, NULL, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible
 *			 the timeout structure is provided
 *			 by the caller
 *
 * @lock:	the rt_mutex to be locked
 * @timeout:	timeout structure or NULL (no timeout)
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       RT_MUTEX_MIN_CHAINWALK,
				       NULL,
				       rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
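/*
 * For illustration (a sketch, not from the original source; the exact
 * clock and absolute vs. relative setup depend on the call site, and m
 * is a hypothetical rt_mutex):
 *
 *	struct hrtimer_sleeper to;
 *	int ret;
 *
 *	hrtimer_init_on_stack(&to.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 *	hrtimer_init_sleeper(&to, current);
 *	hrtimer_set_expires(&to.timer, ktime_add_ms(ktime_get(), 10));
 *	ret = rt_mutex_timed_lock(&m, &to);	// 0, -EINTR or -ETIMEDOUT
 *	destroy_hrtimer_on_stack(&to.timer);
 */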
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to
 * call it from atomic regions, but not from hard interrupt or soft
 * interrupt context.
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
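/*
 * For illustration (not from the original source; m is a hypothetical
 * rt_mutex):
 *
 *	if (rt_mutex_trylock(&m)) {
 *		// ... critical section ...
 *		rt_mutex_unlock(&m);
 *	} else {
 *		// fall back without blocking
 *	}
 */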
/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
 * @lock: the rt_mutex to be unlocked
 *
 * Returns: true/false indicating whether priority adjustment is
 * needed.
 */
bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
				   struct wake_q_head *wqh,
				   struct wake_q_head *wq_sleeper)
{
	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
		rt_mutex_deadlock_account_unlock(current);
		return false;
	}
	return rt_mutex_slowunlock(lock, wqh, wq_sleeper);
}

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT;
	lock->waiters_leftmost = NULL;

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL(__rt_mutex_init);
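/*
 * For illustration (not from the original source): typical static and
 * dynamic initialization of an rt_mutex:
 *
 *	static DEFINE_RT_MUTEX(static_lock);
 *
 *	struct rt_mutex dyn_lock;
 *	rt_mutex_init(&dyn_lock);	// wraps __rt_mutex_init()
 */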
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	rt_mutex_init(lock);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be locked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:	the rt_mutex to take
 * @waiter:	the pre-initialized rt_mutex_waiter
 * @task:	the task to prepare
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL)) {
		raw_spin_unlock_irq(&lock->wait_lock);
		return 1;
	}

#ifdef CONFIG_PREEMPT_RT_FULL
	/*
	 * In PREEMPT_RT there's an added race.
	 * If the task, that we are about to requeue, times out,
	 * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
	 * to skip this task. But right after the task sets
	 * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
	 * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
	 * This will replace the PI_WAKEUP_INPROGRESS with the actual
	 * lock that it blocks on. We *must not* place this task
	 * on this proxy lock in that case.
	 *
	 * To prevent this race, we first take the task's pi_lock
	 * and check if it has updated its pi_blocked_on. If it has,
	 * we assume that it woke up and we return -EAGAIN.
	 * Otherwise, we set the task's pi_blocked_on to
	 * PI_REQUEUE_INPROGRESS, so that if the task is waking up
	 * it will know that we are in the process of requeuing it.
	 */
	raw_spin_lock(&task->pi_lock);
	if (task->pi_blocked_on) {
		raw_spin_unlock(&task->pi_lock);
		raw_spin_unlock_irq(&lock->wait_lock);
		return -EAGAIN;
	}
	task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
	raw_spin_unlock(&task->pi_lock);
#endif

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (ret && rt_mutex_has_waiters(lock))
		remove_waiter(lock, waiter);

	raw_spin_unlock_irq(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:	the rt_mutex we were woken on
 * @to:		the timeout, null if none. hrtimer should already have
 *		been started.
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);

	set_current_state(TASK_INTERRUPTIBLE);

	/* sleep on the mutex */
	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

#ifdef CONFIG_PREEMPT_RT_FULL
int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	int ret;

	might_sleep();

	mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
	ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
	if (ret)
		mutex_release(&lock->base.dep_map, 1, _RET_IP_);
	else if (!ret && ww_ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ww_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	int ret;

	might_sleep();

	mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
	ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
	if (ret)
		mutex_release(&lock->base.dep_map, 1, _RET_IP_);
	else if (!ret && ww_ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ww_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	int nest = !!lock->ctx;

	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (nest) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_release(&lock->base.dep_map, nest, _RET_IP_);
	rt_mutex_unlock(&lock->base.lock);
}
EXPORT_SYMBOL(ww_mutex_unlock);