2 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
4 * started by Ingo Molnar and Thomas Gleixner.
6 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
9 * Copyright (C) 2006 Esben Nielsen
11 * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
13 * Adaptive Spinlocks simplification:
14 * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
16 * See Documentation/locking/rt-mutex-design.txt for details.
18 #include <linux/spinlock.h>
19 #include <linux/export.h>
20 #include <linux/sched.h>
21 #include <linux/sched/rt.h>
22 #include <linux/sched/deadline.h>
23 #include <linux/timer.h>
24 #include <linux/ww_mutex.h>
26 #include "rtmutex_common.h"
29 * lock->owner state tracking:
31 * lock->owner holds the task_struct pointer of the owner. Bit 0
32 * is used to keep track of the "lock has waiters" state.
35 * NULL 0 lock is free (fast acquire possible)
36 * NULL 1 lock is free and has waiters and the top waiter
37 * is going to take the lock*
38 * taskpointer 0 lock is held (fast release possible)
39 * taskpointer 1 lock is held and has waiters**
41 * The fast atomic compare exchange based acquire and release is only
42 * possible when bit 0 of lock->owner is 0.
44 * (*) It also can be a transitional state when grabbing the lock
45 * while ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
46 * we need to set bit 0 before looking at the lock, and the owner may be
47 * NULL during this small window, hence this can be a transitional state.
49 * (**) There is a small window when bit 0 is set but there are no
50 * waiters. This can happen when grabbing the lock in the slow path.
51 * To prevent a cmpxchg of the owner releasing the lock, we need to
52 * set this bit before looking at the lock.
56 rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
58 unsigned long val = (unsigned long)owner;
60 if (rt_mutex_has_waiters(lock))
61 val |= RT_MUTEX_HAS_WAITERS;
63 lock->owner = (struct task_struct *)val;
66 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
68 lock->owner = (struct task_struct *)
69 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
72 static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
74 if (!rt_mutex_has_waiters(lock))
75 clear_rt_mutex_waiters(lock);
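/*
 * Minimal userspace sketch of the owner/waiters encoding described above
 * (illustrative only, not part of the kernel code; names are made up).
 * Bit 0 of the owner word carries the "has waiters" flag, the remaining
 * bits are the owner task pointer.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

#define EX_HAS_WAITERS	1UL

struct ex_task;

static void *ex_encode_owner(struct ex_task *owner, bool has_waiters)
{
	uintptr_t val = (uintptr_t)owner;

	if (has_waiters)
		val |= EX_HAS_WAITERS;
	return (void *)val;
}

static struct ex_task *ex_decode_owner(void *owner_field)
{
	return (struct ex_task *)((uintptr_t)owner_field & ~EX_HAS_WAITERS);
}

static bool ex_has_waiters(void *owner_field)
{
	return (uintptr_t)owner_field & EX_HAS_WAITERS;
}
#endif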
78 static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
80 return waiter && waiter != PI_WAKEUP_INPROGRESS &&
81 waiter != PI_REQUEUE_INPROGRESS;
85 * We can speed up the acquire/release, if the architecture
86 * supports cmpxchg and if there's no debugging state to be set up
88 #if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
89 # define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c)
90 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
92 unsigned long owner, *p = (unsigned long *) &lock->owner;
96 } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
100 * Safe fastpath aware unlock:
101 * 1) Clear the waiters bit
102 * 2) Drop lock->wait_lock
103 * 3) Try to unlock the lock with cmpxchg
105 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
106 __releases(lock->wait_lock)
108 struct task_struct *owner = rt_mutex_owner(lock);
110 clear_rt_mutex_waiters(lock);
111 raw_spin_unlock(&lock->wait_lock);
113 * If a new waiter comes in between the unlock and the cmpxchg
114 * we have two situations:
118 * cmpxchg(p, owner, 0) == owner
119 * mark_rt_mutex_waiters(lock);
125 * mark_rt_mutex_waiters(lock);
127 * cmpxchg(p, owner, 0) != owner
136 return rt_mutex_cmpxchg(lock, owner, NULL);
140 # define rt_mutex_cmpxchg(l,c,n) (0)
141 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
143 lock->owner = (struct task_struct *)
144 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
148 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
150 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
151 __releases(lock->wait_lock)
154 raw_spin_unlock(&lock->wait_lock);
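/*
 * Illustrative userspace sketch (assumed names, not kernel code) of the
 * cmpxchg based fast path used above: acquire succeeds only when the
 * owner word is NULL with bit 0 clear, release succeeds only when no
 * waiters bit has been set in the meantime.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct ex_task;

struct ex_lock {
	_Atomic uintptr_t owner;	/* task pointer, bit 0 = has waiters */
};

static bool ex_fast_acquire(struct ex_lock *l, struct ex_task *me)
{
	uintptr_t expected = 0;		/* free and no waiters */

	return atomic_compare_exchange_strong(&l->owner, &expected,
					      (uintptr_t)me);
}

static bool ex_fast_release(struct ex_lock *l, struct ex_task *me)
{
	uintptr_t expected = (uintptr_t)me;	/* held, no waiters */

	/* Fails if a waiter set bit 0 meanwhile -> take the slow path */
	return atomic_compare_exchange_strong(&l->owner, &expected, 0);
}
#endif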
160 rt_mutex_waiter_less(struct rt_mutex_waiter *left,
161 struct rt_mutex_waiter *right)
163 if (left->prio < right->prio)
167 * If both waiters have dl_prio(), we check the deadlines of the
169 * If left waiter has a dl_prio(), and we didn't return 1 above,
170 * then right waiter has a dl_prio() too.
172 if (dl_prio(left->prio))
173 return (left->task->dl.deadline < right->task->dl.deadline);
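/*
 * Ordering example (illustrative): an RT waiter with ->prio 10 sorts
 * before one with ->prio 20 (lower number == higher priority); if both
 * waiters are SCHED_DEADLINE, the one with the earlier absolute
 * deadline wins.
 */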
179 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
181 struct rb_node **link = &lock->waiters.rb_node;
182 struct rb_node *parent = NULL;
183 struct rt_mutex_waiter *entry;
188 entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
189 if (rt_mutex_waiter_less(waiter, entry)) {
190 link = &parent->rb_left;
192 link = &parent->rb_right;
198 lock->waiters_leftmost = &waiter->tree_entry;
200 rb_link_node(&waiter->tree_entry, parent, link);
201 rb_insert_color(&waiter->tree_entry, &lock->waiters);
205 rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
207 if (RB_EMPTY_NODE(&waiter->tree_entry))
210 if (lock->waiters_leftmost == &waiter->tree_entry)
211 lock->waiters_leftmost = rb_next(&waiter->tree_entry);
213 rb_erase(&waiter->tree_entry, &lock->waiters);
214 RB_CLEAR_NODE(&waiter->tree_entry);
218 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
220 struct rb_node **link = &task->pi_waiters.rb_node;
221 struct rb_node *parent = NULL;
222 struct rt_mutex_waiter *entry;
227 entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
228 if (rt_mutex_waiter_less(waiter, entry)) {
229 link = &parent->rb_left;
231 link = &parent->rb_right;
237 task->pi_waiters_leftmost = &waiter->pi_tree_entry;
239 rb_link_node(&waiter->pi_tree_entry, parent, link);
240 rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
244 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
246 if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
249 if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
250 task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);
252 rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
253 RB_CLEAR_NODE(&waiter->pi_tree_entry);
257 * Calculate task priority from the waiter tree priority
259 * Return task->normal_prio when the waiter tree is empty or when
260 * the waiter is not allowed to do priority boosting
262 int rt_mutex_getprio(struct task_struct *task)
264 if (likely(!task_has_pi_waiters(task)))
265 return task->normal_prio;
267 return min(task_top_pi_waiter(task)->prio, task->normal_prio);
271 struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
273 if (likely(!task_has_pi_waiters(task)))
276 return task_top_pi_waiter(task)->task;
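/*
 * Worked example for rt_mutex_getprio() (illustrative values): an owner
 * with normal_prio 120 (SCHED_OTHER) whose top pi waiter has prio 50
 * gets min(50, 120) = 50, i.e. it runs boosted at the waiter's RT
 * priority until the boost is undone.
 */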
280 * Called by sched_setscheduler() to get the priority which will be
281 * effective after the change.
283 int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
285 if (!task_has_pi_waiters(task))
288 if (task_top_pi_waiter(task)->task->prio <= newprio)
289 return task_top_pi_waiter(task)->task->prio;
294 * Adjust the priority of a task, after its pi_waiters got modified.
296 * This can be both boosting and unboosting. task->pi_lock must be held.
298 static void __rt_mutex_adjust_prio(struct task_struct *task)
300 int prio = rt_mutex_getprio(task);
302 if (task->prio != prio || dl_prio(prio))
303 rt_mutex_setprio(task, prio);
307 * Adjust task priority (undo boosting). Called from the exit path of
308 * rt_mutex_slowunlock() and rt_mutex_slowlock().
310 * (Note: We do this outside of the protection of lock->wait_lock to
311 * allow the lock to be taken while or before we readjust the priority
312 * of task. We do not use the spin_xx_mutex() variants here as we are
313 * outside of the debug path.)
315 void rt_mutex_adjust_prio(struct task_struct *task)
319 raw_spin_lock_irqsave(&task->pi_lock, flags);
320 __rt_mutex_adjust_prio(task);
321 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
325 * Deadlock detection is conditional:
327 * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
328 * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
330 * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
331 * conducted independent of the detect argument.
333 * If the waiter argument is NULL this indicates the deboost path and
334 * deadlock detection is disabled independent of the detect argument
335 * and the config settings.
337 static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
338 enum rtmutex_chainwalk chwalk)
341 * This is just a wrapper function for the following call,
342 * because debug_rt_mutex_detect_deadlock() smells like a magic
343 * debug feature and I wanted to keep the cond function in the
344 * main source file along with the comments instead of having
345 * two of the same in the headers.
347 return debug_rt_mutex_detect_deadlock(waiter, chwalk);
350 static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
352 if (waiter->savestate)
353 wake_up_lock_sleeper(waiter->task);
355 wake_up_process(waiter->task);
359 * Max number of times we'll walk the boosting chain:
361 int max_lock_depth = 1024;
363 static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
365 return rt_mutex_real_waiter(p->pi_blocked_on) ?
366 p->pi_blocked_on->lock : NULL;
370 * Adjust the priority chain. Also used for deadlock detection.
371 * Decreases task's usage by one - may thus free the task.
373 * @task: the task owning the mutex (owner) for which a chain walk is
375 * @chwalk: do we have to carry out deadlock detection?
376 * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
377 * things for a task that has just got its priority adjusted, and
378 * is waiting on a mutex)
379 * @next_lock: the mutex on which the owner of @orig_lock was blocked before
380 * we dropped its pi_lock. Is never dereferenced, only used for
381 * comparison to detect lock chain changes.
382 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
383 * its priority to the mutex owner (can be NULL in the case
384 * depicted above or if the top waiter has gone away and we are
385 * actually deboosting the owner)
386 * @top_task: the current top waiter
388 * Returns 0 or -EDEADLK.
390 * Chain walk basics and protection scope
392 * [R] refcount on task
393 * [P] task->pi_lock held
394 * [L] rtmutex->wait_lock held
396 * Step Description Protected by
397 * function arguments:
399 * @orig_lock if != NULL @top_task is blocked on it
400 * @next_lock Unprotected. Cannot be
401 * dereferenced. Only used for
403 * @orig_waiter if != NULL @top_task is blocked on it
404 * @top_task current, or in case of proxy
405 * locking protected by calling
408 * loop_sanity_check();
410 * [1] lock(task->pi_lock); [R] acquire [P]
411 * [2] waiter = task->pi_blocked_on; [P]
412 * [3] check_exit_conditions_1(); [P]
413 * [4] lock = waiter->lock; [P]
414 * [5] if (!try_lock(lock->wait_lock)) { [P] try to acquire [L]
415 * unlock(task->pi_lock); release [P]
418 * [6] check_exit_conditions_2(); [P] + [L]
419 * [7] requeue_lock_waiter(lock, waiter); [P] + [L]
420 * [8] unlock(task->pi_lock); release [P]
421 * put_task_struct(task); release [R]
422 * [9] check_exit_conditions_3(); [L]
423 * [10] task = owner(lock); [L]
424 * get_task_struct(task); [L] acquire [R]
425 * lock(task->pi_lock); [L] acquire [P]
426 * [11] requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
427 * [12] check_exit_conditions_4(); [P] + [L]
428 * [13] unlock(task->pi_lock); release [P]
429 * unlock(lock->wait_lock); release [L]
432 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
433 enum rtmutex_chainwalk chwalk,
434 struct rt_mutex *orig_lock,
435 struct rt_mutex *next_lock,
436 struct rt_mutex_waiter *orig_waiter,
437 struct task_struct *top_task)
439 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
440 struct rt_mutex_waiter *prerequeue_top_waiter;
441 int ret = 0, depth = 0;
442 struct rt_mutex *lock;
443 bool detect_deadlock;
447 detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
450 * The (de)boosting is a step by step approach with a lot of
451 * pitfalls. We want this to be preemptible and we want to hold a
452 * maximum of two locks per step. So we have to check
453 * carefully whether things change under us.
457 * We limit the lock chain length for each invocation.
459 if (++depth > max_lock_depth) {
463 * Print this only once. If the admin changes the limit,
464 * print a new message when reaching the limit again.
466 if (prev_max != max_lock_depth) {
467 prev_max = max_lock_depth;
468 printk(KERN_WARNING "Maximum lock depth %d reached "
469 "task: %s (%d)\n", max_lock_depth,
470 top_task->comm, task_pid_nr(top_task));
472 put_task_struct(task);
478 * We are fully preemptible here and only hold the refcount on
479 * @task. So everything can have changed under us since the
480 * caller or our own code below (goto retry/again) dropped all
485 * [1] Task cannot go away as we did a get_task() before !
487 raw_spin_lock_irqsave(&task->pi_lock, flags);
490 * [2] Get the waiter on which @task is blocked on.
492 waiter = task->pi_blocked_on;
495 * [3] check_exit_conditions_1() protected by task->pi_lock.
499 * Check whether the end of the boosting chain has been
500 * reached or the state of the chain has changed while we
503 if (!rt_mutex_real_waiter(waiter))
507 * Check the orig_waiter state. After we dropped the locks,
508 * the previous owner of the lock might have released the lock.
510 if (orig_waiter && !rt_mutex_owner(orig_lock))
514 * We dropped all locks after taking a refcount on @task, so
515 * the task might have moved on in the lock chain or even left
516 * the chain completely and blocks now on an unrelated lock or
519 * We stored the lock on which @task was blocked in @next_lock,
520 * so we can detect the chain change.
522 if (next_lock != waiter->lock)
526 * Drop out, when the task has no waiters. Note,
527 * top_waiter can be NULL, when we are in the deboosting
531 if (!task_has_pi_waiters(task))
534 * If deadlock detection is off, we stop here if we
535 * are not the top pi waiter of the task. If deadlock
536 * detection is enabled we continue, but stop the
537 * requeueing in the chain walk.
539 if (top_waiter != task_top_pi_waiter(task)) {
540 if (!detect_deadlock)
548 * If the waiter priority is the same as the task priority
549 * then there is no further priority adjustment necessary. If
550 * deadlock detection is off, we stop the chain walk. If it's
551 * enabled we continue, but stop the requeueing in the chain
554 if (waiter->prio == task->prio) {
555 if (!detect_deadlock)
562 * [4] Get the next lock
566 * [5] We need to trylock here as we are holding task->pi_lock,
567 * which is the reverse lock order versus the other rtmutex
570 if (!raw_spin_trylock(&lock->wait_lock)) {
571 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
577 * [6] check_exit_conditions_2() protected by task->pi_lock and
580 * Deadlock detection. If the lock is the same as the original
581 * lock which caused us to walk the lock chain or if the
582 * current lock is owned by the task which initiated the chain
583 * walk, we detected a deadlock.
585 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
586 debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
587 raw_spin_unlock(&lock->wait_lock);
593 * If we just follow the lock chain for deadlock detection, no
594 * need to do all the requeue operations. To avoid a truckload
595 * of conditionals around the various places below, just do the
596 * minimum chain walk checks.
600 * No requeue[7] here. Just release @task [8]
602 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
603 put_task_struct(task);
606 * [9] check_exit_conditions_3 protected by lock->wait_lock.
607 * If there is no owner of the lock, end of chain.
609 if (!rt_mutex_owner(lock)) {
610 raw_spin_unlock(&lock->wait_lock);
614 /* [10] Grab the next task, i.e. owner of @lock */
615 task = rt_mutex_owner(lock);
616 get_task_struct(task);
617 raw_spin_lock_irqsave(&task->pi_lock, flags);
620 * No requeue [11] here. We just do deadlock detection.
622 * [12] Store whether owner is blocked
623 * itself. Decision is made after dropping the locks
625 next_lock = task_blocked_on_lock(task);
627 * Get the top waiter for the next iteration
629 top_waiter = rt_mutex_top_waiter(lock);
631 /* [13] Drop locks */
632 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
633 raw_spin_unlock(&lock->wait_lock);
635 /* If owner is not blocked, end of chain. */
642 * Store the current top waiter before doing the requeue
643 * operation on @lock. We need it for the boost/deboost
646 prerequeue_top_waiter = rt_mutex_top_waiter(lock);
648 /* [7] Requeue the waiter in the lock waiter list. */
649 rt_mutex_dequeue(lock, waiter);
650 waiter->prio = task->prio;
651 rt_mutex_enqueue(lock, waiter);
653 /* [8] Release the task */
654 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
655 put_task_struct(task);
658 * [9] check_exit_conditions_3 protected by lock->wait_lock.
660 * We must abort the chain walk if there is no lock owner even
661 * in the deadlock detection case, as we have nothing to
662 * follow here. This is the end of the chain we are walking.
664 if (!rt_mutex_owner(lock)) {
665 struct rt_mutex_waiter *lock_top_waiter;
668 * If the requeue [7] above changed the top waiter,
669 * then we need to wake the new top waiter up to try
672 lock_top_waiter = rt_mutex_top_waiter(lock);
673 if (prerequeue_top_waiter != lock_top_waiter)
674 rt_mutex_wake_waiter(lock_top_waiter);
675 raw_spin_unlock(&lock->wait_lock);
679 /* [10] Grab the next task, i.e. the owner of @lock */
680 task = rt_mutex_owner(lock);
681 get_task_struct(task);
682 raw_spin_lock_irqsave(&task->pi_lock, flags);
684 /* [11] requeue the pi waiters if necessary */
685 if (waiter == rt_mutex_top_waiter(lock)) {
687 * The waiter became the new top (highest priority)
688 * waiter on the lock. Replace the previous top waiter
689 * in the owner task's pi waiters list with this waiter
690 * and adjust the priority of the owner.
692 rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
693 rt_mutex_enqueue_pi(task, waiter);
694 __rt_mutex_adjust_prio(task);
696 } else if (prerequeue_top_waiter == waiter) {
698 * The waiter was the top waiter on the lock, but is
699 * no longer the top priority waiter. Replace waiter in
700 * the owner task's pi waiters list with the new top
701 * (highest priority) waiter and adjust the priority
703 * The new top waiter is stored in @waiter so that
704 * @waiter == @top_waiter evaluates to true below and
705 * we continue to deboost the rest of the chain.
707 rt_mutex_dequeue_pi(task, waiter);
708 waiter = rt_mutex_top_waiter(lock);
709 rt_mutex_enqueue_pi(task, waiter);
710 __rt_mutex_adjust_prio(task);
713 * Nothing changed. No need to do any priority
719 * [12] check_exit_conditions_4() protected by task->pi_lock
720 * and lock->wait_lock. The actual decisions are made after we
723 * Check whether the task which owns the current lock is pi
724 * blocked itself. If yes we store a pointer to the lock for
725 * the lock chain change detection above. After we dropped
726 * task->pi_lock next_lock cannot be dereferenced anymore.
728 next_lock = task_blocked_on_lock(task);
730 * Store the top waiter of @lock for the end of chain walk
733 top_waiter = rt_mutex_top_waiter(lock);
735 /* [13] Drop the locks */
736 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
737 raw_spin_unlock(&lock->wait_lock);
740 * Make the actual exit decisions [12], based on the stored
743 * We reached the end of the lock chain. Stop right here. No
744 * point to go back just to figure that out.
750 * If the current waiter is not the top waiter on the lock,
751 * then we can stop the chain walk here if we are not in full
752 * deadlock detection mode.
754 if (!detect_deadlock && waiter != top_waiter)
760 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
762 put_task_struct(task);
768 #define STEAL_NORMAL 0
769 #define STEAL_LATERAL 1
772 * Note that RT tasks are excluded from lateral-steals to prevent the
773 * introduction of an unbounded latency
775 static inline int lock_is_stealable(struct task_struct *task,
776 struct task_struct *pendowner, int mode)
778 if (mode == STEAL_NORMAL || rt_task(task)) {
779 if (task->prio >= pendowner->prio)
781 } else if (task->prio > pendowner->prio)
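/*
 * Example (illustrative): with STEAL_LATERAL a SCHED_OTHER task of equal
 * ->prio may take the lock ahead of the pending owner, saving a needless
 * wakeup/reschedule; an RT task must still have a strictly higher
 * priority (numerically lower ->prio), so worst case RT latency stays
 * bounded.
 */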
787 * Try to take an rt-mutex
789 * Must be called with lock->wait_lock held.
791 * @lock: The lock to be acquired.
792 * @task: The task which wants to acquire the lock
793 * @waiter: The waiter that is queued to the lock's wait list if the
794 * callsite called task_blocked_on_lock(), otherwise NULL
796 static int __try_to_take_rt_mutex(struct rt_mutex *lock,
797 struct task_struct *task,
798 struct rt_mutex_waiter *waiter, int mode)
803 * Before testing whether we can acquire @lock, we set the
804 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
805 * other tasks which try to modify @lock into the slow path
806 * and they serialize on @lock->wait_lock.
808 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
809 * as explained at the top of this file if and only if:
811 * - There is a lock owner. The caller must fixup the
812 * transient state if it does a trylock or leaves the lock
813 * function due to a signal or timeout.
815 * - @task acquires the lock and there are no other
816 * waiters. This is undone in rt_mutex_set_owner(@task) at
817 * the end of this function.
819 mark_rt_mutex_waiters(lock);
822 * If @lock has an owner, give up.
824 if (rt_mutex_owner(lock))
828 * If @waiter != NULL, @task has already enqueued the waiter
829 * into @lock waiter list. If @waiter == NULL then this is a
834 * If waiter is not the highest priority waiter of
837 if (waiter != rt_mutex_top_waiter(lock)) {
838 /* XXX lock_is_stealable() ? */
843 * We can acquire the lock. Remove the waiter from the
846 rt_mutex_dequeue(lock, waiter);
850 * If the lock has waiters already we check whether @task is
851 * eligible to take over the lock.
853 * If there are no other waiters, @task can acquire
854 * the lock. @task->pi_blocked_on is NULL, so it does
855 * not need to be dequeued.
857 if (rt_mutex_has_waiters(lock)) {
858 struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
860 if (task != pown && !lock_is_stealable(task, pown, mode))
863 * The current top waiter stays enqueued. We
864 * don't have to change anything in the lock
869 * No waiters. Take the lock without the
870 * pi_lock dance. @task->pi_blocked_on is NULL
871 * and we have no waiters to enqueue in @task
879 * Clear @task->pi_blocked_on. Requires protection by
880 * @task->pi_lock. Redundant operation for the @waiter == NULL
881 * case, but conditionals are more expensive than a redundant
884 raw_spin_lock_irqsave(&task->pi_lock, flags);
885 task->pi_blocked_on = NULL;
887 * Finish the lock acquisition. @task is the new owner. If
888 * other waiters exist we have to insert the highest priority
889 * waiter into @task->pi_waiters list.
891 if (rt_mutex_has_waiters(lock))
892 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
893 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
896 /* We got the lock. */
897 debug_rt_mutex_lock(lock);
900 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
901 * are still waiters or clears it.
903 rt_mutex_set_owner(lock, task);
905 rt_mutex_deadlock_account_lock(lock, task);
910 #ifdef CONFIG_PREEMPT_RT_FULL
912 * preemptible spin_lock functions:
914 static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
915 void (*slowfn)(struct rt_mutex *lock))
917 might_sleep_no_state_check();
919 if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
920 rt_mutex_deadlock_account_lock(lock, current);
925 static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
926 void (*slowfn)(struct rt_mutex *lock))
928 if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
929 rt_mutex_deadlock_account_unlock(current);
935 * Note that owner is a speculative pointer and dereferencing relies
936 * on rcu_read_lock() and the check against the lock owner.
938 static int adaptive_wait(struct rt_mutex *lock,
939 struct task_struct *owner)
945 if (owner != rt_mutex_owner(lock))
948 * Ensure that owner->on_cpu is dereferenced _after_
949 * checking the above to be valid.
952 if (!owner->on_cpu) {
962 static int adaptive_wait(struct rt_mutex *lock,
963 struct task_struct *orig_owner)
969 # define pi_lock(lock) raw_spin_lock_irq(lock)
970 # define pi_unlock(lock) raw_spin_unlock_irq(lock)
972 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
973 struct rt_mutex_waiter *waiter,
974 struct task_struct *task,
975 enum rtmutex_chainwalk chwalk);
977 * Slow path lock function spin_lock style: this variant is very
978 * careful not to miss any non-lock wakeups.
980 * We store the current state under p->pi_lock in p->saved_state and
981 * the try_to_wake_up() code handles this accordingly.
983 static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
985 struct task_struct *lock_owner, *self = current;
986 struct rt_mutex_waiter waiter, *top_waiter;
989 rt_mutex_init_waiter(&waiter, true);
991 raw_spin_lock(&lock->wait_lock);
993 if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
994 raw_spin_unlock(&lock->wait_lock);
998 BUG_ON(rt_mutex_owner(lock) == self);
1001 * We save whatever state the task is in and we'll restore it
1002 * after acquiring the lock taking real wakeups into account
1003 * as well. We are serialized via pi_lock against wakeups. See
1006 pi_lock(&self->pi_lock);
1007 self->saved_state = self->state;
1008 __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
1009 pi_unlock(&self->pi_lock);
1011 ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
1015 /* Try to acquire the lock again. */
1016 if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
1019 top_waiter = rt_mutex_top_waiter(lock);
1020 lock_owner = rt_mutex_owner(lock);
1022 raw_spin_unlock(&lock->wait_lock);
1024 debug_rt_mutex_print_deadlock(&waiter);
1026 if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
1027 schedule_rt_mutex(lock);
1029 raw_spin_lock(&lock->wait_lock);
1031 pi_lock(&self->pi_lock);
1032 __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
1033 pi_unlock(&self->pi_lock);
1037 * Restore the task state to current->saved_state. We set it
1038 * to the original state above and the try_to_wake_up() code
1039 * has possibly updated it when a real (non-rtmutex) wakeup
1040 * happened while we were blocked. Clear saved_state so
1041 * try_to_wake_up() does not get confused.
1043 pi_lock(&self->pi_lock);
1044 __set_current_state_no_track(self->saved_state);
1045 self->saved_state = TASK_RUNNING;
1046 pi_unlock(&self->pi_lock);
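	/*
	 * Example (illustrative): a task that has already set
	 * TASK_INTERRUPTIBLE to wait for an event and then blocks on a
	 * contended spinlock_t sleeps here in TASK_UNINTERRUPTIBLE; a
	 * wakeup for the original event arriving in the meantime is only
	 * recorded in ->saved_state, so the restore above leaves the task
	 * in TASK_RUNNING and the wakeup is not lost.
	 */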
1049 * try_to_take_rt_mutex() sets the waiter bit
1050 * unconditionally. We might have to fix that up:
1052 fixup_rt_mutex_waiters(lock);
1054 BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
1055 BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
1057 raw_spin_unlock(&lock->wait_lock);
1059 debug_rt_mutex_free_waiter(&waiter);
1062 static void wakeup_next_waiter(struct rt_mutex *lock);
1064 * Slow path to release a rt_mutex spin_lock style
1066 static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
1068 raw_spin_lock(&lock->wait_lock);
1070 debug_rt_mutex_unlock(lock);
1072 rt_mutex_deadlock_account_unlock(current);
1074 if (!rt_mutex_has_waiters(lock)) {
1076 raw_spin_unlock(&lock->wait_lock);
1080 wakeup_next_waiter(lock);
1082 raw_spin_unlock(&lock->wait_lock);
1084 /* Undo pi boosting when necessary */
1085 rt_mutex_adjust_prio(current);
1088 void __lockfunc rt_spin_lock(spinlock_t *lock)
1090 rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
1091 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1093 EXPORT_SYMBOL(rt_spin_lock);
1095 void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
1097 rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
1099 EXPORT_SYMBOL(__rt_spin_lock);
1101 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1102 void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
1104 rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
1105 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
1107 EXPORT_SYMBOL(rt_spin_lock_nested);
1110 void __lockfunc rt_spin_unlock(spinlock_t *lock)
1112 /* NOTE: we always pass in '1' for nested, for simplicity */
1113 spin_release(&lock->dep_map, 1, _RET_IP_);
1114 rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
1116 EXPORT_SYMBOL(rt_spin_unlock);
1118 void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
1120 rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
1122 EXPORT_SYMBOL(__rt_spin_unlock);
1125 * Wait for the lock to get unlocked: instead of polling for an unlock
1126 * (like raw spinlocks do), we lock and unlock, to force the kernel to
1127 * schedule if there's contention:
1129 void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
1134 EXPORT_SYMBOL(rt_spin_unlock_wait);
1136 int __lockfunc __rt_spin_trylock(struct rt_mutex *lock)
1138 return rt_mutex_trylock(lock);
1141 int __lockfunc rt_spin_trylock(spinlock_t *lock)
1143 int ret = rt_mutex_trylock(&lock->lock);
1146 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1149 EXPORT_SYMBOL(rt_spin_trylock);
1151 int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
1156 ret = rt_mutex_trylock(&lock->lock);
1159 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1164 EXPORT_SYMBOL(rt_spin_trylock_bh);
1166 int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
1171 ret = rt_mutex_trylock(&lock->lock);
1174 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1178 EXPORT_SYMBOL(rt_spin_trylock_irqsave);
1180 int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
1182 /* Subtract 1 from counter unless that drops it to 0 (i.e. it was 1) */
1183 if (atomic_add_unless(atomic, -1, 1))
1187 if (atomic_dec_and_test(atomic))
1189 rt_spin_unlock(lock);
1193 EXPORT_SYMBOL(atomic_dec_and_spin_lock);
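/*
 * Typical usage sketch for atomic_dec_and_spin_lock() (illustrative; the
 * object and its fields are made up): drop a reference and take the lock
 * only when the count hits zero, so the final put can unlink and free
 * the object under the lock.
 */
#if 0
struct ex_object {
	atomic_t		refcnt;
	spinlock_t		lock;
	struct list_head	node;
};

static void ex_put_object(struct ex_object *obj)
{
	/* Returns 1 (with obj->lock held) only for the final reference */
	if (!atomic_dec_and_spin_lock(&obj->refcnt, &obj->lock))
		return;

	list_del(&obj->node);
	spin_unlock(&obj->lock);
	kfree(obj);
}
#endif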
1196 __rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
1198 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1200 * Make sure we are not reinitializing a held lock:
1202 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
1203 lockdep_init_map(&lock->dep_map, name, key, 0);
1206 EXPORT_SYMBOL(__rt_spin_lock_init);
1208 #endif /* PREEMPT_RT_FULL */
1210 #ifdef CONFIG_PREEMPT_RT_FULL
1211 static inline int __sched
1212 __mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
1214 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
1215 struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
1220 if (unlikely(ctx == hold_ctx))
1223 if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
1224 (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
1225 #ifdef CONFIG_DEBUG_MUTEXES
1226 DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
1227 ctx->contending_lock = ww;
1235 static inline int __sched
1236 __mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
1245 try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
1246 struct rt_mutex_waiter *waiter)
1248 return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
1252 * Task blocks on lock.
1254 * Prepare waiter and propagate pi chain
1256 * This must be called with lock->wait_lock held.
1258 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
1259 struct rt_mutex_waiter *waiter,
1260 struct task_struct *task,
1261 enum rtmutex_chainwalk chwalk)
1263 struct task_struct *owner = rt_mutex_owner(lock);
1264 struct rt_mutex_waiter *top_waiter = waiter;
1265 struct rt_mutex *next_lock;
1266 int chain_walk = 0, res;
1267 unsigned long flags;
1270 * Early deadlock detection. We really don't want the task to
1271 * enqueue on itself just to untangle the mess later. It's not
1272 * only an optimization. We drop the locks, so another waiter
1273 * can come in before the chain walk detects the deadlock. So
1274 * the other will detect the deadlock and return -EDEADLOCK,
1275 * which is wrong, as the other waiter is not in a deadlock
1281 raw_spin_lock_irqsave(&task->pi_lock, flags);
1284 * In the case of futex requeue PI, this will be a proxy
1285 * lock. The task will wake unaware that it is enqueued on
1286 * this lock. Avoid blocking on two locks and corrupting
1287 * pi_blocked_on via the PI_WAKEUP_INPROGRESS
1288 * flag. futex_wait_requeue_pi() sets this when it wakes up
1289 * before requeue (due to a signal or timeout). Do not enqueue
1290 * the task if PI_WAKEUP_INPROGRESS is set.
1292 if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
1293 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
1297 BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
1299 __rt_mutex_adjust_prio(task);
1300 waiter->task = task;
1301 waiter->lock = lock;
1302 waiter->prio = task->prio;
1304 /* Get the top priority waiter on the lock */
1305 if (rt_mutex_has_waiters(lock))
1306 top_waiter = rt_mutex_top_waiter(lock);
1307 rt_mutex_enqueue(lock, waiter);
1309 task->pi_blocked_on = waiter;
1311 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
1316 raw_spin_lock_irqsave(&owner->pi_lock, flags);
1317 if (waiter == rt_mutex_top_waiter(lock)) {
1318 rt_mutex_dequeue_pi(owner, top_waiter);
1319 rt_mutex_enqueue_pi(owner, waiter);
1321 __rt_mutex_adjust_prio(owner);
1322 if (rt_mutex_real_waiter(owner->pi_blocked_on))
1324 } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
1328 /* Store the lock on which owner is blocked or NULL */
1329 next_lock = task_blocked_on_lock(owner);
1331 raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
1333 * Even if full deadlock detection is on, if the owner is not
1334 * blocked itself, we can avoid finding this out in the chain
1337 if (!chain_walk || !next_lock)
1341 * The owner can't disappear while holding a lock,
1342 * so the owner struct is protected by wait_lock.
1343 * Gets dropped in rt_mutex_adjust_prio_chain()!
1345 get_task_struct(owner);
1347 raw_spin_unlock(&lock->wait_lock);
1349 res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
1350 next_lock, waiter, task);
1352 raw_spin_lock(&lock->wait_lock);
1358 * Wake up the next waiter on the lock.
1360 * Remove the top waiter from the current task's pi waiters list,
1361 * wake it up and return whether the current task needs to undo
1362 * a potential priority boosting.
1364 * Called with lock->wait_lock held.
1366 static void wakeup_next_waiter(struct rt_mutex *lock)
1368 struct rt_mutex_waiter *waiter;
1369 unsigned long flags;
1371 raw_spin_lock_irqsave(&current->pi_lock, flags);
1373 waiter = rt_mutex_top_waiter(lock);
1376 * Remove it from current->pi_waiters. We do not adjust a
1377 * possible priority boost right now. We execute wakeup in the
1378 * boosted mode and go back to normal after releasing
1381 rt_mutex_dequeue_pi(current, waiter);
1384 * As we are waking up the top waiter, and the waiter stays
1385 * queued on the lock until it gets the lock, this lock
1386 * obviously has waiters. Just set the bit here and this has
1387 * the added benefit of forcing all new tasks into the
1388 * slow path making sure no task of lower priority than
1389 * the top waiter can steal this lock.
1391 lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
1393 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1396 * It's safe to dereference waiter as it cannot go away as
1397 * long as we hold lock->wait_lock. The waiter task needs to
1398 * acquire it in order to dequeue the waiter.
1400 rt_mutex_wake_waiter(waiter);
1404 * Remove a waiter from a lock and give up
1406 * Must be called with lock->wait_lock held and
1407 * have just failed to try_to_take_rt_mutex().
1409 static void remove_waiter(struct rt_mutex *lock,
1410 struct rt_mutex_waiter *waiter)
1412 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
1413 struct task_struct *owner = rt_mutex_owner(lock);
1414 struct rt_mutex *next_lock = NULL;
1415 unsigned long flags;
1417 raw_spin_lock_irqsave(&current->pi_lock, flags);
1418 rt_mutex_dequeue(lock, waiter);
1419 current->pi_blocked_on = NULL;
1420 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1423 * Only update priority if the waiter was the highest priority
1424 * waiter of the lock and there is an owner to update.
1426 if (!owner || !is_top_waiter)
1429 raw_spin_lock_irqsave(&owner->pi_lock, flags);
1431 rt_mutex_dequeue_pi(owner, waiter);
1433 if (rt_mutex_has_waiters(lock))
1434 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
1436 __rt_mutex_adjust_prio(owner);
1438 /* Store the lock on which owner is blocked or NULL */
1439 if (rt_mutex_real_waiter(owner->pi_blocked_on))
1440 next_lock = task_blocked_on_lock(owner);
1442 raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
1445 * Don't walk the chain, if the owner task is not blocked
1451 /* gets dropped in rt_mutex_adjust_prio_chain()! */
1452 get_task_struct(owner);
1454 raw_spin_unlock(&lock->wait_lock);
1456 rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
1457 next_lock, NULL, current);
1459 raw_spin_lock(&lock->wait_lock);
1463 * Recheck the pi chain, in case we got a priority setting
1465 * Called from sched_setscheduler
1467 void rt_mutex_adjust_pi(struct task_struct *task)
1469 struct rt_mutex_waiter *waiter;
1470 struct rt_mutex *next_lock;
1471 unsigned long flags;
1473 raw_spin_lock_irqsave(&task->pi_lock, flags);
1475 waiter = task->pi_blocked_on;
1476 if (!rt_mutex_real_waiter(waiter) || (waiter->prio == task->prio &&
1477 !dl_prio(task->prio))) {
1478 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
1481 next_lock = waiter->lock;
1483 /* gets dropped in rt_mutex_adjust_prio_chain()! */
1484 get_task_struct(task);
1486 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
1487 rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
1488 next_lock, NULL, task);
1492 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
1493 * @lock: the rt_mutex to take
1494 * @state: the state the task should block in (TASK_INTERRUPTIBLE
1495 * or TASK_UNINTERRUPTIBLE)
1496 * @timeout: the pre-initialized and started timer, or NULL for none
1497 * @waiter: the pre-initialized rt_mutex_waiter
1499 * lock->wait_lock must be held by the caller.
1502 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
1503 struct hrtimer_sleeper *timeout,
1504 struct rt_mutex_waiter *waiter,
1505 struct ww_acquire_ctx *ww_ctx)
1510 /* Try to acquire the lock: */
1511 if (try_to_take_rt_mutex(lock, current, waiter))
1515 * TASK_INTERRUPTIBLE checks for signals and
1516 * timeout. Ignored otherwise.
1518 if (unlikely(state == TASK_INTERRUPTIBLE)) {
1519 /* Signal pending? */
1520 if (signal_pending(current))
1522 if (timeout && !timeout->task)
1528 if (ww_ctx && ww_ctx->acquired > 0) {
1529 ret = __mutex_lock_check_stamp(lock, ww_ctx);
1534 raw_spin_unlock(&lock->wait_lock);
1536 debug_rt_mutex_print_deadlock(waiter);
1538 schedule_rt_mutex(lock);
1540 raw_spin_lock(&lock->wait_lock);
1541 set_current_state(state);
1544 __set_current_state(TASK_RUNNING);
1548 static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
1549 struct rt_mutex_waiter *w)
1552 * If the result is not -EDEADLOCK or the caller requested
1553 * deadlock detection, nothing to do here.
1555 if (res != -EDEADLOCK || detect_deadlock)
1559 * Yell loudly and stop the task right here.
1561 rt_mutex_print_deadlock(w);
1563 set_current_state(TASK_INTERRUPTIBLE);
1568 static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
1569 struct ww_acquire_ctx *ww_ctx)
1571 #ifdef CONFIG_DEBUG_MUTEXES
1573 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
1574 * but released with a normal mutex_unlock in this call.
1576 * This should never happen, always use ww_mutex_unlock.
1578 DEBUG_LOCKS_WARN_ON(ww->ctx);
1581 * Not quite done after calling ww_acquire_done() ?
1583 DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
1585 if (ww_ctx->contending_lock) {
1587 * After -EDEADLK you tried to
1588 * acquire a different ww_mutex? Bad!
1590 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
1593 * You called ww_mutex_lock after receiving -EDEADLK,
1594 * but 'forgot' to unlock everything else first?
1596 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
1597 ww_ctx->contending_lock = NULL;
1601 * Naughty, using a different class will lead to undefined behavior!
1603 DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
1608 #ifdef CONFIG_PREEMPT_RT_FULL
1609 static void ww_mutex_account_lock(struct rt_mutex *lock,
1610 struct ww_acquire_ctx *ww_ctx)
1612 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
1613 struct rt_mutex_waiter *waiter, *n;
1616 * This branch gets optimized out for the common case,
1617 * and is only important for ww_mutex_lock.
1619 ww_mutex_lock_acquired(ww, ww_ctx);
1623 * Give any possible sleeping processes the chance to wake up,
1624 * so they can recheck if they have to back off.
1626 rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters,
1628 /* XXX debug rt mutex waiter wakeup */
1630 BUG_ON(waiter->lock != lock);
1631 rt_mutex_wake_waiter(waiter);
1637 static void ww_mutex_account_lock(struct rt_mutex *lock,
1638 struct ww_acquire_ctx *ww_ctx)
1645 * Slow path lock function:
1648 rt_mutex_slowlock(struct rt_mutex *lock, int state,
1649 struct hrtimer_sleeper *timeout,
1650 enum rtmutex_chainwalk chwalk,
1651 struct ww_acquire_ctx *ww_ctx)
1653 struct rt_mutex_waiter waiter;
1656 rt_mutex_init_waiter(&waiter, false);
1658 raw_spin_lock(&lock->wait_lock);
1660 /* Try to acquire the lock again: */
1661 if (try_to_take_rt_mutex(lock, current, NULL)) {
1663 ww_mutex_account_lock(lock, ww_ctx);
1664 raw_spin_unlock(&lock->wait_lock);
1668 set_current_state(state);
1670 /* Setup the timer, when timeout != NULL */
1671 if (unlikely(timeout)) {
1672 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
1673 if (!hrtimer_active(&timeout->timer))
1674 timeout->task = NULL;
1677 ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
1680 /* sleep on the mutex */
1681 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
1684 /* ww_mutex received EDEADLK, let it become EALREADY */
1685 ret = __mutex_lock_check_stamp(lock, ww_ctx);
1689 if (unlikely(ret)) {
1690 __set_current_state(TASK_RUNNING);
1691 if (rt_mutex_has_waiters(lock))
1692 remove_waiter(lock, &waiter);
1693 /* ww_mutex wants to report EDEADLK/EALREADY, let it */
1695 rt_mutex_handle_deadlock(ret, chwalk, &waiter);
1696 } else if (ww_ctx) {
1697 ww_mutex_account_lock(lock, ww_ctx);
1701 * try_to_take_rt_mutex() sets the waiter bit
1702 * unconditionally. We might have to fix that up.
1704 fixup_rt_mutex_waiters(lock);
1706 raw_spin_unlock(&lock->wait_lock);
1708 /* Remove pending timer: */
1709 if (unlikely(timeout))
1710 hrtimer_cancel(&timeout->timer);
1712 debug_rt_mutex_free_waiter(&waiter);
1718 * Slow path try-lock function:
1720 static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
1725 * If the lock already has an owner we fail to get the lock.
1726 * This can be done without taking the @lock->wait_lock as
1727 * it is only being read, and this is a trylock anyway.
1729 if (rt_mutex_owner(lock))
1733 * The mutex has currently no owner. Lock the wait lock and
1734 * try to acquire the lock.
1736 raw_spin_lock(&lock->wait_lock);
1738 ret = try_to_take_rt_mutex(lock, current, NULL);
1741 * try_to_take_rt_mutex() sets the lock waiters bit
1742 * unconditionally. Clean this up.
1744 fixup_rt_mutex_waiters(lock);
1746 raw_spin_unlock(&lock->wait_lock);
1752 * Slow path to release a rt-mutex:
1755 rt_mutex_slowunlock(struct rt_mutex *lock)
1757 raw_spin_lock(&lock->wait_lock);
1759 debug_rt_mutex_unlock(lock);
1761 rt_mutex_deadlock_account_unlock(current);
1764 * We must be careful here if the fast path is enabled. If we
1765 * have no waiters queued we cannot set owner to NULL here
1768 * foo->lock->owner = NULL;
1769 * rtmutex_lock(foo->lock); <- fast path
1770 * free = atomic_dec_and_test(foo->refcnt);
1771 * rtmutex_unlock(foo->lock); <- fast path
1774 * raw_spin_unlock(foo->lock->wait_lock);
1776 * So for the fastpath enabled kernel:
1778 * Nothing can set the waiters bit as long as we hold
1779 * lock->wait_lock. So we do the following sequence:
1781 * owner = rt_mutex_owner(lock);
1782 * clear_rt_mutex_waiters(lock);
1783 * raw_spin_unlock(&lock->wait_lock);
1784 * if (cmpxchg(&lock->owner, owner, 0) == owner)
1788 * The fastpath disabled variant is simple as all access to
1789 * lock->owner is serialized by lock->wait_lock:
1791 * lock->owner = NULL;
1792 * raw_spin_unlock(&lock->wait_lock);
1794 while (!rt_mutex_has_waiters(lock)) {
1795 /* Drops lock->wait_lock ! */
1796 if (unlock_rt_mutex_safe(lock) == true)
1798 /* Relock the rtmutex and try again */
1799 raw_spin_lock(&lock->wait_lock);
1803 * The wakeup next waiter path does not suffer from the above
1804 * race. See the comments there.
1806 wakeup_next_waiter(lock);
1808 raw_spin_unlock(&lock->wait_lock);
1814 * debug aware fast / slowpath lock,trylock,unlock
1816 * The atomic acquire/release ops are compiled away, when either the
1817 * architecture does not support cmpxchg or when debugging is enabled.
1820 rt_mutex_fastlock(struct rt_mutex *lock, int state,
1821 struct ww_acquire_ctx *ww_ctx,
1822 int (*slowfn)(struct rt_mutex *lock, int state,
1823 struct hrtimer_sleeper *timeout,
1824 enum rtmutex_chainwalk chwalk,
1825 struct ww_acquire_ctx *ww_ctx))
1827 if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
1828 rt_mutex_deadlock_account_lock(lock, current);
1831 return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK,
1836 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
1837 struct hrtimer_sleeper *timeout,
1838 enum rtmutex_chainwalk chwalk,
1839 struct ww_acquire_ctx *ww_ctx,
1840 int (*slowfn)(struct rt_mutex *lock, int state,
1841 struct hrtimer_sleeper *timeout,
1842 enum rtmutex_chainwalk chwalk,
1843 struct ww_acquire_ctx *ww_ctx))
1845 if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
1846 likely(rt_mutex_cmpxchg(lock, NULL, current))) {
1847 rt_mutex_deadlock_account_lock(lock, current);
1850 return slowfn(lock, state, timeout, chwalk, ww_ctx);
1854 rt_mutex_fasttrylock(struct rt_mutex *lock,
1855 int (*slowfn)(struct rt_mutex *lock))
1857 if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
1858 rt_mutex_deadlock_account_lock(lock, current);
1861 return slowfn(lock);
1865 rt_mutex_fastunlock(struct rt_mutex *lock,
1866 bool (*slowfn)(struct rt_mutex *lock))
1868 if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
1869 rt_mutex_deadlock_account_unlock(current);
1870 } else if (slowfn(lock)) {
1871 /* Undo pi boosting if necessary: */
1872 rt_mutex_adjust_prio(current);
1877 * rt_mutex_lock - lock a rt_mutex
1879 * @lock: the rt_mutex to be locked
1881 void __sched rt_mutex_lock(struct rt_mutex *lock)
1885 rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock);
1887 EXPORT_SYMBOL_GPL(rt_mutex_lock);
1890 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
1892 * @lock: the rt_mutex to be locked
1896 * -EINTR when interrupted by a signal
1898 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
1902 return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL, rt_mutex_slowlock);
1904 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
1907 * Futex variant with full deadlock detection.
1909 int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
1910 struct hrtimer_sleeper *timeout)
1914 return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
1915 RT_MUTEX_FULL_CHAINWALK, NULL,
1920 * rt_mutex_lock_killable - lock a rt_mutex killable
1922 * @lock: the rt_mutex to be locked
1927 * -EINTR when interrupted by a signal
1928 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
1930 int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
1934 return rt_mutex_fastlock(lock, TASK_KILLABLE, NULL, rt_mutex_slowlock);
1936 EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
1939 * rt_mutex_timed_lock - lock a rt_mutex interruptible
1940 * the timeout structure is provided
1943 * @lock: the rt_mutex to be locked
1944 * @timeout: timeout structure or NULL (no timeout)
1948 * -EINTR when interrupted by a signal
1949 * -ETIMEDOUT when the timeout expired
1952 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
1956 return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
1957 RT_MUTEX_MIN_CHAINWALK,
1961 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
1964 * rt_mutex_trylock - try to lock a rt_mutex
1966 * @lock: the rt_mutex to be locked
1968 * Returns 1 on success and 0 on contention
1970 int __sched rt_mutex_trylock(struct rt_mutex *lock)
1972 return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
1974 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
1977 * rt_mutex_unlock - unlock a rt_mutex
1979 * @lock: the rt_mutex to be unlocked
1981 void __sched rt_mutex_unlock(struct rt_mutex *lock)
1983 rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
1985 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
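/*
 * Minimal usage sketch for the lock/trylock/unlock API above
 * (illustrative; the lock name is made up):
 */
#if 0
static DEFINE_RT_MUTEX(ex_lock);

static void ex_critical_section(void)
{
	rt_mutex_lock(&ex_lock);
	/* exclusive section; PI boosts us if a higher prio task blocks */
	rt_mutex_unlock(&ex_lock);
}
#endif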
1988 * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
1989 * @lock: the rt_mutex to be unlocked
1991 * Returns: true/false indicating whether priority adjustment is required.
1994 bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
1996 if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
1997 rt_mutex_deadlock_account_unlock(current);
2000 return rt_mutex_slowunlock(lock);
2004 * rt_mutex_destroy - mark a mutex unusable
2005 * @lock: the mutex to be destroyed
2007 * This function marks the mutex uninitialized, and any subsequent
2008 * use of the mutex is forbidden. The mutex must not be locked when
2009 * this function is called.
2011 void rt_mutex_destroy(struct rt_mutex *lock)
2013 WARN_ON(rt_mutex_is_locked(lock));
2014 #ifdef CONFIG_DEBUG_RT_MUTEXES
2019 EXPORT_SYMBOL_GPL(rt_mutex_destroy);
2022 * __rt_mutex_init - initialize the rt lock
2024 * @lock: the rt lock to be initialized
2026 * Initialize the rt lock to unlocked state.
2028 * Initializing of a locked rt lock is not allowed
2030 void __rt_mutex_init(struct rt_mutex *lock, const char *name)
2033 lock->waiters = RB_ROOT;
2034 lock->waiters_leftmost = NULL;
2036 debug_rt_mutex_init(lock, name);
2038 EXPORT_SYMBOL(__rt_mutex_init);
2041 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
2044 * @lock: the rt_mutex to be locked
2045 * @proxy_owner:the task to set as owner
2047 * No locking. Caller has to do serializing itself
2048 * Special API call for PI-futex support
2050 void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
2051 struct task_struct *proxy_owner)
2053 rt_mutex_init(lock);
2054 debug_rt_mutex_proxy_lock(lock, proxy_owner);
2055 rt_mutex_set_owner(lock, proxy_owner);
2056 rt_mutex_deadlock_account_lock(lock, proxy_owner);
2060 * rt_mutex_proxy_unlock - release a lock on behalf of owner
2062 * @lock: the rt_mutex to be locked
2064 * No locking. Caller has to do serializing itself
2065 * Special API call for PI-futex support
2067 void rt_mutex_proxy_unlock(struct rt_mutex *lock,
2068 struct task_struct *proxy_owner)
2070 debug_rt_mutex_proxy_unlock(lock);
2071 rt_mutex_set_owner(lock, NULL);
2072 rt_mutex_deadlock_account_unlock(proxy_owner);
2076 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
2077 * @lock: the rt_mutex to take
2078 * @waiter: the pre-initialized rt_mutex_waiter
2079 * @task: the task to prepare
2082 * 0 - task blocked on lock
2083 * 1 - acquired the lock for task, caller should wake it up
2086 * Special API call for FUTEX_REQUEUE_PI support.
2088 int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
2089 struct rt_mutex_waiter *waiter,
2090 struct task_struct *task)
2094 raw_spin_lock(&lock->wait_lock);
2096 if (try_to_take_rt_mutex(lock, task, NULL)) {
2097 raw_spin_unlock(&lock->wait_lock);
2101 #ifdef CONFIG_PREEMPT_RT_FULL
2103 * In PREEMPT_RT there's an added race.
2104 * If the task that we are about to requeue times out,
2105 * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
2106 * to skip this task. But right after the task sets
2107 * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
2108 * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
2109 * This will replace the PI_WAKEUP_INPROGRESS with the actual
2110 * lock that it blocks on. We *must not* place this task
2111 * on this proxy lock in that case.
2113 * To prevent this race, we first take the task's pi_lock
2114 * and check if it has updated its pi_blocked_on. If it has,
2115 * we assume that it woke up and we return -EAGAIN.
2116 * Otherwise, we set the task's pi_blocked_on to
2117 * PI_REQUEUE_INPROGRESS, so that if the task is waking up
2118 * it will know that we are in the process of requeuing it.
2120 raw_spin_lock_irq(&task->pi_lock);
2121 if (task->pi_blocked_on) {
2122 raw_spin_unlock_irq(&task->pi_lock);
2123 raw_spin_unlock(&lock->wait_lock);
2126 task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
2127 raw_spin_unlock_irq(&task->pi_lock);
2130 /* We enforce deadlock detection for futexes */
2131 ret = task_blocks_on_rt_mutex(lock, waiter, task,
2132 RT_MUTEX_FULL_CHAINWALK);
2134 if (ret && !rt_mutex_owner(lock)) {
2136 * Reset the return value. We might have
2137 * returned with -EDEADLK and the owner
2138 * released the lock while we were walking the
2139 * pi chain. Let the waiter sort it out.
2145 remove_waiter(lock, waiter);
2147 raw_spin_unlock(&lock->wait_lock);
2149 debug_rt_mutex_print_deadlock(waiter);
2155 * rt_mutex_next_owner - return the next owner of the lock
2157 * @lock: the rt lock query
2159 * Returns the next owner of the lock or NULL
2161 * Caller has to serialize against other accessors to the lock
2164 * Special API call for PI-futex support
2166 struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
2168 if (!rt_mutex_has_waiters(lock))
2171 return rt_mutex_top_waiter(lock)->task;
2175 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
2176 * @lock: the rt_mutex we were woken on
2177 * @to: the timeout, null if none. hrtimer should already have been started.
2179 * @waiter: the pre-initialized rt_mutex_waiter
2181 * Complete the lock acquisition started on our behalf by another thread.
2185 * <0 - error, one of -EINTR, -ETIMEDOUT
2187 * Special API call for PI-futex requeue support
2189 int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
2190 struct hrtimer_sleeper *to,
2191 struct rt_mutex_waiter *waiter)
2195 raw_spin_lock(&lock->wait_lock);
2197 set_current_state(TASK_INTERRUPTIBLE);
2199 /* sleep on the mutex */
2200 ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
2203 remove_waiter(lock, waiter);
2206 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
2207 * have to fix that up.
2209 fixup_rt_mutex_waiters(lock);
2211 raw_spin_unlock(&lock->wait_lock);
2217 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
2219 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
2222 if (ctx->deadlock_inject_countdown-- == 0) {
2223 tmp = ctx->deadlock_inject_interval;
2224 if (tmp > UINT_MAX/4)
2227 tmp = tmp*2 + tmp + tmp/2;
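		/* i.e. interval *= 3.5, e.g. 16 -> 32 + 16 + 8 = 56 */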
2229 ctx->deadlock_inject_interval = tmp;
2230 ctx->deadlock_inject_countdown = tmp;
2231 ctx->contending_lock = lock;
2233 ww_mutex_unlock(lock);
2242 #ifdef CONFIG_PREEMPT_RT_FULL
2244 __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
2250 mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
2251 ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
2253 mutex_release(&lock->base.dep_map, 1, _RET_IP_);
2254 else if (!ret && ww_ctx->acquired > 1)
2255 return ww_mutex_deadlock_injection(lock, ww_ctx);
2259 EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
2262 __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
2268 mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
2269 ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
2271 mutex_release(&lock->base.dep_map, 1, _RET_IP_);
2272 else if (!ret && ww_ctx->acquired > 1)
2273 return ww_mutex_deadlock_injection(lock, ww_ctx);
2277 EXPORT_SYMBOL_GPL(__ww_mutex_lock);
2279 void __sched ww_mutex_unlock(struct ww_mutex *lock)
2281 int nest = !!lock->ctx;
2284 * The unlocking fastpath is the 0->1 transition from 'locked'
2285 * into 'unlocked' state:
2288 #ifdef CONFIG_DEBUG_MUTEXES
2289 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
2291 if (lock->ctx->acquired > 0)
2292 lock->ctx->acquired--;
2296 mutex_release(&lock->base.dep_map, nest, _RET_IP_);
2297 rt_mutex_unlock(&lock->base.lock);
2299 EXPORT_SYMBOL(ww_mutex_unlock);
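/*
 * Sketch of the usual wait/wound caller pattern for the ww_mutex API
 * above (illustrative only; function and variable names are made up and
 * error handling is reduced to the -EDEADLK back-off case):
 */
#if 0
static DEFINE_WW_CLASS(ex_ww_class);

static int ex_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &ex_ww_class);

	ret = ww_mutex_lock(a, &ctx);
	if (ret)
		goto fini;

	ret = ww_mutex_lock(b, &ctx);
	if (ret) {
		/* -EDEADLK: an older context holds @b, back off */
		ww_mutex_unlock(a);
		goto fini;
	}
	ww_acquire_done(&ctx);

	/* ... both objects are locked, do the work ... */

	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
fini:
	ww_acquire_fini(&ctx);
	return ret;
}
#endif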