1 /*
2  * RT-Mutexes: simple blocking mutual exclusion locks with PI support
3  *
4  * started by Ingo Molnar and Thomas Gleixner.
5  *
6  *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7  *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8  *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
9  *  Copyright (C) 2006 Esben Nielsen
10  *  Adaptive Spinlocks:
11  *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
12  *                                   and Peter Morreale,
13  * Adaptive Spinlocks simplification:
14  *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
15  *
16  *  See Documentation/locking/rt-mutex-design.txt for details.
17  */
18 #include <linux/spinlock.h>
19 #include <linux/export.h>
20 #include <linux/sched.h>
21 #include <linux/sched/rt.h>
22 #include <linux/sched/deadline.h>
23 #include <linux/timer.h>
24 #include <linux/ww_mutex.h>
25
26 #include "rtmutex_common.h"
27
28 /*
29  * lock->owner state tracking:
30  *
31  * lock->owner holds the task_struct pointer of the owner. Bit 0
32  * is used to keep track of the "lock has waiters" state.
33  *
34  * owner        bit0
35  * NULL         0       lock is free (fast acquire possible)
36  * NULL         1       lock is free and has waiters and the top waiter
37  *                              is going to take the lock*
38  * taskpointer  0       lock is held (fast release possible)
39  * taskpointer  1       lock is held and has waiters**
40  *
41  * The fast atomic compare exchange based acquire and release is only
42  * possible when bit 0 of lock->owner is 0.
43  *
44  * (*) It also can be a transitional state when grabbing the lock
45  * while ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
46  * we need to set bit 0 before looking at the lock, and the owner may be
47  * NULL during this small window, hence this can be a transitional state.
48  *
49  * (**) There is a small time when bit 0 is set but there are no
50  * waiters. This can happen when grabbing the lock in the slow path.
51  * To prevent a cmpxchg of the owner releasing the lock, we need to
52  * set this bit before looking at the lock.
53  */
54
55 static void
56 rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
57 {
58         unsigned long val = (unsigned long)owner;
59
60         if (rt_mutex_has_waiters(lock))
61                 val |= RT_MUTEX_HAS_WAITERS;
62
63         lock->owner = (struct task_struct *)val;
64 }
65
66 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
67 {
68         lock->owner = (struct task_struct *)
69                         ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
70 }
71
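/*
 * Clear a stale RT_MUTEX_HAS_WAITERS bit if the waiter tree turned out
 * to be empty, see the (**) case described above.
 */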
72 static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
73 {
74         if (!rt_mutex_has_waiters(lock))
75                 clear_rt_mutex_waiters(lock);
76 }
77
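/*
 * PI_WAKEUP_INPROGRESS and PI_REQUEUE_INPROGRESS are special marker
 * values stored in ->pi_blocked_on by the -rt futex requeue code. They
 * are not pointers to real waiters and must never be dereferenced.
 */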
78 static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
79 {
80         return waiter && waiter != PI_WAKEUP_INPROGRESS &&
81                 waiter != PI_REQUEUE_INPROGRESS;
82 }
83
84 /*
85  * We can speed up the acquire/release, if the architecture
86  * supports cmpxchg and if there's no debugging state to be set up
87  */
88 #if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
89 # define rt_mutex_cmpxchg(l,c,n)        (cmpxchg(&l->owner, c, n) == c)
90 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
91 {
92         unsigned long owner, *p = (unsigned long *) &lock->owner;
93
94         do {
95                 owner = *p;
96         } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
97 }
98
99 /*
100  * Safe fastpath aware unlock:
101  * 1) Clear the waiters bit
102  * 2) Drop lock->wait_lock
103  * 3) Try to unlock the lock with cmpxchg
104  */
105 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
106         __releases(lock->wait_lock)
107 {
108         struct task_struct *owner = rt_mutex_owner(lock);
109
110         clear_rt_mutex_waiters(lock);
111         raw_spin_unlock(&lock->wait_lock);
112         /*
113          * If a new waiter comes in between the unlock and the cmpxchg
114          * we have two situations:
115          *
116          * unlock(wait_lock);
117          *                                      lock(wait_lock);
118          * cmpxchg(p, owner, 0) == owner
119          *                                      mark_rt_mutex_waiters(lock);
120          *                                      acquire(lock);
121          * or:
122          *
123          * unlock(wait_lock);
124          *                                      lock(wait_lock);
125          *                                      mark_rt_mutex_waiters(lock);
126          *
127          * cmpxchg(p, owner, 0) != owner
128          *                                      enqueue_waiter();
129          *                                      unlock(wait_lock);
130          * lock(wait_lock);
131          * wake waiter();
132          * unlock(wait_lock);
133          *                                      lock(wait_lock);
134          *                                      acquire(lock);
135          */
136         return rt_mutex_cmpxchg(lock, owner, NULL);
137 }
138
139 #else
140 # define rt_mutex_cmpxchg(l,c,n)        (0)
141 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
142 {
143         lock->owner = (struct task_struct *)
144                         ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
145 }
146
147 /*
148  * Simple slow path only version: lock->owner is protected by lock->wait_lock.
149  */
150 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
151         __releases(lock->wait_lock)
152 {
153         lock->owner = NULL;
154         raw_spin_unlock(&lock->wait_lock);
155         return true;
156 }
157 #endif
158
159 static inline int
160 rt_mutex_waiter_less(struct rt_mutex_waiter *left,
161                      struct rt_mutex_waiter *right)
162 {
163         if (left->prio < right->prio)
164                 return 1;
165
166         /*
167          * If both waiters have dl_prio(), we check the deadlines of the
168          * associated tasks.
169          * If left waiter has a dl_prio(), and we didn't return 1 above,
170          * then right waiter has a dl_prio() too.
171          */
172         if (dl_prio(left->prio))
173                 return (left->task->dl.deadline < right->task->dl.deadline);
174
175         return 0;
176 }
177
178 static void
179 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
180 {
181         struct rb_node **link = &lock->waiters.rb_node;
182         struct rb_node *parent = NULL;
183         struct rt_mutex_waiter *entry;
184         int leftmost = 1;
185
186         while (*link) {
187                 parent = *link;
188                 entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
189                 if (rt_mutex_waiter_less(waiter, entry)) {
190                         link = &parent->rb_left;
191                 } else {
192                         link = &parent->rb_right;
193                         leftmost = 0;
194                 }
195         }
196
197         if (leftmost)
198                 lock->waiters_leftmost = &waiter->tree_entry;
199
200         rb_link_node(&waiter->tree_entry, parent, link);
201         rb_insert_color(&waiter->tree_entry, &lock->waiters);
202 }
203
204 static void
205 rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
206 {
207         if (RB_EMPTY_NODE(&waiter->tree_entry))
208                 return;
209
210         if (lock->waiters_leftmost == &waiter->tree_entry)
211                 lock->waiters_leftmost = rb_next(&waiter->tree_entry);
212
213         rb_erase(&waiter->tree_entry, &lock->waiters);
214         RB_CLEAR_NODE(&waiter->tree_entry);
215 }
216
217 static void
218 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
219 {
220         struct rb_node **link = &task->pi_waiters.rb_node;
221         struct rb_node *parent = NULL;
222         struct rt_mutex_waiter *entry;
223         int leftmost = 1;
224
225         while (*link) {
226                 parent = *link;
227                 entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
228                 if (rt_mutex_waiter_less(waiter, entry)) {
229                         link = &parent->rb_left;
230                 } else {
231                         link = &parent->rb_right;
232                         leftmost = 0;
233                 }
234         }
235
236         if (leftmost)
237                 task->pi_waiters_leftmost = &waiter->pi_tree_entry;
238
239         rb_link_node(&waiter->pi_tree_entry, parent, link);
240         rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
241 }
242
243 static void
244 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
245 {
246         if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
247                 return;
248
249         if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
250                 task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);
251
252         rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
253         RB_CLEAR_NODE(&waiter->pi_tree_entry);
254 }
255
256 /*
257  * Calculate task priority from the waiter tree priority
258  *
259  * Return task->normal_prio when the waiter tree is empty or when
260  * the waiter is not allowed to do priority boosting
261  */
262 int rt_mutex_getprio(struct task_struct *task)
263 {
264         if (likely(!task_has_pi_waiters(task)))
265                 return task->normal_prio;
266
267         return min(task_top_pi_waiter(task)->prio,
268                    task->normal_prio);
269 }
270
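/*
 * Return the task which donates the highest priority to @task, or NULL
 * if @task has no PI waiters.
 */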
271 struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
272 {
273         if (likely(!task_has_pi_waiters(task)))
274                 return NULL;
275
276         return task_top_pi_waiter(task)->task;
277 }
278
279 /*
280  * Called by sched_setscheduler() to get the priority which will be
281  * effective after the change.
282  */
283 int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
284 {
285         if (!task_has_pi_waiters(task))
286                 return newprio;
287
288         if (task_top_pi_waiter(task)->task->prio <= newprio)
289                 return task_top_pi_waiter(task)->task->prio;
290         return newprio;
291 }
292
293 /*
294  * Adjust the priority of a task, after its pi_waiters got modified.
295  *
296  * This can be both boosting and unboosting. task->pi_lock must be held.
297  */
298 static void __rt_mutex_adjust_prio(struct task_struct *task)
299 {
300         int prio = rt_mutex_getprio(task);
301
302         if (task->prio != prio || dl_prio(prio))
303                 rt_mutex_setprio(task, prio);
304 }
305
306 /*
307  * Adjust task priority (undo boosting). Called from the exit path of
308  * rt_mutex_slowunlock() and rt_mutex_slowlock().
309  *
310  * (Note: We do this outside of the protection of lock->wait_lock to
311  * allow the lock to be taken while or before we readjust the priority
312  * of task. We do not use the spin_xx_mutex() variants here as we are
313  * outside of the debug path.)
314  */
315 void rt_mutex_adjust_prio(struct task_struct *task)
316 {
317         unsigned long flags;
318
319         raw_spin_lock_irqsave(&task->pi_lock, flags);
320         __rt_mutex_adjust_prio(task);
321         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
322 }
323
324 /*
325  * Deadlock detection is conditional:
326  *
327  * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
328  * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
329  *
330  * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
331  * conducted independent of the detect argument.
332  *
333  * If the waiter argument is NULL this indicates the deboost path and
334  * deadlock detection is disabled independent of the detect argument
335  * and the config settings.
336  */
337 static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
338                                           enum rtmutex_chainwalk chwalk)
339 {
340         /*
341          * This is just a wrapper function for the following call,
342          * because debug_rt_mutex_detect_deadlock() smells like a magic
343          * debug feature and I wanted to keep the cond function in the
344          * main source file along with the comments instead of having
345          * two of the same in the headers.
346          */
347         return debug_rt_mutex_detect_deadlock(waiter, chwalk);
348 }
349
350 static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
351 {
352         if (waiter->savestate)
353                 wake_up_lock_sleeper(waiter->task);
354         else
355                 wake_up_process(waiter->task);
356 }
357
358 /*
359  * Max number of times we'll walk the boosting chain:
360  */
361 int max_lock_depth = 1024;
362
363 static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
364 {
365         return rt_mutex_real_waiter(p->pi_blocked_on) ?
366                 p->pi_blocked_on->lock : NULL;
367 }
368
369 /*
370  * Adjust the priority chain. Also used for deadlock detection.
371  * Decreases task's usage by one - may thus free the task.
372  *
373  * @task:       the task owning the mutex (owner) for which a chain walk is
374  *              probably needed
375  * @chwalk:     do we have to carry out deadlock detection?
376  * @orig_lock:  the mutex (can be NULL if we are walking the chain to recheck
377  *              things for a task that has just got its priority adjusted, and
378  *              is waiting on a mutex)
379  * @next_lock:  the mutex on which the owner of @orig_lock was blocked before
380  *              we dropped its pi_lock. Is never dereferenced, only used for
381  *              comparison to detect lock chain changes.
382  * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
383  *              its priority to the mutex owner (can be NULL in the case
384  *              depicted above or if the top waiter has gone away and we are
385  *              actually deboosting the owner)
386  * @top_task:   the current top waiter
387  *
388  * Returns 0 or -EDEADLK.
389  *
390  * Chain walk basics and protection scope
391  *
392  * [R] refcount on task
393  * [P] task->pi_lock held
394  * [L] rtmutex->wait_lock held
395  *
396  * Step Description                             Protected by
397  *      function arguments:
398  *      @task                                   [R]
399  *      @orig_lock if != NULL                   @top_task is blocked on it
400  *      @next_lock                              Unprotected. Cannot be
401  *                                              dereferenced. Only used for
402  *                                              comparison.
403  *      @orig_waiter if != NULL                 @top_task is blocked on it
404  *      @top_task                               current, or in case of proxy
405  *                                              locking protected by calling
406  *                                              code
407  *      again:
408  *        loop_sanity_check();
409  *      retry:
410  * [1]    lock(task->pi_lock);                  [R] acquire [P]
411  * [2]    waiter = task->pi_blocked_on;         [P]
412  * [3]    check_exit_conditions_1();            [P]
413  * [4]    lock = waiter->lock;                  [P]
414  * [5]    if (!try_lock(lock->wait_lock)) {     [P] try to acquire [L]
415  *          unlock(task->pi_lock);              release [P]
416  *          goto retry;
417  *        }
418  * [6]    check_exit_conditions_2();            [P] + [L]
419  * [7]    requeue_lock_waiter(lock, waiter);    [P] + [L]
420  * [8]    unlock(task->pi_lock);                release [P]
421  *        put_task_struct(task);                release [R]
422  * [9]    check_exit_conditions_3();            [L]
423  * [10]   task = owner(lock);                   [L]
424  *        get_task_struct(task);                [L] acquire [R]
425  *        lock(task->pi_lock);                  [L] acquire [P]
426  * [11]   requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
427  * [12]   check_exit_conditions_4();            [P] + [L]
428  * [13]   unlock(task->pi_lock);                release [P]
429  *        unlock(lock->wait_lock);              release [L]
430  *        goto again;
431  */
432 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
433                                       enum rtmutex_chainwalk chwalk,
434                                       struct rt_mutex *orig_lock,
435                                       struct rt_mutex *next_lock,
436                                       struct rt_mutex_waiter *orig_waiter,
437                                       struct task_struct *top_task)
438 {
439         struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
440         struct rt_mutex_waiter *prerequeue_top_waiter;
441         int ret = 0, depth = 0;
442         struct rt_mutex *lock;
443         bool detect_deadlock;
444         unsigned long flags;
445         bool requeue = true;
446
447         detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
448
449         /*
450          * The (de)boosting is a step by step approach with a lot of
451          * pitfalls. We want this to be preemptible and we want to hold a
452          * maximum of two locks per step. So we have to check
453          * carefully whether things change under us.
454          */
455  again:
456         /*
457          * We limit the lock chain length for each invocation.
458          */
459         if (++depth > max_lock_depth) {
460                 static int prev_max;
461
462                 /*
463                  * Print this only once. If the admin changes the limit,
464                  * print a new message when reaching the limit again.
465                  */
466                 if (prev_max != max_lock_depth) {
467                         prev_max = max_lock_depth;
468                         printk(KERN_WARNING "Maximum lock depth %d reached "
469                                "task: %s (%d)\n", max_lock_depth,
470                                top_task->comm, task_pid_nr(top_task));
471                 }
472                 put_task_struct(task);
473
474                 return -EDEADLK;
475         }
476
477         /*
478          * We are fully preemptible here and only hold the refcount on
479          * @task. So everything can have changed under us since the
480          * caller or our own code below (goto retry/again) dropped all
481          * locks.
482          */
483  retry:
484         /*
485          * [1] Task cannot go away as we did a get_task() before !
486          */
487         raw_spin_lock_irqsave(&task->pi_lock, flags);
488
489         /*
490          * [2] Get the waiter on which @task is blocked.
491          */
492         waiter = task->pi_blocked_on;
493
494         /*
495          * [3] check_exit_conditions_1() protected by task->pi_lock.
496          */
497
498         /*
499          * Check whether the end of the boosting chain has been
500          * reached or the state of the chain has changed while we
501          * dropped the locks.
502          */
503         if (!rt_mutex_real_waiter(waiter))
504                 goto out_unlock_pi;
505
506         /*
507          * Check the orig_waiter state. After we dropped the locks,
508          * the previous owner of the lock might have released the lock.
509          */
510         if (orig_waiter && !rt_mutex_owner(orig_lock))
511                 goto out_unlock_pi;
512
513         /*
514          * We dropped all locks after taking a refcount on @task, so
515          * the task might have moved on in the lock chain or even left
516          * the chain completely and blocks now on an unrelated lock or
517          * on @orig_lock.
518          *
519          * We stored the lock on which @task was blocked in @next_lock,
520          * so we can detect the chain change.
521          */
522         if (next_lock != waiter->lock)
523                 goto out_unlock_pi;
524
525         /*
526          * Drop out when the task has no waiters. Note that
527          * top_waiter can be NULL when we are in the deboosting
528          * mode!
529          */
530         if (top_waiter) {
531                 if (!task_has_pi_waiters(task))
532                         goto out_unlock_pi;
533                 /*
534                  * If deadlock detection is off, we stop here if we
535                  * are not the top pi waiter of the task. If deadlock
536                  * detection is enabled we continue, but stop the
537                  * requeueing in the chain walk.
538                  */
539                 if (top_waiter != task_top_pi_waiter(task)) {
540                         if (!detect_deadlock)
541                                 goto out_unlock_pi;
542                         else
543                                 requeue = false;
544                 }
545         }
546
547         /*
548          * If the waiter priority is the same as the task priority
549          * then there is no further priority adjustment necessary.  If
550          * deadlock detection is off, we stop the chain walk. If it is
551          * enabled we continue, but stop the requeueing in the chain
552          * walk.
553          */
554         if (waiter->prio == task->prio) {
555                 if (!detect_deadlock)
556                         goto out_unlock_pi;
557                 else
558                         requeue = false;
559         }
560
561         /*
562          * [4] Get the next lock
563          */
564         lock = waiter->lock;
565         /*
566          * [5] We need to trylock here as we are holding task->pi_lock,
567          * which is the reverse lock order versus the other rtmutex
568          * operations.
569          */
570         if (!raw_spin_trylock(&lock->wait_lock)) {
571                 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
572                 cpu_relax();
573                 goto retry;
574         }
575
576         /*
577          * [6] check_exit_conditions_2() protected by task->pi_lock and
578          * lock->wait_lock.
579          *
580          * Deadlock detection. If the lock is the same as the original
581          * lock which caused us to walk the lock chain or if the
582          * current lock is owned by the task which initiated the chain
583          * walk, we detected a deadlock.
584          */
585         if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
586                 debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
587                 raw_spin_unlock(&lock->wait_lock);
588                 ret = -EDEADLK;
589                 goto out_unlock_pi;
590         }
591
592         /*
593          * If we just follow the lock chain for deadlock detection, no
594          * need to do all the requeue operations. To avoid a truckload
595          * of conditionals around the various places below, just do the
596          * minimum chain walk checks.
597          */
598         if (!requeue) {
599                 /*
600                  * No requeue[7] here. Just release @task [8]
601                  */
602                 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
603                 put_task_struct(task);
604
605                 /*
606                  * [9] check_exit_conditions_3 protected by lock->wait_lock.
607                  * If there is no owner of the lock, end of chain.
608                  */
609                 if (!rt_mutex_owner(lock)) {
610                         raw_spin_unlock(&lock->wait_lock);
611                         return 0;
612                 }
613
614                 /* [10] Grab the next task, i.e. owner of @lock */
615                 task = rt_mutex_owner(lock);
616                 get_task_struct(task);
617                 raw_spin_lock_irqsave(&task->pi_lock, flags);
618
619                 /*
620                  * No requeue [11] here. We just do deadlock detection.
621                  *
622                  * [12] Store whether owner is blocked
623                  * itself. Decision is made after dropping the locks
624                  */
625                 next_lock = task_blocked_on_lock(task);
626                 /*
627                  * Get the top waiter for the next iteration
628                  */
629                 top_waiter = rt_mutex_top_waiter(lock);
630
631                 /* [13] Drop locks */
632                 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
633                 raw_spin_unlock(&lock->wait_lock);
634
635                 /* If owner is not blocked, end of chain. */
636                 if (!next_lock)
637                         goto out_put_task;
638                 goto again;
639         }
640
641         /*
642          * Store the current top waiter before doing the requeue
643          * operation on @lock. We need it for the boost/deboost
644          * decision below.
645          */
646         prerequeue_top_waiter = rt_mutex_top_waiter(lock);
647
648         /* [7] Requeue the waiter in the lock waiter list. */
649         rt_mutex_dequeue(lock, waiter);
650         waiter->prio = task->prio;
651         rt_mutex_enqueue(lock, waiter);
652
653         /* [8] Release the task */
654         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
655         put_task_struct(task);
656
657         /*
658          * [9] check_exit_conditions_3 protected by lock->wait_lock.
659          *
660          * We must abort the chain walk if there is no lock owner even
661          * in the dead lock detection case, as we have nothing to
662          * follow here. This is the end of the chain we are walking.
663          */
664         if (!rt_mutex_owner(lock)) {
665                 struct rt_mutex_waiter *lock_top_waiter;
666
667                 /*
668                  * If the requeue [7] above changed the top waiter,
669                  * then we need to wake the new top waiter up to try
670                  * to get the lock.
671                  */
672                 lock_top_waiter = rt_mutex_top_waiter(lock);
673                 if (prerequeue_top_waiter != lock_top_waiter)
674                         rt_mutex_wake_waiter(lock_top_waiter);
675                 raw_spin_unlock(&lock->wait_lock);
676                 return 0;
677         }
678
679         /* [10] Grab the next task, i.e. the owner of @lock */
680         task = rt_mutex_owner(lock);
681         get_task_struct(task);
682         raw_spin_lock_irqsave(&task->pi_lock, flags);
683
684         /* [11] requeue the pi waiters if necessary */
685         if (waiter == rt_mutex_top_waiter(lock)) {
686                 /*
687                  * The waiter became the new top (highest priority)
688                  * waiter on the lock. Replace the previous top waiter
689                  * in the owner tasks pi waiters list with this waiter
690          * in the owner task's pi waiters list with this waiter
691                  */
692                 rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
693                 rt_mutex_enqueue_pi(task, waiter);
694                 __rt_mutex_adjust_prio(task);
695
696         } else if (prerequeue_top_waiter == waiter) {
697                 /*
698                  * The waiter was the top waiter on the lock, but is
699          * no longer the top priority waiter. Replace waiter in
700          * the owner task's pi waiters list with the new top
701                  * (highest priority) waiter and adjust the priority
702                  * of the owner.
703                  * The new top waiter is stored in @waiter so that
704                  * @waiter == @top_waiter evaluates to true below and
705                  * we continue to deboost the rest of the chain.
706                  */
707                 rt_mutex_dequeue_pi(task, waiter);
708                 waiter = rt_mutex_top_waiter(lock);
709                 rt_mutex_enqueue_pi(task, waiter);
710                 __rt_mutex_adjust_prio(task);
711         } else {
712                 /*
713                  * Nothing changed. No need to do any priority
714                  * adjustment.
715                  */
716         }
717
718         /*
719          * [12] check_exit_conditions_4() protected by task->pi_lock
720          * and lock->wait_lock. The actual decisions are made after we
721          * dropped the locks.
722          *
723          * Check whether the task which owns the current lock is pi
724          * blocked itself. If yes we store a pointer to the lock for
725          * the lock chain change detection above. After we dropped
726          * task->pi_lock next_lock cannot be dereferenced anymore.
727          */
728         next_lock = task_blocked_on_lock(task);
729         /*
730          * Store the top waiter of @lock for the end of chain walk
731          * decision below.
732          */
733         top_waiter = rt_mutex_top_waiter(lock);
734
735         /* [13] Drop the locks */
736         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
737         raw_spin_unlock(&lock->wait_lock);
738
739         /*
740          * Make the actual exit decisions [12], based on the stored
741          * values.
742          *
743          * We reached the end of the lock chain. Stop right here. No
744          * point to go back just to figure that out.
745          */
746         if (!next_lock)
747                 goto out_put_task;
748
749         /*
750          * If the current waiter is not the top waiter on the lock,
751          * then we can stop the chain walk here if we are not in full
752          * deadlock detection mode.
753          */
754         if (!detect_deadlock && waiter != top_waiter)
755                 goto out_put_task;
756
757         goto again;
758
759  out_unlock_pi:
760         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
761  out_put_task:
762         put_task_struct(task);
763
764         return ret;
765 }
766
767
768 #define STEAL_NORMAL  0
769 #define STEAL_LATERAL 1
770
771 /*
772  * Note that RT tasks are excluded from lateral-steals to prevent the
773  * introduction of an unbounded latency
774  */
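/*
 * Return 1 if @task may steal the lock from the pending owner @pendowner.
 * In STEAL_LATERAL mode a non-RT task may also steal on equal priority,
 * otherwise @task needs a strictly higher priority (lower prio value).
 */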
775 static inline int lock_is_stealable(struct task_struct *task,
776                                     struct task_struct *pendowner, int mode)
777 {
778     if (mode == STEAL_NORMAL || rt_task(task)) {
779             if (task->prio >= pendowner->prio)
780                     return 0;
781     } else if (task->prio > pendowner->prio)
782             return 0;
783     return 1;
784 }
785
786 /*
787  * Try to take an rt-mutex
788  *
789  * Must be called with lock->wait_lock held.
790  *
791  * @lock:   The lock to be acquired.
792  * @task:   The task which wants to acquire the lock
793  * @waiter: The waiter that is queued to the lock's wait list if the
794  *          callsite called task_blocked_on_lock(), otherwise NULL
795  */
796 static int __try_to_take_rt_mutex(struct rt_mutex *lock,
797                                   struct task_struct *task,
798                                   struct rt_mutex_waiter *waiter, int mode)
799 {
800         unsigned long flags;
801
802         /*
803          * Before testing whether we can acquire @lock, we set the
804          * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
805          * other tasks which try to modify @lock into the slow path
806          * and they serialize on @lock->wait_lock.
807          *
808          * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
809          * as explained at the top of this file if and only if:
810          *
811          * - There is a lock owner. The caller must fixup the
812          *   transient state if it does a trylock or leaves the lock
813          *   function due to a signal or timeout.
814          *
815          * - @task acquires the lock and there are no other
816          *   waiters. This is undone in rt_mutex_set_owner(@task) at
817          *   the end of this function.
818          */
819         mark_rt_mutex_waiters(lock);
820
821         /*
822          * If @lock has an owner, give up.
823          */
824         if (rt_mutex_owner(lock))
825                 return 0;
826
827         /*
828          * If @waiter != NULL, @task has already enqueued the waiter
829          * into @lock waiter list. If @waiter == NULL then this is a
830          * trylock attempt.
831          */
832         if (waiter) {
833                 /*
834                  * If waiter is not the highest priority waiter of
835                  * @lock, give up.
836                  */
837                 if (waiter != rt_mutex_top_waiter(lock)) {
838                         /* XXX lock_is_stealable() ? */
839                         return 0;
840                 }
841
842                 /*
843                  * We can acquire the lock. Remove the waiter from the
844                  * lock waiters list.
845                  */
846                 rt_mutex_dequeue(lock, waiter);
847
848         } else {
849                 /*
850                  * If the lock has waiters already we check whether @task is
851                  * eligible to take over the lock.
852                  *
853                  * If there are no other waiters, @task can acquire
854                  * the lock.  @task->pi_blocked_on is NULL, so it does
855                  * not need to be dequeued.
856                  */
857                 if (rt_mutex_has_waiters(lock)) {
858                         struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
859
860                         if (task != pown && !lock_is_stealable(task, pown, mode))
861                                 return 0;
862                         /*
863                          * The current top waiter stays enqueued. We
864                          * don't have to change anything in the lock
865                          * waiters order.
866                          */
867                 } else {
868                         /*
869                          * No waiters. Take the lock without the
870          * pi_lock dance. @task->pi_blocked_on is NULL
871                          * and we have no waiters to enqueue in @task
872                          * pi waiters list.
873                          */
874                         goto takeit;
875                 }
876         }
877
878         /*
879          * Clear @task->pi_blocked_on. Requires protection by
880          * @task->pi_lock. Redundant operation for the @waiter == NULL
881          * case, but conditionals are more expensive than a redundant
882          * store.
883          */
884         raw_spin_lock_irqsave(&task->pi_lock, flags);
885         task->pi_blocked_on = NULL;
886         /*
887          * Finish the lock acquisition. @task is the new owner. If
888          * other waiters exist we have to insert the highest priority
889          * waiter into @task->pi_waiters list.
890          */
891         if (rt_mutex_has_waiters(lock))
892                 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
893         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
894
895 takeit:
896         /* We got the lock. */
897         debug_rt_mutex_lock(lock);
898
899         /*
900          * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
901          * are still waiters or clears it.
902          */
903         rt_mutex_set_owner(lock, task);
904
905         rt_mutex_deadlock_account_lock(lock, task);
906
907         return 1;
908 }
909
910 #ifdef CONFIG_PREEMPT_RT_FULL
911 /*
912  * preemptible spin_lock functions:
913  */
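/*
 * Fast path: cmpxchg the owner from NULL to current. If that fails
 * (lock contended, no architecture cmpxchg support, or rtmutex
 * debugging enabled) fall back to the given slow path function.
 */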
914 static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
915                                          void  (*slowfn)(struct rt_mutex *lock))
916 {
917         might_sleep_no_state_check();
918
919         if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
920                 rt_mutex_deadlock_account_lock(lock, current);
921         else
922                 slowfn(lock);
923 }
924
925 static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
926                                            void  (*slowfn)(struct rt_mutex *lock))
927 {
928         if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
929                 rt_mutex_deadlock_account_unlock(current);
930         else
931                 slowfn(lock);
932 }
933 #ifdef CONFIG_SMP
934 /*
935  * Note that owner is a speculative pointer and dereferencing relies
936  * on rcu_read_lock() and the check against the lock owner.
937  */
938 static int adaptive_wait(struct rt_mutex *lock,
939                          struct task_struct *owner)
940 {
941         int res = 0;
942
943         rcu_read_lock();
944         for (;;) {
945                 if (owner != rt_mutex_owner(lock))
946                         break;
947                 /*
948                  * Ensure that owner->on_cpu is dereferenced _after_
949                  * checking the above to be valid.
950                  */
951                 barrier();
952                 if (!owner->on_cpu) {
953                         res = 1;
954                         break;
955                 }
956                 cpu_relax();
957         }
958         rcu_read_unlock();
959         return res;
960 }
961 #else
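/*
 * On UP the lock owner can never be running on another CPU, so there is
 * no point in spinning: always tell the caller to block.
 */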
962 static int adaptive_wait(struct rt_mutex *lock,
963                          struct task_struct *orig_owner)
964 {
965         return 1;
966 }
967 #endif
968
969 # define pi_lock(lock)          raw_spin_lock_irq(lock)
970 # define pi_unlock(lock)        raw_spin_unlock_irq(lock)
971
972 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
973                                    struct rt_mutex_waiter *waiter,
974                                    struct task_struct *task,
975                                    enum rtmutex_chainwalk chwalk);
976 /*
977  * Slow path lock function spin_lock style: this variant is very
978  * careful not to miss any non-lock wakeups.
979  *
980  * We store the current state under p->pi_lock in p->saved_state and
981  * the try_to_wake_up() code handles this accordingly.
982  */
983 static void  noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
984 {
985         struct task_struct *lock_owner, *self = current;
986         struct rt_mutex_waiter waiter, *top_waiter;
987         int ret;
988
989         rt_mutex_init_waiter(&waiter, true);
990
991         raw_spin_lock(&lock->wait_lock);
992
993         if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
994                 raw_spin_unlock(&lock->wait_lock);
995                 return;
996         }
997
998         BUG_ON(rt_mutex_owner(lock) == self);
999
1000         /*
1001          * We save whatever state the task is in and we'll restore it
1002          * after acquiring the lock taking real wakeups into account
1003          * as well. We are serialized via pi_lock against wakeups. See
1004          * try_to_wake_up().
1005          */
1006         pi_lock(&self->pi_lock);
1007         self->saved_state = self->state;
1008         __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
1009         pi_unlock(&self->pi_lock);
1010
1011         ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
1012         BUG_ON(ret);
1013
1014         for (;;) {
1015                 /* Try to acquire the lock again. */
1016                 if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
1017                         break;
1018
1019                 top_waiter = rt_mutex_top_waiter(lock);
1020                 lock_owner = rt_mutex_owner(lock);
1021
1022                 raw_spin_unlock(&lock->wait_lock);
1023
1024                 debug_rt_mutex_print_deadlock(&waiter);
1025
1026                 if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
1027                         schedule_rt_mutex(lock);
1028
1029                 raw_spin_lock(&lock->wait_lock);
1030
1031                 pi_lock(&self->pi_lock);
1032                 __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
1033                 pi_unlock(&self->pi_lock);
1034         }
1035
1036         /*
1037          * Restore the task state to current->saved_state. We set it
1038          * to the original state above and the try_to_wake_up() code
1039          * has possibly updated it when a real (non-rtmutex) wakeup
1040          * happened while we were blocked. Clear saved_state so
1041          * try_to_wakeup() does not get confused.
1042          */
1043         pi_lock(&self->pi_lock);
1044         __set_current_state_no_track(self->saved_state);
1045         self->saved_state = TASK_RUNNING;
1046         pi_unlock(&self->pi_lock);
1047
1048         /*
1049          * try_to_take_rt_mutex() sets the waiter bit
1050          * unconditionally. We might have to fix that up:
1051          */
1052         fixup_rt_mutex_waiters(lock);
1053
1054         BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
1055         BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
1056
1057         raw_spin_unlock(&lock->wait_lock);
1058
1059         debug_rt_mutex_free_waiter(&waiter);
1060 }
1061
1062 static void wakeup_next_waiter(struct rt_mutex *lock);
1063 /*
1064  * Slow path to release a rt_mutex spin_lock style
1065  */
1066 static void  noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
1067 {
1068         raw_spin_lock(&lock->wait_lock);
1069
1070         debug_rt_mutex_unlock(lock);
1071
1072         rt_mutex_deadlock_account_unlock(current);
1073
1074         if (!rt_mutex_has_waiters(lock)) {
1075                 lock->owner = NULL;
1076                 raw_spin_unlock(&lock->wait_lock);
1077                 return;
1078         }
1079
1080         wakeup_next_waiter(lock);
1081
1082         raw_spin_unlock(&lock->wait_lock);
1083
1084         /* Undo pi boosting when necessary */
1085         rt_mutex_adjust_prio(current);
1086 }
1087
1088 void __lockfunc rt_spin_lock(spinlock_t *lock)
1089 {
1090         rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
1091         spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1092 }
1093 EXPORT_SYMBOL(rt_spin_lock);
1094
1095 void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
1096 {
1097         rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
1098 }
1099 EXPORT_SYMBOL(__rt_spin_lock);
1100
1101 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1102 void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
1103 {
1104         rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
1105         spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
1106 }
1107 EXPORT_SYMBOL(rt_spin_lock_nested);
1108 #endif
1109
1110 void __lockfunc rt_spin_unlock(spinlock_t *lock)
1111 {
1112         /* NOTE: we always pass in '1' for nested, for simplicity */
1113         spin_release(&lock->dep_map, 1, _RET_IP_);
1114         rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
1115 }
1116 EXPORT_SYMBOL(rt_spin_unlock);
1117
1118 void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
1119 {
1120         rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
1121 }
1122 EXPORT_SYMBOL(__rt_spin_unlock);
1123
1124 /*
1125  * Wait for the lock to get unlocked: instead of polling for an unlock
1126  * (like raw spinlocks do), we lock and unlock, to force the kernel to
1127  * schedule if there's contention:
1128  */
1129 void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
1130 {
1131         spin_lock(lock);
1132         spin_unlock(lock);
1133 }
1134 EXPORT_SYMBOL(rt_spin_unlock_wait);
1135
1136 int __lockfunc __rt_spin_trylock(struct rt_mutex *lock)
1137 {
1138         return rt_mutex_trylock(lock);
1139 }
1140
1141 int __lockfunc rt_spin_trylock(spinlock_t *lock)
1142 {
1143         int ret = rt_mutex_trylock(&lock->lock);
1144
1145         if (ret)
1146                 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1147         return ret;
1148 }
1149 EXPORT_SYMBOL(rt_spin_trylock);
1150
1151 int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
1152 {
1153         int ret;
1154
1155         local_bh_disable();
1156         ret = rt_mutex_trylock(&lock->lock);
1157         if (ret) {
1158                 migrate_disable();
1159                 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1160         } else
1161                 local_bh_enable();
1162         return ret;
1163 }
1164 EXPORT_SYMBOL(rt_spin_trylock_bh);
1165
1166 int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
1167 {
1168         int ret;
1169
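        /* rt spinlocks never disable interrupts, so there are no flags to save */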
1170         *flags = 0;
1171         ret = rt_mutex_trylock(&lock->lock);
1172         if (ret) {
1173                 migrate_disable();
1174                 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1175         }
1176         return ret;
1177 }
1178 EXPORT_SYMBOL(rt_spin_trylock_irqsave);
1179
1180 int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
1181 {
1182         /* Subtract 1 from counter unless that drops it to 0 (i.e. it was 1) */
1183         if (atomic_add_unless(atomic, -1, 1))
1184                 return 0;
1185         migrate_disable();
1186         rt_spin_lock(lock);
1187         if (atomic_dec_and_test(atomic))
1188                 return 1;
1189         rt_spin_unlock(lock);
1190         migrate_enable();
1191         return 0;
1192 }
1193 EXPORT_SYMBOL(atomic_dec_and_spin_lock);
1194
1195 void
1196 __rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
1197 {
1198 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1199         /*
1200          * Make sure we are not reinitializing a held lock:
1201          */
1202         debug_check_no_locks_freed((void *)lock, sizeof(*lock));
1203         lockdep_init_map(&lock->dep_map, name, key, 0);
1204 #endif
1205 }
1206 EXPORT_SYMBOL(__rt_spin_lock_init);
1207
1208 #endif /* PREEMPT_RT_FULL */
1209
1210 #ifdef CONFIG_PREEMPT_RT_FULL
1211 static inline int __sched
1212 __mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
1213 {
1214         struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
1215         struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
1216
1217         if (!hold_ctx)
1218                 return 0;
1219
1220         if (unlikely(ctx == hold_ctx))
1221                 return -EALREADY;
1222
1223         if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
1224             (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
1225 #ifdef CONFIG_DEBUG_MUTEXES
1226                 DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
1227                 ctx->contending_lock = ww;
1228 #endif
1229                 return -EDEADLK;
1230         }
1231
1232         return 0;
1233 }
1234 #else
1235 static inline int __sched
1236 __mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
1237 {
1238         BUG();
1239         return 0;
1240 }
1241
1242 #endif
1243
1244 static inline int
1245 try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
1246                      struct rt_mutex_waiter *waiter)
1247 {
1248         return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
1249 }
1250
1251 /*
1252  * Task blocks on lock.
1253  *
1254  * Prepare waiter and propagate pi chain
1255  *
1256  * This must be called with lock->wait_lock held.
1257  */
1258 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
1259                                    struct rt_mutex_waiter *waiter,
1260                                    struct task_struct *task,
1261                                    enum rtmutex_chainwalk chwalk)
1262 {
1263         struct task_struct *owner = rt_mutex_owner(lock);
1264         struct rt_mutex_waiter *top_waiter = waiter;
1265         struct rt_mutex *next_lock;
1266         int chain_walk = 0, res;
1267         unsigned long flags;
1268
1269         /*
1270          * Early deadlock detection. We really don't want the task to
1271          * enqueue on itself just to untangle the mess later. It's not
1272          * only an optimization. We drop the locks, so another waiter
1273          * can come in before the chain walk detects the deadlock. So
1274          * the other will detect the deadlock and return -EDEADLOCK,
1275          * which is wrong, as the other waiter is not in a deadlock
1276          * situation.
1277          */
1278         if (owner == task)
1279                 return -EDEADLK;
1280
1281         raw_spin_lock_irqsave(&task->pi_lock, flags);
1282
1283         /*
1284          * In the case of futex requeue PI, this will be a proxy
1285          * lock. The task will wake unaware that it is enqueued on
1286          * this lock. Avoid blocking on two locks and corrupting
1287          * pi_blocked_on via the PI_WAKEUP_INPROGRESS
1288          * flag. futex_wait_requeue_pi() sets this when it wakes up
1289          * before requeue (due to a signal or timeout). Do not enqueue
1290          * the task if PI_WAKEUP_INPROGRESS is set.
1291          */
1292         if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
1293                 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
1294                 return -EAGAIN;
1295         }
1296
1297         BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
1298
1299         __rt_mutex_adjust_prio(task);
1300         waiter->task = task;
1301         waiter->lock = lock;
1302         waiter->prio = task->prio;
1303
1304         /* Get the top priority waiter on the lock */
1305         if (rt_mutex_has_waiters(lock))
1306                 top_waiter = rt_mutex_top_waiter(lock);
1307         rt_mutex_enqueue(lock, waiter);
1308
1309         task->pi_blocked_on = waiter;
1310
1311         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
1312
1313         if (!owner)
1314                 return 0;
1315
1316         raw_spin_lock_irqsave(&owner->pi_lock, flags);
1317         if (waiter == rt_mutex_top_waiter(lock)) {
1318                 rt_mutex_dequeue_pi(owner, top_waiter);
1319                 rt_mutex_enqueue_pi(owner, waiter);
1320
1321                 __rt_mutex_adjust_prio(owner);
1322                 if (rt_mutex_real_waiter(owner->pi_blocked_on))
1323                         chain_walk = 1;
1324         } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
1325                 chain_walk = 1;
1326         }
1327
1328         /* Store the lock on which owner is blocked or NULL */
1329         next_lock = task_blocked_on_lock(owner);
1330
1331         raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
1332         /*
1333          * Even if full deadlock detection is on, if the owner is not
1334          * blocked itself, we can avoid finding this out in the chain
1335          * walk.
1336          */
1337         if (!chain_walk || !next_lock)
1338                 return 0;
1339
1340         /*
1341          * The owner can't disappear while holding a lock,
1342          * so the owner struct is protected by wait_lock.
1343          * Gets dropped in rt_mutex_adjust_prio_chain()!
1344          */
1345         get_task_struct(owner);
1346
1347         raw_spin_unlock(&lock->wait_lock);
1348
1349         res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
1350                                          next_lock, waiter, task);
1351
1352         raw_spin_lock(&lock->wait_lock);
1353
1354         return res;
1355 }
1356
1357 /*
1358  * Wake up the next waiter on the lock.
1359  *
1360  * Remove the top waiter from the current tasks pi waiter list,
1361  * wake it up and return whether the current task needs to undo
1362  * a potential priority boosting.
1363  *
1364  * Called with lock->wait_lock held.
1365  */
1366 static void wakeup_next_waiter(struct rt_mutex *lock)
1367 {
1368         struct rt_mutex_waiter *waiter;
1369         unsigned long flags;
1370
1371         raw_spin_lock_irqsave(&current->pi_lock, flags);
1372
1373         waiter = rt_mutex_top_waiter(lock);
1374
1375         /*
1376          * Remove it from current->pi_waiters. We do not adjust a
1377          * possible priority boost right now. We execute wakeup in the
1378          * boosted mode and go back to normal after releasing
1379          * lock->wait_lock.
1380          */
1381         rt_mutex_dequeue_pi(current, waiter);
1382
1383         /*
1384          * As we are waking up the top waiter, and the waiter stays
1385          * queued on the lock until it gets the lock, this lock
1386          * obviously has waiters. Just set the bit here and this has
1387          * the added benefit of forcing all new tasks into the
1388          * slow path making sure no task of lower priority than
1389          * the top waiter can steal this lock.
1390          */
1391         lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
1392
1393         raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1394
1395         /*
1396          * It's safe to dereference waiter as it cannot go away as
1397          * long as we hold lock->wait_lock. The waiter task needs to
1398          * acquire it in order to dequeue the waiter.
1399          */
1400         rt_mutex_wake_waiter(waiter);
1401 }
1402
1403 /*
1404  * Remove a waiter from a lock and give up
1405  *
1406  * Must be called with lock->wait_lock held and
1407  * have just failed to try_to_take_rt_mutex().
1408  */
1409 static void remove_waiter(struct rt_mutex *lock,
1410                           struct rt_mutex_waiter *waiter)
1411 {
1412         bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
1413         struct task_struct *owner = rt_mutex_owner(lock);
1414         struct rt_mutex *next_lock = NULL;
1415         unsigned long flags;
1416
1417         raw_spin_lock_irqsave(&current->pi_lock, flags);
1418         rt_mutex_dequeue(lock, waiter);
1419         current->pi_blocked_on = NULL;
1420         raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1421
1422         /*
1423          * Only update priority if the waiter was the highest priority
1424          * waiter of the lock and there is an owner to update.
1425          */
1426         if (!owner || !is_top_waiter)
1427                 return;
1428
1429         raw_spin_lock_irqsave(&owner->pi_lock, flags);
1430
1431         rt_mutex_dequeue_pi(owner, waiter);
1432
1433         if (rt_mutex_has_waiters(lock))
1434                 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
1435
1436         __rt_mutex_adjust_prio(owner);
1437
1438         /* Store the lock on which owner is blocked or NULL */
1439         if (rt_mutex_real_waiter(owner->pi_blocked_on))
1440                 next_lock = task_blocked_on_lock(owner);
1441
1442         raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
1443
1444         /*
1445          * Don't walk the chain, if the owner task is not blocked
1446          * itself.
1447          */
1448         if (!next_lock)
1449                 return;
1450
1451         /* gets dropped in rt_mutex_adjust_prio_chain()! */
1452         get_task_struct(owner);
1453
1454         raw_spin_unlock(&lock->wait_lock);
1455
1456         rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
1457                                    next_lock, NULL, current);
1458
1459         raw_spin_lock(&lock->wait_lock);
1460 }
1461
1462 /*
1463  * Recheck the pi chain, in case we got a priority setting
1464  *
1465  * Called from sched_setscheduler
1466  */
1467 void rt_mutex_adjust_pi(struct task_struct *task)
1468 {
1469         struct rt_mutex_waiter *waiter;
1470         struct rt_mutex *next_lock;
1471         unsigned long flags;
1472
1473         raw_spin_lock_irqsave(&task->pi_lock, flags);
1474
1475         waiter = task->pi_blocked_on;
1476         if (!rt_mutex_real_waiter(waiter) || (waiter->prio == task->prio &&
1477                         !dl_prio(task->prio))) {
1478                 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
1479                 return;
1480         }
1481         next_lock = waiter->lock;
1482
1483         /* gets dropped in rt_mutex_adjust_prio_chain()! */
1484         get_task_struct(task);
1485
1486         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
1487         rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
1488                                    next_lock, NULL, task);
1489 }
1490
1491 /**
1492  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
1493  * @lock:                the rt_mutex to take
1494  * @state:               the state the task should block in (TASK_INTERRUPTIBLE
1495  *                       or TASK_UNINTERRUPTIBLE)
1496  * @timeout:             the pre-initialized and started timer, or NULL for none
1497  * @waiter:              the pre-initialized rt_mutex_waiter
1498  *
1499  * lock->wait_lock must be held by the caller.
1500  */
1501 static int __sched
1502 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
1503                     struct hrtimer_sleeper *timeout,
1504                     struct rt_mutex_waiter *waiter,
1505                     struct ww_acquire_ctx *ww_ctx)
1506 {
1507         int ret = 0;
1508
1509         for (;;) {
1510                 /* Try to acquire the lock: */
1511                 if (try_to_take_rt_mutex(lock, current, waiter))
1512                         break;
1513
1514                 /*
1515                  * TASK_INTERRUPTIBLE checks for signals and
1516                  * timeout. Ignored otherwise.
1517                  */
1518                 if (unlikely(state == TASK_INTERRUPTIBLE)) {
1519                         /* Signal pending? */
1520                         if (signal_pending(current))
1521                                 ret = -EINTR;
1522                         if (timeout && !timeout->task)
1523                                 ret = -ETIMEDOUT;
1524                         if (ret)
1525                                 break;
1526                 }
1527
1528                 if (ww_ctx && ww_ctx->acquired > 0) {
1529                         ret = __mutex_lock_check_stamp(lock, ww_ctx);
1530                         if (ret)
1531                                 break;
1532                 }
1533
1534                 raw_spin_unlock(&lock->wait_lock);
1535
1536                 debug_rt_mutex_print_deadlock(waiter);
1537
1538                 schedule_rt_mutex(lock);
1539
1540                 raw_spin_lock(&lock->wait_lock);
1541                 set_current_state(state);
1542         }
1543
1544         __set_current_state(TASK_RUNNING);
1545         return ret;
1546 }
1547
1548 static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
1549                                      struct rt_mutex_waiter *w)
1550 {
1551         /*
1552          * If the result is not -EDEADLOCK or the caller requested
1553          * deadlock detection, nothing to do here.
1554          */
1555         if (res != -EDEADLOCK || detect_deadlock)
1556                 return;
1557
1558         /*
1559          * Yell loudly and stop the task right here.
1560          */
1561         rt_mutex_print_deadlock(w);
1562         while (1) {
1563                 set_current_state(TASK_INTERRUPTIBLE);
1564                 schedule();
1565         }
1566 }
1567
1568 static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
1569                                                    struct ww_acquire_ctx *ww_ctx)
1570 {
1571 #ifdef CONFIG_DEBUG_MUTEXES
1572         /*
1573          * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
1574          * but released with a normal mutex_unlock in this call.
1575          *
1576          * This should never happen, always use ww_mutex_unlock.
1577          */
1578         DEBUG_LOCKS_WARN_ON(ww->ctx);
1579
1580         /*
1581          * Not quite done after calling ww_acquire_done() ?
1582          */
1583         DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
1584
1585         if (ww_ctx->contending_lock) {
1586                 /*
1587                  * After -EDEADLK you tried to
1588                  * acquire a different ww_mutex? Bad!
1589                  */
1590                 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
1591
1592                 /*
1593                  * You called ww_mutex_lock after receiving -EDEADLK,
1594                  * but 'forgot' to unlock everything else first?
1595                  */
1596                 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
1597                 ww_ctx->contending_lock = NULL;
1598         }
1599
1600         /*
1601          * Naughty, using a different class will lead to undefined behavior!
1602          */
1603         DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
1604 #endif
1605         ww_ctx->acquired++;
1606 }
1607
1608 #ifdef CONFIG_PREEMPT_RT_FULL
1609 static void ww_mutex_account_lock(struct rt_mutex *lock,
1610                                   struct ww_acquire_ctx *ww_ctx)
1611 {
1612         struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
1613         struct rt_mutex_waiter *waiter, *n;
1614
1615         /*
1616          * This branch gets optimized out for the common case,
1617          * and is only important for ww_mutex_lock.
1618          */
1619         ww_mutex_lock_acquired(ww, ww_ctx);
1620         ww->ctx = ww_ctx;
1621
1622         /*
1623          * Give any possible sleeping processes the chance to wake up,
1624          * so they can recheck if they have to back off.
1625          */
1626         rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters,
1627                                              tree_entry) {
1628                 /* XXX debug rt mutex waiter wakeup */
1629
1630                 BUG_ON(waiter->lock != lock);
1631                 rt_mutex_wake_waiter(waiter);
1632         }
1633 }
1634
1635 #else
1636
1637 static void ww_mutex_account_lock(struct rt_mutex *lock,
1638                                   struct ww_acquire_ctx *ww_ctx)
1639 {
1640         BUG();
1641 }
1642 #endif
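/*
 * For context, a rough sketch of the caller-side wait/wound pattern that
 * the wakeups above serve. This is illustrative only: ww_acquire_init(),
 * ww_mutex_lock(), ww_mutex_lock_slow(), ww_acquire_done() and
 * ww_mutex_unlock() come from linux/ww_mutex.h, and obj_a/obj_b/my_ww_class
 * are made-up names for objects sharing one ww_class:
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *
 *	ww_mutex_lock(&obj_a->lock, &ctx);
 *	ret = ww_mutex_lock(&obj_b->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		(we got wounded: drop everything we hold, sleep on the
 *		 contended lock until it is our turn, then retake the
 *		 other locks, backing off again if needed)
 *		ww_mutex_unlock(&obj_a->lock);
 *		ww_mutex_lock_slow(&obj_b->lock, &ctx);
 *		...
 *	}
 *	ww_acquire_done(&ctx);
 */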
1643
1644 /*
1645  * Slow path lock function:
1646  */
1647 static int __sched
1648 rt_mutex_slowlock(struct rt_mutex *lock, int state,
1649                   struct hrtimer_sleeper *timeout,
1650                   enum rtmutex_chainwalk chwalk,
1651                   struct ww_acquire_ctx *ww_ctx)
1652 {
1653         struct rt_mutex_waiter waiter;
1654         int ret = 0;
1655
1656         rt_mutex_init_waiter(&waiter, false);
1657
1658         raw_spin_lock(&lock->wait_lock);
1659
1660         /* Try to acquire the lock again: */
1661         if (try_to_take_rt_mutex(lock, current, NULL)) {
1662                 if (ww_ctx)
1663                         ww_mutex_account_lock(lock, ww_ctx);
1664                 raw_spin_unlock(&lock->wait_lock);
1665                 return 0;
1666         }
1667
1668         set_current_state(state);
1669
1670         /* Setup the timer, when timeout != NULL */
1671         if (unlikely(timeout)) {
1672                 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
1673                 if (!hrtimer_active(&timeout->timer))
1674                         timeout->task = NULL;
1675         }
1676
1677         ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
1678
1679         if (likely(!ret))
1680                 /* sleep on the mutex */
1681                 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
1682                                           ww_ctx);
1683         else if (ww_ctx) {
1684                 /* ww_mutex received EDEADLK, let it become EALREADY */
1685                 ret = __mutex_lock_check_stamp(lock, ww_ctx);
1686                 BUG_ON(!ret);
1687         }
1688
1689         if (unlikely(ret)) {
1690                 __set_current_state(TASK_RUNNING);
1691                 if (rt_mutex_has_waiters(lock))
1692                         remove_waiter(lock, &waiter);
1693                 /* ww_mutex wants to report EDEADLK/EALREADY, so let it */
1694                 if (!ww_ctx)
1695                         rt_mutex_handle_deadlock(ret, chwalk, &waiter);
1696         } else if (ww_ctx) {
1697                 ww_mutex_account_lock(lock, ww_ctx);
1698         }
1699
1700         /*
1701          * try_to_take_rt_mutex() sets the waiter bit
1702          * unconditionally. We might have to fix that up.
1703          */
1704         fixup_rt_mutex_waiters(lock);
1705
1706         raw_spin_unlock(&lock->wait_lock);
1707
1708         /* Remove pending timer: */
1709         if (unlikely(timeout))
1710                 hrtimer_cancel(&timeout->timer);
1711
1712         debug_rt_mutex_free_waiter(&waiter);
1713
1714         return ret;
1715 }
1716
1717 /*
1718  * Slow path try-lock function:
1719  */
1720 static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
1721 {
1722         int ret;
1723
1724         /*
1725          * If the lock already has an owner we fail to get the lock.
1726          * This can be done without taking the @lock->wait_lock as
1727          * it is only being read, and this is a trylock anyway.
1728          */
1729         if (rt_mutex_owner(lock))
1730                 return 0;
1731
1732         /*
1733          * The mutex has currently no owner. Lock the wait lock and
1734          * try to acquire the lock.
1735          */
1736         raw_spin_lock(&lock->wait_lock);
1737
1738         ret = try_to_take_rt_mutex(lock, current, NULL);
1739
1740         /*
1741          * try_to_take_rt_mutex() sets the lock waiters bit
1742          * unconditionally. Clean this up.
1743          */
1744         fixup_rt_mutex_waiters(lock);
1745
1746         raw_spin_unlock(&lock->wait_lock);
1747
1748         return ret;
1749 }
1750
1751 /*
1752  * Slow path to release a rt-mutex:
1753  */
1754 static bool __sched
1755 rt_mutex_slowunlock(struct rt_mutex *lock)
1756 {
1757         raw_spin_lock(&lock->wait_lock);
1758
1759         debug_rt_mutex_unlock(lock);
1760
1761         rt_mutex_deadlock_account_unlock(current);
1762
1763         /*
1764          * We must be careful here if the fast path is enabled. If we
1765          * have no waiters queued we cannot set owner to NULL here
1766          * because of:
1767          *
1768          * foo->lock->owner = NULL;
1769          *                      rtmutex_lock(foo->lock);   <- fast path
1770          *                      free = atomic_dec_and_test(foo->refcnt);
1771          *                      rtmutex_unlock(foo->lock); <- fast path
1772          *                      if (free)
1773          *                              kfree(foo);
1774          * raw_spin_unlock(foo->lock->wait_lock);
1775          *
1776          * So for the fastpath enabled kernel:
1777          *
1778          * Nothing can set the waiters bit as long as we hold
1779          * lock->wait_lock. So we do the following sequence:
1780          *
1781          *      owner = rt_mutex_owner(lock);
1782          *      clear_rt_mutex_waiters(lock);
1783          *      raw_spin_unlock(&lock->wait_lock);
1784          *      if (cmpxchg(&lock->owner, owner, 0) == owner)
1785          *              return;
1786          *      goto retry;
1787          *
1788          * The fastpath disabled variant is simple as all access to
1789          * lock->owner is serialized by lock->wait_lock:
1790          *
1791          *      lock->owner = NULL;
1792          *      raw_spin_unlock(&lock->wait_lock);
1793          */
1794         while (!rt_mutex_has_waiters(lock)) {
1795                 /* Drops lock->wait_lock ! */
1796                 if (unlock_rt_mutex_safe(lock) == true)
1797                         return false;
1798                 /* Relock the rtmutex and try again */
1799                 raw_spin_lock(&lock->wait_lock);
1800         }
1801
1802         /*
1803          * The wakeup next waiter path does not suffer from the above
1804          * race. See the comments there.
1805          */
1806         wakeup_next_waiter(lock);
1807
1808         raw_spin_unlock(&lock->wait_lock);
1809
1810         return true;
1811 }
1812
1813 /*
1814  * debug aware fast / slowpath lock, trylock and unlock
1815  *
1816  * The atomic acquire/release ops are compiled away, when either the
1817  * architecture does not support cmpxchg or when debugging is enabled.
1818  */
1819 static inline int
1820 rt_mutex_fastlock(struct rt_mutex *lock, int state,
1821                   struct ww_acquire_ctx *ww_ctx,
1822                   int (*slowfn)(struct rt_mutex *lock, int state,
1823                                 struct hrtimer_sleeper *timeout,
1824                                 enum rtmutex_chainwalk chwalk,
1825                                 struct ww_acquire_ctx *ww_ctx))
1826 {
1827         if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
1828                 rt_mutex_deadlock_account_lock(lock, current);
1829                 return 0;
1830         } else
1831                 return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK,
1832                               ww_ctx);
1833 }
1834
1835 static inline int
1836 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
1837                         struct hrtimer_sleeper *timeout,
1838                         enum rtmutex_chainwalk chwalk,
1839                         struct ww_acquire_ctx *ww_ctx,
1840                         int (*slowfn)(struct rt_mutex *lock, int state,
1841                                       struct hrtimer_sleeper *timeout,
1842                                       enum rtmutex_chainwalk chwalk,
1843                                       struct ww_acquire_ctx *ww_ctx))
1844 {
1845         if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
1846             likely(rt_mutex_cmpxchg(lock, NULL, current))) {
1847                 rt_mutex_deadlock_account_lock(lock, current);
1848                 return 0;
1849         } else
1850                 return slowfn(lock, state, timeout, chwalk, ww_ctx);
1851 }
1852
1853 static inline int
1854 rt_mutex_fasttrylock(struct rt_mutex *lock,
1855                      int (*slowfn)(struct rt_mutex *lock))
1856 {
1857         if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
1858                 rt_mutex_deadlock_account_lock(lock, current);
1859                 return 1;
1860         }
1861         return slowfn(lock);
1862 }
1863
1864 static inline void
1865 rt_mutex_fastunlock(struct rt_mutex *lock,
1866                     bool (*slowfn)(struct rt_mutex *lock))
1867 {
1868         if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
1869                 rt_mutex_deadlock_account_unlock(current);
1870         } else if (slowfn(lock)) {
1871                 /* Undo pi boosting if necessary: */
1872                 rt_mutex_adjust_prio(current);
1873         }
1874 }
1875
1876 /**
1877  * rt_mutex_lock - lock a rt_mutex
1878  *
1879  * @lock: the rt_mutex to be locked
1880  */
1881 void __sched rt_mutex_lock(struct rt_mutex *lock)
1882 {
1883         might_sleep();
1884
1885         rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock);
1886 }
1887 EXPORT_SYMBOL_GPL(rt_mutex_lock);
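/*
 * Minimal usage sketch (illustrative only; "my_lock" is a made-up name):
 *
 *	static DEFINE_RT_MUTEX(my_lock);
 *
 *	rt_mutex_lock(&my_lock);
 *	... critical section, may sleep; while we hold the lock a blocked
 *	    higher priority waiter boosts our priority ...
 *	rt_mutex_unlock(&my_lock);
 */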
1888
1889 /**
1890  * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
1891  *
1892  * @lock:               the rt_mutex to be locked
1893  *
1894  * Returns:
1895  *  0           on success
1896  * -EINTR       when interrupted by a signal
1897  */
1898 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
1899 {
1900         might_sleep();
1901
1902         return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL, rt_mutex_slowlock);
1903 }
1904 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
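/*
 * Usage sketch for the interruptible variant (illustrative only):
 *
 *	ret = rt_mutex_lock_interruptible(&my_lock);
 *	if (ret)
 *		return ret;	(a signal arrived before we got the lock,
 *				 ret is -EINTR)
 *	...
 *	rt_mutex_unlock(&my_lock);
 */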
1905
1906 /*
1907  * Futex variant with full deadlock detection.
1908  */
1909 int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
1910                               struct hrtimer_sleeper *timeout)
1911 {
1912         might_sleep();
1913
1914         return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
1915                                        RT_MUTEX_FULL_CHAINWALK, NULL,
1916                                        rt_mutex_slowlock);
1917 }
1918
1919 /**
1920  * rt_mutex_lock_killable - lock a rt_mutex killable, i.e. interruptible
1921  *                          only by fatal signals
1922  *
1923  * @lock:              the rt_mutex to be locked
1924  *
1925  * Returns:
1926  *  0          on success
1927  * -EINTR      when interrupted by a fatal signal
1928  *
1929  */
1930 int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
1931 {
1932         might_sleep();
1933
1934         return rt_mutex_fastlock(lock, TASK_KILLABLE, NULL, rt_mutex_slowlock);
1935 }
1936 EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
1937
1938 /**
1939  * rt_mutex_timed_lock - lock a rt_mutex interruptibly, with a timeout;
1940  *                      the timeout structure is provided
1941  *                      by the caller
1942  *
1943  * @lock:               the rt_mutex to be locked
1944  * @timeout:            timeout structure or NULL (no timeout)
1945  *
1946  * Returns:
1947  *  0           on success
1948  * -EINTR       when interrupted by a signal
1949  * -ETIMEDOUT   when the timeout expired
1950  */
1951 int
1952 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
1953 {
1954         might_sleep();
1955
1956         return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
1957                                        RT_MUTEX_MIN_CHAINWALK,
1958                                        NULL,
1959                                        rt_mutex_slowlock);
1960 }
1961 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
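/*
 * Setting up the hrtimer_sleeper is the caller's job. A rough sketch,
 * loosely modelled on how the futex code prepares its sleeper (illustrative
 * only; clock choice and error handling simplified, timeout_ns being the
 * caller's relative timeout in nanoseconds):
 *
 *	struct hrtimer_sleeper to;
 *
 *	hrtimer_init_on_stack(&to.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 *	hrtimer_init_sleeper(&to, current);
 *	hrtimer_set_expires(&to.timer, ktime_add_ns(ktime_get(), timeout_ns));
 *
 *	ret = rt_mutex_timed_lock(&my_lock, &to);
 *	destroy_hrtimer_on_stack(&to.timer);
 *
 * The timer itself is started by the slowpath; ret is 0, -EINTR or
 * -ETIMEDOUT.
 */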
1962
1963 /**
1964  * rt_mutex_trylock - try to lock a rt_mutex
1965  *
1966  * @lock:       the rt_mutex to be locked
1967  *
1968  * Returns 1 on success and 0 on contention
1969  */
1970 int __sched rt_mutex_trylock(struct rt_mutex *lock)
1971 {
1972         return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
1973 }
1974 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
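/*
 * Trylock usage sketch (illustrative only). Note the mutex-style return
 * convention: 1 on success, 0 on contention, not 0/-EBUSY:
 *
 *	if (rt_mutex_trylock(&my_lock)) {
 *		...
 *		rt_mutex_unlock(&my_lock);
 *	} else {
 *		... somebody else holds the lock, fall back or retry later ...
 *	}
 */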
1975
1976 /**
1977  * rt_mutex_unlock - unlock a rt_mutex
1978  *
1979  * @lock: the rt_mutex to be unlocked
1980  */
1981 void __sched rt_mutex_unlock(struct rt_mutex *lock)
1982 {
1983         rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
1984 }
1985 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
1986
1987 /**
1988  * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
1989  * @lock: the rt_mutex to be unlocked
1990  *
1991  * Returns: true/false indicating whether priority adjustment is
1992  * required or not.
1993  */
1994 bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
1995 {
1996         if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
1997                 rt_mutex_deadlock_account_unlock(current);
1998                 return false;
1999         }
2000         return rt_mutex_slowunlock(lock);
2001 }
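/*
 * Sketch of the intended caller-side pattern (illustrative, simplified from
 * what the PI-futex code does): the de-boost is deferred until the caller
 * has dropped its own locks.
 *
 *	deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex);
 *	... drop the futex hash bucket lock etc. ...
 *	if (deboost)
 *		rt_mutex_adjust_prio(current);
 */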
2002
2003 /**
2004  * rt_mutex_destroy - mark a mutex unusable
2005  * @lock: the mutex to be destroyed
2006  *
2007  * This function marks the mutex uninitialized, and any subsequent
2008  * use of the mutex is forbidden. The mutex must not be locked when
2009  * this function is called.
2010  */
2011 void rt_mutex_destroy(struct rt_mutex *lock)
2012 {
2013         WARN_ON(rt_mutex_is_locked(lock));
2014 #ifdef CONFIG_DEBUG_RT_MUTEXES
2015         lock->magic = NULL;
2016 #endif
2017 }
2018
2019 EXPORT_SYMBOL_GPL(rt_mutex_destroy);
2020
2021 /**
2022  * __rt_mutex_init - initialize the rt lock
2023  *
2024  * @lock: the rt lock to be initialized
2025  * @name: the name of the lock, used by the debug code
2026  *
2027  * Initialize the rt lock to unlocked state. Initializing an already
2028  * locked rt lock is not allowed.
2029  */
2030 void __rt_mutex_init(struct rt_mutex *lock, const char *name)
2031 {
2032         lock->owner = NULL;
2033         lock->waiters = RB_ROOT;
2034         lock->waiters_leftmost = NULL;
2035
2036         debug_rt_mutex_init(lock, name);
2037 }
2038 EXPORT_SYMBOL(__rt_mutex_init);
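/*
 * Initialization sketch (illustrative only): users normally go through the
 * wrappers in linux/rtmutex.h rather than calling __rt_mutex_init()
 * directly:
 *
 *	static DEFINE_RT_MUTEX(my_lock);	(static initializer)
 *
 *	rt_mutex_init(&obj->lock);		(runtime init; the debug
 *						 variant also records a name)
 */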
2039
2040 /**
2041  * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
2042  *                              proxy owner
2043  *
2044  * @lock:       the rt_mutex to be locked
2045  * @proxy_owner: the task to set as owner
2046  *
2047  * No locking. The caller has to do the serializing itself.
2048  * Special API call for PI-futex support
2049  */
2050 void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
2051                                 struct task_struct *proxy_owner)
2052 {
2053         rt_mutex_init(lock);
2054         debug_rt_mutex_proxy_lock(lock, proxy_owner);
2055         rt_mutex_set_owner(lock, proxy_owner);
2056         rt_mutex_deadlock_account_lock(lock, proxy_owner);
2057 }
2058
2059 /**
2060  * rt_mutex_proxy_unlock - release a lock on behalf of owner
2061  *
2062  * @lock:       the rt_mutex to be unlocked
2063  *
2064  * No locking. The caller has to do the serializing itself.
2065  * Special API call for PI-futex support
2066  */
2067 void rt_mutex_proxy_unlock(struct rt_mutex *lock,
2068                            struct task_struct *proxy_owner)
2069 {
2070         debug_rt_mutex_proxy_unlock(lock);
2071         rt_mutex_set_owner(lock, NULL);
2072         rt_mutex_deadlock_account_unlock(proxy_owner);
2073 }
2074
2075 /**
2076  * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
2077  * @lock:               the rt_mutex to take
2078  * @waiter:             the pre-initialized rt_mutex_waiter
2079  * @task:               the task to prepare
2080  *
2081  * Returns:
2082  *  0 - task blocked on lock
2083  *  1 - acquired the lock for task, caller should wake it up
2084  * <0 - error
2085  *
2086  * Special API call for FUTEX_REQUEUE_PI support.
2087  */
2088 int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
2089                               struct rt_mutex_waiter *waiter,
2090                               struct task_struct *task)
2091 {
2092         int ret;
2093
2094         raw_spin_lock(&lock->wait_lock);
2095
2096         if (try_to_take_rt_mutex(lock, task, NULL)) {
2097                 raw_spin_unlock(&lock->wait_lock);
2098                 return 1;
2099         }
2100
2101 #ifdef CONFIG_PREEMPT_RT_FULL
2102         /*
2103          * In PREEMPT_RT there's an added race.
2104          * If the task that we are about to requeue times out,
2105          * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
2106          * to skip this task. But right after the task sets
2107          * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
2108          * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
2109          * This will replace the PI_WAKEUP_INPROGRESS with the actual
2110          * lock that it blocks on. We *must not* place this task
2111          * on this proxy lock in that case.
2112          *
2113          * To prevent this race, we first take the task's pi_lock
2114          * and check if it has updated its pi_blocked_on. If it has,
2115          * we assume that it woke up and we return -EAGAIN.
2116          * Otherwise, we set the task's pi_blocked_on to
2117          * PI_REQUEUE_INPROGRESS, so that if the task is waking up
2118          * it will know that we are in the process of requeuing it.
2119          */
2120         raw_spin_lock_irq(&task->pi_lock);
2121         if (task->pi_blocked_on) {
2122                 raw_spin_unlock_irq(&task->pi_lock);
2123                 raw_spin_unlock(&lock->wait_lock);
2124                 return -EAGAIN;
2125         }
2126         task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
2127         raw_spin_unlock_irq(&task->pi_lock);
2128 #endif
2129
2130         /* We enforce deadlock detection for futexes */
2131         ret = task_blocks_on_rt_mutex(lock, waiter, task,
2132                                       RT_MUTEX_FULL_CHAINWALK);
2133
2134         if (ret && !rt_mutex_owner(lock)) {
2135                 /*
2136                  * Reset the return value. We might have
2137                  * returned with -EDEADLK and the owner
2138                  * released the lock while we were walking the
2139                  * pi chain.  Let the waiter sort it out.
2140                  */
2141                 ret = 0;
2142         }
2143
2144         if (unlikely(ret))
2145                 remove_waiter(lock, waiter);
2146
2147         raw_spin_unlock(&lock->wait_lock);
2148
2149         debug_rt_mutex_print_deadlock(waiter);
2150
2151         return ret;
2152 }
2153
2154 /**
2155  * rt_mutex_next_owner - return the next owner of the lock
2156  *
2157  * @lock: the rt lock query
2158  *
2159  * Returns the next owner of the lock or NULL
2160  *
2161  * Caller has to serialize against other accessors to the lock
2162  * itself.
2163  *
2164  * Special API call for PI-futex support
2165  */
2166 struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
2167 {
2168         if (!rt_mutex_has_waiters(lock))
2169                 return NULL;
2170
2171         return rt_mutex_top_waiter(lock)->task;
2172 }
2173
2174 /**
2175  * rt_mutex_finish_proxy_lock() - Complete lock acquisition
2176  * @lock:               the rt_mutex we were woken on
2177  * @to:                 the timeout, NULL if none. The hrtimer should
2178  *                      already have been started.
2179  * @waiter:             the pre-initialized rt_mutex_waiter
2180  *
2181  * Complete the lock acquisition started on our behalf by another thread.
2182  *
2183  * Returns:
2184  *  0 - success
2185  * <0 - error, one of -EINTR, -ETIMEDOUT
2186  *
2187  * Special API call for PI-futex requeue support
2188  */
2189 int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
2190                                struct hrtimer_sleeper *to,
2191                                struct rt_mutex_waiter *waiter)
2192 {
2193         int ret;
2194
2195         raw_spin_lock(&lock->wait_lock);
2196
2197         set_current_state(TASK_INTERRUPTIBLE);
2198
2199         /* sleep on the mutex */
2200         ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
2201
2202         if (unlikely(ret))
2203                 remove_waiter(lock, waiter);
2204
2205         /*
2206          * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
2207          * have to fix that up.
2208          */
2209         fixup_rt_mutex_waiters(lock);
2210
2211         raw_spin_unlock(&lock->wait_lock);
2212
2213         return ret;
2214 }
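/*
 * Condensed sketch of the intended requeue-PI flow (illustrative only; the
 * real users are the FUTEX_CMP_REQUEUE_PI / FUTEX_WAIT_REQUEUE_PI paths in
 * kernel/futex.c):
 *
 *	requeueing task:
 *		ret = rt_mutex_start_proxy_lock(pi_mutex, &waiter, task);
 *		(1: lock acquired for @task, wake it up;
 *		 0: @task is now blocked on the lock;
 *		 <0: error, e.g. -EAGAIN in the PREEMPT_RT race above)
 *
 *	woken waiter, later:
 *		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &waiter);
 *		(0 on success, -EINTR or -ETIMEDOUT otherwise; on error the
 *		 waiter has already been removed from the lock again)
 */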
2215
2216 static inline int
2217 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
2218 {
2219 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
2220         unsigned tmp;
2221
2222         if (ctx->deadlock_inject_countdown-- == 0) {
2223                 tmp = ctx->deadlock_inject_interval;
2224                 if (tmp > UINT_MAX/4)
2225                         tmp = UINT_MAX;
2226                 else
2227                         tmp = tmp*2 + tmp + tmp/2;
2228
2229                 ctx->deadlock_inject_interval = tmp;
2230                 ctx->deadlock_inject_countdown = tmp;
2231                 ctx->contending_lock = lock;
2232
2233                 ww_mutex_unlock(lock);
2234
2235                 return -EDEADLK;
2236         }
2237 #endif
2238
2239         return 0;
2240 }
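/*
 * Note on the interval update above: tmp*2 + tmp + tmp/2 grows the
 * injection interval by roughly 3.5x each time an -EDEADLK is injected,
 * e.g. an initial interval of 1 becomes 3, 10, 35, 122, ... so the forced
 * backoffs get progressively rarer over the lifetime of the context.
 */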
2241
2242 #ifdef CONFIG_PREEMPT_RT_FULL
2243 int __sched
2244 __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
2245 {
2246         int ret;
2247
2248         might_sleep();
2249
2250         mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
2251         ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
2252         if (ret)
2253                 mutex_release(&lock->base.dep_map, 1, _RET_IP_);
2254         else if (!ret && ww_ctx->acquired > 1)
2255                 return ww_mutex_deadlock_injection(lock, ww_ctx);
2256
2257         return ret;
2258 }
2259 EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
2260
2261 int __sched
2262 __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
2263 {
2264         int ret;
2265
2266         might_sleep();
2267
2268         mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
2269         ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
2270         if (ret)
2271                 mutex_release(&lock->base.dep_map, 1, _RET_IP_);
2272         else if (!ret && ww_ctx->acquired > 1)
2273                 return ww_mutex_deadlock_injection(lock, ww_ctx);
2274
2275         return ret;
2276 }
2277 EXPORT_SYMBOL_GPL(__ww_mutex_lock);
2278
2279 void __sched ww_mutex_unlock(struct ww_mutex *lock)
2280 {
2281         int nest = !!lock->ctx;
2282
2283         /*
2284          * The unlocking fastpath is the 0->1 transition from 'locked'
2285          * into 'unlocked' state:
2286          */
2287         if (nest) {
2288 #ifdef CONFIG_DEBUG_MUTEXES
2289                 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
2290 #endif
2291                 if (lock->ctx->acquired > 0)
2292                         lock->ctx->acquired--;
2293                 lock->ctx = NULL;
2294         }
2295
2296         mutex_release(&lock->base.dep_map, nest, _RET_IP_);
2297         rt_mutex_unlock(&lock->base.lock);
2298 }
2299 EXPORT_SYMBOL(ww_mutex_unlock);
2300 #endif