1 /*
2  * RT-Mutexes: simple blocking mutual exclusion locks with PI support
3  *
4  * started by Ingo Molnar and Thomas Gleixner.
5  *
6  *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7  *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8  *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
9  *  Copyright (C) 2006 Esben Nielsen
10  *  Adaptive Spinlocks:
11  *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
12  *                                   and Peter Morreale,
13  * Adaptive Spinlocks simplification:
14  *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
15  *
16  *  See Documentation/locking/rt-mutex-design.txt for details.
17  */
18 #include <linux/spinlock.h>
19 #include <linux/export.h>
20 #include <linux/sched.h>
21 #include <linux/sched/rt.h>
22 #include <linux/sched/deadline.h>
23 #include <linux/timer.h>
24 #include <linux/ww_mutex.h>
25
26 #include "rtmutex_common.h"
27
28 /*
29  * lock->owner state tracking:
30  *
31  * lock->owner holds the task_struct pointer of the owner. Bit 0
32  * is used to keep track of the "lock has waiters" state.
33  *
34  * owner        bit0
35  * NULL         0       lock is free (fast acquire possible)
36  * NULL         1       lock is free and has waiters and the top waiter
37  *                              is going to take the lock*
38  * taskpointer  0       lock is held (fast release possible)
39  * taskpointer  1       lock is held and has waiters**
40  *
41  * The fast atomic compare exchange based acquire and release is only
42  * possible when bit 0 of lock->owner is 0.
43  *
44  * (*) It also can be a transitional state when grabbing the lock
45  * while ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
46  * we need to set the bit0 before looking at the lock, and the owner may be
47  * NULL in this small time, hence this can be a transitional state.
48  *
49  * (**) There is a small time when bit 0 is set but there are no
50  * waiters. This can happen when grabbing the lock in the slow path.
51  * To prevent a cmpxchg of the owner releasing the lock, we need to
52  * set this bit before looking at the lock.
53  */
54
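/*
 * Set lock->owner to @owner, keeping the RT_MUTEX_HAS_WAITERS bit set
 * while waiters are still enqueued so the fast path cmpxchg stays
 * disabled.
 */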
55 static void
56 rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
57 {
58         unsigned long val = (unsigned long)owner;
59
60         if (rt_mutex_has_waiters(lock))
61                 val |= RT_MUTEX_HAS_WAITERS;
62
63         lock->owner = (struct task_struct *)val;
64 }
65
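/*
 * Clear the "has waiters" bit in lock->owner, re-enabling the fast
 * path cmpxchg.
 */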
66 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
67 {
68         lock->owner = (struct task_struct *)
69                         ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
70 }
71
72 static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
73 {
74         unsigned long owner, *p = (unsigned long *) &lock->owner;
75
76         if (rt_mutex_has_waiters(lock))
77                 return;
78
79         /*
80          * The rbtree has no waiters enqueued, now make sure that the
81          * lock->owner still has the waiters bit set, otherwise the
82          * following can happen:
83          *
84          * CPU 0        CPU 1           CPU2
85          * l->owner=T1
86          *              rt_mutex_lock(l)
87          *              lock(l->lock)
88          *              l->owner = T1 | HAS_WAITERS;
89          *              enqueue(T2)
90          *              boost()
91          *                unlock(l->lock)
92          *              block()
93          *
94          *                              rt_mutex_lock(l)
95          *                              lock(l->lock)
96          *                              l->owner = T1 | HAS_WAITERS;
97          *                              enqueue(T3)
98          *                              boost()
99          *                                unlock(l->lock)
100          *                              block()
101          *              signal(->T2)    signal(->T3)
102          *              lock(l->lock)
103          *              dequeue(T2)
104          *              deboost()
105          *                unlock(l->lock)
106          *                              lock(l->lock)
107          *                              dequeue(T3)
108          *                               ==> wait list is empty
109          *                              deboost()
110          *                               unlock(l->lock)
111          *              lock(l->lock)
112          *              fixup_rt_mutex_waiters()
113          *                if (wait_list_empty(l)) {
114          *                  owner = l->owner & ~HAS_WAITERS;
115          *                  l->owner = owner
116          *                    ==> l->owner = T1
117          *                }
118          *                              lock(l->lock)
119          * rt_mutex_unlock(l)           fixup_rt_mutex_waiters()
120          *                                if (wait_list_empty(l)) {
121          *                                  owner = l->owner & ~HAS_WAITERS;
122          * cmpxchg(l->owner, T1, NULL)
123          *  ===> Success (l->owner = NULL)
124          *
125          *                                  l->owner = owner
126          *                                    ==> l->owner = T1
127          *                                }
128          *
129          * With the check for the waiter bit in place T3 on CPU2 will not
130          * overwrite. All tasks fiddling with the waiters bit are
131          * serialized by l->lock, so nothing else can modify the waiters
132          * bit. If the bit is set then nothing can change l->owner either
133          * so the simple RMW is safe. The cmpxchg() will simply fail if it
134          * happens in the middle of the RMW because the waiters bit is
135          * still set.
136          */
137         owner = READ_ONCE(*p);
138         if (owner & RT_MUTEX_HAS_WAITERS)
139                 WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
140 }
141
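/*
 * Distinguish a real enqueued waiter from the PI_WAKEUP_INPROGRESS /
 * PI_REQUEUE_INPROGRESS markers which the futex requeue-PI code stores
 * in ->pi_blocked_on to flag transitional states.
 */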
142 static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
143 {
144         return waiter && waiter != PI_WAKEUP_INPROGRESS &&
145                 waiter != PI_REQUEUE_INPROGRESS;
146 }
147
148 /*
149  * We can speed up the acquire/release, if there's no debugging state to be
150  * set up.
151  */
152 #ifndef CONFIG_DEBUG_RT_MUTEXES
153 # define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)
154 # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
155 # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)
156
157 /*
158  * Callers must hold the ->wait_lock -- which is the whole purpose as we force
159  * all future threads that attempt to [Rmw] the lock to the slowpath. As such
160  * relaxed semantics suffice.
161  */
162 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
163 {
164         unsigned long owner, *p = (unsigned long *) &lock->owner;
165
166         do {
167                 owner = *p;
168         } while (cmpxchg_relaxed(p, owner,
169                                  owner | RT_MUTEX_HAS_WAITERS) != owner);
170 }
171
172 /*
173  * Safe fastpath aware unlock:
174  * 1) Clear the waiters bit
175  * 2) Drop lock->wait_lock
176  * 3) Try to unlock the lock with cmpxchg
177  */
178 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
179                                         unsigned long flags)
180         __releases(lock->wait_lock)
181 {
182         struct task_struct *owner = rt_mutex_owner(lock);
183
184         clear_rt_mutex_waiters(lock);
185         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
186         /*
187          * If a new waiter comes in between the unlock and the cmpxchg
188          * we have two situations:
189          *
190          * unlock(wait_lock);
191          *                                      lock(wait_lock);
192          * cmpxchg(p, owner, 0) == owner
193          *                                      mark_rt_mutex_waiters(lock);
194          *                                      acquire(lock);
195          * or:
196          *
197          * unlock(wait_lock);
198          *                                      lock(wait_lock);
199          *                                      mark_rt_mutex_waiters(lock);
200          *
201          * cmpxchg(p, owner, 0) != owner
202          *                                      enqueue_waiter();
203          *                                      unlock(wait_lock);
204          * lock(wait_lock);
205          * wake waiter();
206          * unlock(wait_lock);
207          *                                      lock(wait_lock);
208          *                                      acquire(lock);
209          */
210         return rt_mutex_cmpxchg_release(lock, owner, NULL);
211 }
212
213 #else
214 # define rt_mutex_cmpxchg_relaxed(l,c,n)        (0)
215 # define rt_mutex_cmpxchg_acquire(l,c,n)        (0)
216 # define rt_mutex_cmpxchg_release(l,c,n)        (0)
217
218 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
219 {
220         lock->owner = (struct task_struct *)
221                         ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
222 }
223
224 /*
225  * Simple slow path only version: lock->owner is protected by lock->wait_lock.
226  */
227 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
228                                         unsigned long flags)
229         __releases(lock->wait_lock)
230 {
231         lock->owner = NULL;
232         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
233         return true;
234 }
235 #endif
236
237 static inline int
238 rt_mutex_waiter_less(struct rt_mutex_waiter *left,
239                      struct rt_mutex_waiter *right)
240 {
241         if (left->prio < right->prio)
242                 return 1;
243
244         /*
245          * If both waiters have dl_prio(), we check the deadlines of the
246          * associated tasks.
247          * If left waiter has a dl_prio(), and we didn't return 1 above,
248          * then right waiter has a dl_prio() too.
249          */
250         if (dl_prio(left->prio))
251                 return dl_time_before(left->task->dl.deadline,
252                                       right->task->dl.deadline);
253
254         return 0;
255 }
256
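/*
 * Insert @waiter into the lock's priority ordered rbtree of waiters
 * and keep the cached leftmost (top priority) node up to date.
 */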
257 static void
258 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
259 {
260         struct rb_node **link = &lock->waiters.rb_node;
261         struct rb_node *parent = NULL;
262         struct rt_mutex_waiter *entry;
263         int leftmost = 1;
264
265         while (*link) {
266                 parent = *link;
267                 entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
268                 if (rt_mutex_waiter_less(waiter, entry)) {
269                         link = &parent->rb_left;
270                 } else {
271                         link = &parent->rb_right;
272                         leftmost = 0;
273                 }
274         }
275
276         if (leftmost)
277                 lock->waiters_leftmost = &waiter->tree_entry;
278
279         rb_link_node(&waiter->tree_entry, parent, link);
280         rb_insert_color(&waiter->tree_entry, &lock->waiters);
281 }
282
283 static void
284 rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
285 {
286         if (RB_EMPTY_NODE(&waiter->tree_entry))
287                 return;
288
289         if (lock->waiters_leftmost == &waiter->tree_entry)
290                 lock->waiters_leftmost = rb_next(&waiter->tree_entry);
291
292         rb_erase(&waiter->tree_entry, &lock->waiters);
293         RB_CLEAR_NODE(&waiter->tree_entry);
294 }
295
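/*
 * Insert @waiter into @task's pi_waiters tree. This tree holds the top
 * waiter of each lock owned by @task; its leftmost entry determines
 * the boosted priority returned by rt_mutex_getprio().
 */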
296 static void
297 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
298 {
299         struct rb_node **link = &task->pi_waiters.rb_node;
300         struct rb_node *parent = NULL;
301         struct rt_mutex_waiter *entry;
302         int leftmost = 1;
303
304         while (*link) {
305                 parent = *link;
306                 entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
307                 if (rt_mutex_waiter_less(waiter, entry)) {
308                         link = &parent->rb_left;
309                 } else {
310                         link = &parent->rb_right;
311                         leftmost = 0;
312                 }
313         }
314
315         if (leftmost)
316                 task->pi_waiters_leftmost = &waiter->pi_tree_entry;
317
318         rb_link_node(&waiter->pi_tree_entry, parent, link);
319         rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
320 }
321
322 static void
323 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
324 {
325         if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
326                 return;
327
328         if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
329                 task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);
330
331         rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
332         RB_CLEAR_NODE(&waiter->pi_tree_entry);
333 }
334
335 /*
336  * Calculate task priority from the waiter tree priority
337  *
338  * Return task->normal_prio when the waiter tree is empty or when
339  * the waiter is not allowed to do priority boosting
340  */
341 int rt_mutex_getprio(struct task_struct *task)
342 {
343         if (likely(!task_has_pi_waiters(task)))
344                 return task->normal_prio;
345
346         return min(task_top_pi_waiter(task)->prio,
347                    task->normal_prio);
348 }
349
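/*
 * Return the task of the highest priority waiter boosting @task, or
 * NULL if @task has no PI waiters.
 */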
350 struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
351 {
352         if (likely(!task_has_pi_waiters(task)))
353                 return NULL;
354
355         return task_top_pi_waiter(task)->task;
356 }
357
358 /*
359  * Called by sched_setscheduler() to get the priority which will be
360  * effective after the change.
361  */
362 int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
363 {
364         if (!task_has_pi_waiters(task))
365                 return newprio;
366
367         if (task_top_pi_waiter(task)->task->prio <= newprio)
368                 return task_top_pi_waiter(task)->task->prio;
369         return newprio;
370 }
371
372 /*
373  * Adjust the priority of a task, after its pi_waiters got modified.
374  *
375  * This can be both boosting and unboosting. task->pi_lock must be held.
376  */
377 static void __rt_mutex_adjust_prio(struct task_struct *task)
378 {
379         int prio = rt_mutex_getprio(task);
380
381         if (task->prio != prio || dl_prio(prio))
382                 rt_mutex_setprio(task, prio);
383 }
384
385 /*
386  * Adjust task priority (undo boosting). Called from the exit path of
387  * rt_mutex_slowunlock() and rt_mutex_slowlock().
388  *
389  * (Note: We do this outside of the protection of lock->wait_lock to
390  * allow the lock to be taken while or before we readjust the priority
391  * of task. We do not use the spin_xx_mutex() variants here as we are
392  * outside of the debug path.)
393  */
394 void rt_mutex_adjust_prio(struct task_struct *task)
395 {
396         unsigned long flags;
397
398         raw_spin_lock_irqsave(&task->pi_lock, flags);
399         __rt_mutex_adjust_prio(task);
400         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
401 }
402
403 /*
404  * Deadlock detection is conditional:
405  *
406  * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
407  * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
408  *
409  * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
410  * conducted independent of the detect argument.
411  *
412  * If the waiter argument is NULL this indicates the deboost path and
413  * deadlock detection is disabled independent of the detect argument
414  * and the config settings.
415  */
416 static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
417                                           enum rtmutex_chainwalk chwalk)
418 {
419         /*
420          * This is just a wrapper function for the following call,
421          * because debug_rt_mutex_detect_deadlock() smells like a magic
422          * debug feature and I wanted to keep the cond function in the
423          * main source file along with the comments instead of having
424          * two of the same in the headers.
425          */
426         return debug_rt_mutex_detect_deadlock(waiter, chwalk);
427 }
428
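/*
 * "Sleeping spinlock" waiters (savestate == true) block with their
 * original state preserved in saved_state and therefore must be woken
 * via wake_up_lock_sleeper() instead of a regular wakeup.
 */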
429 static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
430 {
431         if (waiter->savestate)
432                 wake_up_lock_sleeper(waiter->task);
433         else
434                 wake_up_process(waiter->task);
435 }
436
437 /*
438  * Max number of times we'll walk the boosting chain:
439  */
440 int max_lock_depth = 1024;
441
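/*
 * Return the lock @p is blocked on, or NULL if @p is not blocked on a
 * real rtmutex waiter.
 */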
442 static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
443 {
444         return rt_mutex_real_waiter(p->pi_blocked_on) ?
445                 p->pi_blocked_on->lock : NULL;
446 }
447
448 /*
449  * Adjust the priority chain. Also used for deadlock detection.
450  * Decreases task's usage by one - may thus free the task.
451  *
452  * @task:       the task owning the mutex (owner) for which a chain walk is
453  *              probably needed
454  * @chwalk:     do we have to carry out deadlock detection?
455  * @orig_lock:  the mutex (can be NULL if we are walking the chain to recheck
456  *              things for a task that has just got its priority adjusted, and
457  *              is waiting on a mutex)
458  * @next_lock:  the mutex on which the owner of @orig_lock was blocked before
459  *              we dropped its pi_lock. Is never dereferenced, only used for
460  *              comparison to detect lock chain changes.
461  * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
462  *              its priority to the mutex owner (can be NULL in the case
463  *              depicted above or if the top waiter has gone away and we are
464  *              actually deboosting the owner)
465  * @top_task:   the current top waiter
466  *
467  * Returns 0 or -EDEADLK.
468  *
469  * Chain walk basics and protection scope
470  *
471  * [R] refcount on task
472  * [P] task->pi_lock held
473  * [L] rtmutex->wait_lock held
474  *
475  * Step Description                             Protected by
476  *      function arguments:
477  *      @task                                   [R]
478  *      @orig_lock if != NULL                   @top_task is blocked on it
479  *      @next_lock                              Unprotected. Cannot be
480  *                                              dereferenced. Only used for
481  *                                              comparison.
482  *      @orig_waiter if != NULL                 @top_task is blocked on it
483  *      @top_task                               current, or in case of proxy
484  *                                              locking protected by calling
485  *                                              code
486  *      again:
487  *        loop_sanity_check();
488  *      retry:
489  * [1]    lock(task->pi_lock);                  [R] acquire [P]
490  * [2]    waiter = task->pi_blocked_on;         [P]
491  * [3]    check_exit_conditions_1();            [P]
492  * [4]    lock = waiter->lock;                  [P]
493  * [5]    if (!try_lock(lock->wait_lock)) {     [P] try to acquire [L]
494  *          unlock(task->pi_lock);              release [P]
495  *          goto retry;
496  *        }
497  * [6]    check_exit_conditions_2();            [P] + [L]
498  * [7]    requeue_lock_waiter(lock, waiter);    [P] + [L]
499  * [8]    unlock(task->pi_lock);                release [P]
500  *        put_task_struct(task);                release [R]
501  * [9]    check_exit_conditions_3();            [L]
502  * [10]   task = owner(lock);                   [L]
503  *        get_task_struct(task);                [L] acquire [R]
504  *        lock(task->pi_lock);                  [L] acquire [P]
505  * [11]   requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
506  * [12]   check_exit_conditions_4();            [P] + [L]
507  * [13]   unlock(task->pi_lock);                release [P]
508  *        unlock(lock->wait_lock);              release [L]
509  *        goto again;
510  */
511 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
512                                       enum rtmutex_chainwalk chwalk,
513                                       struct rt_mutex *orig_lock,
514                                       struct rt_mutex *next_lock,
515                                       struct rt_mutex_waiter *orig_waiter,
516                                       struct task_struct *top_task)
517 {
518         struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
519         struct rt_mutex_waiter *prerequeue_top_waiter;
520         int ret = 0, depth = 0;
521         struct rt_mutex *lock;
522         bool detect_deadlock;
523         bool requeue = true;
524
525         detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
526
527         /*
528          * The (de)boosting is a step by step approach with a lot of
529          * pitfalls. We want this to be preemptible and we want to hold a
530          * maximum of two locks per step. So we have to check
531          * carefully whether things change under us.
532          */
533  again:
534         /*
535          * We limit the lock chain length for each invocation.
536          */
537         if (++depth > max_lock_depth) {
538                 static int prev_max;
539
540                 /*
541                  * Print this only once. If the admin changes the limit,
542                  * print a new message when reaching the limit again.
543                  */
544                 if (prev_max != max_lock_depth) {
545                         prev_max = max_lock_depth;
546                         printk(KERN_WARNING "Maximum lock depth %d reached "
547                                "task: %s (%d)\n", max_lock_depth,
548                                top_task->comm, task_pid_nr(top_task));
549                 }
550                 put_task_struct(task);
551
552                 return -EDEADLK;
553         }
554
555         /*
556          * We are fully preemptible here and only hold the refcount on
557          * @task. So everything can have changed under us since the
558          * caller or our own code below (goto retry/again) dropped all
559          * locks.
560          */
561  retry:
562         /*
563          * [1] Task cannot go away as we did a get_task() before!
564          */
565         raw_spin_lock_irq(&task->pi_lock);
566
567         /*
568          * [2] Get the waiter @task is blocked on.
569          */
570         waiter = task->pi_blocked_on;
571
572         /*
573          * [3] check_exit_conditions_1() protected by task->pi_lock.
574          */
575
576         /*
577          * Check whether the end of the boosting chain has been
578          * reached or the state of the chain has changed while we
579          * dropped the locks.
580          */
581         if (!rt_mutex_real_waiter(waiter))
582                 goto out_unlock_pi;
583
584         /*
585          * Check the orig_waiter state. After we dropped the locks,
586          * the previous owner of the lock might have released the lock.
587          */
588         if (orig_waiter && !rt_mutex_owner(orig_lock))
589                 goto out_unlock_pi;
590
591         /*
592          * We dropped all locks after taking a refcount on @task, so
593          * the task might have moved on in the lock chain or even left
594          * the chain completely and blocks now on an unrelated lock or
595          * on @orig_lock.
596          *
597          * We stored the lock on which @task was blocked in @next_lock,
598          * so we can detect the chain change.
599          */
600         if (next_lock != waiter->lock)
601                 goto out_unlock_pi;
602
603         /*
604          * Drop out when the task has no waiters. Note,
605          * top_waiter can be NULL when we are in the deboosting
606          * mode!
607          */
608         if (top_waiter) {
609                 if (!task_has_pi_waiters(task))
610                         goto out_unlock_pi;
611                 /*
612                  * If deadlock detection is off, we stop here if we
613                  * are not the top pi waiter of the task. If deadlock
614                  * detection is enabled we continue, but stop the
615                  * requeueing in the chain walk.
616                  */
617                 if (top_waiter != task_top_pi_waiter(task)) {
618                         if (!detect_deadlock)
619                                 goto out_unlock_pi;
620                         else
621                                 requeue = false;
622                 }
623         }
624
625         /*
626          * If the waiter priority is the same as the task priority
627          * then there is no further priority adjustment necessary.  If
628          * deadlock detection is off, we stop the chain walk. If it's
629          * enabled we continue, but stop the requeueing in the chain
630          * walk.
631          */
632         if (waiter->prio == task->prio) {
633                 if (!detect_deadlock)
634                         goto out_unlock_pi;
635                 else
636                         requeue = false;
637         }
638
639         /*
640          * [4] Get the next lock
641          */
642         lock = waiter->lock;
643         /*
644          * [5] We need to trylock here as we are holding task->pi_lock,
645          * which is the reverse lock order versus the other rtmutex
646          * operations.
647          */
648         if (!raw_spin_trylock(&lock->wait_lock)) {
649                 raw_spin_unlock_irq(&task->pi_lock);
650                 cpu_relax();
651                 goto retry;
652         }
653
654         /*
655          * [6] check_exit_conditions_2() protected by task->pi_lock and
656          * lock->wait_lock.
657          *
658          * Deadlock detection. If the lock is the same as the original
659          * lock which caused us to walk the lock chain or if the
660          * current lock is owned by the task which initiated the chain
661          * walk, we detected a deadlock.
662          */
663         if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
664                 debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
665                 raw_spin_unlock(&lock->wait_lock);
666                 ret = -EDEADLK;
667                 goto out_unlock_pi;
668         }
669
670         /*
671          * If we just follow the lock chain for deadlock detection, no
672          * need to do all the requeue operations. To avoid a truckload
673          * of conditionals around the various places below, just do the
674          * minimum chain walk checks.
675          */
676         if (!requeue) {
677                 /*
678                  * No requeue [7] here. Just release @task [8]
679                  */
680                 raw_spin_unlock(&task->pi_lock);
681                 put_task_struct(task);
682
683                 /*
684                  * [9] check_exit_conditions_3 protected by lock->wait_lock.
685                  * If there is no owner of the lock, end of chain.
686                  */
687                 if (!rt_mutex_owner(lock)) {
688                         raw_spin_unlock_irq(&lock->wait_lock);
689                         return 0;
690                 }
691
692                 /* [10] Grab the next task, i.e. owner of @lock */
693                 task = rt_mutex_owner(lock);
694                 get_task_struct(task);
695                 raw_spin_lock(&task->pi_lock);
696
697                 /*
698                  * No requeue [11] here. We just do deadlock detection.
699                  *
700                  * [12] Store whether owner is blocked
701                  * itself. Decision is made after dropping the locks
702                  */
703                 next_lock = task_blocked_on_lock(task);
704                 /*
705                  * Get the top waiter for the next iteration
706                  */
707                 top_waiter = rt_mutex_top_waiter(lock);
708
709                 /* [13] Drop locks */
710                 raw_spin_unlock(&task->pi_lock);
711                 raw_spin_unlock_irq(&lock->wait_lock);
712
713                 /* If owner is not blocked, end of chain. */
714                 if (!next_lock)
715                         goto out_put_task;
716                 goto again;
717         }
718
719         /*
720          * Store the current top waiter before doing the requeue
721          * operation on @lock. We need it for the boost/deboost
722          * decision below.
723          */
724         prerequeue_top_waiter = rt_mutex_top_waiter(lock);
725
726         /* [7] Requeue the waiter in the lock waiter tree. */
727         rt_mutex_dequeue(lock, waiter);
728         waiter->prio = task->prio;
729         rt_mutex_enqueue(lock, waiter);
730
731         /* [8] Release the task */
732         raw_spin_unlock(&task->pi_lock);
733         put_task_struct(task);
734
735         /*
736          * [9] check_exit_conditions_3 protected by lock->wait_lock.
737          *
738          * We must abort the chain walk if there is no lock owner even
739          * in the deadlock detection case, as we have nothing to
740          * follow here. This is the end of the chain we are walking.
741          */
742         if (!rt_mutex_owner(lock)) {
743                 struct rt_mutex_waiter *lock_top_waiter;
744
745                 /*
746                  * If the requeue [7] above changed the top waiter,
747                  * then we need to wake the new top waiter up to try
748                  * to get the lock.
749                  */
750                 lock_top_waiter = rt_mutex_top_waiter(lock);
751                 if (prerequeue_top_waiter != lock_top_waiter)
752                         rt_mutex_wake_waiter(lock_top_waiter);
753                 raw_spin_unlock_irq(&lock->wait_lock);
754                 return 0;
755         }
756
757         /* [10] Grab the next task, i.e. the owner of @lock */
758         task = rt_mutex_owner(lock);
759         get_task_struct(task);
760         raw_spin_lock(&task->pi_lock);
761
762         /* [11] requeue the pi waiters if necessary */
763         if (waiter == rt_mutex_top_waiter(lock)) {
764                 /*
765                  * The waiter became the new top (highest priority)
766                  * waiter on the lock. Replace the previous top waiter
767                  * in the owner task's pi waiters tree with this waiter
768                  * and adjust the priority of the owner.
769                  */
770                 rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
771                 rt_mutex_enqueue_pi(task, waiter);
772                 __rt_mutex_adjust_prio(task);
773
774         } else if (prerequeue_top_waiter == waiter) {
775                 /*
776                  * The waiter was the top waiter on the lock, but is
777                  * no longer the top priority waiter. Replace waiter in
778                  * the owner task's pi waiters tree with the new top
779                  * (highest priority) waiter and adjust the priority
780                  * of the owner.
781                  * The new top waiter is stored in @waiter so that
782                  * @waiter == @top_waiter evaluates to true below and
783                  * we continue to deboost the rest of the chain.
784                  */
785                 rt_mutex_dequeue_pi(task, waiter);
786                 waiter = rt_mutex_top_waiter(lock);
787                 rt_mutex_enqueue_pi(task, waiter);
788                 __rt_mutex_adjust_prio(task);
789         } else {
790                 /*
791                  * Nothing changed. No need to do any priority
792                  * adjustment.
793                  */
794         }
795
796         /*
797          * [12] check_exit_conditions_4() protected by task->pi_lock
798          * and lock->wait_lock. The actual decisions are made after we
799          * dropped the locks.
800          *
801          * Check whether the task which owns the current lock is pi
802          * blocked itself. If yes we store a pointer to the lock for
803          * the lock chain change detection above. After we dropped
804          * task->pi_lock next_lock cannot be dereferenced anymore.
805          */
806         next_lock = task_blocked_on_lock(task);
807         /*
808          * Store the top waiter of @lock for the end of chain walk
809          * decision below.
810          */
811         top_waiter = rt_mutex_top_waiter(lock);
812
813         /* [13] Drop the locks */
814         raw_spin_unlock(&task->pi_lock);
815         raw_spin_unlock_irq(&lock->wait_lock);
816
817         /*
818          * Make the actual exit decisions [12], based on the stored
819          * values.
820          *
821          * We reached the end of the lock chain. Stop right here. No
822          * point to go back just to figure that out.
823          */
824         if (!next_lock)
825                 goto out_put_task;
826
827         /*
828          * If the current waiter is not the top waiter on the lock,
829          * then we can stop the chain walk here if we are not in full
830          * deadlock detection mode.
831          */
832         if (!detect_deadlock && waiter != top_waiter)
833                 goto out_put_task;
834
835         goto again;
836
837  out_unlock_pi:
838         raw_spin_unlock_irq(&task->pi_lock);
839  out_put_task:
840         put_task_struct(task);
841
842         return ret;
843 }
844
845
846 #define STEAL_NORMAL  0
847 #define STEAL_LATERAL 1
848
849 /*
850  * Note that RT tasks are excluded from lateral-steals to prevent the
851  * introduction of an unbounded latency
852  */
853 static inline int lock_is_stealable(struct task_struct *task,
854                                     struct task_struct *pendowner, int mode)
855 {
856     if (mode == STEAL_NORMAL || rt_task(task)) {
857             if (task->prio >= pendowner->prio)
858                     return 0;
859     } else if (task->prio > pendowner->prio)
860             return 0;
861     return 1;
862 }
863
864 /*
865  * Try to take an rt-mutex
866  *
867  * Must be called with lock->wait_lock held and interrupts disabled
868  *
869  * @lock:   The lock to be acquired.
870  * @task:   The task which wants to acquire the lock
871  * @waiter: The waiter that is queued to the lock's wait tree if the
872  *          callsite called task_blocked_on_lock(), otherwise NULL
873  */
874 static int __try_to_take_rt_mutex(struct rt_mutex *lock,
875                                   struct task_struct *task,
876                                   struct rt_mutex_waiter *waiter, int mode)
877 {
878         /*
879          * Before testing whether we can acquire @lock, we set the
880          * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
881          * other tasks which try to modify @lock into the slow path
882          * and they serialize on @lock->wait_lock.
883          *
884          * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
885          * as explained at the top of this file if and only if:
886          *
887          * - There is a lock owner. The caller must fixup the
888          *   transient state if it does a trylock or leaves the lock
889          *   function due to a signal or timeout.
890          *
891          * - @task acquires the lock and there are no other
892          *   waiters. This is undone in rt_mutex_set_owner(@task) at
893          *   the end of this function.
894          */
895         mark_rt_mutex_waiters(lock);
896
897         /*
898          * If @lock has an owner, give up.
899          */
900         if (rt_mutex_owner(lock))
901                 return 0;
902
903         /*
904          * If @waiter != NULL, @task has already enqueued the waiter
905          * into @lock waiter tree. If @waiter == NULL then this is a
906          * trylock attempt.
907          */
908         if (waiter) {
909                 /*
910                  * If waiter is not the highest priority waiter of
911                  * @lock, give up.
912                  */
913                 if (waiter != rt_mutex_top_waiter(lock)) {
914                         /* XXX lock_is_stealable() ? */
915                         return 0;
916                 }
917
918                 /*
919                  * We can acquire the lock. Remove the waiter from the
920                  * lock waiters tree.
921                  */
922                 rt_mutex_dequeue(lock, waiter);
923
924         } else {
925                 /*
926                  * If the lock has waiters already we check whether @task is
927                  * eligible to take over the lock.
928                  *
929                  * If there are no other waiters, @task can acquire
930                  * the lock.  @task->pi_blocked_on is NULL, so it does
931                  * not need to be dequeued.
932                  */
933                 if (rt_mutex_has_waiters(lock)) {
934                         struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
935
936                         if (task != pown && !lock_is_stealable(task, pown, mode))
937                                 return 0;
938                         /*
939                          * The current top waiter stays enqueued. We
940                          * don't have to change anything in the lock
941                          * waiters order.
942                          */
943                 } else {
944                         /*
945                          * No waiters. Take the lock without the
946                          * pi_lock dance. @task->pi_blocked_on is NULL
947                          * and we have no waiters to enqueue in @task
948                          * pi waiters tree.
949                          */
950                         goto takeit;
951                 }
952         }
953
954         /*
955          * Clear @task->pi_blocked_on. Requires protection by
956          * @task->pi_lock. Redundant operation for the @waiter == NULL
957          * case, but conditionals are more expensive than a redundant
958          * store.
959          */
960         raw_spin_lock(&task->pi_lock);
961         task->pi_blocked_on = NULL;
962         /*
963          * Finish the lock acquisition. @task is the new owner. If
964          * other waiters exist we have to insert the highest priority
965          * waiter into @task->pi_waiters tree.
966          */
967         if (rt_mutex_has_waiters(lock))
968                 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
969         raw_spin_unlock(&task->pi_lock);
970
971 takeit:
972         /* We got the lock. */
973         debug_rt_mutex_lock(lock);
974
975         /*
976          * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
977          * are still waiters or clears it.
978          */
979         rt_mutex_set_owner(lock, task);
980
981         rt_mutex_deadlock_account_lock(lock, task);
982
983         return 1;
984 }
985
986 #ifdef CONFIG_PREEMPT_RT_FULL
987 /*
988  * preemptible spin_lock functions:
989  */
990 static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
991                                          void  (*slowfn)(struct rt_mutex *lock,
992                                                          bool mg_off),
993                                          bool do_mig_dis)
994 {
995         might_sleep_no_state_check();
996
997         if (do_mig_dis)
998                 migrate_disable();
999
1000         if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
1001                 rt_mutex_deadlock_account_lock(lock, current);
1002         else
1003                 slowfn(lock, do_mig_dis);
1004 }
1005
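/*
 * Fast path unlock: release via cmpxchg when the owner can be cleared
 * directly, otherwise fall back to the slow path function.
 */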
1006 static inline int rt_spin_lock_fastunlock(struct rt_mutex *lock,
1007                                            int  (*slowfn)(struct rt_mutex *lock))
1008 {
1009         if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
1010                 rt_mutex_deadlock_account_unlock(current);
1011                 return 0;
1012         }
1013         return slowfn(lock);
1014 }
1015 #ifdef CONFIG_SMP
1016 /*
1017  * Note that owner is a speculative pointer and dereferencing relies
1018  * on rcu_read_lock() and the check against the lock owner.
1019  */
1020 static int adaptive_wait(struct rt_mutex *lock,
1021                          struct task_struct *owner)
1022 {
1023         int res = 0;
1024
1025         rcu_read_lock();
1026         for (;;) {
1027                 if (owner != rt_mutex_owner(lock))
1028                         break;
1029                 /*
1030                  * Ensure that owner->on_cpu is dereferenced _after_
1031                  * checking the above to be valid.
1032                  */
1033                 barrier();
1034                 if (!owner->on_cpu) {
1035                         res = 1;
1036                         break;
1037                 }
1038                 cpu_relax();
1039         }
1040         rcu_read_unlock();
1041         return res;
1042 }
1043 #else
1044 static int adaptive_wait(struct rt_mutex *lock,
1045                          struct task_struct *orig_owner)
1046 {
1047         return 1;
1048 }
1049 #endif
1050
1051 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
1052                                    struct rt_mutex_waiter *waiter,
1053                                    struct task_struct *task,
1054                                    enum rtmutex_chainwalk chwalk);
1055 /*
1056  * Slow path lock function spin_lock style: this variant is very
1057  * careful not to miss any non-lock wakeups.
1058  *
1059  * We store the current state under p->pi_lock in p->saved_state and
1060  * the try_to_wake_up() code handles this accordingly.
1061  */
1062 static void  noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock,
1063                                                     bool mg_off)
1064 {
1065         struct task_struct *lock_owner, *self = current;
1066         struct rt_mutex_waiter waiter, *top_waiter;
1067         unsigned long flags;
1068         int ret;
1069
1070         rt_mutex_init_waiter(&waiter, true);
1071
1072         raw_spin_lock_irqsave(&lock->wait_lock, flags);
1073
1074         if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
1075                 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1076                 return;
1077         }
1078
1079         BUG_ON(rt_mutex_owner(lock) == self);
1080
1081         /*
1082          * We save whatever state the task is in and we'll restore it
1083          * after acquiring the lock taking real wakeups into account
1084          * as well. We are serialized via pi_lock against wakeups. See
1085          * try_to_wake_up().
1086          */
1087         raw_spin_lock(&self->pi_lock);
1088         self->saved_state = self->state;
1089         __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
1090         raw_spin_unlock(&self->pi_lock);
1091
1092         ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
1093         BUG_ON(ret);
1094
1095         for (;;) {
1096                 /* Try to acquire the lock again. */
1097                 if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
1098                         break;
1099
1100                 top_waiter = rt_mutex_top_waiter(lock);
1101                 lock_owner = rt_mutex_owner(lock);
1102
1103                 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1104
1105                 debug_rt_mutex_print_deadlock(&waiter);
1106
1107                 if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) {
1108                         if (mg_off)
1109                                 migrate_enable();
1110                         schedule();
1111                         if (mg_off)
1112                                 migrate_disable();
1113                 }
1114
1115                 raw_spin_lock_irqsave(&lock->wait_lock, flags);
1116
1117                 raw_spin_lock(&self->pi_lock);
1118                 __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
1119                 raw_spin_unlock(&self->pi_lock);
1120         }
1121
1122         /*
1123          * Restore the task state to current->saved_state. We set it
1124          * to the original state above and the try_to_wake_up() code
1125          * has possibly updated it when a real (non-rtmutex) wakeup
1126          * happened while we were blocked. Clear saved_state so
1128          * try_to_wake_up() does not get confused.
1128          */
1129         raw_spin_lock(&self->pi_lock);
1130         __set_current_state_no_track(self->saved_state);
1131         self->saved_state = TASK_RUNNING;
1132         raw_spin_unlock(&self->pi_lock);
1133
1134         /*
1135          * try_to_take_rt_mutex() sets the waiter bit
1136          * unconditionally. We might have to fix that up:
1137          */
1138         fixup_rt_mutex_waiters(lock);
1139
1140         BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
1141         BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
1142
1143         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1144
1145         debug_rt_mutex_free_waiter(&waiter);
1146 }
1147
1148 static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
1149                                     struct wake_q_head *wake_sleeper_q,
1150                                     struct rt_mutex *lock);
1151 /*
1152  * Slow path to release a rt_mutex spin_lock style
1153  */
1154 static int noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
1155 {
1156         unsigned long flags;
1157         WAKE_Q(wake_q);
1158         WAKE_Q(wake_sleeper_q);
1159
1160         raw_spin_lock_irqsave(&lock->wait_lock, flags);
1161
1162         debug_rt_mutex_unlock(lock);
1163
1164         rt_mutex_deadlock_account_unlock(current);
1165
1166         if (!rt_mutex_has_waiters(lock)) {
1167                 lock->owner = NULL;
1168                 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1169                 return 0;
1170         }
1171
1172         mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
1173
1174         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1175         wake_up_q(&wake_q);
1176         wake_up_q_sleeper(&wake_sleeper_q);
1177
1178         /* Undo pi boosting when necessary */
1179         rt_mutex_adjust_prio(current);
1180         return 0;
1181 }
1182
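/*
 * Same as rt_spin_lock_slowunlock(), but the PI deboost is left to the
 * caller. Returns 1 when a waiter was woken and a deboost is still
 * pending, 0 when there were no waiters.
 */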
1183 static int noinline __sched rt_spin_lock_slowunlock_no_deboost(struct rt_mutex *lock)
1184 {
1185         unsigned long flags;
1186         WAKE_Q(wake_q);
1187         WAKE_Q(wake_sleeper_q);
1188
1189         raw_spin_lock_irqsave(&lock->wait_lock, flags);
1190
1191         debug_rt_mutex_unlock(lock);
1192
1193         rt_mutex_deadlock_account_unlock(current);
1194
1195         if (!rt_mutex_has_waiters(lock)) {
1196                 lock->owner = NULL;
1197                 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1198                 return 0;
1199         }
1200
1201         mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
1202
1203         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1204         wake_up_q(&wake_q);
1205         wake_up_q_sleeper(&wake_sleeper_q);
1206         return 1;
1207 }
1208
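/*
 * The __no_mg variants take and release the lock without touching the
 * migrate_disable() counter.
 */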
1209 void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
1210 {
1211         rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, false);
1212         spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1213 }
1214 EXPORT_SYMBOL(rt_spin_lock__no_mg);
1215
1216 void __lockfunc rt_spin_lock(spinlock_t *lock)
1217 {
1218         rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
1219         spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1220 }
1221 EXPORT_SYMBOL(rt_spin_lock);
1222
1223 void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
1224 {
1225         rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, true);
1226 }
1227 EXPORT_SYMBOL(__rt_spin_lock);
1228
1229 void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
1230 {
1231         rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, false);
1232 }
1233 EXPORT_SYMBOL(__rt_spin_lock__no_mg);
1234
1235 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1236 void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
1237 {
1238         spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
1239         rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
1240 }
1241 EXPORT_SYMBOL(rt_spin_lock_nested);
1242 #endif
1243
1244 void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock)
1245 {
1246         /* NOTE: we always pass in '1' for nested, for simplicity */
1247         spin_release(&lock->dep_map, 1, _RET_IP_);
1248         rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
1249 }
1250 EXPORT_SYMBOL(rt_spin_unlock__no_mg);
1251
1252 void __lockfunc rt_spin_unlock(spinlock_t *lock)
1253 {
1254         /* NOTE: we always pass in '1' for nested, for simplicity */
1255         spin_release(&lock->dep_map, 1, _RET_IP_);
1256         rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
1257         migrate_enable();
1258 }
1259 EXPORT_SYMBOL(rt_spin_unlock);
1260
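/*
 * Unlock without undoing the PI boost. Returns non-zero when the slow
 * path woke a waiter and the caller still has to deboost.
 */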
1261 int __lockfunc rt_spin_unlock_no_deboost(spinlock_t *lock)
1262 {
1263         int ret;
1264
1265         /* NOTE: we always pass in '1' for nested, for simplicity */
1266         spin_release(&lock->dep_map, 1, _RET_IP_);
1267         ret = rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_no_deboost);
1268         migrate_enable();
1269         return ret;
1270 }
1271
1272 void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
1273 {
1274         rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
1275 }
1276 EXPORT_SYMBOL(__rt_spin_unlock);
1277
1278 /*
1279  * Wait for the lock to get unlocked: instead of polling for an unlock
1280  * (like raw spinlocks do), we lock and unlock, to force the kernel to
1281  * schedule if there's contention:
1282  */
1283 void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
1284 {
1285         spin_lock(lock);
1286         spin_unlock(lock);
1287 }
1288 EXPORT_SYMBOL(rt_spin_unlock_wait);
1289
1290 int __lockfunc __rt_spin_trylock(struct rt_mutex *lock)
1291 {
1292         return rt_mutex_trylock(lock);
1293 }
1294
1295 int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
1296 {
1297         int ret;
1298
1299         ret = rt_mutex_trylock(&lock->lock);
1300         if (ret)
1301                 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1302         return ret;
1303 }
1304 EXPORT_SYMBOL(rt_spin_trylock__no_mg);
1305
1306 int __lockfunc rt_spin_trylock(spinlock_t *lock)
1307 {
1308         int ret;
1309
1310         migrate_disable();
1311         ret = rt_mutex_trylock(&lock->lock);
1312         if (ret)
1313                 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1314         else
1315                 migrate_enable();
1316         return ret;
1317 }
1318 EXPORT_SYMBOL(rt_spin_trylock);
1319
1320 int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
1321 {
1322         int ret;
1323
1324         local_bh_disable();
1325         ret = rt_mutex_trylock(&lock->lock);
1326         if (ret) {
1327                 migrate_disable();
1328                 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1329         } else
1330                 local_bh_enable();
1331         return ret;
1332 }
1333 EXPORT_SYMBOL(rt_spin_trylock_bh);
1334
1335 int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
1336 {
1337         int ret;
1338
1339         *flags = 0;
1340         ret = rt_mutex_trylock(&lock->lock);
1341         if (ret) {
1342                 migrate_disable();
1343                 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1344         }
1345         return ret;
1346 }
1347 EXPORT_SYMBOL(rt_spin_trylock_irqsave);
1348
1349 int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
1350 {
1351         /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
1352         if (atomic_add_unless(atomic, -1, 1))
1353                 return 0;
1354         rt_spin_lock(lock);
1355         if (atomic_dec_and_test(atomic))
1356                 return 1;
1357         rt_spin_unlock(lock);
1358         return 0;
1359 }
1360 EXPORT_SYMBOL(atomic_dec_and_spin_lock);
1361
1362 void
1363 __rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
1364 {
1365 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1366         /*
1367          * Make sure we are not reinitializing a held lock:
1368          */
1369         debug_check_no_locks_freed((void *)lock, sizeof(*lock));
1370         lockdep_init_map(&lock->dep_map, name, key, 0);
1371 #endif
1372 }
1373 EXPORT_SYMBOL(__rt_spin_lock_init);
1374
1375 #endif /* PREEMPT_RT_FULL */
1376
1377 #ifdef CONFIG_PREEMPT_RT_FULL
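/*
 * ww_mutex stamp check: return -EALREADY if @ctx already holds the
 * ww_mutex, -EDEADLK if it is held by a context with an older stamp,
 * in which case the younger context must back off.
 */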
1378 static inline int __sched
1379 __mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
1380 {
1381         struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
1382         struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
1383
1384         if (!hold_ctx)
1385                 return 0;
1386
1387         if (unlikely(ctx == hold_ctx))
1388                 return -EALREADY;
1389
1390         if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
1391             (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
1392 #ifdef CONFIG_DEBUG_MUTEXES
1393                 DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
1394                 ctx->contending_lock = ww;
1395 #endif
1396                 return -EDEADLK;
1397         }
1398
1399         return 0;
1400 }
1401 #else
1402 static inline int __sched
1403 __mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
1404 {
1405         BUG();
1406         return 0;
1407 }
1408
1409 #endif
1410
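/*
 * Wrapper for the common case: apply the normal (non-lateral) steal
 * rules.
 */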
1411 static inline int
1412 try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
1413                      struct rt_mutex_waiter *waiter)
1414 {
1415         return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
1416 }
1417
1418 /*
1419  * Task blocks on lock.
1420  *
1421  * Prepare waiter and propagate pi chain
1422  *
1423  * This must be called with lock->wait_lock held and interrupts disabled
1424  */
1425 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
1426                                    struct rt_mutex_waiter *waiter,
1427                                    struct task_struct *task,
1428                                    enum rtmutex_chainwalk chwalk)
1429 {
1430         struct task_struct *owner = rt_mutex_owner(lock);
1431         struct rt_mutex_waiter *top_waiter = waiter;
1432         struct rt_mutex *next_lock;
1433         int chain_walk = 0, res;
1434
1435         /*
1436          * Early deadlock detection. We really don't want the task to
1437          * enqueue on itself just to untangle the mess later. It's not
1438          * only an optimization. We drop the locks, so another waiter
1439          * can come in before the chain walk detects the deadlock. So
1440          * the other will detect the deadlock and return -EDEADLOCK,
1441          * which is wrong, as the other waiter is not in a deadlock
1442          * situation.
1443          */
1444         if (owner == task)
1445                 return -EDEADLK;
1446
1447         raw_spin_lock(&task->pi_lock);
1448
1449         /*
1450          * In the case of futex requeue PI, this will be a proxy
1451          * lock. The task will wake unaware that it is enqueued on
1452          * this lock. Avoid blocking on two locks and corrupting
1453          * pi_blocked_on via the PI_WAKEUP_INPROGRESS
1454          * flag. futex_wait_requeue_pi() sets this when it wakes up
1455          * before requeue (due to a signal or timeout). Do not enqueue
1456          * the task if PI_WAKEUP_INPROGRESS is set.
1457          */
1458         if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
1459                 raw_spin_unlock(&task->pi_lock);
1460                 return -EAGAIN;
1461         }
1462
1463         BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
1464
1465         __rt_mutex_adjust_prio(task);
1466         waiter->task = task;
1467         waiter->lock = lock;
1468         waiter->prio = task->prio;
1469
1470         /* Get the top priority waiter on the lock */
1471         if (rt_mutex_has_waiters(lock))
1472                 top_waiter = rt_mutex_top_waiter(lock);
1473         rt_mutex_enqueue(lock, waiter);
1474
1475         task->pi_blocked_on = waiter;
1476
1477         raw_spin_unlock(&task->pi_lock);
1478
1479         if (!owner)
1480                 return 0;
1481
1482         raw_spin_lock(&owner->pi_lock);
1483         if (waiter == rt_mutex_top_waiter(lock)) {
1484                 rt_mutex_dequeue_pi(owner, top_waiter);
1485                 rt_mutex_enqueue_pi(owner, waiter);
1486
1487                 __rt_mutex_adjust_prio(owner);
1488                 if (rt_mutex_real_waiter(owner->pi_blocked_on))
1489                         chain_walk = 1;
1490         } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
1491                 chain_walk = 1;
1492         }
1493
1494         /* Store the lock on which owner is blocked or NULL */
1495         next_lock = task_blocked_on_lock(owner);
1496
1497         raw_spin_unlock(&owner->pi_lock);
1498         /*
1499          * Even if full deadlock detection is on, if the owner is not
1500          * blocked itself, we can avoid finding this out in the chain
1501          * walk.
1502          */
1503         if (!chain_walk || !next_lock)
1504                 return 0;
1505
1506         /*
1507          * The owner can't disappear while holding a lock,
1508          * so the owner struct is protected by wait_lock.
1509          * Gets dropped in rt_mutex_adjust_prio_chain()!
1510          */
1511         get_task_struct(owner);
1512
1513         raw_spin_unlock_irq(&lock->wait_lock);
1514
1515         res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
1516                                          next_lock, waiter, task);
1517
1518         raw_spin_lock_irq(&lock->wait_lock);
1519
1520         return res;
1521 }
1522
1523 /*
1524  * Remove the top waiter from the current task's pi waiter tree and
1525  * queue it up.
1526  *
1527  * Called with lock->wait_lock held and interrupts disabled.
1528  */
1529 static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
1530                                     struct wake_q_head *wake_sleeper_q,
1531                                     struct rt_mutex *lock)
1532 {
1533         struct rt_mutex_waiter *waiter;
1534
1535         raw_spin_lock(&current->pi_lock);
1536
1537         waiter = rt_mutex_top_waiter(lock);
1538
1539         /*
1540          * Remove it from current->pi_waiters. We do not adjust a
1541          * possible priority boost right now. We execute wakeup in the
1542          * boosted mode and go back to normal after releasing
1543          * lock->wait_lock.
1544          */
1545         rt_mutex_dequeue_pi(current, waiter);
1546
1547         /*
1548          * As we are waking up the top waiter, and the waiter stays
1549          * queued on the lock until it gets the lock, this lock
1550          * obviously has waiters. Just set the bit here and this has
1551          * the added benefit of forcing all new tasks into the
1552          * slow path making sure no task of lower priority than
1553          * the top waiter can steal this lock.
1554          */
1555         lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
1556
1557         raw_spin_unlock(&current->pi_lock);
1558
1559         if (waiter->savestate)
1560                 wake_q_add(wake_sleeper_q, waiter->task);
1561         else
1562                 wake_q_add(wake_q, waiter->task);
1563 }
1564
1565 /*
1566  * Remove a waiter from a lock and give up
1567  *
1568  * Must be called with lock->wait_lock held and interrupts disabled. The
1569  * caller must have just failed to try_to_take_rt_mutex().
1570  */
1571 static void remove_waiter(struct rt_mutex *lock,
1572                           struct rt_mutex_waiter *waiter)
1573 {
1574         bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
1575         struct task_struct *owner = rt_mutex_owner(lock);
1576         struct rt_mutex *next_lock = NULL;
1577
1578         raw_spin_lock(&current->pi_lock);
1579         rt_mutex_dequeue(lock, waiter);
1580         current->pi_blocked_on = NULL;
1581         raw_spin_unlock(&current->pi_lock);
1582
1583         /*
1584          * Only update priority if the waiter was the highest priority
1585          * waiter of the lock and there is an owner to update.
1586          */
1587         if (!owner || !is_top_waiter)
1588                 return;
1589
1590         raw_spin_lock(&owner->pi_lock);
1591
1592         rt_mutex_dequeue_pi(owner, waiter);
1593
1594         if (rt_mutex_has_waiters(lock))
1595                 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
1596
1597         __rt_mutex_adjust_prio(owner);
1598
1599         /* Store the lock on which owner is blocked or NULL */
1600         if (rt_mutex_real_waiter(owner->pi_blocked_on))
1601                 next_lock = task_blocked_on_lock(owner);
1602
1603         raw_spin_unlock(&owner->pi_lock);
1604
1605         /*
1606          * Don't walk the chain, if the owner task is not blocked
1607          * itself.
1608          */
1609         if (!next_lock)
1610                 return;
1611
1612         /* gets dropped in rt_mutex_adjust_prio_chain()! */
1613         get_task_struct(owner);
1614
1615         raw_spin_unlock_irq(&lock->wait_lock);
1616
1617         rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
1618                                    next_lock, NULL, current);
1619
1620         raw_spin_lock_irq(&lock->wait_lock);
1621 }
1622
1623 /*
1624  * Recheck the pi chain, in case we got a priority setting
1625  *
1626  * Called from sched_setscheduler
1627  */
1628 void rt_mutex_adjust_pi(struct task_struct *task)
1629 {
1630         struct rt_mutex_waiter *waiter;
1631         struct rt_mutex *next_lock;
1632         unsigned long flags;
1633
1634         raw_spin_lock_irqsave(&task->pi_lock, flags);
1635
1636         waiter = task->pi_blocked_on;
1637         if (!rt_mutex_real_waiter(waiter) || (waiter->prio == task->prio &&
1638                         !dl_prio(task->prio))) {
1639                 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
1640                 return;
1641         }
1642         next_lock = waiter->lock;
1643
1644         /* gets dropped in rt_mutex_adjust_prio_chain()! */
1645         get_task_struct(task);
1646
1647         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
1648         rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
1649                                    next_lock, NULL, task);
1650 }
1651
1652 /**
1653  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
1654  * @lock:                the rt_mutex to take
1655  * @state:               the state the task should block in (TASK_INTERRUPTIBLE
1656  *                       or TASK_UNINTERRUPTIBLE)
1657  * @timeout:             the pre-initialized and started timer, or NULL for none
1658  * @waiter:              the pre-initialized rt_mutex_waiter
1659  *
1660  * Must be called with lock->wait_lock held and interrupts disabled
1661  */
1662 static int __sched
1663 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
1664                     struct hrtimer_sleeper *timeout,
1665                     struct rt_mutex_waiter *waiter,
1666                     struct ww_acquire_ctx *ww_ctx)
1667 {
1668         int ret = 0;
1669
1670         for (;;) {
1671                 /* Try to acquire the lock: */
1672                 if (try_to_take_rt_mutex(lock, current, waiter))
1673                         break;
1674
1675                 /*
1676                  * TASK_INTERRUPTIBLE checks for signals and
1677                  * timeout. Ignored otherwise.
1678                  */
1679                 if (unlikely(state == TASK_INTERRUPTIBLE)) {
1680                         /* Signal pending? */
1681                         if (signal_pending(current))
1682                                 ret = -EINTR;
1683                         if (timeout && !timeout->task)
1684                                 ret = -ETIMEDOUT;
1685                         if (ret)
1686                                 break;
1687                 }
1688
1689                 if (ww_ctx && ww_ctx->acquired > 0) {
1690                         ret = __mutex_lock_check_stamp(lock, ww_ctx);
1691                         if (ret)
1692                                 break;
1693                 }
1694
1695                 raw_spin_unlock_irq(&lock->wait_lock);
1696
1697                 debug_rt_mutex_print_deadlock(waiter);
1698
1699                 schedule();
1700
1701                 raw_spin_lock_irq(&lock->wait_lock);
1702                 set_current_state(state);
1703         }
1704
1705         __set_current_state(TASK_RUNNING);
1706         return ret;
1707 }
1708
1709 static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
1710                                      struct rt_mutex_waiter *w)
1711 {
1712         /*
1713          * If the result is not -EDEADLOCK or the caller requested
1714          * deadlock detection, nothing to do here.
1715          */
1716         if (res != -EDEADLOCK || detect_deadlock)
1717                 return;
1718
1719         /*
1720          * Yell loudly and stop the task right here.
1721          */
1722         rt_mutex_print_deadlock(w);
1723         while (1) {
1724                 set_current_state(TASK_INTERRUPTIBLE);
1725                 schedule();
1726         }
1727 }
1728
1729 static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
1730                                                    struct ww_acquire_ctx *ww_ctx)
1731 {
1732 #ifdef CONFIG_DEBUG_MUTEXES
1733         /*
1734          * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
1735          * but released with a normal mutex_unlock in this call.
1736          *
1737          * This should never happen, always use ww_mutex_unlock.
1738          */
1739         DEBUG_LOCKS_WARN_ON(ww->ctx);
1740
1741         /*
1742          * Not quite done after calling ww_acquire_done() ?
1743          */
1744         DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
1745
1746         if (ww_ctx->contending_lock) {
1747                 /*
1748                  * After -EDEADLK you tried to
1749                  * acquire a different ww_mutex? Bad!
1750                  */
1751                 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
1752
1753                 /*
1754                  * You called ww_mutex_lock after receiving -EDEADLK,
1755                  * but 'forgot' to unlock everything else first?
1756                  */
1757                 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
1758                 ww_ctx->contending_lock = NULL;
1759         }
1760
1761         /*
1762          * Naughty, using a different class will lead to undefined behavior!
1763          */
1764         DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
1765 #endif
1766         ww_ctx->acquired++;
1767 }
1768
1769 #ifdef CONFIG_PREEMPT_RT_FULL
1770 static void ww_mutex_account_lock(struct rt_mutex *lock,
1771                                   struct ww_acquire_ctx *ww_ctx)
1772 {
1773         struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
1774         struct rt_mutex_waiter *waiter, *n;
1775
1776         /*
1777          * This branch gets optimized out for the common case,
1778          * and is only important for ww_mutex_lock.
1779          */
1780         ww_mutex_lock_acquired(ww, ww_ctx);
1781         ww->ctx = ww_ctx;
1782
1783         /*
1784          * Give any possible sleeping processes the chance to wake up,
1785          * so they can recheck if they have to back off.
1786          */
1787         rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters,
1788                                              tree_entry) {
1789                 /* XXX debug rt mutex waiter wakeup */
1790
1791                 BUG_ON(waiter->lock != lock);
1792                 rt_mutex_wake_waiter(waiter);
1793         }
1794 }
1795
1796 #else
1797
1798 static void ww_mutex_account_lock(struct rt_mutex *lock,
1799                                   struct ww_acquire_ctx *ww_ctx)
1800 {
1801         BUG();
1802 }
1803 #endif
1804
1805 /*
1806  * Slow path lock function:
1807  */
1808 static int __sched
1809 rt_mutex_slowlock(struct rt_mutex *lock, int state,
1810                   struct hrtimer_sleeper *timeout,
1811                   enum rtmutex_chainwalk chwalk,
1812                   struct ww_acquire_ctx *ww_ctx)
1813 {
1814         struct rt_mutex_waiter waiter;
1815         unsigned long flags;
1816         int ret = 0;
1817
1818         rt_mutex_init_waiter(&waiter, false);
1819
1820         /*
1821          * Technically we could use raw_spin_[un]lock_irq() here, but this can
1822          * be called in early boot if the cmpxchg() fast path is disabled
1823          * (debug, no architecture support). In this case we will acquire the
1824          * rtmutex with lock->wait_lock held. But we cannot unconditionally
1825          * enable interrupts in that early boot case. So we need to use the
1826          * irqsave/restore variants.
1827          */
1828         raw_spin_lock_irqsave(&lock->wait_lock, flags);
1829
1830         /* Try to acquire the lock again: */
1831         if (try_to_take_rt_mutex(lock, current, NULL)) {
1832                 if (ww_ctx)
1833                         ww_mutex_account_lock(lock, ww_ctx);
1834                 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1835                 return 0;
1836         }
1837
1838         set_current_state(state);
1839
1840         /* Setup the timer, when timeout != NULL */
1841         if (unlikely(timeout))
1842                 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
1843
1844         ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
1845
1846         if (likely(!ret))
1847                 /* sleep on the mutex */
1848                 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
1849                                           ww_ctx);
1850         else if (ww_ctx) {
1851                 /* ww_mutex received EDEADLK, let it become EALREADY */
1852                 ret = __mutex_lock_check_stamp(lock, ww_ctx);
1853                 BUG_ON(!ret);
1854         }
1855
1856         if (unlikely(ret)) {
1857                 __set_current_state(TASK_RUNNING);
1858                 if (rt_mutex_has_waiters(lock))
1859                         remove_waiter(lock, &waiter);
1860                 /* ww_mutex wants to report EDEADLK/EALREADY, so let it */
1861                 if (!ww_ctx)
1862                         rt_mutex_handle_deadlock(ret, chwalk, &waiter);
1863         } else if (ww_ctx) {
1864                 ww_mutex_account_lock(lock, ww_ctx);
1865         }
1866
1867         /*
1868          * try_to_take_rt_mutex() sets the waiter bit
1869          * unconditionally. We might have to fix that up.
1870          */
1871         fixup_rt_mutex_waiters(lock);
1872
1873         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1874
1875         /* Remove pending timer: */
1876         if (unlikely(timeout))
1877                 hrtimer_cancel(&timeout->timer);
1878
1879         debug_rt_mutex_free_waiter(&waiter);
1880
1881         return ret;
1882 }
1883
1884 /*
1885  * Slow path try-lock function:
1886  */
1887 static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
1888 {
1889         unsigned long flags;
1890         int ret;
1891
1892         /*
1893          * If the lock already has an owner we fail to get the lock.
1894          * This can be done without taking the @lock->wait_lock as
1895          * it is only being read, and this is a trylock anyway.
1896          */
1897         if (rt_mutex_owner(lock))
1898                 return 0;
1899
1900         /*
1901          * The mutex has currently no owner. Lock the wait lock and try to
1902          * acquire the lock. We use irqsave here to support early boot calls.
1903          */
1904         raw_spin_lock_irqsave(&lock->wait_lock, flags);
1905
1906         ret = try_to_take_rt_mutex(lock, current, NULL);
1907
1908         /*
1909          * try_to_take_rt_mutex() sets the lock waiters bit
1910          * unconditionally. Clean this up.
1911          */
1912         fixup_rt_mutex_waiters(lock);
1913
1914         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1915
1916         return ret;
1917 }
1918
1919 /*
1920  * Slow path to release a rt-mutex.
1921  * Return whether the current task needs to undo a potential priority boosting.
1922  */
1923 static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
1924                                         struct wake_q_head *wake_q,
1925                                         struct wake_q_head *wake_sleeper_q)
1926 {
1927         unsigned long flags;
1928
1929         /* irqsave required to support early boot calls */
1930         raw_spin_lock_irqsave(&lock->wait_lock, flags);
1931
1932         debug_rt_mutex_unlock(lock);
1933
1934         rt_mutex_deadlock_account_unlock(current);
1935
1936         /*
1937          * We must be careful here if the fast path is enabled. If we
1938          * have no waiters queued we cannot set owner to NULL here
1939          * because of:
1940          *
1941          * foo->lock->owner = NULL;
1942          *                      rtmutex_lock(foo->lock);   <- fast path
1943          *                      free = atomic_dec_and_test(foo->refcnt);
1944          *                      rtmutex_unlock(foo->lock); <- fast path
1945          *                      if (free)
1946          *                              kfree(foo);
1947          * raw_spin_unlock(foo->lock->wait_lock);
1948          *
1949          * So for the fastpath enabled kernel:
1950          *
1951          * Nothing can set the waiters bit as long as we hold
1952          * lock->wait_lock. So we do the following sequence:
1953          *
1954          *      owner = rt_mutex_owner(lock);
1955          *      clear_rt_mutex_waiters(lock);
1956          *      raw_spin_unlock(&lock->wait_lock);
1957          *      if (cmpxchg(&lock->owner, owner, 0) == owner)
1958          *              return;
1959          *      goto retry;
1960          *
1961          * The fastpath disabled variant is simple as all access to
1962          * lock->owner is serialized by lock->wait_lock:
1963          *
1964          *      lock->owner = NULL;
1965          *      raw_spin_unlock(&lock->wait_lock);
1966          */
1967         while (!rt_mutex_has_waiters(lock)) {
1968                 /* Drops lock->wait_lock ! */
1969                 if (unlock_rt_mutex_safe(lock, flags) == true)
1970                         return false;
1971                 /* Relock the rtmutex and try again */
1972                 raw_spin_lock_irqsave(&lock->wait_lock, flags);
1973         }
1974
1975         /*
1976          * The wakeup next waiter path does not suffer from the above
1977          * race. See the comments there.
1978          *
1979          * Queue the next waiter for wakeup once we release the wait_lock.
1980          */
1981         mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);
1982
1983         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1984
1985         /* check PI boosting */
1986         return true;
1987 }
1988
1989 /*
1990  * debug aware fast / slowpath lock,trylock,unlock
1991  *
1992  * The atomic acquire/release ops are compiled away, when either the
1993  * architecture does not support cmpxchg or when debugging is enabled.
1994  */
1995 static inline int
1996 rt_mutex_fastlock(struct rt_mutex *lock, int state,
1997                   struct ww_acquire_ctx *ww_ctx,
1998                   int (*slowfn)(struct rt_mutex *lock, int state,
1999                                 struct hrtimer_sleeper *timeout,
2000                                 enum rtmutex_chainwalk chwalk,
2001                                 struct ww_acquire_ctx *ww_ctx))
2002 {
2003         if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
2004                 rt_mutex_deadlock_account_lock(lock, current);
2005                 return 0;
2006         } else
2007                 return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK,
2008                               ww_ctx);
2009 }
2010
2011 static inline int
2012 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
2013                         struct hrtimer_sleeper *timeout,
2014                         enum rtmutex_chainwalk chwalk,
2015                         struct ww_acquire_ctx *ww_ctx,
2016                         int (*slowfn)(struct rt_mutex *lock, int state,
2017                                       struct hrtimer_sleeper *timeout,
2018                                       enum rtmutex_chainwalk chwalk,
2019                                       struct ww_acquire_ctx *ww_ctx))
2020 {
2021         if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
2022             likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
2023                 rt_mutex_deadlock_account_lock(lock, current);
2024                 return 0;
2025         } else
2026                 return slowfn(lock, state, timeout, chwalk, ww_ctx);
2027 }
2028
2029 static inline int
2030 rt_mutex_fasttrylock(struct rt_mutex *lock,
2031                      int (*slowfn)(struct rt_mutex *lock))
2032 {
2033         if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
2034                 rt_mutex_deadlock_account_lock(lock, current);
2035                 return 1;
2036         }
2037         return slowfn(lock);
2038 }
2039
2040 static inline void
2041 rt_mutex_fastunlock(struct rt_mutex *lock,
2042                     bool (*slowfn)(struct rt_mutex *lock,
2043                                    struct wake_q_head *wqh,
2044                                    struct wake_q_head *wq_sleeper))
2045 {
2046         WAKE_Q(wake_q);
2047         WAKE_Q(wake_sleeper_q);
2048
2049         if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
2050                 rt_mutex_deadlock_account_unlock(current);
2051
2052         } else {
2053                 bool deboost = slowfn(lock, &wake_q, &wake_sleeper_q);
2054
2055                 wake_up_q(&wake_q);
2056                 wake_up_q_sleeper(&wake_sleeper_q);
2057
2058                 /* Undo pi boosting if necessary: */
2059                 if (deboost)
2060                         rt_mutex_adjust_prio(current);
2061         }
2062 }
2063
2064 /**
2065  * rt_mutex_lock - lock a rt_mutex
2066  *
2067  * @lock: the rt_mutex to be locked
2068  */
2069 void __sched rt_mutex_lock(struct rt_mutex *lock)
2070 {
2071         might_sleep();
2072
2073         rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock);
2074 }
2075 EXPORT_SYMBOL_GPL(rt_mutex_lock);
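
/*
 * An illustrative sketch of the basic lock/unlock pattern. The names
 * example_lock, example_counter and example_critical_section() are
 * hypothetical and exist only for this sketch; rt_mutex_lock() may
 * sleep, so this must not be called from interrupt context.
 */
static DEFINE_RT_MUTEX(example_lock);
static int example_counter;

static void __maybe_unused example_critical_section(void)
{
        rt_mutex_lock(&example_lock);
        example_counter++;              /* serialized by example_lock */
        rt_mutex_unlock(&example_lock);
}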
2076
2077 /**
2078  * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
2079  *
2080  * @lock:               the rt_mutex to be locked
2081  *
2082  * Returns:
2083  *  0           on success
2084  * -EINTR       when interrupted by a signal
2085  */
2086 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
2087 {
2088         might_sleep();
2089
2090         return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL, rt_mutex_slowlock);
2091 }
2092 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
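
/*
 * An illustrative sketch of interruptible locking; the helper name is
 * hypothetical. Callers must be prepared to see -EINTR and back out,
 * typically restarting the syscall.
 */
static int __maybe_unused example_lock_interruptible(struct rt_mutex *lock)
{
        int ret;

        ret = rt_mutex_lock_interruptible(lock);
        if (ret)
                return ret;     /* -EINTR: a signal arrived while blocked */

        /* ... critical section ... */

        rt_mutex_unlock(lock);
        return 0;
}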
2093
2094 /*
2095  * Futex variant with full deadlock detection.
2096  */
2097 int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
2098                               struct hrtimer_sleeper *timeout)
2099 {
2100         might_sleep();
2101
2102         return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
2103                                        RT_MUTEX_FULL_CHAINWALK, NULL,
2104                                        rt_mutex_slowlock);
2105 }
2106
2107 /**
2108  * rt_mutex_lock_killable - lock a rt_mutex killable
2109  *
2110  * @lock:              the rt_mutex to be locked
2111  *
2112  * Returns:
2113  *  0          on success
2114  * -EINTR      when interrupted by a fatal signal
2117  */
2118 int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
2119 {
2120         might_sleep();
2121
2122         return rt_mutex_fastlock(lock, TASK_KILLABLE, NULL, rt_mutex_slowlock);
2123 }
2124 EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
2125
2126 /**
2127  * rt_mutex_timed_lock - lock a rt_mutex interruptibly, with a timeout;
2128  *                      the timeout structure is provided
2129  *                      by the caller
2130  *
2131  * @lock:               the rt_mutex to be locked
2132  * @timeout:            timeout structure or NULL (no timeout)
2133  *
2134  * Returns:
2135  *  0           on success
2136  * -EINTR       when interrupted by a signal
2137  * -ETIMEDOUT   when the timeout expired
2138  */
2139 int
2140 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
2141 {
2142         might_sleep();
2143
2144         return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
2145                                        RT_MUTEX_MIN_CHAINWALK,
2146                                        NULL,
2147                                        rt_mutex_slowlock);
2148 }
2149 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
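
/*
 * An illustrative sketch of the caller-side timeout setup; the helper
 * and the 100ms deadline are hypothetical. The slow path arms the
 * timer with HRTIMER_MODE_ABS, so the expiry must be absolute.
 */
static int __maybe_unused example_lock_timed(struct rt_mutex *lock)
{
        struct hrtimer_sleeper timeout;
        int ret;

        hrtimer_init_on_stack(&timeout.timer, CLOCK_MONOTONIC,
                              HRTIMER_MODE_ABS);
        hrtimer_init_sleeper(&timeout, current);
        hrtimer_set_expires(&timeout.timer,
                            ktime_add(ktime_get(), ms_to_ktime(100)));

        ret = rt_mutex_timed_lock(lock, &timeout);
        if (!ret) {
                /* ... critical section ... */
                rt_mutex_unlock(lock);
        }
        /* else ret is -EINTR or -ETIMEDOUT */

        destroy_hrtimer_on_stack(&timeout.timer);
        return ret;
}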
2150
2151 /**
2152  * rt_mutex_trylock - try to lock a rt_mutex
2153  *
2154  * @lock:       the rt_mutex to be locked
2155  *
2156  * This function can only be called in thread context. It's safe to
2157  * call it from atomic regions, but not from hard interrupt or soft
2158  * interrupt context.
2159  *
2160  * Returns 1 on success and 0 on contention
2161  */
2162 int __sched rt_mutex_trylock(struct rt_mutex *lock)
2163 {
2164         return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
2165 }
2166 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
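
/*
 * An illustrative sketch of trylock usage; the helper name is
 * hypothetical. The trylock never sleeps, so it is usable from atomic
 * regions, but not from hard or soft interrupt context.
 */
static bool __maybe_unused example_trylock(struct rt_mutex *lock)
{
        if (!rt_mutex_trylock(lock))
                return false;           /* contended, lock not taken */

        /* ... critical section ... */

        rt_mutex_unlock(lock);
        return true;
}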
2167
2168 /**
2169  * rt_mutex_unlock - unlock a rt_mutex
2170  *
2171  * @lock: the rt_mutex to be unlocked
2172  */
2173 void __sched rt_mutex_unlock(struct rt_mutex *lock)
2174 {
2175         rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
2176 }
2177 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
2178
2179 /**
2180  * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
2181  * @lock: the rt_mutex to be unlocked
2182  *
2183  * Returns: true/false indicating whether priority adjustment is
2184  * required or not.
2185  */
2186 bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
2187                                    struct wake_q_head *wqh,
2188                                    struct wake_q_head *wq_sleeper)
2189 {
2190         if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
2191                 rt_mutex_deadlock_account_unlock(current);
2192                 return false;
2193         }
2194         return rt_mutex_slowunlock(lock, wqh, wq_sleeper);
2195 }
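
/*
 * An illustrative sketch of how the return value is meant to be
 * consumed, mirroring rt_mutex_fastunlock() above; the helper name is
 * hypothetical and the futex hash-bucket handling of the real caller
 * is omitted.
 */
static void __maybe_unused example_futex_style_unlock(struct rt_mutex *lock)
{
        WAKE_Q(wake_q);
        WAKE_Q(wake_sleeper_q);
        bool deboost;

        deboost = rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q);

        /* ... drop any locks that must not be held while waking ... */

        wake_up_q(&wake_q);
        wake_up_q_sleeper(&wake_sleeper_q);

        /* Undo PI boosting if the slow path asked for it: */
        if (deboost)
                rt_mutex_adjust_prio(current);
}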
2196
2197 /**
2198  * rt_mutex_destroy - mark a mutex unusable
2199  * @lock: the mutex to be destroyed
2200  *
2201  * This function marks the mutex uninitialized, and any subsequent
2202  * use of the mutex is forbidden. The mutex must not be locked when
2203  * this function is called.
2204  */
2205 void rt_mutex_destroy(struct rt_mutex *lock)
2206 {
2207         WARN_ON(rt_mutex_is_locked(lock));
2208 #ifdef CONFIG_DEBUG_RT_MUTEXES
2209         lock->magic = NULL;
2210 #endif
2211 }
2212
2213 EXPORT_SYMBOL_GPL(rt_mutex_destroy);
2214
2215 /**
2216  * __rt_mutex_init - initialize the rt lock
2217  *
2218  * @lock: the rt lock to be initialized
2219  *
2220  * Initialize the rt lock to unlocked state.
2221  *
2222  * Initializing a locked rt lock is not allowed
2223  */
2224 void __rt_mutex_init(struct rt_mutex *lock, const char *name)
2225 {
2226         lock->owner = NULL;
2227         lock->waiters = RB_ROOT;
2228         lock->waiters_leftmost = NULL;
2229
2230         debug_rt_mutex_init(lock, name);
2231 }
2232 EXPORT_SYMBOL(__rt_mutex_init);
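
/*
 * An illustrative sketch of dynamic initialization for an embedded
 * rtmutex; struct example_object and its init helper are hypothetical.
 * rt_mutex_init() is the usual wrapper that supplies the name used by
 * the debug code.
 */
struct example_object {
        struct rt_mutex         lock;
        int                     data;
};

static void __maybe_unused example_object_init(struct example_object *obj)
{
        rt_mutex_init(&obj->lock);
        obj->data = 0;
}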
2233
2234 /**
2235  * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
2236  *                              proxy owner
2237  *
2238  * @lock:       the rt_mutex to be locked
2239  * @proxy_owner: the task to set as owner
2240  *
2241  * No locking. The caller has to serialize calls itself
2242  * Special API call for PI-futex support
2243  */
2244 void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
2245                                 struct task_struct *proxy_owner)
2246 {
2247         rt_mutex_init(lock);
2248         debug_rt_mutex_proxy_lock(lock, proxy_owner);
2249         rt_mutex_set_owner(lock, proxy_owner);
2250         rt_mutex_deadlock_account_lock(lock, proxy_owner);
2251 }
2252
2253 /**
2254  * rt_mutex_proxy_unlock - release a lock on behalf of owner
2255  *
2256  * @lock:       the rt_mutex to be released
2257  *
2258  * No locking. The caller has to serialize calls itself
2259  * Special API call for PI-futex support
2260  */
2261 void rt_mutex_proxy_unlock(struct rt_mutex *lock,
2262                            struct task_struct *proxy_owner)
2263 {
2264         debug_rt_mutex_proxy_unlock(lock);
2265         rt_mutex_set_owner(lock, NULL);
2266         rt_mutex_deadlock_account_unlock(proxy_owner);
2267 }
2268
2269 /**
2270  * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
2271  * @lock:               the rt_mutex to take
2272  * @waiter:             the pre-initialized rt_mutex_waiter
2273  * @task:               the task to prepare
2274  *
2275  * Returns:
2276  *  0 - task blocked on lock
2277  *  1 - acquired the lock for task, caller should wake it up
2278  * <0 - error
2279  *
2280  * Special API call for FUTEX_REQUEUE_PI support.
2281  */
2282 int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
2283                               struct rt_mutex_waiter *waiter,
2284                               struct task_struct *task)
2285 {
2286         int ret;
2287
2288         raw_spin_lock_irq(&lock->wait_lock);
2289
2290         if (try_to_take_rt_mutex(lock, task, NULL)) {
2291                 raw_spin_unlock_irq(&lock->wait_lock);
2292                 return 1;
2293         }
2294
2295 #ifdef CONFIG_PREEMPT_RT_FULL
2296         /*
2297          * In PREEMPT_RT there's an added race.
2298          * If the task, that we are about to requeue, times out,
2299          * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
2300          * to skip this task. But right after the task sets
2301          * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
2302          * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
2303          * This will replace the PI_WAKEUP_INPROGRESS with the actual
2304          * lock that it blocks on. We *must not* place this task
2305          * on this proxy lock in that case.
2306          *
2307          * To prevent this race, we first take the task's pi_lock
2308          * and check if it has updated its pi_blocked_on. If it has,
2309          * we assume that it woke up and we return -EAGAIN.
2310          * Otherwise, we set the task's pi_blocked_on to
2311          * PI_REQUEUE_INPROGRESS, so that if the task is waking up
2312          * it will know that we are in the process of requeuing it.
2313          */
2314         raw_spin_lock(&task->pi_lock);
2315         if (task->pi_blocked_on) {
2316                 raw_spin_unlock(&task->pi_lock);
2317                 raw_spin_unlock_irq(&lock->wait_lock);
2318                 return -EAGAIN;
2319         }
2320         task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
2321         raw_spin_unlock(&task->pi_lock);
2322 #endif
2323
2324         /* We enforce deadlock detection for futexes */
2325         ret = task_blocks_on_rt_mutex(lock, waiter, task,
2326                                       RT_MUTEX_FULL_CHAINWALK);
2327
2328         if (ret && !rt_mutex_owner(lock)) {
2329                 /*
2330                  * Reset the return value. We might have
2331                  * returned with -EDEADLK and the owner
2332                  * released the lock while we were walking the
2333                  * pi chain.  Let the waiter sort it out.
2334                  */
2335                 ret = 0;
2336         }
2337
2338         if (ret && rt_mutex_has_waiters(lock))
2339                 remove_waiter(lock, waiter);
2340
2341         raw_spin_unlock_irq(&lock->wait_lock);
2342
2343         debug_rt_mutex_print_deadlock(waiter);
2344
2345         return ret;
2346 }
2347
2348 /**
2349  * rt_mutex_next_owner - return the next owner of the lock
2350  *
2351  * @lock: the rt lock to query
2352  *
2353  * Returns the next owner of the lock or NULL
2354  *
2355  * Caller has to serialize against other accessors to the lock
2356  * itself.
2357  *
2358  * Special API call for PI-futex support
2359  */
2360 struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
2361 {
2362         if (!rt_mutex_has_waiters(lock))
2363                 return NULL;
2364
2365         return rt_mutex_top_waiter(lock)->task;
2366 }
2367
2368 /**
2369  * rt_mutex_finish_proxy_lock() - Complete lock acquisition
2370  * @lock:               the rt_mutex we were woken on
2371  * @to:                 the timeout, NULL if none. The hrtimer should already
2372  *                      have been started.
2373  * @waiter:             the pre-initialized rt_mutex_waiter
2374  *
2375  * Complete the lock acquisition started on our behalf by another thread.
2376  *
2377  * Returns:
2378  *  0 - success
2379  * <0 - error, one of -EINTR, -ETIMEDOUT
2380  *
2381  * Special API call for PI-futex requeue support
2382  */
2383 int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
2384                                struct hrtimer_sleeper *to,
2385                                struct rt_mutex_waiter *waiter)
2386 {
2387         int ret;
2388
2389         raw_spin_lock_irq(&lock->wait_lock);
2390
2391         set_current_state(TASK_INTERRUPTIBLE);
2392
2393         /* sleep on the mutex */
2394         ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
2395
2396         if (unlikely(ret))
2397                 remove_waiter(lock, waiter);
2398
2399         /*
2400          * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
2401          * have to fix that up.
2402          */
2403         fixup_rt_mutex_waiters(lock);
2404
2405         raw_spin_unlock_irq(&lock->wait_lock);
2406
2407         return ret;
2408 }
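
/*
 * An illustrative summary of the proxy-lock sequence used by the
 * PI-futex requeue code (hash-bucket locking and error handling are
 * omitted):
 *
 *      1) The requeueing task calls rt_mutex_start_proxy_lock() with a
 *         pre-initialized waiter on behalf of the task being requeued.
 *      2) A return value of 1 means the lock was acquired for that task
 *         and it only needs to be woken; 0 means the task is now blocked
 *         on the rtmutex.
 *      3) Once woken, the task completes the acquisition itself with
 *         rt_mutex_finish_proxy_lock(), which sleeps until it owns the
 *         lock, the timeout expires or a signal arrives.
 */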
2409
2410 static inline int
2411 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
2412 {
2413 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
2414         unsigned tmp;
2415
2416         if (ctx->deadlock_inject_countdown-- == 0) {
2417                 tmp = ctx->deadlock_inject_interval;
2418                 if (tmp > UINT_MAX/4)
2419                         tmp = UINT_MAX;
2420                 else
2421                         tmp = tmp*2 + tmp + tmp/2;
2422
2423                 ctx->deadlock_inject_interval = tmp;
2424                 ctx->deadlock_inject_countdown = tmp;
2425                 ctx->contending_lock = lock;
2426
2427                 ww_mutex_unlock(lock);
2428
2429                 return -EDEADLK;
2430         }
2431 #endif
2432
2433         return 0;
2434 }
2435
2436 #ifdef CONFIG_PREEMPT_RT_FULL
2437 int __sched
2438 __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
2439 {
2440         int ret;
2441
2442         might_sleep();
2443
2444         mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
2445         ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
2446         if (ret)
2447                 mutex_release(&lock->base.dep_map, 1, _RET_IP_);
2448         else if (!ret && ww_ctx->acquired > 1)
2449                 return ww_mutex_deadlock_injection(lock, ww_ctx);
2450
2451         return ret;
2452 }
2453 EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
2454
2455 int __sched
2456 __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
2457 {
2458         int ret;
2459
2460         might_sleep();
2461
2462         mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
2463         ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
2464         if (ret)
2465                 mutex_release(&lock->base.dep_map, 1, _RET_IP_);
2466         else if (!ret && ww_ctx->acquired > 1)
2467                 return ww_mutex_deadlock_injection(lock, ww_ctx);
2468
2469         return ret;
2470 }
2471 EXPORT_SYMBOL_GPL(__ww_mutex_lock);
2472
2473 void __sched ww_mutex_unlock(struct ww_mutex *lock)
2474 {
2475         int nest = !!lock->ctx;
2476
2477         /*
2478          * The unlocking fastpath is the 0->1 transition from 'locked'
2479          * into 'unlocked' state:
2480          */
2481         if (nest) {
2482 #ifdef CONFIG_DEBUG_MUTEXES
2483                 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
2484 #endif
2485                 if (lock->ctx->acquired > 0)
2486                         lock->ctx->acquired--;
2487                 lock->ctx = NULL;
2488         }
2489
2490         mutex_release(&lock->base.dep_map, nest, _RET_IP_);
2491         rt_mutex_unlock(&lock->base.lock);
2492 }
2493 EXPORT_SYMBOL(ww_mutex_unlock);
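
/*
 * An illustrative sketch of the acquire/backoff pattern this
 * implementation supports, loosely following
 * Documentation/locking/ww-mutex-design.txt. The class, the helper and
 * its structure are hypothetical; a production caller would retry on
 * any further -EDEADLK instead of returning it.
 */
static DEFINE_WW_CLASS(example_ww_class);

static int __maybe_unused example_ww_lock_pair(struct ww_mutex *a,
                                               struct ww_mutex *b)
{
        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &example_ww_class);

        ret = ww_mutex_lock(a, &ctx);
        if (ret)
                goto out;

        ret = ww_mutex_lock(b, &ctx);
        if (ret == -EDEADLK) {
                /* An older context owns @b: drop @a and wait for @b. */
                ww_mutex_unlock(a);
                ww_mutex_lock_slow(b, &ctx);
                ret = ww_mutex_lock(a, &ctx);
                if (ret)
                        ww_mutex_unlock(b);
        } else if (ret) {
                ww_mutex_unlock(a);
        }

        if (!ret) {
                ww_acquire_done(&ctx);
                /* ... both locks held ... */
                ww_mutex_unlock(a);
                ww_mutex_unlock(b);
        }
out:
        ww_acquire_fini(&ctx);
        return ret;
}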
2494 #endif