/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 *
 *  See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner        bit0
 * NULL         0       lock is free (fast acquire possible)
 * NULL         1       lock is free and has waiters and the top waiter
 *                              is going to take the lock*
 * taskpointer  0       lock is held (fast release possible)
 * taskpointer  1       lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
 * we need to set bit 0 before looking at the lock, and the owner may
 * be NULL during this small window, hence this can be a transitional state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */

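/*
 * Editorial sketch (not part of the original file): the owner task
 * pointer is recovered by masking bit 0 back off, roughly what the
 * rt_mutex_owner() helper in rtmutex_common.h does:
 *
 *	static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
 *	{
 *		return (struct task_struct *)
 *			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 *	}
 */
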
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
        unsigned long val = (unsigned long)owner;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        if (rt_mutex_has_waiters(lock))
                return;

        /*
         * The rbtree has no waiters enqueued, now make sure that the
         * lock->owner still has the waiters bit set, otherwise the
         * following can happen:
         *
         * CPU 0        CPU 1           CPU2
         * l->owner=T1
         *              rt_mutex_lock(l)
         *              lock(l->lock)
         *              l->owner = T1 | HAS_WAITERS;
         *              enqueue(T2)
         *              boost()
         *                unlock(l->lock)
         *              block()
         *
         *                              rt_mutex_lock(l)
         *                              lock(l->lock)
         *                              l->owner = T1 | HAS_WAITERS;
         *                              enqueue(T3)
         *                              boost()
         *                                unlock(l->lock)
         *                              block()
         *              signal(->T2)    signal(->T3)
         *              lock(l->lock)
         *              dequeue(T2)
         *              deboost()
         *                unlock(l->lock)
         *                              lock(l->lock)
         *                              dequeue(T3)
         *                               ==> wait list is empty
         *                              deboost()
         *                               unlock(l->lock)
         *              lock(l->lock)
         *              fixup_rt_mutex_waiters()
         *                if (wait_list_empty(l)) {
         *                  owner = l->owner & ~HAS_WAITERS;
         *                  l->owner = owner
         *                    ==> l->owner = T1
         *                }
         *                              lock(l->lock)
         * rt_mutex_unlock(l)           fixup_rt_mutex_waiters()
         *                                if (wait_list_empty(l)) {
         *                                  owner = l->owner & ~HAS_WAITERS;
         * cmpxchg(l->owner, T1, NULL)
         *  ===> Success (l->owner = NULL)
         *
         *                                  l->owner = owner
         *                                    ==> l->owner = T1
         *                                }
         *
         * With the check for the waiter bit in place T3 on CPU2 will not
         * overwrite. All tasks fiddling with the waiters bit are
         * serialized by l->lock, so nothing else can modify the waiters
         * bit. If the bit is set then nothing can change l->owner either
         * so the simple RMW is safe. The cmpxchg() will simply fail if it
         * happens in the middle of the RMW because the waiters bit is
         * still set.
         */
        owner = ACCESS_ONCE(*p);
        if (owner & RT_MUTEX_HAS_WAITERS)
                ACCESS_ONCE(*p) = owner & ~RT_MUTEX_HAS_WAITERS;
}

/*
 * We can speed up the acquire/release if the architecture supports
 * cmpxchg and there is no debugging state to be set up.
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)        (cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
        __releases(lock->wait_lock)
{
        struct task_struct *owner = rt_mutex_owner(lock);

        clear_rt_mutex_waiters(lock);
        raw_spin_unlock(&lock->wait_lock);
        /*
         * If a new waiter comes in between the unlock and the cmpxchg
         * we have two situations:
         *
         * unlock(wait_lock);
         *                                      lock(wait_lock);
         * cmpxchg(p, owner, 0) == owner
         *                                      mark_rt_mutex_waiters(lock);
         *                                      acquire(lock);
         * or:
         *
         * unlock(wait_lock);
         *                                      lock(wait_lock);
         *                                      mark_rt_mutex_waiters(lock);
         *
         * cmpxchg(p, owner, 0) != owner
         *                                      enqueue_waiter();
         *                                      unlock(wait_lock);
         * lock(wait_lock);
         * wake waiter();
         * unlock(wait_lock);
         *                                      lock(wait_lock);
         *                                      acquire(lock);
         */
        return rt_mutex_cmpxchg(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg(l,c,n)        (0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
        __releases(lock->wait_lock)
{
        lock->owner = NULL;
        raw_spin_unlock(&lock->wait_lock);
        return true;
}
#endif

static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
                     struct rt_mutex_waiter *right)
{
        if (left->prio < right->prio)
                return 1;

        /*
         * If both waiters have dl_prio(), we check the deadlines of the
         * associated tasks.
         * If left waiter has a dl_prio(), and we didn't return 1 above,
         * then right waiter has a dl_prio() too.
         */
        if (dl_prio(left->prio))
                return (left->task->dl.deadline < right->task->dl.deadline);

        return 0;
}
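
/*
 * Illustrative note (editorial, not in the original file): with two
 * SCHED_FIFO waiters the plain prio comparison above decides, e.g. a
 * waiter at prio 10 orders before one at prio 20 (a lower value means
 * a higher priority). Two SCHED_DEADLINE waiters carry the same prio
 * value (below MAX_DL_PRIO), so the first test ties and the earlier
 * absolute deadline wins the second test.
 */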

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
        struct rb_node **link = &lock->waiters.rb_node;
        struct rb_node *parent = NULL;
        struct rt_mutex_waiter *entry;
        int leftmost = 1;

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
                if (rt_mutex_waiter_less(waiter, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost)
                lock->waiters_leftmost = &waiter->tree_entry;

        rb_link_node(&waiter->tree_entry, parent, link);
        rb_insert_color(&waiter->tree_entry, &lock->waiters);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
        if (RB_EMPTY_NODE(&waiter->tree_entry))
                return;

        if (lock->waiters_leftmost == &waiter->tree_entry)
                lock->waiters_leftmost = rb_next(&waiter->tree_entry);

        rb_erase(&waiter->tree_entry, &lock->waiters);
        RB_CLEAR_NODE(&waiter->tree_entry);
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        struct rb_node **link = &task->pi_waiters.rb_node;
        struct rb_node *parent = NULL;
        struct rt_mutex_waiter *entry;
        int leftmost = 1;

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
                if (rt_mutex_waiter_less(waiter, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost)
                task->pi_waiters_leftmost = &waiter->pi_tree_entry;

        rb_link_node(&waiter->pi_tree_entry, parent, link);
        rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
                return;

        if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
                task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);

        rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
        RB_CLEAR_NODE(&waiter->pi_tree_entry);
}
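
/*
 * Editorial aside: the enqueue/dequeue helpers above cache a pointer
 * to the leftmost (highest priority) rbtree node so that
 * rt_mutex_top_waiter() and task_top_pi_waiter() are O(1) lookups
 * instead of tree walks; later kernels wrap this pattern in
 * struct rb_root_cached.
 */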

/*
 * Calculate task priority from the waiter tree priority
 *
 * Return task->normal_prio when the waiter tree is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return task->normal_prio;

        return min(task_top_pi_waiter(task)->prio,
                   task->normal_prio);
}

struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return NULL;

        return task_top_pi_waiter(task)->task;
}

/*
 * Called by sched_setscheduler() to get the priority which will be
 * effective after the change.
 */
int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
{
        if (!task_has_pi_waiters(task))
                return newprio;

        if (task_top_pi_waiter(task)->task->prio <= newprio)
                return task_top_pi_waiter(task)->task->prio;
        return newprio;
}
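
/*
 * Editorial note: the check above is equivalent to
 * min(task_top_pi_waiter(task)->task->prio, newprio) - the effective
 * priority is the higher (numerically smaller) of the requested
 * priority and the top pi waiter's priority.
 */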

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
        int prio = rt_mutex_getprio(task);

        if (task->prio != prio || dl_prio(prio))
                rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
        return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:       the task owning the mutex (owner) for which a chain walk is
 *              probably needed
 * @deadlock_detect: do we have to carry out deadlock detection?
 * @orig_lock:  the mutex (can be NULL if we are walking the chain to recheck
 *              things for a task that has just got its priority adjusted, and
 *              is waiting on a mutex)
 * @next_lock:  the mutex on which the owner of @orig_lock was blocked before
 *              we dropped its pi_lock. Is never dereferenced, only used for
 *              comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *              its priority to the mutex owner (can be NULL in the case
 *              depicted above or if the top waiter has gone away and we are
 *              actually deboosting the owner)
 * @top_task:   the current top waiter
 *
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      int deadlock_detect,
                                      struct rt_mutex *orig_lock,
                                      struct rt_mutex *next_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
{
        struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        int detect_deadlock, ret = 0, depth = 0;
        unsigned long flags;

        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
                                                         deadlock_detect);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want to hold
         * a maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
 again:
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, task_pid_nr(top_task));
                }
                put_task_struct(task);

                return -EDEADLK;
        }
 retry:
        /*
         * The task cannot go away as we did a get_task_struct() before!
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */
        if (!waiter)
                goto out_unlock_pi;

        /*
         * Check the orig_waiter state. After we dropped the locks,
         * the previous owner of the lock might have released the lock.
         */
        if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;

        /*
         * We dropped all locks after taking a refcount on @task, so
         * the task might have moved on in the lock chain or even left
         * the chain completely and blocks now on an unrelated lock or
         * on @orig_lock.
         *
         * We stored the lock on which @task was blocked in @next_lock,
         * so we can detect the chain change.
         */
        if (next_lock != waiter->lock)
                goto out_unlock_pi;

        /*
         * Drop out when the task has no waiters. Note that top_waiter
         * can be NULL when we are in deboosting mode!
         */
        if (top_waiter) {
                if (!task_has_pi_waiters(task))
                        goto out_unlock_pi;
                /*
                 * If deadlock detection is off, we stop here if we
                 * are not the top pi waiter of the task.
                 */
                if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
                        goto out_unlock_pi;
        }

        /*
         * When deadlock detection is off, we check whether further
         * priority adjustment is necessary.
         */
        if (!detect_deadlock && waiter->prio == task->prio)
                goto out_unlock_pi;

        lock = waiter->lock;
        if (!raw_spin_trylock(&lock->wait_lock)) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
                goto retry;
        }

        /*
         * Deadlock detection. If the lock is the same as the original
         * lock which caused us to walk the lock chain or if the
         * current lock is owned by the task which initiated the chain
         * walk, we detected a deadlock.
         */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
                ret = -EDEADLK;
                goto out_unlock_pi;
        }

        top_waiter = rt_mutex_top_waiter(lock);

        /* Requeue the waiter */
        rt_mutex_dequeue(lock, waiter);
        waiter->prio = task->prio;
        rt_mutex_enqueue(lock, waiter);

        /* Release the task */
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        if (!rt_mutex_owner(lock)) {
                /*
                 * If the requeue above changed the top waiter, then we need
                 * to wake the new top waiter up to try to get the lock.
                 */

                if (top_waiter != rt_mutex_top_waiter(lock))
                        wake_up_process(rt_mutex_top_waiter(lock)->task);
                raw_spin_unlock(&lock->wait_lock);
                goto out_put_task;
        }
        put_task_struct(task);

        /* Grab the next task */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                /* Boost the owner */
                rt_mutex_dequeue_pi(task, top_waiter);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);

        } else if (top_waiter == waiter) {
                /* Deboost the owner */
                rt_mutex_dequeue_pi(task, waiter);
                waiter = rt_mutex_top_waiter(lock);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);
        }

        /*
         * Check whether the task which owns the current lock is pi
         * blocked itself. If yes we store a pointer to the lock for
         * the lock chain change detection above. After we dropped
         * task->pi_lock next_lock cannot be dereferenced anymore.
         */
        next_lock = task_blocked_on_lock(task);

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        top_waiter = rt_mutex_top_waiter(lock);
        raw_spin_unlock(&lock->wait_lock);

        /*
         * We reached the end of the lock chain. Stop right here. No
         * point to go back just to figure that out.
         */
        if (!next_lock)
                goto out_put_task;

        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

 out_unlock_pi:
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
        put_task_struct(task);

        return ret;
}
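
/*
 * Worked example (editorial, not in the original file): suppose T1
 * (prio 30) holds L1 and is itself blocked on L2, which T2 (prio 40)
 * holds. When a prio-10 task blocks on L1, task_blocks_on_rt_mutex()
 * boosts T1 to prio 10 and starts a chain walk with task = T1. The
 * walk requeues T1's waiter in L2's wait tree at the new priority,
 * boosts the owner T2 to prio 10 through its pi_waiters tree, sees
 * that T2 is not blocked itself (next_lock == NULL) and stops.
 */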

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired.
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                struct rt_mutex_waiter *waiter)
{
        /*
         * We have to be careful here if the atomic speedups are
         * enabled: when
         *  - no other waiter is on the lock, or
         *  - the lock has been released since we did the cmpxchg,
         * the lock can be released or taken while we are doing the
         * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
         *
         * The atomic acquire/release aware variant of
         * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
         * the WAITERS bit, the atomic release / acquire can not
         * happen anymore and lock->wait_lock protects us from the
         * non-atomic case.
         *
         * Note that this might set lock->owner =
         * RT_MUTEX_HAS_WAITERS in case the lock is not contended
         * anymore. This is fixed up when we take ownership.
         * This is the transitional state explained at the top of this file.
         */
        mark_rt_mutex_waiters(lock);

        if (rt_mutex_owner(lock))
                return 0;

        /*
         * The task will get the lock if one of these conditions holds:
         * 1) there is no other waiter
         * 2) it has a higher priority than all queued waiters
         * 3) it is the top waiter
         */
        if (rt_mutex_has_waiters(lock)) {
                if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
                        if (!waiter || waiter != rt_mutex_top_waiter(lock))
                                return 0;
                }
        }

        if (waiter || rt_mutex_has_waiters(lock)) {
                unsigned long flags;
                struct rt_mutex_waiter *top;

                raw_spin_lock_irqsave(&task->pi_lock, flags);

                /* remove the queued waiter. */
                if (waiter) {
                        rt_mutex_dequeue(lock, waiter);
                        task->pi_blocked_on = NULL;
                }

                /*
                 * We have to enqueue the top waiter (if it exists)
                 * into the task->pi_waiters tree.
                 */
                if (rt_mutex_has_waiters(lock)) {
                        top = rt_mutex_top_waiter(lock);
                        rt_mutex_enqueue_pi(task, top);
                }
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        }

        /* We got the lock. */
        debug_rt_mutex_lock(lock);

        rt_mutex_set_owner(lock, task);

        rt_mutex_deadlock_account_lock(lock, task);

        return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   struct task_struct *task,
                                   int detect_deadlock)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        struct rt_mutex *next_lock;
        int chain_walk = 0, res;
        unsigned long flags;

        /*
         * Early deadlock detection. We really don't want the task to
         * enqueue on itself just to untangle the mess later. It's not
         * only an optimization. We drop the locks, so another waiter
         * can come in before the chain walk detects the deadlock. So
         * the other will detect the deadlock and return -EDEADLOCK,
         * which is wrong, as the other waiter is not in a deadlock
         * situation.
         */
        if (owner == task)
                return -EDEADLK;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        waiter->task = task;
        waiter->lock = lock;
        waiter->prio = task->prio;

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        rt_mutex_enqueue(lock, waiter);

        task->pi_blocked_on = waiter;

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        if (!owner)
                return 0;

        raw_spin_lock_irqsave(&owner->pi_lock, flags);
        if (waiter == rt_mutex_top_waiter(lock)) {
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);

                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
        } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
                chain_walk = 1;
        }

        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner);

        raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        /*
         * Even if full deadlock detection is on, if the owner is not
         * blocked itself, we can avoid finding this out in the chain
         * walk.
         */
        if (!chain_walk || !next_lock)
                return 0;

        /*
         * The owner can't disappear while holding a lock,
         * so the owner struct is protected by wait_lock.
         * Gets dropped in rt_mutex_adjust_prio_chain()!
         */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
                                         next_lock, waiter, task);

        raw_spin_lock(&lock->wait_lock);

        return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's pi waiter tree and
 * wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        raw_spin_lock_irqsave(&current->pi_lock, flags);

        waiter = rt_mutex_top_waiter(lock);

        /*
         * Remove it from current->pi_waiters. We do not adjust a
         * possible priority boost right now. We execute wakeup in the
         * boosted mode and go back to normal after releasing
         * lock->wait_lock.
         */
        rt_mutex_dequeue_pi(current, waiter);

        /*
         * As we are waking up the top waiter, and the waiter stays
         * queued on the lock until it gets the lock, this lock
         * obviously has waiters. Just set the bit here and this has
         * the added benefit of forcing all new tasks into the
         * slow path making sure no task of lower priority than
         * the top waiter can steal this lock.
         */
        lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        /*
         * It's safe to dereference waiter as it cannot go away as
         * long as we hold lock->wait_lock. The waiter task needs to
         * acquire it in order to dequeue the waiter.
         */
        wake_up_process(waiter->task);
}

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held, after the caller has just
 * failed try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
{
        int first = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex *next_lock = NULL;
        unsigned long flags;

        raw_spin_lock_irqsave(&current->pi_lock, flags);
        rt_mutex_dequeue(lock, waiter);
        current->pi_blocked_on = NULL;
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        if (!owner)
                return;

        if (first) {

                raw_spin_lock_irqsave(&owner->pi_lock, flags);

                rt_mutex_dequeue_pi(owner, waiter);

                if (rt_mutex_has_waiters(lock)) {
                        struct rt_mutex_waiter *next;

                        next = rt_mutex_top_waiter(lock);
                        rt_mutex_enqueue_pi(owner, next);
                }
                __rt_mutex_adjust_prio(owner);

                /* Store the lock on which owner is blocked or NULL */
                next_lock = task_blocked_on_lock(owner);

                raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        }

        if (!next_lock)
                return;

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);

        raw_spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority change.
 *
 * Called from sched_setscheduler()
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        struct rt_mutex *next_lock;
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || (waiter->prio == task->prio &&
                        !dl_prio(task->prio))) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }
        next_lock = waiter->lock;
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);

        rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:                the rt_mutex to take
 * @state:               the state the task should block in (TASK_INTERRUPTIBLE
 *                       or TASK_UNINTERRUPTIBLE)
 * @timeout:             the pre-initialized and started timer, or NULL for none
 * @waiter:              the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
                    struct hrtimer_sleeper *timeout,
                    struct rt_mutex_waiter *waiter)
{
        int ret = 0;

        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock, current, waiter))
                        break;

                /*
                 * TASK_INTERRUPTIBLE checks for signals and
                 * timeout. Ignored otherwise.
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current))
                                ret = -EINTR;
                        if (timeout && !timeout->task)
                                ret = -ETIMEDOUT;
                        if (ret)
                                break;
                }

                raw_spin_unlock(&lock->wait_lock);

                debug_rt_mutex_print_deadlock(waiter);

                schedule_rt_mutex(lock);

                raw_spin_lock(&lock->wait_lock);
                set_current_state(state);
        }

        return ret;
}

static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
                                     struct rt_mutex_waiter *w)
{
        /*
         * If the result is not -EDEADLOCK or the caller requested
         * deadlock detection, nothing to do here.
         */
        if (res != -EDEADLOCK || detect_deadlock)
                return;

        /*
         * Yell loudly and stop the task right here.
         */
        rt_mutex_print_deadlock(w);
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
        }
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  int detect_deadlock)
{
        struct rt_mutex_waiter waiter;
        int ret = 0;

        debug_rt_mutex_init_waiter(&waiter);
        RB_CLEAR_NODE(&waiter.pi_tree_entry);
        RB_CLEAR_NODE(&waiter.tree_entry);

        raw_spin_lock(&lock->wait_lock);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 0;
        }

        set_current_state(state);

        /* Setup the timer, when timeout != NULL */
        if (unlikely(timeout)) {
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
                if (!hrtimer_active(&timeout->timer))
                        timeout->task = NULL;
        }

        ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

        if (likely(!ret))
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

        set_current_state(TASK_RUNNING);

        if (unlikely(ret)) {
                if (rt_mutex_has_waiters(lock))
                        remove_waiter(lock, &waiter);
                rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
        }

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Remove pending timer: */
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);

        debug_rt_mutex_free_waiter(&waiter);

        return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        int ret = 0;

        raw_spin_lock(&lock->wait_lock);

        if (likely(rt_mutex_owner(lock) != current)) {

                ret = try_to_take_rt_mutex(lock, current, NULL);
                /*
                 * try_to_take_rt_mutex() sets the lock waiters
                 * bit unconditionally. Clean this up.
                 */
                fixup_rt_mutex_waiters(lock);
        }

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
        raw_spin_lock(&lock->wait_lock);

        debug_rt_mutex_unlock(lock);

        rt_mutex_deadlock_account_unlock(current);

        /*
         * We must be careful here if the fast path is enabled. If we
         * have no waiters queued we cannot set owner to NULL here
         * because of:
         *
         * foo->lock->owner = NULL;
         *                      rtmutex_lock(foo->lock);   <- fast path
         *                      free = atomic_dec_and_test(foo->refcnt);
         *                      rtmutex_unlock(foo->lock); <- fast path
         *                      if (free)
         *                              kfree(foo);
         * raw_spin_unlock(foo->lock->wait_lock);
         *
         * So for the fastpath enabled kernel:
         *
         * Nothing can set the waiters bit as long as we hold
         * lock->wait_lock. So we do the following sequence:
         *
         *      owner = rt_mutex_owner(lock);
         *      clear_rt_mutex_waiters(lock);
         *      raw_spin_unlock(&lock->wait_lock);
         *      if (cmpxchg(&lock->owner, owner, 0) == owner)
         *              return;
         *      goto retry;
         *
         * The fastpath disabled variant is simple as all access to
         * lock->owner is serialized by lock->wait_lock:
         *
         *      lock->owner = NULL;
         *      raw_spin_unlock(&lock->wait_lock);
         */
        while (!rt_mutex_has_waiters(lock)) {
                /* Drops lock->wait_lock ! */
                if (unlock_rt_mutex_safe(lock))
                        return;
                /* Relock the rtmutex and try again */
                raw_spin_lock(&lock->wait_lock);
        }

        /*
         * The wakeup next waiter path does not suffer from the above
         * race. See the comments there.
         */
        wakeup_next_waiter(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Undo pi boosting if necessary: */
        rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock and unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
                  int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                        struct hrtimer_sleeper *timeout, int detect_deadlock,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
                                      int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
                     int (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 1;
        }
        return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
                    void (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
                rt_mutex_deadlock_account_unlock(current);
        else
                slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        might_sleep();

        rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
 *
 * @lock:               the rt_mutex to be locked
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
                                        int detect_deadlock)
{
        might_sleep();

        return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
                                 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptibly, with a timeout
 *                       structure provided by the caller
 *
 * @lock:               the rt_mutex to be locked
 * @timeout:            timeout structure or NULL (no timeout)
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -ETIMEDOUT   when the timeout expired
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
                    int detect_deadlock)
{
        might_sleep();

        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
                                       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
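
/*
 * Usage sketch (editorial; this mirrors the hrtimer_sleeper setup the
 * futex code uses, see futex_lock_pi() in kernel/futex.c - "expiry"
 * is a hypothetical absolute ktime_t):
 */
#if 0
        struct hrtimer_sleeper timeout;
        int ret;

        hrtimer_init_on_stack(&timeout.timer, CLOCK_MONOTONIC,
                              HRTIMER_MODE_ABS);
        hrtimer_init_sleeper(&timeout, current);
        hrtimer_set_expires(&timeout.timer, expiry);

        ret = rt_mutex_timed_lock(lock, &timeout, 0);

        destroy_hrtimer_on_stack(&timeout.timer);
#endif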

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:       the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
        rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
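
/*
 * Usage sketch (editorial, not part of the original file):
 */
#if 0
        static DEFINE_RT_MUTEX(example_lock);  /* hypothetical lock */

        rt_mutex_lock(&example_lock);
        /* ... critical section, protected with priority inheritance ... */
        rt_mutex_unlock(&example_lock);

        if (rt_mutex_trylock(&example_lock)) {
                /* got it without blocking */
                rt_mutex_unlock(&example_lock);
        }
#endif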

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
        WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
        lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
        lock->owner = NULL;
        raw_spin_lock_init(&lock->wait_lock);
        lock->waiters = RB_ROOT;
        lock->waiters_leftmost = NULL;

        debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *                              proxy owner
 *
 * @lock:       the rt_mutex to be locked
 * @proxy_owner:        the task to set as owner
 *
 * No locking. The caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                struct task_struct *proxy_owner)
{
        __rt_mutex_init(lock, NULL);
        debug_rt_mutex_proxy_lock(lock, proxy_owner);
        rt_mutex_set_owner(lock, proxy_owner);
        rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:       the rt_mutex to be unlocked
 *
 * No locking. The caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                           struct task_struct *proxy_owner)
{
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL);
        rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:               the rt_mutex to take
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @task:               the task to prepare
 * @detect_deadlock:    perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                              struct rt_mutex_waiter *waiter,
                              struct task_struct *task, int detect_deadlock)
{
        int ret;

        raw_spin_lock(&lock->wait_lock);

        if (try_to_take_rt_mutex(lock, task, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 1;
        }

        /* We enforce deadlock detection for futexes */
        ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);

        if (ret && !rt_mutex_owner(lock)) {
                /*
                 * Reset the return value. We might have
                 * returned with -EDEADLK and the owner
                 * released the lock while we were walking the
                 * pi chain.  Let the waiter sort it out.
                 */
                ret = 0;
        }

        if (unlikely(ret))
                remove_waiter(lock, waiter);

        raw_spin_unlock(&lock->wait_lock);

        debug_rt_mutex_print_deadlock(waiter);

        return ret;
}
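
/*
 * Editorial note: for FUTEX_REQUEUE_PI the requeueing task calls
 * rt_mutex_start_proxy_lock() above on behalf of a waiter; the waiter,
 * once woken, calls rt_mutex_finish_proxy_lock() below to wait for and
 * complete the acquisition (see kernel/futex.c).
 */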

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                return NULL;

        return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:               the rt_mutex we were woken on
 * @to:                 the timeout, NULL if none; the hrtimer should already
 *                      have been started
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @detect_deadlock:    perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
                               struct hrtimer_sleeper *to,
                               struct rt_mutex_waiter *waiter,
                               int detect_deadlock)
{
        int ret;

        raw_spin_lock(&lock->wait_lock);

        set_current_state(TASK_INTERRUPTIBLE);

        ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

        set_current_state(TASK_RUNNING);

        if (unlikely(ret))
                remove_waiter(lock, waiter);

        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}