Add RT Linux 4.1.3-rt3 as base
[kvmfornfv.git] / kernel / kernel / sched / wait-simple.c
/*
 * Simple waitqueues without fancy flags and callbacks
 *
 * (C) 2011 Thomas Gleixner <tglx@linutronix.de>
 *
 * Based on kernel/wait.c
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/wait-simple.h>

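/*
 * For reference, a sketch of the two types this file operates on. The real
 * definitions live in <linux/wait-simple.h>; the fields shown here are
 * assumptions reconstructed from how they are used below, not a copy of
 * the header.
 *
 *	struct swait_head {
 *		raw_spinlock_t		lock;
 *		struct list_head	list;	/- list of swaiter::node -/
 *	};
 *
 *	struct swaiter {
 *		struct task_struct	*task;	/- NULL once woken/dequeued -/
 *		struct list_head	node;
 *	};
 */
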
/* Adds w to head->list. Must be called with head->lock locked. */
static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w)
{
	list_add(&w->node, &head->list);
	/* We can't let the condition leak before the setting of head */
	smp_mb();
}

/* Removes w from head->list. Must be called with head->lock locked. */
static inline void __swait_dequeue(struct swaiter *w)
{
	list_del_init(&w->node);
}

void __init_swait_head(struct swait_head *head, struct lock_class_key *key)
{
	raw_spin_lock_init(&head->lock);
	lockdep_set_class(&head->lock, key);
	INIT_LIST_HEAD(&head->list);
}
EXPORT_SYMBOL(__init_swait_head);
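
/*
 * Initialization sketch (illustrative, not part of this file). It assumes
 * <linux/wait-simple.h> provides a DEFINE_SWAIT_HEAD() static initializer
 * and an init_swait_head() wrapper that supplies the lock_class_key for
 * __init_swait_head(); both names are assumptions here, as is 'my_dev'.
 *
 *	static DEFINE_SWAIT_HEAD(my_static_waitq);
 *
 *	struct my_dev {
 *		struct swait_head waitq;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		init_swait_head(&dev->waitq);
 *	}
 */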

/* Caller must hold head->lock. */
void swait_prepare_locked(struct swait_head *head, struct swaiter *w)
{
	w->task = current;
	if (list_empty(&w->node))
		__swait_enqueue(head, w);
}

void swait_prepare(struct swait_head *head, struct swaiter *w, int state)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&head->lock, flags);
	swait_prepare_locked(head, w);
	__set_current_state(state);
	raw_spin_unlock_irqrestore(&head->lock, flags);
}
EXPORT_SYMBOL(swait_prepare);
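
/*
 * Waiter-side usage sketch. The swaiter is typically an on-stack object;
 * DEFINE_SWAITER() is assumed to come from <linux/wait-simple.h>, and
 * 'my_waitq'/'my_cond' are hypothetical. The loop follows the classic
 * prepare-to-wait pattern: enqueue and set the task state, re-check the
 * condition, then schedule() only if it still does not hold.
 *
 *	DEFINE_SWAITER(wait);
 *
 *	for (;;) {
 *		swait_prepare(&my_waitq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (my_cond)
 *			break;
 *		schedule();
 *	}
 *	swait_finish(&my_waitq, &wait);
 */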

/* Caller must hold head->lock. */
void swait_finish_locked(struct swait_head *head, struct swaiter *w)
{
	__set_current_state(TASK_RUNNING);
	if (w->task)
		__swait_dequeue(w);
}

void swait_finish(struct swait_head *head, struct swaiter *w)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * If the waker already dequeued us it also cleared w->task, so the
	 * lock can be skipped entirely; otherwise dequeue under head->lock.
	 */
	if (w->task) {
		raw_spin_lock_irqsave(&head->lock, flags);
		__swait_dequeue(w);
		raw_spin_unlock_irqrestore(&head->lock, flags);
	}
}
EXPORT_SYMBOL(swait_finish);

unsigned int
__swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num)
{
	struct swaiter *curr, *next;
	int woken = 0;

	list_for_each_entry_safe(curr, next, &head->list, node) {
		if (wake_up_state(curr->task, state)) {
			__swait_dequeue(curr);
			/*
			 * The waiting task can free the waiter as
			 * soon as curr->task = NULL is written,
			 * without taking any locks. A memory barrier
			 * is required here to prevent the following
			 * store to curr->task from getting ahead of
			 * the dequeue operation.
			 */
			smp_wmb();
			curr->task = NULL;
			if (++woken == num)
				break;
		}
	}
	return woken;
}
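
/*
 * Usage sketch for the _locked variant (illustrative): a waker that already
 * holds head->lock can update its wakeup condition and wake a waiter
 * atomically. 'x' and its 'done' field are hypothetical, with 'done'
 * assumed to be guarded by the waitqueue lock.
 *
 *	raw_spin_lock_irqsave(&x->waitq.lock, flags);
 *	x->done = true;
 *	__swait_wake_locked(&x->waitq, TASK_NORMAL, 1);
 *	raw_spin_unlock_irqrestore(&x->waitq.lock, flags);
 */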

unsigned int
__swait_wake(struct swait_head *head, unsigned int state, unsigned int num)
{
	unsigned long flags;
	int woken;

	/* Cheap lockless check; avoids taking the lock when nobody waits. */
	if (!swaitqueue_active(head))
		return 0;

	raw_spin_lock_irqsave(&head->lock, flags);
	woken = __swait_wake_locked(head, state, num);
	raw_spin_unlock_irqrestore(&head->lock, flags);
	return woken;
}
EXPORT_SYMBOL(__swait_wake);
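
/*
 * Waker-side usage sketch. Because __swait_wake_locked() stops once
 * ++woken == num, passing num == 1 wakes a single waiter while num == 0
 * wakes every waiter on the list. 'my_cond'/'my_waitq' are hypothetical,
 * and the swait_wake()/swait_wake_all() names are assumed convenience
 * macros from <linux/wait-simple.h> wrapping __swait_wake().
 *
 *	my_cond = true;				/- publish the condition -/
 *	__swait_wake(&my_waitq, TASK_NORMAL, 1);	/- wake one waiter -/
 *
 *	/- or, with the assumed wrappers: -/
 *	swait_wake(&my_waitq);			/- one TASK_NORMAL waiter -/
 *	swait_wake_all(&my_waitq);		/- all TASK_NORMAL waiters -/
 */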