1 #include <linux/sched.h>
2 #include <linux/swait.h>
3 #include <linux/suspend.h>
5 void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
6 struct lock_class_key *key)
8 raw_spin_lock_init(&q->lock);
9 lockdep_set_class_and_name(&q->lock, key, name);
10 INIT_LIST_HEAD(&q->task_list);
12 EXPORT_SYMBOL(__init_swait_queue_head);
/*
 * The thing about the wake_up_state() return value; I think we can ignore it.
 *
 * If for some reason it would return 0, that means the previously waiting
 * task is already running, so it will observe condition true (or has already).
 */
20 void swake_up_locked(struct swait_queue_head *q)
22 struct swait_queue *curr;
24 if (list_empty(&q->task_list))
27 curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
28 wake_up_process(curr->task);
29 list_del_init(&curr->task_list);
31 EXPORT_SYMBOL(swake_up_locked);
33 void swake_up_all_locked(struct swait_queue_head *q)
35 struct swait_queue *curr;
38 while (!list_empty(&q->task_list)) {
40 curr = list_first_entry(&q->task_list, typeof(*curr),
42 wake_up_process(curr->task);
43 list_del_init(&curr->task_list);
48 WARN(wakes > 2, "complate_all() with %d waiters\n", wakes);
50 EXPORT_SYMBOL(swake_up_all_locked);
52 void swake_up(struct swait_queue_head *q)
59 raw_spin_lock_irqsave(&q->lock, flags);
61 raw_spin_unlock_irqrestore(&q->lock, flags);
63 EXPORT_SYMBOL(swake_up);
/*
 * Does not allow usage from IRQ disabled, since we must be able to
 * release IRQs to guarantee bounded hold time.
 */
69 void swake_up_all(struct swait_queue_head *q)
71 struct swait_queue *curr;
77 raw_spin_lock_irq(&q->lock);
78 list_splice_init(&q->task_list, &tmp);
79 while (!list_empty(&tmp)) {
80 curr = list_first_entry(&tmp, typeof(*curr), task_list);
82 wake_up_state(curr->task, TASK_NORMAL);
83 list_del_init(&curr->task_list);
88 raw_spin_unlock_irq(&q->lock);
89 raw_spin_lock_irq(&q->lock);
91 raw_spin_unlock_irq(&q->lock);
93 EXPORT_SYMBOL(swake_up_all);
95 void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
98 if (list_empty(&wait->task_list))
99 list_add(&wait->task_list, &q->task_list);
102 void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
106 raw_spin_lock_irqsave(&q->lock, flags);
107 __prepare_to_swait(q, wait);
108 set_current_state(state);
109 raw_spin_unlock_irqrestore(&q->lock, flags);
111 EXPORT_SYMBOL(prepare_to_swait);
113 long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
115 if (signal_pending_state(state, current))
118 prepare_to_swait(q, wait, state);
122 EXPORT_SYMBOL(prepare_to_swait_event);
124 void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
126 __set_current_state(TASK_RUNNING);
127 if (!list_empty(&wait->task_list))
128 list_del_init(&wait->task_list);
131 void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
135 __set_current_state(TASK_RUNNING);
137 if (!list_empty_careful(&wait->task_list)) {
138 raw_spin_lock_irqsave(&q->lock, flags);
139 list_del_init(&wait->task_list);
140 raw_spin_unlock_irqrestore(&q->lock, flags);
143 EXPORT_SYMBOL(finish_swait);