kernel/kernel/sched/work-simple.c
/*
 * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner <daniel.wagner@bmw-carit.de>
 *
 * Provides a PREEMPT_RT_FULL-safe framework for enqueuing callbacks from
 * irq context. The callbacks are executed in kthread context.
 */
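
/*
 * Usage sketch (hypothetical client code, not part of this file; assumes
 * INIT_SWORK() from <linux/work-simple.h>):
 *
 *        static void my_cb(struct swork_event *sev)
 *        {
 *                // runs in kthread context and may therefore sleep
 *        }
 *
 *        // process context:  swork_get(); INIT_SWORK(&my_event, my_cb);
 *        // irq context:      swork_queue(&my_event);
 *        // teardown:         swork_put();
 */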

#include <linux/wait-simple.h>
#include <linux/work-simple.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/export.h>

/* Set in swork_event::flags while the event is queued but not yet run */
#define SWORK_EVENT_PENDING     (1 << 0)

/* Serializes creation and destruction of the single global worker */
static DEFINE_MUTEX(worker_mutex);
static struct sworker *glob_worker;

struct sworker {
        struct list_head events;
        struct swait_head wq;

        raw_spinlock_t lock;

        struct task_struct *task;
        int refs;
};

/* Wait condition for the worker: there is work pending or we should stop */
static bool swork_readable(struct sworker *worker)
{
        bool r;

        if (kthread_should_stop())
                return true;

        raw_spin_lock_irq(&worker->lock);
        r = !list_empty(&worker->events);
        raw_spin_unlock_irq(&worker->lock);

        return r;
}

static int swork_kthread(void *arg)
{
        struct sworker *worker = arg;

        for (;;) {
                swait_event_interruptible(worker->wq,
                                        swork_readable(worker));
                if (kthread_should_stop())
                        break;

                raw_spin_lock_irq(&worker->lock);
                while (!list_empty(&worker->events)) {
                        struct swork_event *sev;

                        sev = list_first_entry(&worker->events,
                                        struct swork_event, item);
                        list_del(&sev->item);
                        /* Drop the lock while the callback runs; it may sleep */
                        raw_spin_unlock_irq(&worker->lock);

                        WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
                                                         &sev->flags));
                        sev->func(sev);
                        raw_spin_lock_irq(&worker->lock);
                }
                raw_spin_unlock_irq(&worker->lock);
        }
        return 0;
}

static struct sworker *swork_create(void)
{
        struct sworker *worker;

        worker = kzalloc(sizeof(*worker), GFP_KERNEL);
        if (!worker)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&worker->events);
        raw_spin_lock_init(&worker->lock);
        init_swait_head(&worker->wq);

        worker->task = kthread_run(swork_kthread, worker, "kswork");
        if (IS_ERR(worker->task)) {
                kfree(worker);
                return ERR_PTR(-ENOMEM);
        }

        return worker;
}

static void swork_destroy(struct sworker *worker)
{
        kthread_stop(worker->task);

        WARN_ON(!list_empty(&worker->events));
        kfree(worker);
}

/**
 * swork_queue - queue swork
 * @sev: the event to queue
 *
 * Returns %false if @sev was already on a queue, %true otherwise.
 *
 * The work is queued and processed on a random CPU.
 */
bool swork_queue(struct swork_event *sev)
{
        unsigned long flags;

        if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
                return false;

        raw_spin_lock_irqsave(&glob_worker->lock, flags);
        list_add_tail(&sev->item, &glob_worker->events);
        raw_spin_unlock_irqrestore(&glob_worker->lock, flags);

        swait_wake(&glob_worker->wq);
        return true;
}
EXPORT_SYMBOL_GPL(swork_queue);
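
/*
 * Queueing sketch (hypothetical caller; ctx and my_cb are illustrative
 * names only). Because of the PENDING bit, queueing an event that is
 * already queued is a no-op, reported by a %false return:
 *
 *        // hard irq context; ctx->sev was set up earlier with
 *        // INIT_SWORK(&ctx->sev, my_cb) after a successful swork_get()
 *        if (!swork_queue(&ctx->sev))
 *                pr_debug("event already pending, callback runs once\n");
 */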

/**
 * swork_get - get an instance of the sworker
 *
 * Returns a negative error code if initialization of the worker failed,
 * %0 otherwise.
 */
int swork_get(void)
{
        struct sworker *worker;

        mutex_lock(&worker_mutex);
        if (!glob_worker) {
                worker = swork_create();
                if (IS_ERR(worker)) {
                        mutex_unlock(&worker_mutex);
                        return -ENOMEM;
                }

                glob_worker = worker;
        }

        glob_worker->refs++;
        mutex_unlock(&worker_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(swork_get);

/**
 * swork_put - puts an instance of the sworker
 *
 * Destroys the sworker thread once the last reference is dropped. This
 * function must not be called until all queued events have been completed.
 */
void swork_put(void)
{
        mutex_lock(&worker_mutex);

        glob_worker->refs--;
        if (glob_worker->refs > 0)
                goto out;

        swork_destroy(glob_worker);
        glob_worker = NULL;
out:
        mutex_unlock(&worker_mutex);
}
EXPORT_SYMBOL_GPL(swork_put);
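
/*
 * Lifetime sketch (hypothetical module; names are illustrative only).
 * swork_get()/swork_put() are refcounted: the first get creates the
 * "kswork" thread, and only the last put destroys it.
 *
 *        static int __init my_init(void)
 *        {
 *                int err = swork_get();  // creates "kswork" on first use
 *
 *                if (err)
 *                        return err;
 *                return 0;
 *        }
 *
 *        static void __exit my_exit(void)
 *        {
 *                // only after all queued events have completed:
 *                swork_put();
 *        }
 */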