/*
 * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
 *
 * Provides a framework for enqueuing callbacks from irq context that is
 * PREEMPT_RT_FULL safe. The callbacks are executed in kthread context.
 */
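/*
 * Minimal usage sketch (illustrative only; identifiers prefixed with "my_"
 * are hypothetical caller code). INIT_SWORK() and struct swork_event are
 * assumed to be provided by <linux/work-simple.h>:
 *
 *	static struct swork_event my_event;
 *
 *	static void my_callback(struct swork_event *sev)
 *	{
 *		// runs in the kswork kthread, may sleep
 *	}
 *
 *	// setup (process context):
 *	swork_get();
 *	INIT_SWORK(&my_event, my_callback);
 *
 *	// hard irq context:
 *	swork_queue(&my_event);
 *
 *	// teardown, after all queued events have completed:
 *	swork_put();
 */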
#include <linux/wait-simple.h>
#include <linux/work-simple.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#define SWORK_EVENT_PENDING	(1 << 0)

static DEFINE_MUTEX(worker_mutex);
static struct sworker *glob_worker;

struct sworker {
	struct list_head events;
	struct swait_head wq;
	raw_spinlock_t lock;
	struct task_struct *task;
	int refs;
};
static bool swork_readable(struct sworker *worker)
{
	bool r;

	if (kthread_should_stop())
		return true;
	raw_spin_lock_irq(&worker->lock);
	r = !list_empty(&worker->events);
	raw_spin_unlock_irq(&worker->lock);
	return r;
}
static int swork_kthread(void *arg)
{
	struct sworker *worker = arg;

	for (;;) {
		swait_event_interruptible(worker->wq,
					  swork_readable(worker));
		if (kthread_should_stop())
			break;

		raw_spin_lock_irq(&worker->lock);
		while (!list_empty(&worker->events)) {
			struct swork_event *sev;

			sev = list_first_entry(&worker->events,
					       struct swork_event, item);
			list_del(&sev->item);
			/* Run the callback without holding the lock. */
			raw_spin_unlock_irq(&worker->lock);
			WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
							 &sev->flags));
			sev->func(sev);
			raw_spin_lock_irq(&worker->lock);
		}
		raw_spin_unlock_irq(&worker->lock);
	}
	return 0;
}
static struct sworker *swork_create(void)
{
	struct sworker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&worker->events);
	raw_spin_lock_init(&worker->lock);
	init_swait_head(&worker->wq);

	worker->task = kthread_run(swork_kthread, worker, "kswork");
	if (IS_ERR(worker->task)) {
		kfree(worker);
		return ERR_PTR(-ENOMEM);
	}

	return worker;
}
static void swork_destroy(struct sworker *worker)
{
	kthread_stop(worker->task);
	WARN_ON(!list_empty(&worker->events));
	kfree(worker);
}
/**
 * swork_queue - queue swork
 * @sev: the swork event to queue
 *
 * Returns %false if @sev was already on a queue, %true otherwise.
 *
 * The work is queued and processed on an arbitrary CPU.
 */
bool swork_queue(struct swork_event *sev)
{
	unsigned long flags;

	if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
		return false;

	raw_spin_lock_irqsave(&glob_worker->lock, flags);
	list_add_tail(&sev->item, &glob_worker->events);
	raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
	swait_wake(&glob_worker->wq);
	return true;
}
EXPORT_SYMBOL_GPL(swork_queue);
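/*
 * Re-queueing sketch (illustrative): SWORK_EVENT_PENDING is cleared in
 * swork_kthread() before the callback runs, so queueing the same event
 * again while it is still pending returns %false, whereas a callback may
 * re-arm its own event.  "my_callback"/"more_work_left" are hypothetical.
 *
 *	static void my_callback(struct swork_event *sev)
 *	{
 *		if (more_work_left())	// hypothetical condition
 *			swork_queue(sev);
 *	}
 */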
/**
 * swork_get - get an instance of the sworker
 *
 * Returns a negative error code if the initialization of the worker failed,
 * %0 otherwise.
 */
int swork_get(void)
{
	struct sworker *worker;

	mutex_lock(&worker_mutex);
	if (!glob_worker) {
		worker = swork_create();
		if (IS_ERR(worker)) {
			mutex_unlock(&worker_mutex);
			return -ENOMEM;
		}
		glob_worker = worker;
	}
	glob_worker->refs++;
	mutex_unlock(&worker_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(swork_get);
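/*
 * Reference counting sketch (illustrative): the kswork thread is created on
 * the first swork_get() and destroyed by the matching last swork_put(), so
 * independent users simply pair the two calls.  "my_driver_*" is
 * hypothetical caller code.
 *
 *	static int my_driver_init(void)
 *	{
 *		return swork_get();	// may return -ENOMEM
 *	}
 *
 *	static void my_driver_exit(void)
 *	{
 *		swork_put();		// drops this user's reference
 *	}
 */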
/**
 * swork_put - puts an instance of the sworker
 *
 * Will destroy the sworker thread. This function must not be called until all
 * queued events have been completed.
 */
void swork_put(void)
{
	mutex_lock(&worker_mutex);
	/* Somebody still holds a reference to the worker */
	if (--glob_worker->refs > 0)
		goto out;
	swork_destroy(glob_worker);
	glob_worker = NULL;
out:
	mutex_unlock(&worker_mutex);
}
EXPORT_SYMBOL_GPL(swork_put);
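/*
 * Teardown ordering sketch (illustrative): this file provides no flush
 * primitive, so callers must stop queueing and wait for their own callbacks
 * to finish before dropping the last reference.  "my_*" names are
 * hypothetical caller state.
 *
 *	static void my_driver_shutdown(void)
 *	{
 *		disable_irq(my_irq);		// stop new swork_queue() calls
 *		wait_for_completion(&my_done);	// signalled by my_callback()
 *		swork_put();
 *	}
 */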