/*
 * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
 *
 * Provides a framework for enqueuing callbacks from irq context
 * PREEMPT_RT_FULL safe. The callbacks are executed in kthread context.
 */
8 #include <linux/swait.h>
9 #include <linux/swork.h>
10 #include <linux/kthread.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/export.h>
15 #define SWORK_EVENT_PENDING (1 << 0)
17 static DEFINE_MUTEX(worker_mutex);
18 static struct sworker *glob_worker;
21 struct list_head events;
22 struct swait_queue_head wq;
26 struct task_struct *task;
30 static bool swork_readable(struct sworker *worker)
34 if (kthread_should_stop())
37 raw_spin_lock_irq(&worker->lock);
38 r = !list_empty(&worker->events);
39 raw_spin_unlock_irq(&worker->lock);
44 static int swork_kthread(void *arg)
46 struct sworker *worker = arg;
49 swait_event_interruptible(worker->wq,
50 swork_readable(worker));
51 if (kthread_should_stop())
54 raw_spin_lock_irq(&worker->lock);
55 while (!list_empty(&worker->events)) {
56 struct swork_event *sev;
58 sev = list_first_entry(&worker->events,
59 struct swork_event, item);
61 raw_spin_unlock_irq(&worker->lock);
63 WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
66 raw_spin_lock_irq(&worker->lock);
68 raw_spin_unlock_irq(&worker->lock);
73 static struct sworker *swork_create(void)
75 struct sworker *worker;
77 worker = kzalloc(sizeof(*worker), GFP_KERNEL);
79 return ERR_PTR(-ENOMEM);
81 INIT_LIST_HEAD(&worker->events);
82 raw_spin_lock_init(&worker->lock);
83 init_swait_queue_head(&worker->wq);
85 worker->task = kthread_run(swork_kthread, worker, "kswork");
86 if (IS_ERR(worker->task)) {
88 return ERR_PTR(-ENOMEM);
94 static void swork_destroy(struct sworker *worker)
96 kthread_stop(worker->task);
98 WARN_ON(!list_empty(&worker->events));
103 * swork_queue - queue swork
105 * Returns %false if @work was already on a queue, %true otherwise.
107 * The work is queued and processed on a random CPU
109 bool swork_queue(struct swork_event *sev)
113 if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
116 raw_spin_lock_irqsave(&glob_worker->lock, flags);
117 list_add_tail(&sev->item, &glob_worker->events);
118 raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
120 swake_up(&glob_worker->wq);
123 EXPORT_SYMBOL_GPL(swork_queue);
126 * swork_get - get an instance of the sworker
128 * Returns an negative error code if the initialization if the worker did not
129 * work, %0 otherwise.
134 struct sworker *worker;
136 mutex_lock(&worker_mutex);
138 worker = swork_create();
139 if (IS_ERR(worker)) {
140 mutex_unlock(&worker_mutex);
144 glob_worker = worker;
148 mutex_unlock(&worker_mutex);
152 EXPORT_SYMBOL_GPL(swork_get);
155 * swork_put - puts an instance of the sworker
157 * Will destroy the sworker thread. This function must not be called until all
158 * queued events have been completed.
162 mutex_lock(&worker_mutex);
165 if (glob_worker->refs > 0)
168 swork_destroy(glob_worker);
171 mutex_unlock(&worker_mutex);
173 EXPORT_SYMBOL_GPL(swork_put);