/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/processor.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
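
/*
 * Two per-CPU queues: raised_list is run from hardirq context (via the
 * arch_irq_work_raise() IPI or the tick), while lazy_list is deferred to
 * the next tick, or punted to TIMER_SOFTIRQ on PREEMPT_RT_FULL; see
 * irq_work_queue(), irq_work_run() and irq_work_tick() below.
 */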

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}
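
/*
 * Flag lifecycle, as inferred from the claim/run paths in this file: a
 * free work (flags == 0) is claimed by setting IRQ_WORK_FLAGS
 * (PENDING | BUSY) with cmpxchg(); irq_work_run_list() drops PENDING
 * just before invoking the callback, so a work can be re-claimed from
 * within its own handler, and drops BUSY afterwards unless someone
 * re-claimed it in the meantime.
 */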

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
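
/*
 * Note: the weak default above is an intentional no-op. Architectures
 * with a dedicated irq_work interrupt override it; everyone else picks
 * pending work up from the timer tick via irq_work_tick() below.
 */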

#ifdef CONFIG_SMP
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
	struct llist_head *list;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Arch remote IPI send/receive backends aren't NMI safe */
	WARN_ON_ONCE(in_nmi());

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
		list = &per_cpu(lazy_list, cpu);
	else
		list = &per_cpu(raised_list, cpu);

	if (llist_add(&work->llnode, list))
		arch_send_call_function_single_ipi(cpu);

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue_on);
#endif /* CONFIG_SMP */
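
/*
 * Observation on the remote path above: unlike irq_work_queue(), it
 * always kicks the target CPU with the call-function IPI when the list
 * goes from empty to non-empty, even for lazy items; there is no
 * tick_nohz_tick_stopped() shortcut for a remote CPU here.
 */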

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	struct llist_head *list;
	bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	lazy_work = work->flags & IRQ_WORK_LAZY;

	if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
		list = this_cpu_ptr(&lazy_list);
	else
		list = this_cpu_ptr(&raised_list);

	if (llist_add(&work->llnode, list)) {
		if (!lazy_work || tick_nohz_tick_stopped())
			arch_irq_work_raise();
	}

	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
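
/*
 * Minimal usage sketch (illustrative only, not part of this file); the
 * names my_work / my_irq_work_func and the message are made up:
 *
 *	static void my_irq_work_func(struct irq_work *work)
 *	{
 *		pr_info("ran from irq_work context\n");
 *	}
 *
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_irq_work_func);
 *	... then, from NMI or any context where sleeping is forbidden:
 *	irq_work_queue(&my_work);
 */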

bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) && llist_empty(lazy))
		return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}
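
/*
 * irq_work_needs_cpu() is what the nohz tick-stop path consults, so a
 * CPU with pending irq_work keeps its tick; without this check, lazy
 * work could be deferred indefinitely on a tickless CPU.
 */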

static void irq_work_run_list(struct llist_head *list)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_node *llnode;

	BUG_ON_NONRT(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}
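
/*
 * Note the atomics above: xchg() rather than a plain store publishes
 * the cleared PENDING bit with full ordering before work->func() runs,
 * and the final cmpxchg() only drops BUSY if the flags are unchanged,
 * i.e. nobody re-claimed the work from inside its own callback.
 */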

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
		/*
		 * NOTE: we raise softirq via IPI for safety,
		 * and execute in irq_work_tick() to move the
		 * overhead from hard to soft irq context.
		 */
		if (!llist_empty(this_cpu_ptr(&lazy_list)))
			raise_softirq(TIMER_SOFTIRQ);
	} else
		irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);
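
/*
 * On PREEMPT_RT_FULL only the raised (IRQ_WORK_HARD_IRQ) list is run
 * here in hardirq context; everything else is punted to TIMER_SOFTIRQ
 * and handled via irq_work_tick(), keeping hardirq time bounded.
 */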

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
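
/*
 * Called from the timer tick: on architectures where
 * arch_irq_work_has_interrupt() is false the raised list has no IPI to
 * run it, so it is drained here instead; lazy work always runs here.
 */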

/*
 * Synchronize against the irq_work @work, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);