#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010
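
/*
 * Example (illustrative sketch, not part of this API): a driver for a
 * hypothetical "foo" device might request a falling-edge interrupt as
 * below; without any IRQF_TRIGGER_* bit, the line keeps whatever
 * trigger was already configured:
 *
 *	err = request_irq(irq, foo_handler, IRQF_TRIGGER_FALLING,
 *			  "foo", foo_dev);
 */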
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state. See Documentation/power/suspend-and-interrupts.txt
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                     resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                     interrupt handler after suspending interrupts. For system
 *                     wakeup devices users need to implement wakeup detection in
 *                     their interrupt handlers.
 * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000
#define IRQF_NO_SOFTIRQ_CALL	0x00080000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);
/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;
extern irqreturn_t no_action(int cpl, void *dev_id);

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
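
/*
 * Example (illustrative sketch; the foo_* names are hypothetical): a
 * threaded request splits the work into a hardirq quick check and a
 * sleepable thread function. IRQF_ONESHOT keeps the line masked until
 * the thread function has finished:
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		if (!foo_irq_is_mine(dev_id))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		foo_process(dev_id);	// may sleep here
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo_dev);
 */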
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);

extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);
static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}
extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
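
/*
 * Example (illustrative sketch): the devm_* variants tie the irq's
 * lifetime to a struct device, so a driver's probe() needs no matching
 * free_irq() on the error and remove paths. foo_* names are hypothetical:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		...
 *		return devm_request_irq(&pdev->dev, irq, foo_handler, 0,
 *					dev_name(&pdev->dev), foo);
 *	}
 */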
/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * IDE based).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable_nort()
#endif
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

/* The following two functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @list:	List item for deferred callbacks
 * @notify:	Function to be called on change. This will be
 *		called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	struct list_head list;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};
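
/*
 * Example (illustrative sketch): a user embeds this struct, fills in
 * @notify and @release, and registers it with irq_set_affinity_notifier()
 * (declared below). @release must free the containing object, since the
 * core holds a kref on it. foo_* names are hypothetical:
 *
 *	static void foo_affinity_changed(struct irq_affinity_notify *notify,
 *					 const cpumask_t *mask)
 *	{
 *		struct foo *f = container_of(notify, struct foo, notify);
 *		foo_retarget_queues(f, mask);
 *	}
 *
 *	f->notify.notify = foo_affinity_changed;
 *	f->notify.release = foo_affinity_release;
 *	err = irq_set_affinity_notifier(irq, &f->notify);
 */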
#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);
/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}
/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}
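
/*
 * Example (illustrative sketch): pin an irq to a single CPU, e.g. to
 * keep a NIC queue's interrupt on the core that consumes its data:
 *
 *	err = irq_set_affinity(irq, cpumask_of(2));	// CPU 2 must be online
 */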
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

#endif /* CONFIG_SMP */
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that
 * a particular irq context is disabled and is the only
 * irq-context user of a lock, so that it's safe to take the
 * lock in the irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
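
/*
 * Example (illustrative sketch): a driver whose device can wake the
 * system typically arms the irq in its suspend callback and disarms
 * it on resume; the foo_* names are hypothetical:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */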
/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);
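
/*
 * Example (illustrative sketch): poll whether an interrupt is pending
 * at the irqchip level, and clear it, e.g. while quiescing a device:
 *
 *	bool pending;
 *
 *	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	if (!err && pending)
 *		irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
 */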
#ifdef CONFIG_IRQ_FORCED_THREADING
# ifndef CONFIG_PREEMPT_RT_BASE
extern bool force_irqthreads;
# else
#  define force_irqthreads	(true)
# endif
#else
#define force_irqthreads	(false)
#endif
#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif
/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif
/* PLEASE, avoid allocating new softirqs, if you do not _really_ need
   high frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. F.e. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
			    numbering. Sigh! */
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};
#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];
/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};
#ifndef CONFIG_PREEMPT_RT_FULL
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
static inline void thread_do_softirq(void) { do_softirq(); }
#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif
#else
extern void thread_do_softirq(void);
#endif
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
#ifdef CONFIG_PREEMPT_RT_FULL
extern void __raise_softirq_irqoff_ksoft(unsigned int nr);
#else
static inline void __raise_softirq_irqoff_ksoft(unsigned int nr)
{
	__raise_softirq_irqoff(nr);
}
#endif
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
extern void softirq_check_pending_idle(void);
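
/*
 * Example (illustrative sketch): softirqs are reserved for core kernel
 * subsystems; a handler is registered once at init and later raised,
 * typically from hardirq context (this is the pattern kernel/softirq.c
 * uses for TASKLET_SOFTIRQ). FOO_SOFTIRQ is hypothetical:
 *
 *	static void foo_softirq_action(struct softirq_action *a)
 *	{
 *		// runs in softirq context, interrupts enabled
 *	}
 *
 *	open_softirq(FOO_SOFTIRQ, foo_softirq_action);
 *	...
 *	raise_softirq(FOO_SOFTIRQ);	// mark it pending on this CPU
 */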
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}
/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU, it is rescheduled
     for later.
   * Schedule must not be called from the tasklet itself (a lockup occurs).
   * A tasklet is strictly serialized wrt itself, but not wrt other
     tasklets. If a client needs some inter-tasklet synchronization,
     it must provide it with spinlocks.
 */
struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};
#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
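
/*
 * Example (illustrative sketch): declare a tasklet statically and
 * schedule it from the hardirq handler; func then runs in softirq
 * context on the same CPU. foo_* names are hypothetical:
 *
 *	static void foo_do_work(unsigned long data)
 *	{
 *		struct foo *f = (struct foo *)data;
 *		...
 *	}
 *
 *	DECLARE_TASKLET(foo_tasklet, foo_do_work, (unsigned long)&foo);
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		foo_ack_hw(dev_id);
 *		tasklet_schedule(&foo_tasklet);
 *		return IRQ_HANDLED;
 *	}
 */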
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN,	/* Tasklet is running (SMP only) */
	TASKLET_STATE_PENDING	/* Tasklet is pending */
};
#define TASKLET_STATEF_SCHED	(1 << TASKLET_STATE_SCHED)
#define TASKLET_STATEF_RUN	(1 << TASKLET_STATE_RUN)
#define TASKLET_STATEF_PENDING	(1 << TASKLET_STATE_PENDING)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline int tasklet_tryunlock(struct tasklet_struct *t)
{
	return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

extern void tasklet_unlock_wait(struct tasklet_struct *t);

#else
#define tasklet_trylock(t)	1
#define tasklet_tryunlock(t)	1
#define tasklet_unlock_wait(t)	do { } while (0)
#define tasklet_unlock(t)	do { } while (0)
#endif
extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}
extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}
extern void tasklet_enable(struct tasklet_struct *t);
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
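
/*
 * Example (illustrative sketch): dynamic initialization plus the
 * disable/enable pairing; tasklet_disable() waits for a running
 * instance to finish, and tasklet_kill() is the teardown step.
 * foo_* names are hypothetical:
 *
 *	tasklet_init(&f->tasklet, foo_do_work, (unsigned long)f);
 *	...
 *	tasklet_disable(&f->tasklet);	// block execution, wait if running
 *	foo_reconfigure(f);
 *	tasklet_enable(&f->tasklet);
 *	...
 *	tasklet_kill(&f->tasklet);	// on device teardown
 */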
struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};
extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			   const enum hrtimer_mode mode)
{
	hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
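
/*
 * Example (illustrative sketch): run a handler from a tasklet a fixed
 * delay after starting the hrtimer; @function runs in tasklet context
 * rather than in hard interrupt context. foo_* names are hypothetical:
 *
 *	static enum hrtimer_restart foo_timeout(struct hrtimer *t)
 *	{
 *		foo_handle_timeout();
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&f->ttimer, foo_timeout,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&f->ttimer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */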
#ifdef CONFIG_PREEMPT_RT_FULL
extern void softirq_early_init(void);
#else
static inline void softirq_early_init(void) { }
#endif
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
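
/*
 * Example (illustrative sketch of the recipe above; foo_* names are
 * hypothetical):
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	foo_mask_device_irq(foo);		// step 1
 *	mask = probe_irq_on();			// step 3
 *	foo_trigger_interrupt(foo);		// step 4
 *	mdelay(10);				// step 5
 *	irq = probe_irq_off(mask);		// step 6
 *	foo_ack_device_irq(foo);		// step 7
 *	if (irq <= 0)
 *		foo_probe_failed(irq);		// none (0) or multiple (<0)
 */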
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif
struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

#endif /* _LINUX_INTERRUPT_H */