/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
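/*
 * Illustration (not from the original source): with _NSIG_WORDS == 1, a
 * signal that is pending but also blocked never makes the word "ready".
 * E.g. for a pending-and-blocked SIGTERM (bit 14, since sigmask(sig) is
 * 1UL << (sig - 1)):
 *
 *	signal->sig[0]  = 1UL << (SIGTERM - 1);
 *	blocked->sig[0] = 1UL << (SIGTERM - 1);
 *	ready = signal->sig[0] & ~blocked->sig[0];	== 0
 *
 * so the recalc_sigpending*() helpers below will not flag it.
 */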
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
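/*
 * Example: if both SIGSEGV and SIGUSR1 are pending and unblocked, the
 * first-word handling above narrows the candidates to SYNCHRONOUS_MASK,
 * so the trap-generated SIGSEGV is dequeued before the asynchronous
 * SIGUSR1.
 */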
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
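/*
 * Typical call pattern (mirroring do_signal_stop() below): the stop
 * signal number travels in the low JOBCTL_STOP_SIGMASK bits together
 * with the state bits, e.g.
 *
 *	task_set_jobctl_pending(t, signr | JOBCTL_STOP_PENDING |
 *				   JOBCTL_STOP_CONSUME);
 */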
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}
#ifdef __HAVE_ARCH_CMPXCHG
static inline struct sigqueue *get_task_cache(struct task_struct *t)
{
	struct sigqueue *q = t->sigqueue_cache;

	if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
		return NULL;
	return q;
}

static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
{
	if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
		return 0;
	return 1;
}
#else
static inline struct sigqueue *get_task_cache(struct task_struct *t)
{
	return NULL;
}

static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
{
	return 1;
}
#endif
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
		    int override_rlimit, int fromslab)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		if (!fromslab)
			q = get_task_cache(t);
		if (!q)
			q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
		 int override_rlimit)
{
	return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

static void sigqueue_free_current(struct sigqueue *q)
{
	struct user_struct *up;

	if (q->flags & SIGQUEUE_PREALLOC)
		return;

	up = q->user;
	if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
		atomic_dec(&up->sigpending);
		free_uid(up);
	} else
		__sigqueue_free(q);
}
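/*
 * Note: the sigqueue_cache handling above is an RT-specific fast path.
 * When a realtime task (rt_prio() above) releases a sigqueue, one entry
 * is parked in t->sigqueue_cache via put_task_cache(), so the next
 * signal queued to it avoids a round trip through the slab allocator.
 */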
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Called from __exit_signal. Flush tsk->pending and
 * tsk->sigqueue_cache
 */
void flush_task_sigqueue(struct task_struct *tsk)
{
	struct sigqueue *q;

	flush_sigqueue(&tsk->pending);

	q = get_task_cache(tsk);
	if (q)
		kmem_cache_free(sigqueue_cachep, q);
}

/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		sigqueue_free_current(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	WARN_ON_ONCE(tsk != current);

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
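/*
 * Caller sketch: get_signal() below calls this as
 *
 *	signr = dequeue_signal(current, &current->blocked, &ksig->info);
 *
 * with the siglock held.  The dequeued siginfo is copied into the
 * caller-supplied buffer, so the caller owns no sigqueue afterwards.
 */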
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (signal->flags & SIGNAL_GROUP_COREDUMP)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
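/*
 * Example of the flushing above: a stop signal and SIGCONT cancel each
 * other at generation time.  kill(pid, SIGSTOP) followed by
 * kill(pid, SIGCONT) before the target runs leaves no stop pending, and
 * a later stop signal discards any queued SIGCONT.
 */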
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
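/*
 * Example: legacy (< SIGRTMIN) signals coalesce.  Two kill(pid, SIGUSR1)
 * calls before the target runs leave a single pending SIGUSR1, because
 * __send_signal() below bails out once legacy_queue() reports the bit
 * already set.
 */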
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
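/*
 * Note on the two failure modes above: an rt signal sent with a
 * user-supplied si_code < 0 (e.g. SI_QUEUE via sigqueue()) fails with
 * -EAGAIN when the sigqueue allocation is refused, while a legacy signal
 * falls back to the "lose info" path - the pending bit is still set, so
 * kill() itself never fails with EAGAIN, as the comment above requires.
 */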
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk(KERN_INFO "code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk(KERN_CONT "%02x ", insn);
		}
	}
	printk(KERN_CONT "\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
/*
 * On some archs, PREEMPT_RT has to delay sending a signal from a trap
 * since it can not enable preemption, and the signal code's spin_locks
 * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
 * send the signal on exit of the trap.
 */
#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
	if (in_atomic()) {
		if (WARN_ON_ONCE(t != current))
			return 0;
		if (WARN_ON_ONCE(t->forced_info.si_signo))
			return 0;

		if (is_si_special(info)) {
			WARN_ON_ONCE(info != SEND_SIG_PRIV);
			t->forced_info.si_signo = sig;
			t->forced_info.si_errno = 0;
			t->forced_info.si_code = SI_KERNEL;
			t->forced_info.si_pid = 0;
			t->forced_info.si_uid = 0;
		} else {
			t->forced_info = *info;
		}

		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
		return 0;
	}
#endif
	return do_force_sig_info(sig, info, t);
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save_nort(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore_nort(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore_nort(*flags);
	}

	return sighand;
}
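/*
 * Usage sketch: most callers use the lock_task_sighand() /
 * unlock_task_sighand() wrappers around this helper, as in
 * do_send_sig_info() above:
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		ret = send_signal(sig, info, p, group);
 *		unlock_task_sighand(p, &flags);
 *	}
 *
 * which keeps ->sighand stable for the duration of the send.
 */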
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
		return 0;
	return 1;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
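/*
 * Summary of the pid encoding handled above, matching kill(2):
 * pid > 0 signals that process; pid == 0 signals the caller's process
 * group; pid == -1 signals every process the caller may signal except
 * init and the caller's own thread group; pid < -1 signals the process
 * group -pid.
 */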
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	/* Preallocated sigqueue objects always come from the slabcache! */
	struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
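/*
 * Lifecycle sketch for the preallocated path (cf. the comment above
 * sigqueue_alloc()), as used by POSIX timers:
 *
 *	q = sigqueue_alloc();		at timer_create()
 *	send_sigqueue(q, tsk, group);	at each timer expiry
 *	sigqueue_free(q);		at timer_delete()
 *
 * so expiry-time delivery never fails for lack of memory.
 */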
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		read_unlock(&tasklist_lock);
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
2234 static int ptrace_signal(int signr, siginfo_t *info)
2236 ptrace_signal_deliver();
2238 * We do not check sig_kernel_stop(signr) but set this marker
2239 * unconditionally because we do not know whether debugger will
2240 * change signr. This flag has no meaning unless we are going
2241 * to stop after return from ptrace_stop(). In this case it will
2242 * be checked in do_signal_stop(), we should only stop if it was
2243 * not cleared by SIGCONT while we were sleeping. See also the
2244 * comment in dequeue_signal().
2246 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2247 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2249 /* We're back. Did the debugger cancel the sig? */
2250 signr = current->exit_code;
2254 current->exit_code = 0;
2257 * Update the siginfo structure if the signal has
2258 * changed. If the debugger wanted something
2259 * specific in the siginfo structure then it should
2260 * have updated *info via PTRACE_SETSIGINFO.
2262 if (signr != info->si_signo) {
2263 info->si_signo = signr;
2265 info->si_code = SI_USER;
2267 info->si_pid = task_pid_vnr(current->parent);
2268 info->si_uid = from_kuid_munged(current_user_ns(),
2269 task_uid(current->parent));
2273 /* If the (new) signal is now blocked, requeue it. */
2274 if (sigismember(¤t->blocked, signr)) {
2275 specific_send_sig_info(signr, info, current);
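/*
 * Illustrative sketch (userspace, not kernel code): the tracer-side view of
 * ptrace_signal() above. The data argument of PTRACE_CONT becomes the
 * tracee's exit_code: 0 cancels the signal, any other value is delivered
 * (possibly replacing the original). "pid" is assumed to be an attached
 * tracee; error handling elided.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	waitpid(pid, &status, 0);        (tracee enters signal-delivery-stop)
 *	int sig = WSTOPSIG(status);      (the signal about to be delivered)
 *	ptrace(PTRACE_CONT, pid, 0, 0);  (0 suppresses; pass sig to deliver)
 */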
2282 int get_signal(struct ksignal *ksig)
2284 struct sighand_struct *sighand = current->sighand;
2285 struct signal_struct *signal = current->signal;
2288 if (unlikely(current->task_works))
2291 if (unlikely(uprobe_deny_signal()))
2295 * Do this once; we can't return to user-mode if freezing() == T.
2296 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2297 * thus do not need another check after return.
2302 spin_lock_irq(&sighand->siglock);
2304 * Every stopped thread goes here after wakeup. Check to see if
2305 * we should notify the parent, prepare_signal(SIGCONT) encodes
2306 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2308 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2311 if (signal->flags & SIGNAL_CLD_CONTINUED)
2312 why = CLD_CONTINUED;
2316 signal->flags &= ~SIGNAL_CLD_MASK;
2318 spin_unlock_irq(&sighand->siglock);
2321 * Notify the parent that we're continuing. This event is
2322 * always per-process and doesn't make a whole lot of sense
2323 * for ptracers, who shouldn't consume the state via
2324 * wait(2) either, but, for backward compatibility, notify
2325 * the ptracer of the group leader too unless it's gonna be a duplicate of some previous one.
2328 read_lock(&tasklist_lock);
2329 do_notify_parent_cldstop(current, false, why);
2331 if (ptrace_reparented(current->group_leader))
2332 do_notify_parent_cldstop(current->group_leader,
2334 read_unlock(&tasklist_lock);
2340 struct k_sigaction *ka;
2342 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2346 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2348 spin_unlock_irq(&sighand->siglock);
2352 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2355 break; /* will return 0 */
2357 if (unlikely(current->ptrace) && signr != SIGKILL) {
2358 signr = ptrace_signal(signr, &ksig->info);
2363 ka = &sighand->action[signr-1];
2365 /* Trace actually delivered signals. */
2366 trace_signal_deliver(signr, &ksig->info, ka);
2368 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2370 if (ka->sa.sa_handler != SIG_DFL) {
2371 /* Run the handler. */
2374 if (ka->sa.sa_flags & SA_ONESHOT)
2375 ka->sa.sa_handler = SIG_DFL;
2377 break; /* will return non-zero "signr" value */
2381 * Now we are doing the default action for this signal.
2383 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2387 * Global init gets no signals it doesn't want.
2388 * Container-init gets no signals it doesn't want from the same container.
2391 * Note that if global/container-init sees a sig_kernel_only()
2392 * signal here, the signal must have been generated internally
2393 * or must have come from an ancestor namespace. In either
2394 * case, the signal cannot be dropped.
2396 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2397 !sig_kernel_only(signr))
2400 if (sig_kernel_stop(signr)) {
2402 * The default action is to stop all threads in
2403 * the thread group. The job control signals
2404 * do nothing in an orphaned pgrp, but SIGSTOP
2405 * always works. Note that siglock needs to be
2406 * dropped during the call to is_orphaned_pgrp()
2407 * because of lock ordering with tasklist_lock.
2408 * This allows an intervening SIGCONT to be posted.
2409 * We need to check for that and bail out if necessary.
2411 if (signr != SIGSTOP) {
2412 spin_unlock_irq(&sighand->siglock);
2414 /* signals can be posted during this window */
2416 if (is_current_pgrp_orphaned())
2419 spin_lock_irq(&sighand->siglock);
2422 if (likely(do_signal_stop(ksig->info.si_signo))) {
2423 /* It released the siglock. */
2428 * We didn't actually stop, due to a race
2429 * with SIGCONT or something like that.
2434 spin_unlock_irq(&sighand->siglock);
2437 * Anything else is fatal, maybe with a core dump.
2439 current->flags |= PF_SIGNALED;
2441 if (sig_kernel_coredump(signr)) {
2442 if (print_fatal_signals)
2443 print_fatal_signal(ksig->info.si_signo);
2444 proc_coredump_connector(current);
2446 * If it was able to dump core, this kills all
2447 * other threads in the group and synchronizes with
2448 * their demise. If we lost the race with another
2449 * thread getting here, it set group_exit_code
2450 * first and our do_group_exit call below will use
2451 * that value and ignore the one we pass it.
2453 do_coredump(&ksig->info);
2457 * Death signals, no core dump.
2459 do_group_exit(ksig->info.si_signo);
2462 spin_unlock_irq(&sighand->siglock);
2465 return ksig->sig > 0;
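/*
 * Illustrative sketch (userspace, not kernel code): the SA_ONESHOT reset
 * performed in get_signal() above is what userspace requests with
 * SA_RESETHAND -- after one delivery the disposition reverts to SIG_DFL.
 * "on_int" is a hypothetical handler; error handling elided.
 *
 *	#include <signal.h>
 *
 *	struct sigaction sa = { .sa_handler = on_int,
 *				.sa_flags = SA_RESETHAND };
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGINT, &sa, NULL);
 *	(first SIGINT runs on_int; the next one takes the default action)
 */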
2469 * signal_delivered - record that a signal was successfully delivered
2470 * @ksig: kernel signal struct
2471 * @stepping: nonzero if debugger single-step or block-step in use
2473 * This function should be called when a signal has successfully been
2474 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2475 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2476 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2478 static void signal_delivered(struct ksignal *ksig, int stepping)
2482 /* A signal was successfully delivered, and the
2483 saved sigmask was stored on the signal frame,
2484 and will be restored by sigreturn. So we can
2485 simply clear the restore sigmask flag. */
2486 clear_restore_sigmask();
2488 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2489 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2490 sigaddset(&blocked, ksig->sig);
2491 set_current_blocked(&blocked);
2492 tracehook_signal_handler(stepping);
2495 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2498 force_sigsegv(ksig->sig, current);
2500 signal_delivered(ksig, stepping);
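/*
 * Illustrative sketch (userspace, not kernel code): the mask computed in
 * signal_delivered() means that, without SA_NODEFER, the delivered signal
 * plus everything in sa_mask is blocked while the handler runs. "on_usr1"
 * is a hypothetical handler.
 *
 *	#include <signal.h>
 *
 *	struct sigaction sa = { .sa_handler = on_usr1 };
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGUSR2);
 *	sigaction(SIGUSR1, &sa, NULL);
 *	(inside on_usr1, both SIGUSR1 and SIGUSR2 are blocked)
 */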
2504 * It could be that complete_signal() picked us to notify about the
2505 * group-wide signal. Other threads should be notified now to take
2506 * the shared signals in @which since we will not.
2508 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2511 struct task_struct *t;
2513 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2514 if (sigisemptyset(&retarget))
2518 while_each_thread(tsk, t) {
2519 if (t->flags & PF_EXITING)
2522 if (!has_pending_signals(&retarget, &t->blocked))
2524 /* Remove the signals this thread can handle. */
2525 sigandsets(&retarget, &retarget, &t->blocked);
2527 if (!signal_pending(t))
2528 signal_wake_up(t, 0);
2530 if (sigisemptyset(&retarget))
2535 void exit_signals(struct task_struct *tsk)
2541 * @tsk is about to have PF_EXITING set - lock out users which
2542 * expect stable threadgroup.
2544 threadgroup_change_begin(tsk);
2546 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2547 tsk->flags |= PF_EXITING;
2548 threadgroup_change_end(tsk);
2552 spin_lock_irq(&tsk->sighand->siglock);
2554 * From now on this task is not visible to group-wide signals,
2555 * see wants_signal(), do_signal_stop().
2557 tsk->flags |= PF_EXITING;
2559 threadgroup_change_end(tsk);
2561 if (!signal_pending(tsk))
2564 unblocked = tsk->blocked;
2565 signotset(&unblocked);
2566 retarget_shared_pending(tsk, &unblocked);
2568 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2569 task_participate_group_stop(tsk))
2570 group_stop = CLD_STOPPED;
2572 spin_unlock_irq(&tsk->sighand->siglock);
2575 * If group stop has completed, deliver the notification. This
2576 * should always go to the real parent of the group leader.
2578 if (unlikely(group_stop)) {
2579 read_lock(&tasklist_lock);
2580 do_notify_parent_cldstop(tsk, false, group_stop);
2581 read_unlock(&tasklist_lock);
2585 EXPORT_SYMBOL(recalc_sigpending);
2586 EXPORT_SYMBOL_GPL(dequeue_signal);
2587 EXPORT_SYMBOL(flush_signals);
2588 EXPORT_SYMBOL(force_sig);
2589 EXPORT_SYMBOL(send_sig);
2590 EXPORT_SYMBOL(send_sig_info);
2591 EXPORT_SYMBOL(sigprocmask);
2592 EXPORT_SYMBOL(block_all_signals);
2593 EXPORT_SYMBOL(unblock_all_signals);
2597 * System call entry points.
2601 * sys_restart_syscall - restart a system call
2603 SYSCALL_DEFINE0(restart_syscall)
2605 struct restart_block *restart = &current->restart_block;
2606 return restart->fn(restart);
2609 long do_no_restart_syscall(struct restart_block *param)
2614 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2616 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2617 sigset_t newblocked;
2618 /* A set of now blocked but previously unblocked signals. */
2619 sigandnsets(&newblocked, newset, &current->blocked);
2620 retarget_shared_pending(tsk, &newblocked);
2622 tsk->blocked = *newset;
2623 recalc_sigpending();
2627 * set_current_blocked - change current->blocked mask
2630 * It is wrong to change ->blocked directly; this helper should be used
2631 * to ensure the process can't miss a shared signal we are going to block.
2633 void set_current_blocked(sigset_t *newset)
2635 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2636 __set_current_blocked(newset);
2639 void __set_current_blocked(const sigset_t *newset)
2641 struct task_struct *tsk = current;
2643 spin_lock_irq(&tsk->sighand->siglock);
2644 __set_task_blocked(tsk, newset);
2645 spin_unlock_irq(&tsk->sighand->siglock);
2649 * This is also useful for kernel threads that want to temporarily
2650 * (or permanently) block certain signals.
2652 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2653 * interface happily blocks "unblockable" signals like SIGKILL and friends.
2656 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2658 struct task_struct *tsk = current;
2661 /* Lockless, only current can change ->blocked, never from irq */
2663 *oldset = tsk->blocked;
2667 sigorsets(&newset, &tsk->blocked, set);
2670 sigandnsets(&newset, &tsk->blocked, set);
2679 __set_current_blocked(&newset);
2684 * sys_rt_sigprocmask - change the list of currently blocked signals
2685 * @how: whether to add, remove, or set signals
2686 * @nset: new set of blocked signals, or NULL to leave the mask unchanged
2687 * @oset: previous value of signal mask if non-null
2688 * @sigsetsize: size of sigset_t type
2690 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2691 sigset_t __user *, oset, size_t, sigsetsize)
2693 sigset_t old_set, new_set;
2696 /* XXX: Don't preclude handling different sized sigset_t's. */
2697 if (sigsetsize != sizeof(sigset_t))
2700 old_set = current->blocked;
2703 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2705 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2707 error = sigprocmask(how, &new_set, NULL);
2713 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
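/*
 * Illustrative sketch (userspace, not kernel code): this syscall is
 * normally reached via the sigprocmask(3) or pthread_sigmask(3) wrappers.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGTERM);
 *	if (sigprocmask(SIG_BLOCK, &set, &old) == -1)
 *		perror("sigprocmask");
 */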
2720 #ifdef CONFIG_COMPAT
2721 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2722 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2725 sigset_t old_set = current->blocked;
2727 /* XXX: Don't preclude handling different sized sigset_t's. */
2728 if (sigsetsize != sizeof(sigset_t))
2732 compat_sigset_t new32;
2735 if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2738 sigset_from_compat(&new_set, &new32);
2739 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2741 error = sigprocmask(how, &new_set, NULL);
2746 compat_sigset_t old32;
2747 sigset_to_compat(&old32, &old_set);
2748 if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2753 return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2754 (sigset_t __user *)oset, sigsetsize);
2759 static int do_sigpending(void *set, unsigned long sigsetsize)
2761 if (sigsetsize > sizeof(sigset_t))
2764 spin_lock_irq(&current->sighand->siglock);
2765 sigorsets(set, &current->pending.signal,
2766 &current->signal->shared_pending.signal);
2767 spin_unlock_irq(&current->sighand->siglock);
2769 /* Outside the lock because only this thread touches it. */
2770 sigandsets(set, &current->blocked, set);
2775 * sys_rt_sigpending - examine a pending signal that has been raised while blocked
2777 * @uset: stores pending signals
2778 * @sigsetsize: size of sigset_t type or larger
2780 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2783 int err = do_sigpending(&set, sigsetsize);
2784 if (!err && copy_to_user(uset, &set, sigsetsize))
2789 #ifdef CONFIG_COMPAT
2790 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2791 compat_size_t, sigsetsize)
2795 int err = do_sigpending(&set, sigsetsize);
2797 compat_sigset_t set32;
2798 sigset_to_compat(&set32, &set);
2799 /* we can get here only if sigsetsize <= sizeof(set) */
2800 if (copy_to_user(uset, &set32, sigsetsize))
2805 return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2810 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2812 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2816 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2818 if (from->si_code < 0)
2819 return __copy_to_user(to, from, sizeof(siginfo_t)) ? -EFAULT : 0;
2822 * If you change the siginfo_t structure, please be sure
2823 * this code is fixed accordingly.
2824 * Please remember to update the signalfd_copyinfo() function
2825 * inside fs/signalfd.c too, in case siginfo_t changes.
2826 * It should never copy any pad contained in the structure
2827 * to avoid security leaks, but must copy the generic
2828 * 3 ints plus the relevant union member.
2830 err = __put_user(from->si_signo, &to->si_signo);
2831 err |= __put_user(from->si_errno, &to->si_errno);
2832 err |= __put_user((short)from->si_code, &to->si_code);
2833 switch (from->si_code & __SI_MASK) {
2835 err |= __put_user(from->si_pid, &to->si_pid);
2836 err |= __put_user(from->si_uid, &to->si_uid);
2839 err |= __put_user(from->si_tid, &to->si_tid);
2840 err |= __put_user(from->si_overrun, &to->si_overrun);
2841 err |= __put_user(from->si_ptr, &to->si_ptr);
2844 err |= __put_user(from->si_band, &to->si_band);
2845 err |= __put_user(from->si_fd, &to->si_fd);
2848 err |= __put_user(from->si_addr, &to->si_addr);
2849 #ifdef __ARCH_SI_TRAPNO
2850 err |= __put_user(from->si_trapno, &to->si_trapno);
2852 #ifdef BUS_MCEERR_AO
2854 * Other callers might not initialize the si_lsb field,
2855 * so check explicitly for the right codes here.
2857 if (from->si_signo == SIGBUS &&
2858 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2859 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2862 if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2863 err |= __put_user(from->si_lower, &to->si_lower);
2864 err |= __put_user(from->si_upper, &to->si_upper);
2869 err |= __put_user(from->si_pid, &to->si_pid);
2870 err |= __put_user(from->si_uid, &to->si_uid);
2871 err |= __put_user(from->si_status, &to->si_status);
2872 err |= __put_user(from->si_utime, &to->si_utime);
2873 err |= __put_user(from->si_stime, &to->si_stime);
2875 case __SI_RT: /* This is not generated by the kernel as of now. */
2876 case __SI_MESGQ: /* But this is */
2877 err |= __put_user(from->si_pid, &to->si_pid);
2878 err |= __put_user(from->si_uid, &to->si_uid);
2879 err |= __put_user(from->si_ptr, &to->si_ptr);
2881 #ifdef __ARCH_SIGSYS
2883 err |= __put_user(from->si_call_addr, &to->si_call_addr);
2884 err |= __put_user(from->si_syscall, &to->si_syscall);
2885 err |= __put_user(from->si_arch, &to->si_arch);
2888 default: /* this is just in case for now ... */
2889 err |= __put_user(from->si_pid, &to->si_pid);
2890 err |= __put_user(from->si_uid, &to->si_uid);
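/*
 * Illustrative sketch (userspace, not kernel code): the fields copied out
 * above are what an SA_SIGINFO handler receives. For SI_USER/SI_QUEUE,
 * si_pid and si_uid identify the sender. Hypothetical handler name; the
 * handler body must stay async-signal-safe.
 *
 *	#include <signal.h>
 *
 *	static void on_sig(int sig, siginfo_t *si, void *uctx)
 *	{
 *		(inspect si->si_pid, si->si_uid, si->si_code here)
 *	}
 *
 *	struct sigaction sa = { .sa_sigaction = on_sig,
 *				.sa_flags = SA_SIGINFO };
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 */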
2899 * do_sigtimedwait - wait for queued signals specified in @which
2900 * @which: queued signals to wait for
2901 * @info: if non-null, the signal's siginfo is returned here
2902 * @ts: upper bound on process time suspension
2904 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2905 const struct timespec *ts)
2907 struct task_struct *tsk = current;
2908 long timeout = MAX_SCHEDULE_TIMEOUT;
2909 sigset_t mask = *which;
2913 if (!timespec_valid(ts))
2915 timeout = timespec_to_jiffies(ts);
2917 * We can be close to the next tick, add another one
2918 * to ensure we will wait at least the time asked for.
2920 if (ts->tv_sec || ts->tv_nsec)
2925 * Invert the set of allowed signals to get those we want to block.
2927 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2930 spin_lock_irq(&tsk->sighand->siglock);
2931 sig = dequeue_signal(tsk, &mask, info);
2932 if (!sig && timeout) {
2934 * None ready; temporarily unblock those we're interested in
2935 * while we are sleeping, so that we'll be awakened when
2936 * they arrive. Unblocking is always fine; we can avoid
2937 * set_current_blocked().
2939 tsk->real_blocked = tsk->blocked;
2940 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2941 recalc_sigpending();
2942 spin_unlock_irq(&tsk->sighand->siglock);
2944 timeout = freezable_schedule_timeout_interruptible(timeout);
2946 spin_lock_irq(&tsk->sighand->siglock);
2947 __set_task_blocked(tsk, &tsk->real_blocked);
2948 sigemptyset(&tsk->real_blocked);
2949 sig = dequeue_signal(tsk, &mask, info);
2951 spin_unlock_irq(&tsk->sighand->siglock);
2955 return timeout ? -EINTR : -EAGAIN;
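/*
 * Illustrative sketch (userspace, not kernel code): the usual callers are
 * sigtimedwait(2)/sigwaitinfo(2). The set is blocked first so the signals
 * stay queued instead of being delivered asynchronously.
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	int sig = sigtimedwait(&set, &si, &ts);
 *	(returns the signal number, or -1 with errno == EAGAIN on timeout)
 */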
2959 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
2961 * @uthese: queued signals to wait for
2962 * @uinfo: if non-null, the signal's siginfo is returned here
2963 * @uts: upper bound on process time suspension
2964 * @sigsetsize: size of sigset_t type
2966 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2967 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2975 /* XXX: Don't preclude handling different sized sigset_t's. */
2976 if (sigsetsize != sizeof(sigset_t))
2979 if (copy_from_user(&these, uthese, sizeof(these)))
2983 if (copy_from_user(&ts, uts, sizeof(ts)))
2987 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2989 if (ret > 0 && uinfo) {
2990 if (copy_siginfo_to_user(uinfo, &info))
2998 * sys_kill - send a signal to a process
2999 * @pid: the PID of the process
3000 * @sig: signal to be sent
3002 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3004 struct siginfo info;
3006 info.si_signo = sig;
3008 info.si_code = SI_USER;
3009 info.si_pid = task_tgid_vnr(current);
3010 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3012 return kill_something_info(sig, &info, pid);
3016 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
3018 struct task_struct *p;
3022 p = find_task_by_vpid(pid);
3023 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3024 error = check_kill_permission(sig, info, p);
3026 * The null signal is a permissions and process existence
3027 * probe. No signal is actually delivered.
3029 if (!error && sig) {
3030 error = do_send_sig_info(sig, info, p, false);
3032 * If lock_task_sighand() failed we pretend the task
3033 * dies after receiving the signal. The window is tiny,
3034 * and the signal is private anyway.
3036 if (unlikely(error == -ESRCH))
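/*
 * Illustrative sketch (userspace, not kernel code): the "null signal"
 * probe described above -- kill(pid, 0) delivers nothing but still runs
 * the existence and permission checks.
 *
 *	#include <signal.h>
 *	#include <errno.h>
 *
 *	if (kill(pid, 0) == 0)
 *		;	(pid exists and we may signal it)
 *	else if (errno == ESRCH)
 *		;	(no such process)
 *	else if (errno == EPERM)
 *		;	(exists, but we lack permission)
 */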
3045 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3047 struct siginfo info = {};
3049 info.si_signo = sig;
3051 info.si_code = SI_TKILL;
3052 info.si_pid = task_tgid_vnr(current);
3053 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3055 return do_send_specific(tgid, pid, sig, &info);
3059 * sys_tgkill - send signal to one specific thread
3060 * @tgid: the thread group ID of the thread
3061 * @pid: the PID of the thread
3062 * @sig: signal to be sent
3064 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3065 * exists but no longer belongs to the target thread group. This
3066 * method solves the problem of threads exiting and PIDs getting reused.
3068 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3070 /* This is only valid for single tasks */
3071 if (pid <= 0 || tgid <= 0)
3074 return do_tkill(tgid, pid, sig);
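/*
 * Illustrative sketch (userspace, not kernel code): tgkill is what
 * pthread_kill(3) ultimately uses; a raw call goes through syscall(2).
 * Assumes the caller already knows the target's tgid and tid.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */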
3078 * sys_tkill - send signal to one specific task
3079 * @pid: the PID of the task
3080 * @sig: signal to be sent
3082 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3084 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3086 /* This is only valid for single tasks */
3090 return do_tkill(0, pid, sig);
3093 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3095 /* Not even root can pretend to send signals from the kernel.
3096 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3098 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3099 (task_pid_vnr(current) != pid))
3102 info->si_signo = sig;
3104 /* POSIX.1b doesn't mention process groups. */
3105 return kill_proc_info(sig, info, pid);
3109 * sys_rt_sigqueueinfo - send signal information to a process
3110 * @pid: the PID of the target process
3111 * @sig: signal to be sent
3112 * @uinfo: signal info to be sent
3114 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3115 siginfo_t __user *, uinfo)
3118 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3120 return do_rt_sigqueueinfo(pid, sig, &info);
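/*
 * Illustrative sketch (userspace, not kernel code): the usual entry point
 * is sigqueue(3), which builds the siginfo with si_code = SI_QUEUE (< 0),
 * so the impersonation check above passes.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	union sigval v = { .sival_int = 42 };
 *	if (sigqueue(pid, SIGUSR1, v) == -1)
 *		perror("sigqueue");
 */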
3123 #ifdef CONFIG_COMPAT
3124 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3127 struct compat_siginfo __user *, uinfo)
3129 siginfo_t info = {};
3130 int ret = copy_siginfo_from_user32(&info, uinfo);
3133 return do_rt_sigqueueinfo(pid, sig, &info);
3137 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3139 /* This is only valid for single tasks */
3140 if (pid <= 0 || tgid <= 0)
3143 /* Not even root can pretend to send signals from the kernel.
3144 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3146 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3147 (task_pid_vnr(current) != pid))
3150 info->si_signo = sig;
3152 return do_send_specific(tgid, pid, sig, info);
3155 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3156 siginfo_t __user *, uinfo)
3160 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3163 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3166 #ifdef CONFIG_COMPAT
3167 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3171 struct compat_siginfo __user *, uinfo)
3173 siginfo_t info = {};
3175 if (copy_siginfo_from_user32(&info, uinfo))
3177 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3182 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3184 void kernel_sigaction(int sig, __sighandler_t action)
3186 spin_lock_irq(&current->sighand->siglock);
3187 current->sighand->action[sig - 1].sa.sa_handler = action;
3188 if (action == SIG_IGN) {
3192 sigaddset(&mask, sig);
3194 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3195 flush_sigqueue_mask(&mask, &current->pending);
3196 recalc_sigpending();
3198 spin_unlock_irq(&current->sighand->siglock);
3200 EXPORT_SYMBOL(kernel_sigaction);
3202 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3204 struct task_struct *p = current, *t;
3205 struct k_sigaction *k;
3208 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3211 k = &p->sighand->action[sig-1];
3213 spin_lock_irq(&p->sighand->siglock);
3218 sigdelsetmask(&act->sa.sa_mask,
3219 sigmask(SIGKILL) | sigmask(SIGSTOP));
3223 * "Setting a signal action to SIG_IGN for a signal that is
3224 * pending shall cause the pending signal to be discarded,
3225 * whether or not it is blocked."
3227 * "Setting a signal action to SIG_DFL for a signal that is
3228 * pending and whose default action is to ignore the signal
3229 * (for example, SIGCHLD), shall cause the pending signal to
3230 * be discarded, whether or not it is blocked"
3232 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3234 sigaddset(&mask, sig);
3235 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3236 for_each_thread(p, t)
3237 flush_sigqueue_mask(&mask, &t->pending);
3241 spin_unlock_irq(&p->sighand->siglock);
3246 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3251 oss.ss_sp = (void __user *) current->sas_ss_sp;
3252 oss.ss_size = current->sas_ss_size;
3253 oss.ss_flags = sas_ss_flags(sp);
3261 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3263 error = __get_user(ss_sp, &uss->ss_sp) |
3264 __get_user(ss_flags, &uss->ss_flags) |
3265 __get_user(ss_size, &uss->ss_size);
3270 if (on_sig_stack(sp))
3275 * Note - this code used to test ss_flags incorrectly:
3276 * old code may have been written using ss_flags==0
3277 * to mean ss_flags==SS_ONSTACK (as this was the only
3278 * way that worked) - this fix preserves that older mechanism.
3281 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3284 if (ss_flags == SS_DISABLE) {
3289 if (ss_size < MINSIGSTKSZ)
3293 current->sas_ss_sp = (unsigned long) ss_sp;
3294 current->sas_ss_size = ss_size;
3300 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3302 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3303 __put_user(oss.ss_size, &uoss->ss_size) |
3304 __put_user(oss.ss_flags, &uoss->ss_flags);
3310 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3312 return do_sigaltstack(uss, uoss, current_user_stack_pointer());
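/*
 * Illustrative sketch (userspace, not kernel code): installing an
 * alternate stack and a handler that runs on it, e.g. so a handler can
 * still run after the main stack overflows. "on_segv" is hypothetical;
 * error handling elided.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
 *	sigaltstack(&ss, NULL);
 *	struct sigaction sa = { .sa_handler = on_segv,
 *				.sa_flags = SA_ONSTACK };
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */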
3315 int restore_altstack(const stack_t __user *uss)
3317 int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3318 /* squash all but EFAULT for now */
3319 return err == -EFAULT ? err : 0;
3322 int __save_altstack(stack_t __user *uss, unsigned long sp)
3324 struct task_struct *t = current;
3325 return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3326 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3327 __put_user(t->sas_ss_size, &uss->ss_size);
3330 #ifdef CONFIG_COMPAT
3331 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3332 const compat_stack_t __user *, uss_ptr,
3333 compat_stack_t __user *, uoss_ptr)
3340 compat_stack_t uss32;
3342 memset(&uss, 0, sizeof(stack_t));
3343 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3345 uss.ss_sp = compat_ptr(uss32.ss_sp);
3346 uss.ss_flags = uss32.ss_flags;
3347 uss.ss_size = uss32.ss_size;
3351 ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3352 (stack_t __force __user *) &uoss,
3353 compat_user_stack_pointer());
3355 if (ret >= 0 && uoss_ptr) {
3356 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3357 __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3358 __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3359 __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3365 int compat_restore_altstack(const compat_stack_t __user *uss)
3367 int err = compat_sys_sigaltstack(uss, NULL);
3368 /* squash all but -EFAULT for now */
3369 return err == -EFAULT ? err : 0;
3372 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3374 struct task_struct *t = current;
3375 return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
3376 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3377 __put_user(t->sas_ss_size, &uss->ss_size);
3381 #ifdef __ARCH_WANT_SYS_SIGPENDING
3384 * sys_sigpending - examine pending signals
3385 * @set: where the mask of pending signals is returned
3387 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3389 return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3394 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3396 * sys_sigprocmask - examine and change blocked signals
3397 * @how: whether to add, remove, or set signals
3398 * @nset: signals to add or remove (if non-null)
3399 * @oset: previous value of signal mask if non-null
3401 * Some platforms have their own version with special arguments;
3402 * others support only sys_rt_sigprocmask.
3405 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3406 old_sigset_t __user *, oset)
3408 old_sigset_t old_set, new_set;
3409 sigset_t new_blocked;
3411 old_set = current->blocked.sig[0];
3414 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3417 new_blocked = current->blocked;
3421 sigaddsetmask(&new_blocked, new_set);
3424 sigdelsetmask(&new_blocked, new_set);
3427 new_blocked.sig[0] = new_set;
3433 set_current_blocked(&new_blocked);
3437 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3443 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3445 #ifndef CONFIG_ODD_RT_SIGACTION
3447 * sys_rt_sigaction - alter an action taken by a process
3448 * @sig: signal to be sent
3449 * @act: new sigaction
3450 * @oact: used to save the previous sigaction
3451 * @sigsetsize: size of sigset_t type
3453 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3454 const struct sigaction __user *, act,
3455 struct sigaction __user *, oact,
3458 struct k_sigaction new_sa, old_sa;
3461 /* XXX: Don't preclude handling different sized sigset_t's. */
3462 if (sigsetsize != sizeof(sigset_t))
3466 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3470 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3473 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
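/*
 * Illustrative sketch (userspace, not kernel code): passing a NULL new
 * action queries the current disposition without changing it.
 *
 *	#include <signal.h>
 *
 *	struct sigaction cur;
 *	sigaction(SIGCHLD, NULL, &cur);
 *	if (cur.sa_handler == SIG_IGN)
 *		;	(SIGCHLD is currently ignored)
 */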
3479 #ifdef CONFIG_COMPAT
3480 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3481 const struct compat_sigaction __user *, act,
3482 struct compat_sigaction __user *, oact,
3483 compat_size_t, sigsetsize)
3485 struct k_sigaction new_ka, old_ka;
3486 compat_sigset_t mask;
3487 #ifdef __ARCH_HAS_SA_RESTORER
3488 compat_uptr_t restorer;
3492 /* XXX: Don't preclude handling different sized sigset_t's. */
3493 if (sigsetsize != sizeof(compat_sigset_t))
3497 compat_uptr_t handler;
3498 ret = get_user(handler, &act->sa_handler);
3499 new_ka.sa.sa_handler = compat_ptr(handler);
3500 #ifdef __ARCH_HAS_SA_RESTORER
3501 ret |= get_user(restorer, &act->sa_restorer);
3502 new_ka.sa.sa_restorer = compat_ptr(restorer);
3504 ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3505 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3508 sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3511 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3513 sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3514 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3516 ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3517 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3518 #ifdef __ARCH_HAS_SA_RESTORER
3519 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3520 &oact->sa_restorer);
3526 #endif /* !CONFIG_ODD_RT_SIGACTION */
3528 #ifdef CONFIG_OLD_SIGACTION
3529 SYSCALL_DEFINE3(sigaction, int, sig,
3530 const struct old_sigaction __user *, act,
3531 struct old_sigaction __user *, oact)
3533 struct k_sigaction new_ka, old_ka;
3538 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3539 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3540 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3541 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3542 __get_user(mask, &act->sa_mask))
3544 #ifdef __ARCH_HAS_KA_RESTORER
3545 new_ka.ka_restorer = NULL;
3547 siginitset(&new_ka.sa.sa_mask, mask);
3550 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3553 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3554 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3555 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3556 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3557 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3564 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3565 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3566 const struct compat_old_sigaction __user *, act,
3567 struct compat_old_sigaction __user *, oact)
3569 struct k_sigaction new_ka, old_ka;
3571 compat_old_sigset_t mask;
3572 compat_uptr_t handler, restorer;
3575 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3576 __get_user(handler, &act->sa_handler) ||
3577 __get_user(restorer, &act->sa_restorer) ||
3578 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3579 __get_user(mask, &act->sa_mask))
3582 #ifdef __ARCH_HAS_KA_RESTORER
3583 new_ka.ka_restorer = NULL;
3585 new_ka.sa.sa_handler = compat_ptr(handler);
3586 new_ka.sa.sa_restorer = compat_ptr(restorer);
3587 siginitset(&new_ka.sa.sa_mask, mask);
3590 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3593 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3594 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3595 &oact->sa_handler) ||
3596 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3597 &oact->sa_restorer) ||
3598 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3599 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3606 #ifdef CONFIG_SGETMASK_SYSCALL
3609 * For backwards compatibility. Functionality superseded by sigprocmask.
3611 SYSCALL_DEFINE0(sgetmask)
3614 return current->blocked.sig[0];
3617 SYSCALL_DEFINE1(ssetmask, int, newmask)
3619 int old = current->blocked.sig[0];
3622 siginitset(&newset, newmask);
3623 set_current_blocked(&newset);
3627 #endif /* CONFIG_SGETMASK_SYSCALL */
3629 #ifdef __ARCH_WANT_SYS_SIGNAL
3631 * For backwards compatibility. Functionality superseded by sigaction.
3633 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3635 struct k_sigaction new_sa, old_sa;
3638 new_sa.sa.sa_handler = handler;
3639 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3640 sigemptyset(&new_sa.sa.sa_mask);
3642 ret = do_sigaction(sig, &new_sa, &old_sa);
3644 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3646 #endif /* __ARCH_WANT_SYS_SIGNAL */
3648 #ifdef __ARCH_WANT_SYS_PAUSE
3650 SYSCALL_DEFINE0(pause)
3652 while (!signal_pending(current)) {
3653 __set_current_state(TASK_INTERRUPTIBLE);
3656 return -ERESTARTNOHAND;
3661 int sigsuspend(sigset_t *set)
3663 current->saved_sigmask = current->blocked;
3664 set_current_blocked(set);
3666 __set_current_state(TASK_INTERRUPTIBLE);
3668 set_restore_sigmask();
3669 return -ERESTARTNOHAND;
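/*
 * Illustrative sketch (userspace, not kernel code): the race-free wait
 * that sigsuspend enables -- block the signal, test a flag, then
 * atomically unblock and sleep; pause(2) alone would race with the
 * handler. "got_sig" is a hypothetical volatile sig_atomic_t set by
 * the handler.
 *
 *	#include <signal.h>
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!got_sig)
 *		sigsuspend(&old);
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */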
3673 * sys_rt_sigsuspend - replace the signal mask with the @unewset
3674 * value until a signal is received
3675 * @unewset: new signal mask value
3676 * @sigsetsize: size of sigset_t type
3678 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3682 /* XXX: Don't preclude handling different sized sigset_t's. */
3683 if (sigsetsize != sizeof(sigset_t))
3686 if (copy_from_user(&newset, unewset, sizeof(newset)))
3688 return sigsuspend(&newset);
3691 #ifdef CONFIG_COMPAT
3692 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3696 compat_sigset_t newset32;
3698 /* XXX: Don't preclude handling different sized sigset_t's. */
3699 if (sigsetsize != sizeof(sigset_t))
3702 if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3704 sigset_from_compat(&newset, &newset32);
3705 return sigsuspend(&newset);
3707 /* on little-endian, bitmaps don't care about granularity */
3708 return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3713 #ifdef CONFIG_OLD_SIGSUSPEND
3714 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3717 siginitset(&blocked, mask);
3718 return sigsuspend(&blocked);
3721 #ifdef CONFIG_OLD_SIGSUSPEND3
3722 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3725 siginitset(&blocked, mask);
3726 return sigsuspend(&blocked);
3730 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3735 void __init signals_init(void)
3737 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3740 #ifdef CONFIG_KGDB_KDB
3741 #include <linux/kdb.h>
3743 * kdb_send_sig_info - Allows kdb to send signals without exposing
3744 * signal internals. This function checks if the required locks are
3745 * available before calling the main signal code, to avoid kdb deadlocks.
3749 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3751 static struct task_struct *kdb_prev_t;
3753 if (!spin_trylock(&t->sighand->siglock)) {
3754 kdb_printf("Can't do kill command now.\n"
3755 "The sigmask lock is held somewhere else in "
3756 "kernel, try again later\n");
3759 spin_unlock(&t->sighand->siglock);
3760 new_t = kdb_prev_t != t;
3762 if (t->state != TASK_RUNNING && new_t) {
3763 kdb_printf("Process is not RUNNING, sending a signal from "
3764 "kdb risks deadlock\n"
3765 "on the run queue locks. "
3766 "The signal has _not_ been sent.\n"
3767 "Reissue the kill command if you want to risk "
3771 sig = info->si_signo;
3772 if (send_sig_info(sig, info, t))
3773 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3776 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3778 #endif /* CONFIG_KGDB_KDB */