TOMOYO Linux Cross Reference
Linux/kernel/signal.c

  1 /*
  2  *  linux/kernel/signal.c
  3  *
  4  *  Copyright (C) 1991, 1992  Linus Torvalds
  5  *
  6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
  7  *
  8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
  9  *              Changes to use preallocated sigqueue structures
 10  *              to allow signals to be sent reliably.
 11  */
 12 
 13 #include <linux/slab.h>
 14 #include <linux/export.h>
 15 #include <linux/init.h>
 16 #include <linux/sched.h>
 17 #include <linux/fs.h>
 18 #include <linux/tty.h>
 19 #include <linux/binfmts.h>
 20 #include <linux/coredump.h>
 21 #include <linux/security.h>
 22 #include <linux/syscalls.h>
 23 #include <linux/ptrace.h>
 24 #include <linux/signal.h>
 25 #include <linux/signalfd.h>
 26 #include <linux/ratelimit.h>
 27 #include <linux/tracehook.h>
 28 #include <linux/capability.h>
 29 #include <linux/freezer.h>
 30 #include <linux/pid_namespace.h>
 31 #include <linux/nsproxy.h>
 32 #include <linux/user_namespace.h>
 33 #include <linux/uprobes.h>
 34 #include <linux/compat.h>
 35 #include <linux/cn_proc.h>
 36 #include <linux/compiler.h>
 37 
 38 #define CREATE_TRACE_POINTS
 39 #include <trace/events/signal.h>
 40 
 41 #include <asm/param.h>
 42 #include <asm/uaccess.h>
 43 #include <asm/unistd.h>
 44 #include <asm/siginfo.h>
 45 #include <asm/cacheflush.h>
 46 #include "audit.h"      /* audit_signal_info() */
 47 
 48 /*
 49  * SLAB caches for signal bits.
 50  */
 51 
 52 static struct kmem_cache *sigqueue_cachep;
 53 
 54 int print_fatal_signals __read_mostly;
 55 
 56 static void __user *sig_handler(struct task_struct *t, int sig)
 57 {
 58         return t->sighand->action[sig - 1].sa.sa_handler;
 59 }
 60 
 61 static int sig_handler_ignored(void __user *handler, int sig)
 62 {
 63         /* Is it explicitly or implicitly ignored? */
 64         return handler == SIG_IGN ||
 65                 (handler == SIG_DFL && sig_kernel_ignore(sig));
 66 }
 67 
 68 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
 69 {
 70         void __user *handler;
 71 
 72         handler = sig_handler(t, sig);
 73 
 74         if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
 75             handler == SIG_DFL && !(force && sig_kernel_only(sig)))
 76                 return 1;
 77 
 78         return sig_handler_ignored(handler, sig);
 79 }
 80 
 81 static int sig_ignored(struct task_struct *t, int sig, bool force)
 82 {
 83         /*
 84          * Blocked signals are never ignored, since the
 85          * signal handler may change by the time it is
 86          * unblocked.
 87          */
 88         if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
 89                 return 0;
 90 
 91         /*
 92          * Tracers may want to know about even ignored signals, unless
 93          * it is SIGKILL, which can't be reported anyway but can be
 94          * ignored by a SIGNAL_UNKILLABLE task.
 95          */
 96         if (t->ptrace && sig != SIGKILL)
 97                 return 0;
 98 
 99         return sig_task_ignored(t, sig, force);
100 }
101 
102 /*
103  * Re-calculate pending state from the set of locally pending
104  * signals, globally pending signals, and blocked signals.
105  */
106 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
107 {
108         unsigned long ready;
109         long i;
110 
111         switch (_NSIG_WORDS) {
112         default:
113                 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
114                         ready |= signal->sig[i] &~ blocked->sig[i];
115                 break;
116 
117         case 4: ready  = signal->sig[3] &~ blocked->sig[3];
118                 ready |= signal->sig[2] &~ blocked->sig[2];
119                 ready |= signal->sig[1] &~ blocked->sig[1];
120                 ready |= signal->sig[0] &~ blocked->sig[0];
121                 break;
122 
123         case 2: ready  = signal->sig[1] &~ blocked->sig[1];
124                 ready |= signal->sig[0] &~ blocked->sig[0];
125                 break;
126 
127         case 1: ready  = signal->sig[0] &~ blocked->sig[0];
128         }
129         return ready != 0;
130 }
131 
132 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
133 
134 static int recalc_sigpending_tsk(struct task_struct *t)
135 {
136         if ((t->jobctl & JOBCTL_PENDING_MASK) ||
137             PENDING(&t->pending, &t->blocked) ||
138             PENDING(&t->signal->shared_pending, &t->blocked)) {
139                 set_tsk_thread_flag(t, TIF_SIGPENDING);
140                 return 1;
141         }
142         /*
143          * We must never clear the flag in another thread, or in current
144          * when it's possible the current syscall is returning -ERESTART*.
145          * So we don't clear it here; only callers that know it is safe do so.
146          */
147         return 0;
148 }
149 
150 /*
151  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
152  * This is superfluous when called on current; the wakeup is a harmless no-op.
153  */
154 void recalc_sigpending_and_wake(struct task_struct *t)
155 {
156         if (recalc_sigpending_tsk(t))
157                 signal_wake_up(t, 0);
158 }
159 
160 void recalc_sigpending(void)
161 {
162         if (!recalc_sigpending_tsk(current) && !freezing(current))
163                 clear_thread_flag(TIF_SIGPENDING);
164 
165 }
166 
167 /* Given the mask, find the first available signal that should be serviced. */
168 
169 #define SYNCHRONOUS_MASK \
170         (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
171          sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
172 
173 int next_signal(struct sigpending *pending, sigset_t *mask)
174 {
175         unsigned long i, *s, *m, x;
176         int sig = 0;
177 
178         s = pending->signal.sig;
179         m = mask->sig;
180 
181         /*
182          * Handle the first word specially: it contains the
183          * synchronous signals that need to be dequeued first.
184          */
185         x = *s &~ *m;
186         if (x) {
187                 if (x & SYNCHRONOUS_MASK)
188                         x &= SYNCHRONOUS_MASK;
189                 sig = ffz(~x) + 1;
190                 return sig;
191         }
192 
193         switch (_NSIG_WORDS) {
194         default:
195                 for (i = 1; i < _NSIG_WORDS; ++i) {
196                         x = *++s &~ *++m;
197                         if (!x)
198                                 continue;
199                         sig = ffz(~x) + i*_NSIG_BPW + 1;
200                         break;
201                 }
202                 break;
203 
204         case 2:
205                 x = s[1] &~ m[1];
206                 if (!x)
207                         break;
208                 sig = ffz(~x) + _NSIG_BPW + 1;
209                 break;
210 
211         case 1:
212                 /* Nothing to do */
213                 break;
214         }
215 
216         return sig;
217 }
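
     /*
      * Worked example (an illustration, not part of the original source,
      * assuming both SIGSEGV and SIGUSR1 are pending and unblocked): the
      * SYNCHRONOUS_MASK filter above keeps only the synchronous SIGSEGV
      * bit in the first word, so next_signal() returns SIGSEGV while the
      * asynchronous SIGUSR1 stays queued for a later dequeue.
      */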
218 
219 static inline void print_dropped_signal(int sig)
220 {
221         static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
222 
223         if (!print_fatal_signals)
224                 return;
225 
226         if (!__ratelimit(&ratelimit_state))
227                 return;
228 
229         printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
230                                 current->comm, current->pid, sig);
231 }
232 
233 /**
234  * task_set_jobctl_pending - set jobctl pending bits
235  * @task: target task
236  * @mask: pending bits to set
237  *
 238  * Set @mask bits in @task->jobctl.  @mask must be a subset of
 239  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 240  * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 241  * cleared.  If @task is already being killed or exiting, this function
 242  * becomes a noop.
243  *
244  * CONTEXT:
245  * Must be called with @task->sighand->siglock held.
246  *
247  * RETURNS:
 248  * %true if @mask is set, %false if it became a noop because @task was dying.
249  */
250 bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
251 {
252         BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
253                         JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
254         BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
255 
256         if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
257                 return false;
258 
259         if (mask & JOBCTL_STOP_SIGMASK)
260                 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
261 
262         task->jobctl |= mask;
263         return true;
264 }
265 
266 /**
267  * task_clear_jobctl_trapping - clear jobctl trapping bit
268  * @task: target task
269  *
270  * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
271  * Clear it and wake up the ptracer.  Note that we don't need any further
272  * locking.  @task->siglock guarantees that @task->parent points to the
273  * ptracer.
274  *
275  * CONTEXT:
276  * Must be called with @task->sighand->siglock held.
277  */
278 void task_clear_jobctl_trapping(struct task_struct *task)
279 {
280         if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
281                 task->jobctl &= ~JOBCTL_TRAPPING;
282                 smp_mb();       /* advised by wake_up_bit() */
283                 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
284         }
285 }
286 
287 /**
288  * task_clear_jobctl_pending - clear jobctl pending bits
289  * @task: target task
290  * @mask: pending bits to clear
291  *
 292  * Clear @mask from @task->jobctl.  @mask must be a subset of
293  * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
294  * STOP bits are cleared together.
295  *
296  * If clearing of @mask leaves no stop or trap pending, this function calls
297  * task_clear_jobctl_trapping().
298  *
299  * CONTEXT:
300  * Must be called with @task->sighand->siglock held.
301  */
302 void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
303 {
304         BUG_ON(mask & ~JOBCTL_PENDING_MASK);
305 
306         if (mask & JOBCTL_STOP_PENDING)
307                 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
308 
309         task->jobctl &= ~mask;
310 
311         if (!(task->jobctl & JOBCTL_PENDING_MASK))
312                 task_clear_jobctl_trapping(task);
313 }
314 
315 /**
316  * task_participate_group_stop - participate in a group stop
317  * @task: task participating in a group stop
318  *
319  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
320  * Group stop states are cleared and the group stop count is consumed if
321  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
322  * stop, the appropriate %SIGNAL_* flags are set.
323  *
324  * CONTEXT:
325  * Must be called with @task->sighand->siglock held.
326  *
327  * RETURNS:
328  * %true if group stop completion should be notified to the parent, %false
329  * otherwise.
330  */
331 static bool task_participate_group_stop(struct task_struct *task)
332 {
333         struct signal_struct *sig = task->signal;
334         bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
335 
336         WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
337 
338         task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
339 
340         if (!consume)
341                 return false;
342 
343         if (!WARN_ON_ONCE(sig->group_stop_count == 0))
344                 sig->group_stop_count--;
345 
346         /*
 347          * Tell the caller to notify completion iff we are entering a
348          * fresh group stop.  Read comment in do_signal_stop() for details.
349          */
350         if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
351                 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
352                 return true;
353         }
354         return false;
355 }
356 
357 /*
358  * allocate a new signal queue record
359  * - this may be called without locks if and only if t == current, otherwise an
360  *   appropriate lock must be held to stop the target task from exiting
361  */
362 static struct sigqueue *
363 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
364 {
365         struct sigqueue *q = NULL;
366         struct user_struct *user;
367 
368         /*
369          * Protect access to @t credentials. This can go away when all
 370          * callers hold the rcu read lock.
371          */
372         rcu_read_lock();
373         user = get_uid(__task_cred(t)->user);
374         atomic_inc(&user->sigpending);
375         rcu_read_unlock();
376 
377         if (override_rlimit ||
378             atomic_read(&user->sigpending) <=
379                         task_rlimit(t, RLIMIT_SIGPENDING)) {
380                 q = kmem_cache_alloc(sigqueue_cachep, flags);
381         } else {
382                 print_dropped_signal(sig);
383         }
384 
385         if (unlikely(q == NULL)) {
386                 atomic_dec(&user->sigpending);
387                 free_uid(user);
388         } else {
389                 INIT_LIST_HEAD(&q->list);
390                 q->flags = 0;
391                 q->user = user;
392         }
393 
394         return q;
395 }
396 
397 static void __sigqueue_free(struct sigqueue *q)
398 {
399         if (q->flags & SIGQUEUE_PREALLOC)
400                 return;
401         atomic_dec(&q->user->sigpending);
402         free_uid(q->user);
403         kmem_cache_free(sigqueue_cachep, q);
404 }
405 
406 void flush_sigqueue(struct sigpending *queue)
407 {
408         struct sigqueue *q;
409 
410         sigemptyset(&queue->signal);
411         while (!list_empty(&queue->list)) {
412                 q = list_entry(queue->list.next, struct sigqueue , list);
413                 list_del_init(&q->list);
414                 __sigqueue_free(q);
415         }
416 }
417 
418 /*
419  * Flush all pending signals for a task.
420  */
421 void __flush_signals(struct task_struct *t)
422 {
423         clear_tsk_thread_flag(t, TIF_SIGPENDING);
424         flush_sigqueue(&t->pending);
425         flush_sigqueue(&t->signal->shared_pending);
426 }
427 
428 void flush_signals(struct task_struct *t)
429 {
430         unsigned long flags;
431 
432         spin_lock_irqsave(&t->sighand->siglock, flags);
433         __flush_signals(t);
434         spin_unlock_irqrestore(&t->sighand->siglock, flags);
435 }
436 
437 static void __flush_itimer_signals(struct sigpending *pending)
438 {
439         sigset_t signal, retain;
440         struct sigqueue *q, *n;
441 
442         signal = pending->signal;
443         sigemptyset(&retain);
444 
445         list_for_each_entry_safe(q, n, &pending->list, list) {
446                 int sig = q->info.si_signo;
447 
448                 if (likely(q->info.si_code != SI_TIMER)) {
449                         sigaddset(&retain, sig);
450                 } else {
451                         sigdelset(&signal, sig);
452                         list_del_init(&q->list);
453                         __sigqueue_free(q);
454                 }
455         }
456 
457         sigorsets(&pending->signal, &signal, &retain);
458 }
459 
460 void flush_itimer_signals(void)
461 {
462         struct task_struct *tsk = current;
463         unsigned long flags;
464 
465         spin_lock_irqsave(&tsk->sighand->siglock, flags);
466         __flush_itimer_signals(&tsk->pending);
467         __flush_itimer_signals(&tsk->signal->shared_pending);
468         spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
469 }
470 
471 void ignore_signals(struct task_struct *t)
472 {
473         int i;
474 
475         for (i = 0; i < _NSIG; ++i)
476                 t->sighand->action[i].sa.sa_handler = SIG_IGN;
477 
478         flush_signals(t);
479 }
480 
481 /*
482  * Flush all handlers for a task.
483  */
484 
485 void
486 flush_signal_handlers(struct task_struct *t, int force_default)
487 {
488         int i;
489         struct k_sigaction *ka = &t->sighand->action[0];
490         for (i = _NSIG ; i != 0 ; i--) {
491                 if (force_default || ka->sa.sa_handler != SIG_IGN)
492                         ka->sa.sa_handler = SIG_DFL;
493                 ka->sa.sa_flags = 0;
494 #ifdef __ARCH_HAS_SA_RESTORER
495                 ka->sa.sa_restorer = NULL;
496 #endif
497                 sigemptyset(&ka->sa.sa_mask);
498                 ka++;
499         }
500 }
501 
502 int unhandled_signal(struct task_struct *tsk, int sig)
503 {
504         void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
505         if (is_global_init(tsk))
506                 return 1;
507         if (handler != SIG_IGN && handler != SIG_DFL)
508                 return 0;
509         /* if ptraced, let the tracer determine */
510         return !tsk->ptrace;
511 }
512 
513 /*
514  * Notify the system that a driver wants to block all signals for this
515  * process, and wants to be notified if any signals at all were to be
516  * sent/acted upon.  If the notifier routine returns non-zero, then the
517  * signal will be acted upon after all.  If the notifier routine returns 0,
 518  * then the signal will be blocked.  Only one block per process is
519  * allowed.  priv is a pointer to private data that the notifier routine
520  * can use to determine if the signal should be blocked or not.
521  */
522 void
523 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
524 {
525         unsigned long flags;
526 
527         spin_lock_irqsave(&current->sighand->siglock, flags);
528         current->notifier_mask = mask;
529         current->notifier_data = priv;
530         current->notifier = notifier;
531         spin_unlock_irqrestore(&current->sighand->siglock, flags);
532 }
533 
534 /* Notify the system that blocking has ended. */
535 
536 void
537 unblock_all_signals(void)
538 {
539         unsigned long flags;
540 
541         spin_lock_irqsave(&current->sighand->siglock, flags);
542         current->notifier = NULL;
543         current->notifier_data = NULL;
544         recalc_sigpending();
545         spin_unlock_irqrestore(&current->sighand->siglock, flags);
546 }
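
     /*
      * Illustrative sketch (not from the kernel tree; my_notifier and my_dev
      * are hypothetical names): a driver that wants SIGINT and SIGQUIT held
      * back while an operation is in flight might use the notifier contract
      * described above (return non-zero to deliver the signal, 0 to block it)
      * roughly like this:
      *
      *	static int my_notifier(void *priv)
      *	{
      *		struct my_dev *dev = priv;
      *		return !dev->busy;
      *	}
      *
      *	sigset_t mask;
      *	siginitset(&mask, sigmask(SIGINT) | sigmask(SIGQUIT));
      *	block_all_signals(my_notifier, dev, &mask);
      *	... perform the operation ...
      *	unblock_all_signals();
      */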
547 
548 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
549 {
550         struct sigqueue *q, *first = NULL;
551 
552         /*
553          * Collect the siginfo appropriate to this signal.  Check if
554          * there is another siginfo for the same signal.
 555          */
556         list_for_each_entry(q, &list->list, list) {
557                 if (q->info.si_signo == sig) {
558                         if (first)
559                                 goto still_pending;
560                         first = q;
561                 }
562         }
563 
564         sigdelset(&list->signal, sig);
565 
566         if (first) {
567 still_pending:
568                 list_del_init(&first->list);
569                 copy_siginfo(info, &first->info);
570                 __sigqueue_free(first);
571         } else {
572                 /*
573                  * Ok, it wasn't in the queue.  This must be
574                  * a fast-pathed signal or we must have been
575                  * out of queue space.  So zero out the info.
576                  */
577                 info->si_signo = sig;
578                 info->si_errno = 0;
579                 info->si_code = SI_USER;
580                 info->si_pid = 0;
581                 info->si_uid = 0;
582         }
583 }
584 
585 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
586                         siginfo_t *info)
587 {
588         int sig = next_signal(pending, mask);
589 
590         if (sig) {
591                 if (current->notifier) {
592                         if (sigismember(current->notifier_mask, sig)) {
593                                 if (!(current->notifier)(current->notifier_data)) {
594                                         clear_thread_flag(TIF_SIGPENDING);
595                                         return 0;
596                                 }
597                         }
598                 }
599 
600                 collect_signal(sig, pending, info);
601         }
602 
603         return sig;
604 }
605 
606 /*
607  * Dequeue a signal and return the element to the caller, which is
608  * expected to free it.
609  *
610  * All callers have to hold the siglock.
611  */
612 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
613 {
614         int signr;
615 
 616         /* We only dequeue private signals from ourselves; we don't let
 617          * signalfd steal them.
618          */
619         signr = __dequeue_signal(&tsk->pending, mask, info);
620         if (!signr) {
621                 signr = __dequeue_signal(&tsk->signal->shared_pending,
622                                          mask, info);
623                 /*
624                  * itimer signal ?
625                  *
626                  * itimers are process shared and we restart periodic
627                  * itimers in the signal delivery path to prevent DoS
628                  * attacks in the high resolution timer case. This is
629                  * compliant with the old way of self-restarting
630                  * itimers, as the SIGALRM is a legacy signal and only
 631                  * queued once. Changing the restart behaviour so that
 632                  * the timer is restarted in the signal dequeue path
 633                  * also reduces timer noise on heavily loaded !highres
 634                  * systems.
635                  */
636                 if (unlikely(signr == SIGALRM)) {
637                         struct hrtimer *tmr = &tsk->signal->real_timer;
638 
639                         if (!hrtimer_is_queued(tmr) &&
640                             tsk->signal->it_real_incr.tv64 != 0) {
641                                 hrtimer_forward(tmr, tmr->base->get_time(),
642                                                 tsk->signal->it_real_incr);
643                                 hrtimer_restart(tmr);
644                         }
645                 }
646         }
647 
648         recalc_sigpending();
649         if (!signr)
650                 return 0;
651 
652         if (unlikely(sig_kernel_stop(signr))) {
653                 /*
654                  * Set a marker that we have dequeued a stop signal.  Our
655                  * caller might release the siglock and then the pending
656                  * stop signal it is about to process is no longer in the
657                  * pending bitmasks, but must still be cleared by a SIGCONT
658                  * (and overruled by a SIGKILL).  So those cases clear this
659                  * shared flag after we've set it.  Note that this flag may
660                  * remain set after the signal we return is ignored or
661                  * handled.  That doesn't matter because its only purpose
662                  * is to alert stop-signal processing code when another
663                  * processor has come along and cleared the flag.
664                  */
665                 current->jobctl |= JOBCTL_STOP_DEQUEUED;
666         }
667         if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
668                 /*
669                  * Release the siglock to ensure proper locking order
670                  * of timer locks outside of siglocks.  Note, we leave
671                  * irqs disabled here, since the posix-timers code is
672                  * about to disable them again anyway.
673                  */
674                 spin_unlock(&tsk->sighand->siglock);
675                 do_schedule_next_timer(info);
676                 spin_lock(&tsk->sighand->siglock);
677         }
678         return signr;
679 }
680 
681 /*
 682  * Tell a process that it has a new active signal.
683  *
684  * NOTE! we rely on the previous spin_lock to
685  * lock interrupts for us! We can only be called with
 686  * "siglock" held, and local interrupts must
 687  * have been disabled when it was acquired!
688  *
689  * No need to set need_resched since signal event passing
690  * goes through ->blocked
691  */
692 void signal_wake_up_state(struct task_struct *t, unsigned int state)
693 {
694         set_tsk_thread_flag(t, TIF_SIGPENDING);
695         /*
696          * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
697          * case. We don't check t->state here because there is a race with it
 698  * executing on another processor and just now entering the stopped state.
699          * By using wake_up_state, we ensure the process will wake up and
700          * handle its death signal.
701          */
702         if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
703                 kick_process(t);
704 }
705 
706 /*
707  * Remove signals in mask from the pending set and queue.
708  * Returns 1 if any signals were found.
709  *
710  * All callers must be holding the siglock.
711  */
712 static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
713 {
714         struct sigqueue *q, *n;
715         sigset_t m;
716 
717         sigandsets(&m, mask, &s->signal);
718         if (sigisemptyset(&m))
719                 return 0;
720 
721         sigandnsets(&s->signal, &s->signal, mask);
722         list_for_each_entry_safe(q, n, &s->list, list) {
723                 if (sigismember(mask, q->info.si_signo)) {
724                         list_del_init(&q->list);
725                         __sigqueue_free(q);
726                 }
727         }
728         return 1;
729 }
730 
731 static inline int is_si_special(const struct siginfo *info)
732 {
733         return info <= SEND_SIG_FORCED;
734 }
735 
736 static inline bool si_fromuser(const struct siginfo *info)
737 {
738         return info == SEND_SIG_NOINFO ||
739                 (!is_si_special(info) && SI_FROMUSER(info));
740 }
741 
742 /*
743  * called with RCU read lock from check_kill_permission()
744  */
745 static int kill_ok_by_cred(struct task_struct *t)
746 {
747         const struct cred *cred = current_cred();
748         const struct cred *tcred = __task_cred(t);
749 
750         if (uid_eq(cred->euid, tcred->suid) ||
751             uid_eq(cred->euid, tcred->uid)  ||
752             uid_eq(cred->uid,  tcred->suid) ||
753             uid_eq(cred->uid,  tcred->uid))
754                 return 1;
755 
756         if (ns_capable(tcred->user_ns, CAP_KILL))
757                 return 1;
758 
759         return 0;
760 }
761 
762 /*
 763  * Check whether the caller has permission to send the signal
764  * - the caller must hold the RCU read lock
765  */
766 static int check_kill_permission(int sig, struct siginfo *info,
767                                  struct task_struct *t)
768 {
769         struct pid *sid;
770         int error;
771 
772         if (!valid_signal(sig))
773                 return -EINVAL;
774 
775         if (!si_fromuser(info))
776                 return 0;
777 
778         error = audit_signal_info(sig, t); /* Let audit system see the signal */
779         if (error)
780                 return error;
781 
782         if (!same_thread_group(current, t) &&
783             !kill_ok_by_cred(t)) {
784                 switch (sig) {
785                 case SIGCONT:
786                         sid = task_session(t);
787                         /*
 788                          * We don't return the error if sid == NULL. The
 789                          * task was unhashed; the caller must notice this.
790                          */
791                         if (!sid || sid == task_session(current))
792                                 break;
793                 default:
794                         return -EPERM;
795                 }
796         }
797 
798         return security_task_kill(t, info, sig, 0);
799 }
800 
801 /**
802  * ptrace_trap_notify - schedule trap to notify ptracer
803  * @t: tracee wanting to notify tracer
804  *
 805  * This function schedules a sticky ptrace trap which is cleared on the
 806  * next TRAP_STOP to notify the ptracer of an event.  @t must have been
 807  * seized by the ptracer.
808  *
809  * If @t is running, STOP trap will be taken.  If trapped for STOP and
810  * ptracer is listening for events, tracee is woken up so that it can
811  * re-trap for the new event.  If trapped otherwise, STOP trap will be
812  * eventually taken without returning to userland after the existing traps
813  * are finished by PTRACE_CONT.
814  *
815  * CONTEXT:
 816  * Must be called with @t->sighand->siglock held.
817  */
818 static void ptrace_trap_notify(struct task_struct *t)
819 {
820         WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
821         assert_spin_locked(&t->sighand->siglock);
822 
823         task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
824         ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
825 }
826 
827 /*
828  * Handle magic process-wide effects of stop/continue signals. Unlike
829  * the signal actions, these happen immediately at signal-generation
830  * time regardless of blocking, ignoring, or handling.  This does the
831  * actual continuing for SIGCONT, but not the actual stopping for stop
832  * signals. The process stop is done as a signal action for SIG_DFL.
833  *
834  * Returns true if the signal should be actually delivered, otherwise
835  * it should be dropped.
836  */
837 static bool prepare_signal(int sig, struct task_struct *p, bool force)
838 {
839         struct signal_struct *signal = p->signal;
840         struct task_struct *t;
841         sigset_t flush;
842 
843         if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
844                 if (signal->flags & SIGNAL_GROUP_COREDUMP)
845                         return sig == SIGKILL;
846                 /*
847                  * The process is in the middle of dying, nothing to do.
848                  */
849         } else if (sig_kernel_stop(sig)) {
850                 /*
851                  * This is a stop signal.  Remove SIGCONT from all queues.
852                  */
853                 siginitset(&flush, sigmask(SIGCONT));
854                 flush_sigqueue_mask(&flush, &signal->shared_pending);
855                 for_each_thread(p, t)
856                         flush_sigqueue_mask(&flush, &t->pending);
857         } else if (sig == SIGCONT) {
858                 unsigned int why;
859                 /*
860                  * Remove all stop signals from all queues, wake all threads.
861                  */
862                 siginitset(&flush, SIG_KERNEL_STOP_MASK);
863                 flush_sigqueue_mask(&flush, &signal->shared_pending);
864                 for_each_thread(p, t) {
865                         flush_sigqueue_mask(&flush, &t->pending);
866                         task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
867                         if (likely(!(t->ptrace & PT_SEIZED)))
868                                 wake_up_state(t, __TASK_STOPPED);
869                         else
870                                 ptrace_trap_notify(t);
871                 }
872 
873                 /*
874                  * Notify the parent with CLD_CONTINUED if we were stopped.
875                  *
876                  * If we were in the middle of a group stop, we pretend it
877                  * was already finished, and then continued. Since SIGCHLD
878                  * doesn't queue we report only CLD_STOPPED, as if the next
879                  * CLD_CONTINUED was dropped.
880                  */
881                 why = 0;
882                 if (signal->flags & SIGNAL_STOP_STOPPED)
883                         why |= SIGNAL_CLD_CONTINUED;
884                 else if (signal->group_stop_count)
885                         why |= SIGNAL_CLD_STOPPED;
886 
887                 if (why) {
888                         /*
889                          * The first thread which returns from do_signal_stop()
890                          * will take ->siglock, notice SIGNAL_CLD_MASK, and
891                          * notify its parent. See get_signal_to_deliver().
892                          */
893                         signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
894                         signal->group_stop_count = 0;
895                         signal->group_exit_code = 0;
896                 }
897         }
898 
899         return !sig_ignored(p, sig, force);
900 }
901 
902 /*
903  * Test if P wants to take SIG.  After we've checked all threads with this,
904  * it's equivalent to finding no threads not blocking SIG.  Any threads not
905  * blocking SIG were ruled out because they are not running and already
906  * have pending signals.  Such threads will dequeue from the shared queue
907  * as soon as they're available, so putting the signal on the shared queue
908  * will be equivalent to sending it to one such thread.
909  */
910 static inline int wants_signal(int sig, struct task_struct *p)
911 {
912         if (sigismember(&p->blocked, sig))
913                 return 0;
914         if (p->flags & PF_EXITING)
915                 return 0;
916         if (sig == SIGKILL)
917                 return 1;
918         if (task_is_stopped_or_traced(p))
919                 return 0;
920         return task_curr(p) || !signal_pending(p);
921 }
922 
923 static void complete_signal(int sig, struct task_struct *p, int group)
924 {
925         struct signal_struct *signal = p->signal;
926         struct task_struct *t;
927 
928         /*
929          * Now find a thread we can wake up to take the signal off the queue.
930          *
931          * If the main thread wants the signal, it gets first crack.
932          * Probably the least surprising to the average bear.
933          */
934         if (wants_signal(sig, p))
935                 t = p;
936         else if (!group || thread_group_empty(p))
937                 /*
938                  * There is just one thread and it does not need to be woken.
939                  * It will dequeue unblocked signals before it runs again.
940                  */
941                 return;
942         else {
943                 /*
944                  * Otherwise try to find a suitable thread.
945                  */
946                 t = signal->curr_target;
947                 while (!wants_signal(sig, t)) {
948                         t = next_thread(t);
949                         if (t == signal->curr_target)
950                                 /*
951                                  * No thread needs to be woken.
952                                  * Any eligible threads will see
953                                  * the signal in the queue soon.
954                                  */
955                                 return;
956                 }
957                 signal->curr_target = t;
958         }
959 
960         /*
961          * Found a killable thread.  If the signal will be fatal,
962          * then start taking the whole group down immediately.
963          */
964         if (sig_fatal(p, sig) &&
965             !(signal->flags & SIGNAL_GROUP_EXIT) &&
966             !sigismember(&t->real_blocked, sig) &&
967             (sig == SIGKILL || !p->ptrace)) {
968                 /*
969                  * This signal will be fatal to the whole group.
970                  */
971                 if (!sig_kernel_coredump(sig)) {
972                         /*
973                          * Start a group exit and wake everybody up.
974                          * This way we don't have other threads
975                          * running and doing things after a slower
976                          * thread has the fatal signal pending.
977                          */
978                         signal->flags = SIGNAL_GROUP_EXIT;
979                         signal->group_exit_code = sig;
980                         signal->group_stop_count = 0;
981                         t = p;
982                         do {
983                                 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
984                                 sigaddset(&t->pending.signal, SIGKILL);
985                                 signal_wake_up(t, 1);
986                         } while_each_thread(p, t);
987                         return;
988                 }
989         }
990 
991         /*
992          * The signal is already in the shared-pending queue.
993          * Tell the chosen thread to wake up and dequeue it.
994          */
995         signal_wake_up(t, sig == SIGKILL);
996         return;
997 }
998 
999 static inline int legacy_queue(struct sigpending *signals, int sig)
1000 {
1001         return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1002 }
1003 
1004 #ifdef CONFIG_USER_NS
1005 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1006 {
1007         if (current_user_ns() == task_cred_xxx(t, user_ns))
1008                 return;
1009 
1010         if (SI_FROMKERNEL(info))
1011                 return;
1012 
1013         rcu_read_lock();
1014         info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1015                                         make_kuid(current_user_ns(), info->si_uid));
1016         rcu_read_unlock();
1017 }
1018 #else
1019 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1020 {
1021         return;
1022 }
1023 #endif
1024 
1025 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1026                         int group, int from_ancestor_ns)
1027 {
1028         struct sigpending *pending;
1029         struct sigqueue *q;
1030         int override_rlimit;
1031         int ret = 0, result;
1032 
1033         assert_spin_locked(&t->sighand->siglock);
1034 
1035         result = TRACE_SIGNAL_IGNORED;
1036         if (!prepare_signal(sig, t,
1037                         from_ancestor_ns || (info == SEND_SIG_FORCED)))
1038                 goto ret;
1039 
1040         pending = group ? &t->signal->shared_pending : &t->pending;
1041         /*
1042          * Short-circuit ignored signals and support queuing
1043          * exactly one non-rt signal, so that we can get more
1044          * detailed information about the cause of the signal.
1045          */
1046         result = TRACE_SIGNAL_ALREADY_PENDING;
1047         if (legacy_queue(pending, sig))
1048                 goto ret;
1049 
1050         result = TRACE_SIGNAL_DELIVERED;
1051         /*
1052          * fast-pathed signals for kernel-internal things like SIGSTOP
1053          * or SIGKILL.
1054          */
1055         if (info == SEND_SIG_FORCED)
1056                 goto out_set;
1057 
1058         /*
1059          * Real-time signals must be queued if sent by sigqueue, or
1060          * some other real-time mechanism.  It is implementation
1061          * defined whether kill() does so.  We attempt to do so, on
1062          * the principle of least surprise, but since kill is not
1063          * allowed to fail with EAGAIN when low on memory we just
1064          * make sure at least one signal gets delivered and don't
1065          * pass on the info struct.
1066          */
1067         if (sig < SIGRTMIN)
1068                 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1069         else
1070                 override_rlimit = 0;
1071 
1072         q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1073                 override_rlimit);
1074         if (q) {
1075                 list_add_tail(&q->list, &pending->list);
1076                 switch ((unsigned long) info) {
1077                 case (unsigned long) SEND_SIG_NOINFO:
1078                         q->info.si_signo = sig;
1079                         q->info.si_errno = 0;
1080                         q->info.si_code = SI_USER;
1081                         q->info.si_pid = task_tgid_nr_ns(current,
1082                                                         task_active_pid_ns(t));
1083                         q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1084                         break;
1085                 case (unsigned long) SEND_SIG_PRIV:
1086                         q->info.si_signo = sig;
1087                         q->info.si_errno = 0;
1088                         q->info.si_code = SI_KERNEL;
1089                         q->info.si_pid = 0;
1090                         q->info.si_uid = 0;
1091                         break;
1092                 default:
1093                         copy_siginfo(&q->info, info);
1094                         if (from_ancestor_ns)
1095                                 q->info.si_pid = 0;
1096                         break;
1097                 }
1098 
1099                 userns_fixup_signal_uid(&q->info, t);
1100 
1101         } else if (!is_si_special(info)) {
1102                 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1103                         /*
1104                          * Queue overflow, abort.  We may abort if the
1105                          * signal was rt and sent by user using something
1106                          * other than kill().
1107                          */
1108                         result = TRACE_SIGNAL_OVERFLOW_FAIL;
1109                         ret = -EAGAIN;
1110                         goto ret;
1111                 } else {
1112                         /*
1113                          * This is a silent loss of information.  We still
1114                          * send the signal, but the *info bits are lost.
1115                          */
1116                         result = TRACE_SIGNAL_LOSE_INFO;
1117                 }
1118         }
1119 
1120 out_set:
1121         signalfd_notify(t, sig);
1122         sigaddset(&pending->signal, sig);
1123         complete_signal(sig, t, group);
1124 ret:
1125         trace_signal_generate(sig, info, t, group, result);
1126         return ret;
1127 }
1128 
1129 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1130                         int group)
1131 {
1132         int from_ancestor_ns = 0;
1133 
1134 #ifdef CONFIG_PID_NS
1135         from_ancestor_ns = si_fromuser(info) &&
1136                            !task_pid_nr_ns(current, task_active_pid_ns(t));
1137 #endif
1138 
1139         return __send_signal(sig, info, t, group, from_ancestor_ns);
1140 }
1141 
1142 static void print_fatal_signal(int signr)
1143 {
1144         struct pt_regs *regs = signal_pt_regs();
1145         printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);
1146 
1147 #if defined(__i386__) && !defined(__arch_um__)
1148         printk(KERN_INFO "code at %08lx: ", regs->ip);
1149         {
1150                 int i;
1151                 for (i = 0; i < 16; i++) {
1152                         unsigned char insn;
1153 
1154                         if (get_user(insn, (unsigned char *)(regs->ip + i)))
1155                                 break;
1156                         printk(KERN_CONT "%02x ", insn);
1157                 }
1158         }
1159         printk(KERN_CONT "\n");
1160 #endif
1161         preempt_disable();
1162         show_regs(regs);
1163         preempt_enable();
1164 }
1165 
1166 static int __init setup_print_fatal_signals(char *str)
1167 {
1168         get_option (&str, &print_fatal_signals);
1169 
1170         return 1;
1171 }
1172 
1173 __setup("print-fatal-signals=", setup_print_fatal_signals);
1174 
1175 int
1176 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1177 {
1178         return send_signal(sig, info, p, 1);
1179 }
1180 
1181 static int
1182 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1183 {
1184         return send_signal(sig, info, t, 0);
1185 }
1186 
1187 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1188                         bool group)
1189 {
1190         unsigned long flags;
1191         int ret = -ESRCH;
1192 
1193         if (lock_task_sighand(p, &flags)) {
1194                 ret = send_signal(sig, info, p, group);
1195                 unlock_task_sighand(p, &flags);
1196         }
1197 
1198         return ret;
1199 }
1200 
1201 /*
1202  * Force a signal that the process can't ignore: if necessary
1203  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1204  *
1205  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1206  * since we do not want to have a signal handler that was blocked
1207  * be invoked when user space had explicitly blocked it.
1208  *
1209  * We don't want to have recursive SIGSEGV's etc, for example,
1210  * that is why we also clear SIGNAL_UNKILLABLE.
1211  */
1212 int
1213 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1214 {
1215         unsigned long int flags;
1216         int ret, blocked, ignored;
1217         struct k_sigaction *action;
1218 
1219         spin_lock_irqsave(&t->sighand->siglock, flags);
1220         action = &t->sighand->action[sig-1];
1221         ignored = action->sa.sa_handler == SIG_IGN;
1222         blocked = sigismember(&t->blocked, sig);
1223         if (blocked || ignored) {
1224                 action->sa.sa_handler = SIG_DFL;
1225                 if (blocked) {
1226                         sigdelset(&t->blocked, sig);
1227                         recalc_sigpending_and_wake(t);
1228                 }
1229         }
1230         if (action->sa.sa_handler == SIG_DFL)
1231                 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1232         ret = specific_send_sig_info(sig, info, t);
1233         spin_unlock_irqrestore(&t->sighand->siglock, flags);
1234 
1235         return ret;
1236 }
1237 
1238 /*
1239  * Nuke all other threads in the group.
1240  */
1241 int zap_other_threads(struct task_struct *p)
1242 {
1243         struct task_struct *t = p;
1244         int count = 0;
1245 
1246         p->signal->group_stop_count = 0;
1247 
1248         while_each_thread(p, t) {
1249                 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1250                 count++;
1251 
1252                 /* Don't bother with already dead threads */
1253                 if (t->exit_state)
1254                         continue;
1255                 sigaddset(&t->pending.signal, SIGKILL);
1256                 signal_wake_up(t, 1);
1257         }
1258 
1259         return count;
1260 }
1261 
1262 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1263                                            unsigned long *flags)
1264 {
1265         struct sighand_struct *sighand;
1266 
1267         for (;;) {
1268                 /*
1269                  * Disable interrupts early to avoid deadlocks.
1270                  * See rcu_read_unlock() comment header for details.
1271                  */
1272                 local_irq_save(*flags);
1273                 rcu_read_lock();
1274                 sighand = rcu_dereference(tsk->sighand);
1275                 if (unlikely(sighand == NULL)) {
1276                         rcu_read_unlock();
1277                         local_irq_restore(*flags);
1278                         break;
1279                 }
1280                 /*
1281                  * This sighand may already have been freed and even reused, but
1282                  * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
1283                  * initializes ->siglock: this slab can't go away, it has
1284                  * the same object type, ->siglock can't be reinitialized.
1285                  *
1286                  * We need to ensure that tsk->sighand is still the same
1287                  * after we take the lock, we can race with de_thread() or
1288                  * __exit_signal(). In the latter case the next iteration
1289                  * must see ->sighand == NULL.
1290                  */
1291                 spin_lock(&sighand->siglock);
1292                 if (likely(sighand == tsk->sighand)) {
1293                         rcu_read_unlock();
1294                         break;
1295                 }
1296                 spin_unlock(&sighand->siglock);
1297                 rcu_read_unlock();
1298                 local_irq_restore(*flags);
1299         }
1300 
1301         return sighand;
1302 }
1303 
1304 /*
1305  * send signal info to all the members of a group
1306  */
1307 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1308 {
1309         int ret;
1310 
1311         rcu_read_lock();
1312         ret = check_kill_permission(sig, info, p);
1313         rcu_read_unlock();
1314 
1315         if (!ret && sig)
1316                 ret = do_send_sig_info(sig, info, p, true);
1317 
1318         return ret;
1319 }
1320 
1321 /*
1322  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1323  * control characters do (^C, ^Z, etc.)
1324  * - the caller must hold at least a read lock on tasklist_lock
1325  */
1326 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1327 {
1328         struct task_struct *p = NULL;
1329         int retval, success;
1330 
1331         success = 0;
1332         retval = -ESRCH;
1333         do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1334                 int err = group_send_sig_info(sig, info, p);
1335                 success |= !err;
1336                 retval = err;
1337         } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1338         return success ? 0 : retval;
1339 }
1340 
1341 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1342 {
1343         int error = -ESRCH;
1344         struct task_struct *p;
1345 
1346         for (;;) {
1347                 rcu_read_lock();
1348                 p = pid_task(pid, PIDTYPE_PID);
1349                 if (p)
1350                         error = group_send_sig_info(sig, info, p);
1351                 rcu_read_unlock();
1352                 if (likely(!p || error != -ESRCH))
1353                         return error;
1354 
1355                 /*
1356                  * The task was unhashed in between, try again.  If it
1357                  * is dead, pid_task() will return NULL; if we race with
1358                  * de_thread(), it will find the new leader.
1359                  */
1360         }
1361 }
1362 
1363 int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1364 {
1365         int error;
1366         rcu_read_lock();
1367         error = kill_pid_info(sig, info, find_vpid(pid));
1368         rcu_read_unlock();
1369         return error;
1370 }
1371 
1372 static int kill_as_cred_perm(const struct cred *cred,
1373                              struct task_struct *target)
1374 {
1375         const struct cred *pcred = __task_cred(target);
1376         if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1377             !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
1378                 return 0;
1379         return 1;
1380 }
1381 
1382 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1383 int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1384                          const struct cred *cred, u32 secid)
1385 {
1386         int ret = -EINVAL;
1387         struct task_struct *p;
1388         unsigned long flags;
1389 
1390         if (!valid_signal(sig))
1391                 return ret;
1392 
1393         rcu_read_lock();
1394         p = pid_task(pid, PIDTYPE_PID);
1395         if (!p) {
1396                 ret = -ESRCH;
1397                 goto out_unlock;
1398         }
1399         if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1400                 ret = -EPERM;
1401                 goto out_unlock;
1402         }
1403         ret = security_task_kill(p, info, sig, secid);
1404         if (ret)
1405                 goto out_unlock;
1406 
1407         if (sig) {
1408                 if (lock_task_sighand(p, &flags)) {
1409                         ret = __send_signal(sig, info, p, 1, 0);
1410                         unlock_task_sighand(p, &flags);
1411                 } else
1412                         ret = -ESRCH;
1413         }
1414 out_unlock:
1415         rcu_read_unlock();
1416         return ret;
1417 }
1418 EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1419 
1420 /*
1421  * kill_something_info() interprets pid in interesting ways just like kill(2).
1422  *
1423  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1424  * is probably wrong.  Should make it like BSD or SYSV.
1425  */
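
     /*
      * For reference, the cases handled below (as implemented here, matching
      * the kill(2) conventions):
      *   pid >  0   signal the single process with that pid
      *   pid == 0   signal every process in the caller's process group
      *   pid <  -1  signal every process in the process group -pid
      *   pid == -1  attempt to signal every process except the init
      *              process and the caller's own thread group
      */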
1426 
1427 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1428 {
1429         int ret;
1430 
1431         if (pid > 0) {
1432                 rcu_read_lock();
1433                 ret = kill_pid_info(sig, info, find_vpid(pid));
1434                 rcu_read_unlock();
1435                 return ret;
1436         }
1437 
1438         read_lock(&tasklist_lock);
1439         if (pid != -1) {
1440                 ret = __kill_pgrp_info(sig, info,
1441                                 pid ? find_vpid(-pid) : task_pgrp(current));
1442         } else {
1443                 int retval = 0, count = 0;
1444                 struct task_struct * p;
1445 
1446                 for_each_process(p) {
1447                         if (task_pid_vnr(p) > 1 &&
1448                                         !same_thread_group(p, current)) {
1449                                 int err = group_send_sig_info(sig, info, p);
1450                                 ++count;
1451                                 if (err != -EPERM)
1452                                         retval = err;
1453                         }
1454                 }
1455                 ret = count ? retval : -ESRCH;
1456         }
1457         read_unlock(&tasklist_lock);
1458 
1459         return ret;
1460 }
1461 
1462 /*
1463  * These are for backward compatibility with the rest of the kernel source.
1464  */
1465 
1466 int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1467 {
1468         /*
1469          * Make sure legacy kernel users don't send in bad values
1470          * (normal paths check this in check_kill_permission).
1471          */
1472         if (!valid_signal(sig))
1473                 return -EINVAL;
1474 
1475         return do_send_sig_info(sig, info, p, false);
1476 }
1477 
1478 #define __si_special(priv) \
1479         ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1480 
1481 int
1482 send_sig(int sig, struct task_struct *p, int priv)
1483 {
1484         return send_sig_info(sig, __si_special(priv), p);
1485 }
1486 
1487 void
1488 force_sig(int sig, struct task_struct *p)
1489 {
1490         force_sig_info(sig, SEND_SIG_PRIV, p);
1491 }
1492 
1493 /*
1494  * When things go south during signal handling, we
1495  * will force a SIGSEGV. And if the signal that caused
1496  * the problem was already a SIGSEGV, we'll want to
1497  * make sure we don't even try to deliver the signal.
1498  */
1499 int
1500 force_sigsegv(int sig, struct task_struct *p)
1501 {
1502         if (sig == SIGSEGV) {
1503                 unsigned long flags;
1504                 spin_lock_irqsave(&p->sighand->siglock, flags);
1505                 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1506                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1507         }
1508         force_sig(SIGSEGV, p);
1509         return 0;
1510 }
1511 
1512 int kill_pgrp(struct pid *pid, int sig, int priv)
1513 {
1514         int ret;
1515 
1516         read_lock(&tasklist_lock);
1517         ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1518         read_unlock(&tasklist_lock);
1519 
1520         return ret;
1521 }
1522 EXPORT_SYMBOL(kill_pgrp);
1523 
1524 int kill_pid(struct pid *pid, int sig, int priv)
1525 {
1526         return kill_pid_info(sig, __si_special(priv), pid);
1527 }
1528 EXPORT_SYMBOL(kill_pid);
1529 
1530 /*
1531  * These functions support sending signals using preallocated sigqueue
1532  * structures.  This is needed "because realtime applications cannot
1533  * afford to lose notifications of asynchronous events, like timer
1534  * expirations or I/O completions".  In the case of POSIX Timers
1535  * we allocate the sigqueue structure from the timer_create.  If this
1536  * allocation fails we are able to report the failure to the application
1537  * with an EAGAIN error.
1538  */
1539 struct sigqueue *sigqueue_alloc(void)
1540 {
1541         struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1542 
1543         if (q)
1544                 q->flags |= SIGQUEUE_PREALLOC;
1545 
1546         return q;
1547 }
1548 
1549 void sigqueue_free(struct sigqueue *q)
1550 {
1551         unsigned long flags;
1552         spinlock_t *lock = &current->sighand->siglock;
1553 
1554         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1555         /*
1556          * We must hold ->siglock while testing q->list
1557          * to serialize with collect_signal() or with
1558          * __exit_signal()->flush_sigqueue().
1559          */
1560         spin_lock_irqsave(lock, flags);
1561         q->flags &= ~SIGQUEUE_PREALLOC;
1562         /*
1563          * If it is queued it will be freed when dequeued,
1564          * like the "regular" sigqueue.
1565          */
1566         if (!list_empty(&q->list))
1567                 q = NULL;
1568         spin_unlock_irqrestore(lock, flags);
1569 
1570         if (q)
1571                 __sigqueue_free(q);
1572 }
1573 
1574 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1575 {
1576         int sig = q->info.si_signo;
1577         struct sigpending *pending;
1578         unsigned long flags;
1579         int ret, result;
1580 
1581         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1582 
1583         ret = -1;
1584         if (!likely(lock_task_sighand(t, &flags)))
1585                 goto ret;
1586 
1587         ret = 1; /* the signal is ignored */
1588         result = TRACE_SIGNAL_IGNORED;
1589         if (!prepare_signal(sig, t, false))
1590                 goto out;
1591 
1592         ret = 0;
1593         if (unlikely(!list_empty(&q->list))) {
1594                 /*
1595                  * If an SI_TIMER entry is already queued, just increment
1596                  * the overrun count.
1597                  */
1598                 BUG_ON(q->info.si_code != SI_TIMER);
1599                 q->info.si_overrun++;
1600                 result = TRACE_SIGNAL_ALREADY_PENDING;
1601                 goto out;
1602         }
1603         q->info.si_overrun = 0;
1604 
1605         signalfd_notify(t, sig);
1606         pending = group ? &t->signal->shared_pending : &t->pending;
1607         list_add_tail(&q->list, &pending->list);
1608         sigaddset(&pending->signal, sig);
1609         complete_signal(sig, t, group);
1610         result = TRACE_SIGNAL_DELIVERED;
1611 out:
1612         trace_signal_generate(sig, &q->info, t, group, result);
1613         unlock_task_sighand(t, &flags);
1614 ret:
1615         return ret;
1616 }
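
/*
 * Illustrative sketch (not from this file): how a POSIX-timer style user of
 * the preallocated-sigqueue helpers above might tie them together.  The
 * "sketch_timer" structure and its callers are hypothetical.
 */
struct sketch_timer {
        struct sigqueue *sigq;
        struct task_struct *target;
};

static int sketch_timer_create(struct sketch_timer *tmr)
{
        /* allocate at creation time so firing never needs memory */
        tmr->sigq = sigqueue_alloc();
        if (!tmr->sigq)
                return -EAGAIN;
        tmr->sigq->info.si_code = SI_TIMER;
        return 0;
}

static void sketch_timer_fire(struct sketch_timer *tmr, int sig)
{
        tmr->sigq->info.si_signo = sig;
        /* queues the entry, or just bumps si_overrun if still queued */
        send_sigqueue(tmr->sigq, tmr->target, 1);
}

static void sketch_timer_destroy(struct sketch_timer *tmr)
{
        /* safe even if the entry is still on a pending list */
        sigqueue_free(tmr->sigq);
}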
1617 
1618 /*
1619  * Let a parent know about the death of a child.
1620  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1621  *
1622  * Returns true if our parent ignored us and so we've switched to
1623  * self-reaping.
1624  */
1625 bool do_notify_parent(struct task_struct *tsk, int sig)
1626 {
1627         struct siginfo info;
1628         unsigned long flags;
1629         struct sighand_struct *psig;
1630         bool autoreap = false;
1631         cputime_t utime, stime;
1632 
1633         BUG_ON(sig == -1);
1634 
1635         /* do_notify_parent_cldstop should have been called instead.  */
1636         BUG_ON(task_is_stopped_or_traced(tsk));
1637 
1638         BUG_ON(!tsk->ptrace &&
1639                (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1640 
1641         if (sig != SIGCHLD) {
1642                 /*
1643                  * This is only possible if parent == real_parent.
1644                  * Check if it has changed security domain.
1645                  */
1646                 if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1647                         sig = SIGCHLD;
1648         }
1649 
1650         info.si_signo = sig;
1651         info.si_errno = 0;
1652         /*
1653          * We are under tasklist_lock here so our parent is tied to
1654          * us and cannot change.
1655          *
1656          * task_active_pid_ns will always return the same pid namespace
1657          * until a task passes through release_task.
1658          *
1659          * write_lock() currently calls preempt_disable() which is the
1660          * same as rcu_read_lock(), but according to Oleg, it is not
1661          * correct to rely on this.
1662          */
1663         rcu_read_lock();
1664         info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1665         info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1666                                        task_uid(tsk));
1667         rcu_read_unlock();
1668 
1669         task_cputime(tsk, &utime, &stime);
1670         info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
1671         info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
1672 
1673         info.si_status = tsk->exit_code & 0x7f;
1674         if (tsk->exit_code & 0x80)
1675                 info.si_code = CLD_DUMPED;
1676         else if (tsk->exit_code & 0x7f)
1677                 info.si_code = CLD_KILLED;
1678         else {
1679                 info.si_code = CLD_EXITED;
1680                 info.si_status = tsk->exit_code >> 8;
1681         }
1682 
1683         psig = tsk->parent->sighand;
1684         spin_lock_irqsave(&psig->siglock, flags);
1685         if (!tsk->ptrace && sig == SIGCHLD &&
1686             (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1687              (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1688                 /*
1689                  * We are exiting and our parent doesn't care.  POSIX.1
1690                  * defines special semantics for setting SIGCHLD to SIG_IGN
1691                  * or setting the SA_NOCLDWAIT flag: we should be reaped
1692                  * automatically and not left for our parent's wait4 call.
1693                  * Rather than having the parent do it as a magic kind of
1694                  * signal handler, we just set this to tell do_exit that we
1695                  * can be cleaned up without becoming a zombie.  Note that
1696                  * we still call __wake_up_parent in this case, because a
1697                  * blocked sys_wait4 might now return -ECHILD.
1698                  *
1699                  * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1700                  * is implementation-defined: we do (if you don't want
1701                  * it, just use SIG_IGN instead).
1702                  */
1703                 autoreap = true;
1704                 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1705                         sig = 0;
1706         }
1707         if (valid_signal(sig) && sig)
1708                 __group_send_sig_info(sig, &info, tsk->parent);
1709         __wake_up_parent(tsk, tsk->parent);
1710         spin_unlock_irqrestore(&psig->siglock, flags);
1711 
1712         return autoreap;
1713 }
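
/*
 * Illustrative sketch (user space, not part of signal.c): the POSIX.1
 * semantics the autoreap path above implements.  With SIGCHLD set to
 * SIG_IGN (or SA_NOCLDWAIT), children are reaped without the parent's help.
 */
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>

static void autoreap_demo(void)
{
        signal(SIGCHLD, SIG_IGN);
        if (fork() == 0)
                _exit(0);               /* child never becomes a zombie */
        /* a blocked wait() eventually fails with errno == ECHILD */
        wait(NULL);
}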
1714 
1715 /**
1716  * do_notify_parent_cldstop - notify parent of stopped/continued state change
1717  * @tsk: task reporting the state change
1718  * @for_ptracer: the notification is for ptracer
1719  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1720  *
1721  * Notify @tsk's parent that the stopped/continued state has changed.  If
1722  * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1723  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1724  *
1725  * CONTEXT:
1726  * Must be called with tasklist_lock at least read locked.
1727  */
1728 static void do_notify_parent_cldstop(struct task_struct *tsk,
1729                                      bool for_ptracer, int why)
1730 {
1731         struct siginfo info;
1732         unsigned long flags;
1733         struct task_struct *parent;
1734         struct sighand_struct *sighand;
1735         cputime_t utime, stime;
1736 
1737         if (for_ptracer) {
1738                 parent = tsk->parent;
1739         } else {
1740                 tsk = tsk->group_leader;
1741                 parent = tsk->real_parent;
1742         }
1743 
1744         info.si_signo = SIGCHLD;
1745         info.si_errno = 0;
1746         /*
1747          * see comment in do_notify_parent() about the following 4 lines
1748          */
1749         rcu_read_lock();
1750         info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1751         info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1752         rcu_read_unlock();
1753 
1754         task_cputime(tsk, &utime, &stime);
1755         info.si_utime = cputime_to_clock_t(utime);
1756         info.si_stime = cputime_to_clock_t(stime);
1757 
1758         info.si_code = why;
1759         switch (why) {
1760         case CLD_CONTINUED:
1761                 info.si_status = SIGCONT;
1762                 break;
1763         case CLD_STOPPED:
1764                 info.si_status = tsk->signal->group_exit_code & 0x7f;
1765                 break;
1766         case CLD_TRAPPED:
1767                 info.si_status = tsk->exit_code & 0x7f;
1768                 break;
1769         default:
1770                 BUG();
1771         }
1772 
1773         sighand = parent->sighand;
1774         spin_lock_irqsave(&sighand->siglock, flags);
1775         if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1776             !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1777                 __group_send_sig_info(SIGCHLD, &info, parent);
1778         /*
1779          * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1780          */
1781         __wake_up_parent(tsk, parent);
1782         spin_unlock_irqrestore(&sighand->siglock, flags);
1783 }
1784 
1785 static inline int may_ptrace_stop(void)
1786 {
1787         if (!likely(current->ptrace))
1788                 return 0;
1789         /*
1790          * Are we in the middle of do_coredump?
1791          * If so, and our tracer is also part of the coredump, stopping
1792          * is a deadlock situation and pointless because our tracer
1793          * is dead, so don't allow us to stop.
1794          * If SIGKILL was already sent before the caller unlocked
1795          * ->siglock we must see ->core_state != NULL. Otherwise it
1796          * is safe to enter schedule().
1797          *
1798          * This is almost outdated, a task with the pending SIGKILL can't
1799          * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1800          * after SIGKILL was already dequeued.
1801          */
1802         if (unlikely(current->mm->core_state) &&
1803             unlikely(current->mm == current->parent->mm))
1804                 return 0;
1805 
1806         return 1;
1807 }
1808 
1809 /*
1810  * Return non-zero if there is a SIGKILL that should be waking us up.
1811  * Called with the siglock held.
1812  */
1813 static int sigkill_pending(struct task_struct *tsk)
1814 {
1815         return  sigismember(&tsk->pending.signal, SIGKILL) ||
1816                 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1817 }
1818 
1819 /*
1820  * This must be called with current->sighand->siglock held.
1821  *
1822  * This should be the path for all ptrace stops.
1823  * We always set current->last_siginfo while stopped here.
1824  * That makes it a way to test a stopped process for
1825  * being ptrace-stopped vs being job-control-stopped.
1826  *
1827  * If we actually decide not to stop at all because the tracer
1828  * is gone, we keep current->exit_code unless clear_code.
1829  */
1830 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1831         __releases(&current->sighand->siglock)
1832         __acquires(&current->sighand->siglock)
1833 {
1834         bool gstop_done = false;
1835 
1836         if (arch_ptrace_stop_needed(exit_code, info)) {
1837                 /*
1838                  * The arch code has something special to do before a
1839                  * ptrace stop.  This is allowed to block, e.g. for faults
1840                  * on user stack pages.  We can't keep the siglock while
1841                  * calling arch_ptrace_stop, so we must release it now.
1842                  * To preserve proper semantics, we must do this before
1843                  * any signal bookkeeping like checking group_stop_count.
1844                  * Meanwhile, a SIGKILL could come in before we retake the
1845                  * siglock.  That must prevent us from sleeping in TASK_TRACED.
1846                  * So after regaining the lock, we must check for SIGKILL.
1847                  */
1848                 spin_unlock_irq(&current->sighand->siglock);
1849                 arch_ptrace_stop(exit_code, info);
1850                 spin_lock_irq(&current->sighand->siglock);
1851                 if (sigkill_pending(current))
1852                         return;
1853         }
1854 
1855         /*
1856          * We're committing to trapping.  TRACED should be visible before
1857          * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1858          * Also, transition to TRACED and updates to ->jobctl should be
1859          * atomic with respect to siglock and should be done after the arch
1860          * hook as siglock is released and regrabbed across it.
1861          */
1862         set_current_state(TASK_TRACED);
1863 
1864         current->last_siginfo = info;
1865         current->exit_code = exit_code;
1866 
1867         /*
1868          * If @why is CLD_STOPPED, we're trapping to participate in a group
1869          * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
1870          * across siglock relocks since INTERRUPT was scheduled, PENDING
1871          * could be clear now.  We act as if SIGCONT is received after
1872          * TASK_TRACED is entered - ignore it.
1873          */
1874         if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1875                 gstop_done = task_participate_group_stop(current);
1876 
1877         /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1878         task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1879         if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1880                 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1881 
1882         /* entering a trap, clear TRAPPING */
1883         task_clear_jobctl_trapping(current);
1884 
1885         spin_unlock_irq(&current->sighand->siglock);
1886         read_lock(&tasklist_lock);
1887         if (may_ptrace_stop()) {
1888                 /*
1889                  * Notify parents of the stop.
1890                  *
1891                  * While ptraced, there are two parents - the ptracer and
1892                  * the real_parent of the group_leader.  The ptracer should
1893                  * know about every stop while the real parent is only
1894                  * interested in the completion of group stop.  The states
1895                  * for the two don't interact with each other.  Notify
1896                  * separately unless they're gonna be duplicates.
1897                  */
1898                 do_notify_parent_cldstop(current, true, why);
1899                 if (gstop_done && ptrace_reparented(current))
1900                         do_notify_parent_cldstop(current, false, why);
1901 
1902                 /*
1903                  * Don't want to allow preemption here, because
1904                  * sys_ptrace() needs this task to be inactive.
1905                  *
1906                  * XXX: implement read_unlock_no_resched().
1907                  */
1908                 preempt_disable();
1909                 read_unlock(&tasklist_lock);
1910                 preempt_enable_no_resched();
1911                 freezable_schedule();
1912         } else {
1913                 /*
1914                  * By the time we got the lock, our tracer went away.
1915                  * Don't drop the lock yet, another tracer may come.
1916                  *
1917                  * If @gstop_done, the ptracer went away between group stop
1918                  * completion and here.  During detach, it would have set
1919                  * JOBCTL_STOP_PENDING on us and we'll re-enter
1920                  * TASK_STOPPED in do_signal_stop() on return, so notifying
1921                  * the real parent of the group stop completion is enough.
1922                  */
1923                 if (gstop_done)
1924                         do_notify_parent_cldstop(current, false, why);
1925 
1926                 /* tasklist protects us from ptrace_freeze_traced() */
1927                 __set_current_state(TASK_RUNNING);
1928                 if (clear_code)
1929                         current->exit_code = 0;
1930                 read_unlock(&tasklist_lock);
1931         }
1932 
1933         /*
1934          * We are back.  Now reacquire the siglock before touching
1935          * last_siginfo, so that we are sure to have synchronized with
1936          * any signal-sending on another CPU that wants to examine it.
1937          */
1938         spin_lock_irq(&current->sighand->siglock);
1939         current->last_siginfo = NULL;
1940 
1941         /* LISTENING can be set only during STOP traps, clear it */
1942         current->jobctl &= ~JOBCTL_LISTENING;
1943 
1944         /*
1945          * Queued signals ignored us while we were stopped for tracing.
1946          * So check for any that we should take before resuming user mode.
1947          * This sets TIF_SIGPENDING, but never clears it.
1948          */
1949         recalc_sigpending_tsk(current);
1950 }
1951 
1952 static void ptrace_do_notify(int signr, int exit_code, int why)
1953 {
1954         siginfo_t info;
1955 
1956         memset(&info, 0, sizeof info);
1957         info.si_signo = signr;
1958         info.si_code = exit_code;
1959         info.si_pid = task_pid_vnr(current);
1960         info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1961 
1962         /* Let the debugger run.  */
1963         ptrace_stop(exit_code, why, 1, &info);
1964 }
1965 
1966 void ptrace_notify(int exit_code)
1967 {
1968         BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1969         if (unlikely(current->task_works))
1970                 task_work_run();
1971 
1972         spin_lock_irq(&current->sighand->siglock);
1973         ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1974         spin_unlock_irq(&current->sighand->siglock);
1975 }
1976 
1977 /**
1978  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1979  * @signr: signr causing group stop if initiating
1980  *
1981  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1982  * and participate in it.  If already set, participate in the existing
1983  * group stop.  If participated in a group stop (and thus slept), %true is
1984  * returned with siglock released.
1985  *
1986  * If ptraced, this function doesn't handle stop itself.  Instead,
1987  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1988  * untouched.  The caller must ensure that INTERRUPT trap handling takes
1989  * place afterwards.
1990  *
1991  * CONTEXT:
1992  * Must be called with @current->sighand->siglock held, which is released
1993  * on %true return.
1994  *
1995  * RETURNS:
1996  * %false if group stop is already cancelled or ptrace trap is scheduled.
1997  * %true if participated in group stop.
1998  */
1999 static bool do_signal_stop(int signr)
2000         __releases(&current->sighand->siglock)
2001 {
2002         struct signal_struct *sig = current->signal;
2003 
2004         if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2005                 unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2006                 struct task_struct *t;
2007 
2008                 /* signr will be recorded in task->jobctl for retries */
2009                 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2010 
2011                 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2012                     unlikely(signal_group_exit(sig)))
2013                         return false;
2014                 /*
2015                  * There is no group stop already in progress.  We must
2016                  * initiate one now.
2017                  *
2018                  * While ptraced, a task may be resumed while group stop is
2019                  * still in effect and then receive a stop signal and
2020                  * initiate another group stop.  This deviates from the
2021                  * usual behavior as two consecutive stop signals can't
2022                  * cause two group stops when !ptraced.  That is why we
2023                  * also check !task_is_stopped(t) below.
2024                  *
2025                  * The condition can be distinguished by testing whether
2026                  * SIGNAL_STOP_STOPPED is already set.  Don't generate
2027                  * group_exit_code in such case.
2028                  *
2029                  * This is not necessary for SIGNAL_STOP_CONTINUED because
2030                  * an intervening stop signal is required to cause two
2031                  * continued events regardless of ptrace.
2032                  */
2033                 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2034                         sig->group_exit_code = signr;
2035 
2036                 sig->group_stop_count = 0;
2037 
2038                 if (task_set_jobctl_pending(current, signr | gstop))
2039                         sig->group_stop_count++;
2040 
2041                 t = current;
2042                 while_each_thread(current, t) {
2043                         /*
2044                          * Setting state to TASK_STOPPED for a group
2045                          * stop is always done with the siglock held,
2046                          * so this check has no races.
2047                          */
2048                         if (!task_is_stopped(t) &&
2049                             task_set_jobctl_pending(t, signr | gstop)) {
2050                                 sig->group_stop_count++;
2051                                 if (likely(!(t->ptrace & PT_SEIZED)))
2052                                         signal_wake_up(t, 0);
2053                                 else
2054                                         ptrace_trap_notify(t);
2055                         }
2056                 }
2057         }
2058 
2059         if (likely(!current->ptrace)) {
2060                 int notify = 0;
2061 
2062                 /*
2063                  * If there are no other threads in the group, or if there
2064                  * is a group stop in progress and we are the last to stop,
2065                  * report to the parent.
2066                  */
2067                 if (task_participate_group_stop(current))
2068                         notify = CLD_STOPPED;
2069 
2070                 __set_current_state(TASK_STOPPED);
2071                 spin_unlock_irq(&current->sighand->siglock);
2072 
2073                 /*
2074                  * Notify the parent of the group stop completion.  Because
2075                  * we're not holding either the siglock or tasklist_lock
2076                  * here, a ptracer may attach in between; however, this is for
2077                  * group stop and should always be delivered to the real
2078                  * parent of the group leader.  The new ptracer will get
2079                  * its notification when this task transitions into
2080                  * TASK_TRACED.
2081                  */
2082                 if (notify) {
2083                         read_lock(&tasklist_lock);
2084                         do_notify_parent_cldstop(current, false, notify);
2085                         read_unlock(&tasklist_lock);
2086                 }
2087 
2088                 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2089                 freezable_schedule();
2090                 return true;
2091         } else {
2092                 /*
2093                  * While ptraced, group stop is handled by STOP trap.
2094                  * Schedule it and let the caller deal with it.
2095                  */
2096                 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2097                 return false;
2098         }
2099 }
2100 
2101 /**
2102  * do_jobctl_trap - take care of ptrace jobctl traps
2103  *
2104  * When PT_SEIZED, it's used for both group stop and explicit
2105  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2106  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2107  * the stop signal; otherwise, %SIGTRAP.
2108  *
2109  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2110  * number as exit_code and no siginfo.
2111  *
2112  * CONTEXT:
2113  * Must be called with @current->sighand->siglock held, which may be
2114  * released and re-acquired before returning with intervening sleep.
2115  */
2116 static void do_jobctl_trap(void)
2117 {
2118         struct signal_struct *signal = current->signal;
2119         int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2120 
2121         if (current->ptrace & PT_SEIZED) {
2122                 if (!signal->group_stop_count &&
2123                     !(signal->flags & SIGNAL_STOP_STOPPED))
2124                         signr = SIGTRAP;
2125                 WARN_ON_ONCE(!signr);
2126                 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2127                                  CLD_STOPPED);
2128         } else {
2129                 WARN_ON_ONCE(!signr);
2130                 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2131                 current->exit_code = 0;
2132         }
2133 }
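
/*
 * Illustrative sketch (user space, not part of signal.c): how the
 * PTRACE_EVENT_STOP trap generated above looks to a PTRACE_SEIZE tracer.
 * Header availability of PTRACE_EVENT_STOP varies between libc versions.
 */
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>

static int wait_for_event_stop(pid_t pid)
{
        int status;

        ptrace(PTRACE_SEIZE, pid, 0, 0);
        ptrace(PTRACE_INTERRUPT, pid, 0, 0);
        waitpid(pid, &status, 0);

        /* group-stop/INTERRUPT traps carry PTRACE_EVENT_STOP in status>>8 */
        return WIFSTOPPED(status) &&
               (status >> 8) == (SIGTRAP | (PTRACE_EVENT_STOP << 8));
}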
2134 
2135 static int ptrace_signal(int signr, siginfo_t *info)
2136 {
2137         ptrace_signal_deliver();
2138         /*
2139          * We do not check sig_kernel_stop(signr) but set this marker
2140          * unconditionally because we do not know whether the debugger will
2141          * change signr. This flag has no meaning unless we are going
2142          * to stop after return from ptrace_stop(). In this case it will
2143          * be checked in do_signal_stop(), we should only stop if it was
2144          * not cleared by SIGCONT while we were sleeping. See also the
2145          * comment in dequeue_signal().
2146          */
2147         current->jobctl |= JOBCTL_STOP_DEQUEUED;
2148         ptrace_stop(signr, CLD_TRAPPED, 0, info);
2149 
2150         /* We're back.  Did the debugger cancel the sig?  */
2151         signr = current->exit_code;
2152         if (signr == 0)
2153                 return signr;
2154 
2155         current->exit_code = 0;
2156 
2157         /*
2158          * Update the siginfo structure if the signal has
2159          * changed.  If the debugger wanted something
2160          * specific in the siginfo structure then it should
2161          * have updated *info via PTRACE_SETSIGINFO.
2162          */
2163         if (signr != info->si_signo) {
2164                 info->si_signo = signr;
2165                 info->si_errno = 0;
2166                 info->si_code = SI_USER;
2167                 rcu_read_lock();
2168                 info->si_pid = task_pid_vnr(current->parent);
2169                 info->si_uid = from_kuid_munged(current_user_ns(),
2170                                                 task_uid(current->parent));
2171                 rcu_read_unlock();
2172         }
2173 
2174         /* If the (new) signal is now blocked, requeue it.  */
2175         if (sigismember(&current->blocked, signr)) {
2176                 specific_send_sig_info(signr, info, current);
2177                 signr = 0;
2178         }
2179 
2180         return signr;
2181 }
2182 
2183 int get_signal(struct ksignal *ksig)
2184 {
2185         struct sighand_struct *sighand = current->sighand;
2186         struct signal_struct *signal = current->signal;
2187         int signr;
2188 
2189         if (unlikely(current->task_works))
2190                 task_work_run();
2191 
2192         if (unlikely(uprobe_deny_signal()))
2193                 return 0;
2194 
2195         /*
2196          * Do this once, we can't return to user-mode if freezing() == T.
2197          * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2198          * thus do not need another check after return.
2199          */
2200         try_to_freeze();
2201 
2202 relock:
2203         spin_lock_irq(&sighand->siglock);
2204         /*
2205          * Every stopped thread goes here after wakeup. Check to see if
2206          * we should notify the parent; prepare_signal(SIGCONT) encodes
2207          * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2208          */
2209         if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2210                 int why;
2211 
2212                 if (signal->flags & SIGNAL_CLD_CONTINUED)
2213                         why = CLD_CONTINUED;
2214                 else
2215                         why = CLD_STOPPED;
2216 
2217                 signal->flags &= ~SIGNAL_CLD_MASK;
2218 
2219                 spin_unlock_irq(&sighand->siglock);
2220 
2221                 /*
2222                  * Notify the parent that we're continuing.  This event is
2223                  * always per-process and doesn't make a whole lot of sense
2224                  * for ptracers, who shouldn't consume the state via
2225                  * wait(2) either, but, for backward compatibility, notify
2226                  * the ptracer of the group leader too unless it's gonna be
2227                  * a duplicate.
2228                  */
2229                 read_lock(&tasklist_lock);
2230                 do_notify_parent_cldstop(current, false, why);
2231 
2232                 if (ptrace_reparented(current->group_leader))
2233                         do_notify_parent_cldstop(current->group_leader,
2234                                                 true, why);
2235                 read_unlock(&tasklist_lock);
2236 
2237                 goto relock;
2238         }
2239 
2240         for (;;) {
2241                 struct k_sigaction *ka;
2242 
2243                 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2244                     do_signal_stop(0))
2245                         goto relock;
2246 
2247                 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2248                         do_jobctl_trap();
2249                         spin_unlock_irq(&sighand->siglock);
2250                         goto relock;
2251                 }
2252 
2253                 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2254 
2255                 if (!signr)
2256                         break; /* will return 0 */
2257 
2258                 if (unlikely(current->ptrace) && signr != SIGKILL) {
2259                         signr = ptrace_signal(signr, &ksig->info);
2260                         if (!signr)
2261                                 continue;
2262                 }
2263 
2264                 ka = &sighand->action[signr-1];
2265 
2266                 /* Trace actually delivered signals. */
2267                 trace_signal_deliver(signr, &ksig->info, ka);
2268 
2269                 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2270                         continue;
2271                 if (ka->sa.sa_handler != SIG_DFL) {
2272                         /* Run the handler.  */
2273                         ksig->ka = *ka;
2274 
2275                         if (ka->sa.sa_flags & SA_ONESHOT)
2276                                 ka->sa.sa_handler = SIG_DFL;
2277 
2278                         break; /* will return non-zero "signr" value */
2279                 }
2280 
2281                 /*
2282                  * Now we are doing the default action for this signal.
2283                  */
2284                 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2285                         continue;
2286 
2287                 /*
2288                  * Global init gets no signals it doesn't want.
2289                  * Container-init gets no signals it doesn't want from same
2290                  * container.
2291                  *
2292                  * Note that if global/container-init sees a sig_kernel_only()
2293                  * signal here, the signal must have been generated internally
2294                  * or must have come from an ancestor namespace. In either
2295                  * case, the signal cannot be dropped.
2296                  */
2297                 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2298                                 !sig_kernel_only(signr))
2299                         continue;
2300 
2301                 if (sig_kernel_stop(signr)) {
2302                         /*
2303                          * The default action is to stop all threads in
2304                          * the thread group.  The job control signals
2305                          * do nothing in an orphaned pgrp, but SIGSTOP
2306                          * always works.  Note that siglock needs to be
2307                          * dropped during the call to is_orphaned_pgrp()
2308                          * because of lock ordering with tasklist_lock.
2309                          * This allows an intervening SIGCONT to be posted.
2310                          * We need to check for that and bail out if necessary.
2311                          */
2312                         if (signr != SIGSTOP) {
2313                                 spin_unlock_irq(&sighand->siglock);
2314 
2315                                 /* signals can be posted during this window */
2316 
2317                                 if (is_current_pgrp_orphaned())
2318                                         goto relock;
2319 
2320                                 spin_lock_irq(&sighand->siglock);
2321                         }
2322 
2323                         if (likely(do_signal_stop(ksig->info.si_signo))) {
2324                                 /* It released the siglock.  */
2325                                 goto relock;
2326                         }
2327 
2328                         /*
2329                          * We didn't actually stop, due to a race
2330                          * with SIGCONT or something like that.
2331                          */
2332                         continue;
2333                 }
2334 
2335                 spin_unlock_irq(&sighand->siglock);
2336 
2337                 /*
2338                  * Anything else is fatal, maybe with a core dump.
2339                  */
2340                 current->flags |= PF_SIGNALED;
2341 
2342                 if (sig_kernel_coredump(signr)) {
2343                         if (print_fatal_signals)
2344                                 print_fatal_signal(ksig->info.si_signo);
2345                         proc_coredump_connector(current);
2346                         /*
2347                          * If it was able to dump core, this kills all
2348                          * other threads in the group and synchronizes with
2349                          * their demise.  If we lost the race with another
2350                          * thread getting here, it set group_exit_code
2351                          * first and our do_group_exit call below will use
2352                          * that value and ignore the one we pass it.
2353                          */
2354                         do_coredump(&ksig->info);
2355                 }
2356 
2357                 /*
2358                  * Death signals, no core dump.
2359                  */
2360                 do_group_exit(ksig->info.si_signo);
2361                 /* NOTREACHED */
2362         }
2363         spin_unlock_irq(&sighand->siglock);
2364 
2365         ksig->sig = signr;
2366         return ksig->sig > 0;
2367 }
2368 
2369 /**
2370  * signal_delivered - update state after a signal has been delivered
2371  * @ksig:               kernel signal struct
2372  * @stepping:           nonzero if debugger single-step or block-step in use
2373  *
2374  * This function should be called when a signal has successfully been
2375  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2376  * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2377  * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
2378  */
2379 static void signal_delivered(struct ksignal *ksig, int stepping)
2380 {
2381         sigset_t blocked;
2382 
2383         /* A signal was successfully delivered, and the
2384            saved sigmask was stored on the signal frame,
2385            and will be restored by sigreturn.  So we can
2386            simply clear the restore sigmask flag.  */
2387         clear_restore_sigmask();
2388 
2389         sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2390         if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2391                 sigaddset(&blocked, ksig->sig);
2392         set_current_blocked(&blocked);
2393         tracehook_signal_handler(stepping);
2394 }
2395 
2396 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2397 {
2398         if (failed)
2399                 force_sigsegv(ksig->sig, current);
2400         else
2401                 signal_delivered(ksig, stepping);
2402 }
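
/*
 * Illustrative sketch (not arch code from this tree): the shape of the
 * contract between an architecture's signal-delivery path and
 * get_signal()/signal_setup_done() above.  setup_user_frame() is a
 * hypothetical stand-in for the arch routine that builds the sigframe.
 */
static void arch_do_signal_sketch(struct pt_regs *regs)
{
        struct ksignal ksig;

        if (get_signal(&ksig)) {
                /* non-zero signr: push a frame and run the user handler */
                int failed = setup_user_frame(&ksig, regs);

                signal_setup_done(failed, &ksig, 0 /* not single-stepping */);
                return;
        }
        /* no signal: handle syscall restart and return to user mode */
}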
2403 
2404 /*
2405  * It could be that complete_signal() picked us to notify about the
2406  * group-wide signal. Other threads should be notified now to take
2407  * the shared signals in @which since we will not.
2408  */
2409 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2410 {
2411         sigset_t retarget;
2412         struct task_struct *t;
2413 
2414         sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2415         if (sigisemptyset(&retarget))
2416                 return;
2417 
2418         t = tsk;
2419         while_each_thread(tsk, t) {
2420                 if (t->flags & PF_EXITING)
2421                         continue;
2422 
2423                 if (!has_pending_signals(&retarget, &t->blocked))
2424                         continue;
2425                 /* Remove the signals this thread can handle. */
2426                 sigandsets(&retarget, &retarget, &t->blocked);
2427 
2428                 if (!signal_pending(t))
2429                         signal_wake_up(t, 0);
2430 
2431                 if (sigisemptyset(&retarget))
2432                         break;
2433         }
2434 }
2435 
2436 void exit_signals(struct task_struct *tsk)
2437 {
2438         int group_stop = 0;
2439         sigset_t unblocked;
2440 
2441         /*
2442          * @tsk is about to have PF_EXITING set - lock out users which
2443          * expect stable threadgroup.
2444          */
2445         threadgroup_change_begin(tsk);
2446 
2447         if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2448                 tsk->flags |= PF_EXITING;
2449                 threadgroup_change_end(tsk);
2450                 return;
2451         }
2452 
2453         spin_lock_irq(&tsk->sighand->siglock);
2454         /*
2455          * From now this task is not visible for group-wide signals,
2456          * see wants_signal(), do_signal_stop().
2457          */
2458         tsk->flags |= PF_EXITING;
2459 
2460         threadgroup_change_end(tsk);
2461 
2462         if (!signal_pending(tsk))
2463                 goto out;
2464 
2465         unblocked = tsk->blocked;
2466         signotset(&unblocked);
2467         retarget_shared_pending(tsk, &unblocked);
2468 
2469         if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2470             task_participate_group_stop(tsk))
2471                 group_stop = CLD_STOPPED;
2472 out:
2473         spin_unlock_irq(&tsk->sighand->siglock);
2474 
2475         /*
2476          * If group stop has completed, deliver the notification.  This
2477          * should always go to the real parent of the group leader.
2478          */
2479         if (unlikely(group_stop)) {
2480                 read_lock(&tasklist_lock);
2481                 do_notify_parent_cldstop(tsk, false, group_stop);
2482                 read_unlock(&tasklist_lock);
2483         }
2484 }
2485 
2486 EXPORT_SYMBOL(recalc_sigpending);
2487 EXPORT_SYMBOL_GPL(dequeue_signal);
2488 EXPORT_SYMBOL(flush_signals);
2489 EXPORT_SYMBOL(force_sig);
2490 EXPORT_SYMBOL(send_sig);
2491 EXPORT_SYMBOL(send_sig_info);
2492 EXPORT_SYMBOL(sigprocmask);
2493 EXPORT_SYMBOL(block_all_signals);
2494 EXPORT_SYMBOL(unblock_all_signals);
2495 
2496 
2497 /*
2498  * System call entry points.
2499  */
2500 
2501 /**
2502  *  sys_restart_syscall - restart a system call
2503  */
2504 SYSCALL_DEFINE0(restart_syscall)
2505 {
2506         struct restart_block *restart = &current->restart_block;
2507         return restart->fn(restart);
2508 }
2509 
2510 long do_no_restart_syscall(struct restart_block *param)
2511 {
2512         return -EINTR;
2513 }
2514 
2515 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2516 {
2517         if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2518                 sigset_t newblocked;
2519                 /* A set of now blocked but previously unblocked signals. */
2520                 sigandnsets(&newblocked, newset, &current->blocked);
2521                 retarget_shared_pending(tsk, &newblocked);
2522         }
2523         tsk->blocked = *newset;
2524         recalc_sigpending();
2525 }
2526 
2527 /**
2528  * set_current_blocked - change current->blocked mask
2529  * @newset: new mask
2530  *
2531  * It is wrong to change ->blocked directly, this helper should be used
2532  * to ensure the process can't miss a shared signal we are going to block.
2533  */
2534 void set_current_blocked(sigset_t *newset)
2535 {
2536         sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2537         __set_current_blocked(newset);
2538 }
2539 
2540 void __set_current_blocked(const sigset_t *newset)
2541 {
2542         struct task_struct *tsk = current;
2543 
2544         spin_lock_irq(&tsk->sighand->siglock);
2545         __set_task_blocked(tsk, newset);
2546         spin_unlock_irq(&tsk->sighand->siglock);
2547 }
2548 
2549 /*
2550  * This is also useful for kernel threads that want to temporarily
2551  * (or permanently) block certain signals.
2552  *
2553  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2554  * interface happily blocks "unblockable" signals like SIGKILL
2555  * and friends.
2556  */
2557 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2558 {
2559         struct task_struct *tsk = current;
2560         sigset_t newset;
2561 
2562         /* Lockless, only current can change ->blocked, never from irq */
2563         if (oldset)
2564                 *oldset = tsk->blocked;
2565 
2566         switch (how) {
2567         case SIG_BLOCK:
2568                 sigorsets(&newset, &tsk->blocked, set);
2569                 break;
2570         case SIG_UNBLOCK:
2571                 sigandnsets(&newset, &tsk->blocked, set);
2572                 break;
2573         case SIG_SETMASK:
2574                 newset = *set;
2575                 break;
2576         default:
2577                 return -EINVAL;
2578         }
2579 
2580         __set_current_blocked(&newset);
2581         return 0;
2582 }
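
/*
 * Illustrative sketch: a kernel thread using the in-kernel sigprocmask()
 * above to block everything, including the "unblockable" signals the
 * user-mode interface filters out.
 */
static void kthread_block_all_signals_sketch(void)
{
        sigset_t all;

        sigfillset(&all);
        sigprocmask(SIG_BLOCK, &all, NULL);
}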
2583 
2584 /**
2585  *  sys_rt_sigprocmask - change the list of currently blocked signals
2586  *  @how: whether to add, remove, or set signals
2587  *  @nset: the new signal mask, or NULL to leave the mask unchanged
2588  *  @oset: previous value of signal mask if non-null
2589  *  @sigsetsize: size of sigset_t type
2590  */
2591 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2592                 sigset_t __user *, oset, size_t, sigsetsize)
2593 {
2594         sigset_t old_set, new_set;
2595         int error;
2596 
2597         /* XXX: Don't preclude handling different sized sigset_t's.  */
2598         if (sigsetsize != sizeof(sigset_t))
2599                 return -EINVAL;
2600 
2601         old_set = current->blocked;
2602 
2603         if (nset) {
2604                 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2605                         return -EFAULT;
2606                 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2607 
2608                 error = sigprocmask(how, &new_set, NULL);
2609                 if (error)
2610                         return error;
2611         }
2612 
2613         if (oset) {
2614                 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2615                         return -EFAULT;
2616         }
2617 
2618         return 0;
2619 }
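
/*
 * Illustrative sketch (user space, not part of signal.c): sigprocmask(2)
 * and pthread_sigmask(3) are typically implemented on top of
 * rt_sigprocmask above.  Typical SIG_BLOCK usage:
 */
#include <signal.h>

static void block_sigint(sigset_t *saved)
{
        sigset_t set;

        sigemptyset(&set);
        sigaddset(&set, SIGINT);
        sigprocmask(SIG_BLOCK, &set, saved);    /* old mask kept for restore */
}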
2620 
2621 #ifdef CONFIG_COMPAT
2622 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2623                 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2624 {
2625 #ifdef __BIG_ENDIAN
2626         sigset_t old_set = current->blocked;
2627 
2628         /* XXX: Don't preclude handling different sized sigset_t's.  */
2629         if (sigsetsize != sizeof(sigset_t))
2630                 return -EINVAL;
2631 
2632         if (nset) {
2633                 compat_sigset_t new32;
2634                 sigset_t new_set;
2635                 int error;
2636                 if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2637                         return -EFAULT;
2638 
2639                 sigset_from_compat(&new_set, &new32);
2640                 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2641 
2642                 error = sigprocmask(how, &new_set, NULL);
2643                 if (error)
2644                         return error;
2645         }
2646         if (oset) {
2647                 compat_sigset_t old32;
2648                 sigset_to_compat(&old32, &old_set);
2649                 if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2650                         return -EFAULT;
2651         }
2652         return 0;
2653 #else
2654         return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2655                                   (sigset_t __user *)oset, sigsetsize);
2656 #endif
2657 }
2658 #endif
2659 
2660 static int do_sigpending(void *set, unsigned long sigsetsize)
2661 {
2662         if (sigsetsize > sizeof(sigset_t))
2663                 return -EINVAL;
2664 
2665         spin_lock_irq(&current->sighand->siglock);
2666         sigorsets(set, &current->pending.signal,
2667                   &current->signal->shared_pending.signal);
2668         spin_unlock_irq(&current->sighand->siglock);
2669 
2670         /* Outside the lock because only this thread touches it.  */
2671         sigandsets(set, &current->blocked, set);
2672         return 0;
2673 }
2674 
2675 /**
2676  *  sys_rt_sigpending - examine a pending signal that has been raised
2677  *                      while blocked
2678  *  @uset: stores pending signals
2679  *  @sigsetsize: size of sigset_t type, or smaller
2680  */
2681 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2682 {
2683         sigset_t set;
2684         int err = do_sigpending(&set, sigsetsize);
2685         if (!err && copy_to_user(uset, &set, sigsetsize))
2686                 err = -EFAULT;
2687         return err;
2688 }
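
/*
 * Illustrative sketch (user space, not part of signal.c): sigpending(2)
 * reaches rt_sigpending above and reports signals that were raised while
 * blocked.
 */
#include <signal.h>

static int sigint_is_pending(void)
{
        sigset_t pending;

        sigpending(&pending);
        return sigismember(&pending, SIGINT);
}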
2689 
2690 #ifdef CONFIG_COMPAT
2691 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2692                 compat_size_t, sigsetsize)
2693 {
2694 #ifdef __BIG_ENDIAN
2695         sigset_t set;
2696         int err = do_sigpending(&set, sigsetsize);
2697         if (!err) {
2698                 compat_sigset_t set32;
2699                 sigset_to_compat(&set32, &set);
2700                 /* we can get here only if sigsetsize <= sizeof(set) */
2701                 if (copy_to_user(uset, &set32, sigsetsize))
2702                         err = -EFAULT;
2703         }
2704         return err;
2705 #else
2706         return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2707 #endif
2708 }
2709 #endif
2710 
2711 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2712 
2713 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2714 {
2715         int err;
2716 
2717         if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2718                 return -EFAULT;
2719         if (from->si_code < 0)
2720                 return __copy_to_user(to, from, sizeof(siginfo_t))
2721                         ? -EFAULT : 0;
2722         /*
2723          * If you change siginfo_t structure, please be sure
2724          * this code is fixed accordingly.
2725          * Please remember to update the signalfd_copyinfo() function
2726          * inside fs/signalfd.c too, in case siginfo_t changes.
2727          * It should never copy any pad contained in the structure
2728          * to avoid security leaks, but must copy the generic
2729          * 3 ints plus the relevant union member.
2730          */
2731         err = __put_user(from->si_signo, &to->si_signo);
2732         err |= __put_user(from->si_errno, &to->si_errno);
2733         err |= __put_user((short)from->si_code, &to->si_code);
2734         switch (from->si_code & __SI_MASK) {
2735         case __SI_KILL:
2736                 err |= __put_user(from->si_pid, &to->si_pid);
2737                 err |= __put_user(from->si_uid, &to->si_uid);
2738                 break;
2739         case __SI_TIMER:
2740                  err |= __put_user(from->si_tid, &to->si_tid);
2741                  err |= __put_user(from->si_overrun, &to->si_overrun);
2742                  err |= __put_user(from->si_ptr, &to->si_ptr);
2743                 break;
2744         case __SI_POLL:
2745                 err |= __put_user(from->si_band, &to->si_band);
2746                 err |= __put_user(from->si_fd, &to->si_fd);
2747                 break;
2748         case __SI_FAULT:
2749                 err |= __put_user(from->si_addr, &to->si_addr);
2750 #ifdef __ARCH_SI_TRAPNO
2751                 err |= __put_user(from->si_trapno, &to->si_trapno);
2752 #endif
2753 #ifdef BUS_MCEERR_AO
2754                 /*
2755                  * Other callers might not initialize the si_lsb field,
2756                  * so check explicitly for the right codes here.
2757                  */
2758                 if (from->si_signo == SIGBUS &&
2759                     (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2760                         err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2761 #endif
2762 #ifdef SEGV_BNDERR
2763                 if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2764                         err |= __put_user(from->si_lower, &to->si_lower);
2765                         err |= __put_user(from->si_upper, &to->si_upper);
2766                 }
2767 #endif
2768                 break;
2769         case __SI_CHLD:
2770                 err |= __put_user(from->si_pid, &to->si_pid);
2771                 err |= __put_user(from->si_uid, &to->si_uid);
2772                 err |= __put_user(from->si_status, &to->si_status);
2773                 err |= __put_user(from->si_utime, &to->si_utime);
2774                 err |= __put_user(from->si_stime, &to->si_stime);
2775                 break;
2776         case __SI_RT: /* This is not generated by the kernel as of now. */
2777         case __SI_MESGQ: /* But this is */
2778                 err |= __put_user(from->si_pid, &to->si_pid);
2779                 err |= __put_user(from->si_uid, &to->si_uid);
2780                 err |= __put_user(from->si_ptr, &to->si_ptr);
2781                 break;
2782 #ifdef __ARCH_SIGSYS
2783         case __SI_SYS:
2784                 err |= __put_user(from->si_call_addr, &to->si_call_addr);
2785                 err |= __put_user(from->si_syscall, &to->si_syscall);
2786                 err |= __put_user(from->si_arch, &to->si_arch);
2787                 break;
2788 #endif
2789         default: /* this is just in case for now ... */
2790                 err |= __put_user(from->si_pid, &to->si_pid);
2791                 err |= __put_user(from->si_uid, &to->si_uid);
2792                 break;
2793         }
2794         return err;
2795 }
2796 
2797 #endif
2798 
2799 /**
2800  *  do_sigtimedwait - wait for queued signals specified in @which
2801  *  @which: queued signals to wait for
2802  *  @info: if non-null, the signal's siginfo is returned here
2803  *  @ts: upper bound on process time suspension
2804  */
2805 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2806                         const struct timespec *ts)
2807 {
2808         struct task_struct *tsk = current;
2809         long timeout = MAX_SCHEDULE_TIMEOUT;
2810         sigset_t mask = *which;
2811         int sig;
2812 
2813         if (ts) {
2814                 if (!timespec_valid(ts))
2815                         return -EINVAL;
2816                 timeout = timespec_to_jiffies(ts);
2817                 /*
2818                  * We can be close to the next tick, add another one
2819                  * to ensure we will wait at least the time asked for.
2820                  */
2821                 if (ts->tv_sec || ts->tv_nsec)
2822                         timeout++;
2823         }
2824 
2825         /*
2826          * Invert the set of allowed signals to get those we want to block.
2827          */
2828         sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2829         signotset(&mask);
2830 
2831         spin_lock_irq(&tsk->sighand->siglock);
2832         sig = dequeue_signal(tsk, &mask, info);
2833         if (!sig && timeout) {
2834                 /*
2835                  * None ready, temporarily unblock those we're interested in
2836                  * while we are sleeping, so that we'll be awakened when
2837                  * they arrive. Unblocking is always fine, we can avoid
2838                  * set_current_blocked().
2839                  */
2840                 tsk->real_blocked = tsk->blocked;
2841                 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2842                 recalc_sigpending();
2843                 spin_unlock_irq(&tsk->sighand->siglock);
2844 
2845                 timeout = freezable_schedule_timeout_interruptible(timeout);
2846 
2847                 spin_lock_irq(&tsk->sighand->siglock);
2848                 __set_task_blocked(tsk, &tsk->real_blocked);
2849                 sigemptyset(&tsk->real_blocked);
2850                 sig = dequeue_signal(tsk, &mask, info);
2851         }
2852         spin_unlock_irq(&tsk->sighand->siglock);
2853 
2854         if (sig)
2855                 return sig;
2856         return timeout ? -EINTR : -EAGAIN;
2857 }
2858 
2859 /**
2860  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
2861  *                      in @uthese
2862  *  @uthese: queued signals to wait for
2863  *  @uinfo: if non-null, the signal's siginfo is returned here
2864  *  @uts: upper bound on process time suspension
2865  *  @sigsetsize: size of sigset_t type
2866  */
2867 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2868                 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2869                 size_t, sigsetsize)
2870 {
2871         sigset_t these;
2872         struct timespec ts;
2873         siginfo_t info;
2874         int ret;
2875 
2876         /* XXX: Don't preclude handling different sized sigset_t's.  */
2877         if (sigsetsize != sizeof(sigset_t))
2878                 return -EINVAL;
2879 
2880         if (copy_from_user(&these, uthese, sizeof(these)))
2881                 return -EFAULT;
2882 
2883         if (uts) {
2884                 if (copy_from_user(&ts, uts, sizeof(ts)))
2885                         return -EFAULT;
2886         }
2887 
2888         ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2889 
2890         if (ret > 0 && uinfo) {
2891                 if (copy_siginfo_to_user(uinfo, &info))
2892                         ret = -EFAULT;
2893         }
2894 
2895         return ret;
2896 }
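
The rt_sigtimedwait path above is normally reached through the POSIX sigtimedwait() wrapper. A hedged userspace sketch (not part of kernel/signal.c; assumes the usual glibc wrappers) that blocks SIGUSR1 and waits for it with a five-second bound. A timeout surfaces as -1/EAGAIN and an unrelated interrupting signal as -1/EINTR, matching the return values computed in do_sigtimedwait():

/* Userspace sketch: wait for SIGUSR1 with a timeout via sigtimedwait(). */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
        int sig;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        /* The signal must be blocked so it stays queued until dequeued here. */
        sigprocmask(SIG_BLOCK, &set, NULL);

        sig = sigtimedwait(&set, &info, &ts);
        if (sig == SIGUSR1)
                printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
        else
                perror("sigtimedwait");  /* EAGAIN on timeout, EINTR if interrupted */
        return 0;
}
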
2897 
2898 /**
2899  *  sys_kill - send a signal to a process
2900  *  @pid: the PID of the process
2901  *  @sig: signal to be sent
2902  */
2903 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2904 {
2905         struct siginfo info;
2906         if (ccs_kill_permission(pid, sig))
2907                 return -EPERM;
2908 
2909         info.si_signo = sig;
2910         info.si_errno = 0;
2911         info.si_code = SI_USER;
2912         info.si_pid = task_tgid_vnr(current);
2913         info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2914 
2915         return kill_something_info(sig, &info, pid);
2916 }
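
kill_something_info() and check_kill_permission() give signal 0 its usual meaning as a pure permission/existence probe (see also the comment in do_send_specific() below); nothing is delivered. A minimal userspace sketch of that probe, using the standard kill() wrapper and a hypothetical target PID:

/* Userspace sketch: probe a PID with the null signal. The PID is illustrative. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>

int main(void)
{
        pid_t target = 1234;    /* hypothetical PID */

        if (kill(target, 0) == 0)
                printf("pid %d exists and is signalable\n", (int)target);
        else if (errno == ESRCH)
                printf("pid %d does not exist\n", (int)target);
        else if (errno == EPERM)
                printf("pid %d exists but we may not signal it\n", (int)target);
        return 0;
}
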
2917 
2918 static int
2919 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2920 {
2921         struct task_struct *p;
2922         int error = -ESRCH;
2923 
2924         rcu_read_lock();
2925         p = find_task_by_vpid(pid);
2926         if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2927                 error = check_kill_permission(sig, info, p);
2928                 /*
2929                  * The null signal is a permissions and process existence
2930                  * probe.  No signal is actually delivered.
2931                  */
2932                 if (!error && sig) {
2933                         error = do_send_sig_info(sig, info, p, false);
2934                         /*
2935                          * If lock_task_sighand() failed we pretend the task
2936                          * dies after receiving the signal. The window is tiny,
2937                          * and the signal is private anyway.
2938                          */
2939                         if (unlikely(error == -ESRCH))
2940                                 error = 0;
2941                 }
2942         }
2943         rcu_read_unlock();
2944 
2945         return error;
2946 }

2947 
2948 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2949 {
2950         struct siginfo info = {};
2951 
2952         info.si_signo = sig;
2953         info.si_errno = 0;
2954         info.si_code = SI_TKILL;
2955         info.si_pid = task_tgid_vnr(current);
2956         info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2957 
2958         return do_send_specific(tgid, pid, sig, &info);
2959 }
2960 
2961 /**
2962  *  sys_tgkill - send signal to one specific thread
2963  *  @tgid: the thread group ID of the thread
2964  *  @pid: the PID of the thread
2965  *  @sig: signal to be sent
2966  *
2967  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2968  *  exists but no longer belongs to the target thread group. This
2969  *  check solves the problem of threads exiting and PIDs getting reused.
2970  */
2971 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2972 {
2973         /* This is only valid for single tasks */
2974         if (pid <= 0 || tgid <= 0)
2975                 return -EINVAL;
2976         if (ccs_tgkill_permission(tgid, pid, sig))
2977                 return -EPERM;
2978 
2979         return do_tkill(tgid, pid, sig);
2980 }
2981 
2982 /**
2983  *  sys_tkill - send signal to one specific task
2984  *  @pid: the PID of the task
2985  *  @sig: signal to be sent
2986  *
2987  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2988  */
2989 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2990 {
2991         /* This is only valid for single tasks */
2992         if (pid <= 0)
2993                 return -EINVAL;
2994         if (ccs_tkill_permission(pid, sig))
2995                 return -EPERM;
2996 
2997         return do_tkill(0, pid, sig);
2998 }
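
Both entry points funnel into do_tkill()/do_send_specific(); tgkill() is the preferred form because the @tgid check defends against TID reuse, as noted above. A hedged sketch using raw syscall numbers (glibc has not always shipped a tgkill() wrapper), probing the calling thread itself with the null signal:

/* Userspace sketch: direct tgkill(2) via syscall(), probing our own thread. */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        pid_t tgid = getpid();
        pid_t tid  = (pid_t)syscall(SYS_gettid);   /* main thread: tid == tgid */

        /* Directed at one thread; the tgid argument guards against tid reuse. */
        if (syscall(SYS_tgkill, tgid, tid, 0) == 0)
                printf("thread %d in group %d is alive\n", (int)tid, (int)tgid);
        else
                perror("tgkill");
        return 0;
}
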
2999 
3000 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3001 {
3002         /* Not even root can pretend to send signals from the kernel.
3003          * Nor can they impersonate a kill()/tgkill(), which adds source info.
3004          */
3005         if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3006             (task_pid_vnr(current) != pid))
3007                 return -EPERM;
3008 
3009         info->si_signo = sig;
3010         if (ccs_sigqueue_permission(pid, sig))
3011                 return -EPERM;
3012 
3013         /* POSIX.1b doesn't mention process groups.  */
3014         return kill_proc_info(sig, info, pid);
3015 }
3016 
3017 /**
3018  *  sys_rt_sigqueueinfo - send signal information to a process
3019  *  @pid: the PID of the process
3020  *  @sig: signal to be sent
3021  *  @uinfo: signal info to be sent
3022  */
3023 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3024                 siginfo_t __user *, uinfo)
3025 {
3026         siginfo_t info;
3027         if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3028                 return -EFAULT;
3029         return do_rt_sigqueueinfo(pid, sig, &info);
3030 }
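
Userspace normally reaches do_rt_sigqueueinfo() through the POSIX sigqueue() wrapper, which fills in si_code = SI_QUEUE (a negative value), so the si_code >= 0 impersonation check above does not reject it. A hedged sketch (not part of this file) that queues a payload to the calling process and receives it with an SA_SIGINFO handler:

/* Userspace sketch: send a value with sigqueue() and read it via SA_SIGINFO. */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
        /* printf() is not async-signal-safe; acceptable for a demo only. */
        printf("sig %d, value %d, si_code %d\n",
               sig, info->si_value.sival_int, info->si_code);
}

int main(void)
{
        struct sigaction sa;
        union sigval val = { .sival_int = 42 };

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        /* SIGUSR1 is unblocked, so it is delivered to us before sigqueue() returns. */
        if (sigqueue(getpid(), SIGUSR1, val) != 0)
                perror("sigqueue");
        return 0;
}
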
3031 
3032 #ifdef CONFIG_COMPAT
3033 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3034                         compat_pid_t, pid,
3035                         int, sig,
3036                         struct compat_siginfo __user *, uinfo)
3037 {
3038         siginfo_t info = {};
3039         int ret = copy_siginfo_from_user32(&info, uinfo);
3040         if (unlikely(ret))
3041                 return ret;
3042         return do_rt_sigqueueinfo(pid, sig, &info);
3043 }
3044 #endif
3045 
3046 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3047 {
3048         /* This is only valid for single tasks */
3049         if (pid <= 0 || tgid <= 0)
3050                 return -EINVAL;
3051 
3052         /* Not even root can pretend to send signals from the kernel.
3053          * Nor can they impersonate a kill()/tgkill(), which adds source info.
3054          */
3055         if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3056             (task_pid_vnr(current) != pid))
3057                 return -EPERM;
3058 
3059         info->si_signo = sig;
3060         if (ccs_tgsigqueue_permission(tgid, pid, sig))
3061                 return -EPERM;
3062 
3063         return do_send_specific(tgid, pid, sig, info);
3064 }
3065 
3066 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3067                 siginfo_t __user *, uinfo)
3068 {
3069         siginfo_t info;
3070 
3071         if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3072                 return -EFAULT;
3073 
3074         return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3075 }
3076 
3077 #ifdef CONFIG_COMPAT
3078 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3079                         compat_pid_t, tgid,
3080                         compat_pid_t, pid,
3081                         int, sig,
3082                         struct compat_siginfo __user *, uinfo)
3083 {
3084         siginfo_t info = {};
3085 
3086         if (copy_siginfo_from_user32(&info, uinfo))
3087                 return -EFAULT;
3088         return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3089 }
3090 #endif
3091 
3092 /*
3093  * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3094  */
3095 void kernel_sigaction(int sig, __sighandler_t action)
3096 {
3097         spin_lock_irq(&current->sighand->siglock);
3098         current->sighand->action[sig - 1].sa.sa_handler = action;
3099         if (action == SIG_IGN) {
3100                 sigset_t mask;
3101 
3102                 sigemptyset(&mask);
3103                 sigaddset(&mask, sig);
3104 
3105                 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3106                 flush_sigqueue_mask(&mask, &current->pending);
3107                 recalc_sigpending();
3108         }
3109         spin_unlock_irq(&current->sighand->siglock);
3110 }
3111 EXPORT_SYMBOL(kernel_sigaction);
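
kernel_sigaction() is normally reached through the allow_signal()/disallow_signal() helpers rather than called directly. Below is a hypothetical, hedged in-kernel sketch (not part of this file; module name and loop details are illustrative assumptions) of a kthread that opts in to SIGTERM this way:

/* Hypothetical module sketch: a kthread accepting SIGTERM via allow_signal(). */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/signal.h>

static struct task_struct *demo_task;

static int demo_thread(void *unused)
{
        allow_signal(SIGTERM);                  /* helper built on kernel_sigaction() */

        while (!kthread_should_stop()) {
                if (signal_pending(current))    /* a SIGTERM was queued to us */
                        flush_signals(current);
                msleep_interruptible(1000);
        }
        return 0;
}

static int __init demo_init(void)
{
        demo_task = kthread_run(demo_thread, NULL, "sig-demo");
        return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void __exit demo_exit(void)
{
        kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
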
3112 
3113 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3114 {
3115         struct task_struct *p = current, *t;
3116         struct k_sigaction *k;
3117         sigset_t mask;
3118 
3119         if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3120                 return -EINVAL;
3121 
3122         k = &p->sighand->action[sig-1];
3123 
3124         spin_lock_irq(&p->sighand->siglock);
3125         if (oact)
3126                 *oact = *k;
3127 
3128         if (act) {
3129                 sigdelsetmask(&act->sa.sa_mask,
3130                               sigmask(SIGKILL) | sigmask(SIGSTOP));
3131                 *k = *act;
3132                 /*
3133                  * POSIX 3.3.1.3:
3134                  *  "Setting a signal action to SIG_IGN for a signal that is
3135                  *   pending shall cause the pending signal to be discarded,
3136                  *   whether or not it is blocked."
3137                  *
3138                  *  "Setting a signal action to SIG_DFL for a signal that is
3139                  *   pending and whose default action is to ignore the signal
3140                  *   (for example, SIGCHLD), shall cause the pending signal to
3141                  *   be discarded, whether or not it is blocked"
3142                  */
3143                 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3144                         sigemptyset(&mask);
3145                         sigaddset(&mask, sig);
3146                         flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3147                         for_each_thread(p, t)
3148                                 flush_sigqueue_mask(&mask, &t->pending);
3149                 }
3150         }
3151 
3152         spin_unlock_irq(&p->sighand->siglock);
3153         return 0;
3154 }
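
The POSIX 3.3.1.3 rule quoted above (a pending signal is discarded when its disposition becomes ignored, even while blocked) is observable from userspace. A small hedged sketch, assuming the usual libc wrappers:

/* Userspace sketch: a blocked, pending SIGUSR1 disappears once ignored. */
#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t block, pend;

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, NULL);

        raise(SIGUSR1);                  /* now pending and blocked */
        sigpending(&pend);
        printf("pending before: %d\n", sigismember(&pend, SIGUSR1));

        signal(SIGUSR1, SIG_IGN);        /* flush_sigqueue_mask() discards it */
        sigpending(&pend);
        printf("pending after:  %d\n", sigismember(&pend, SIGUSR1));
        return 0;
}
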
3155 
3156 static int
3157 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3158 {
3159         stack_t oss;
3160         int error;
3161 
3162         oss.ss_sp = (void __user *) current->sas_ss_sp;
3163         oss.ss_size = current->sas_ss_size;
3164         oss.ss_flags = sas_ss_flags(sp);
3165 
3166         if (uss) {
3167                 void __user *ss_sp;
3168                 size_t ss_size;
3169                 int ss_flags;
3170 
3171                 error = -EFAULT;
3172                 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3173                         goto out;
3174                 error = __get_user(ss_sp, &uss->ss_sp) |
3175                         __get_user(ss_flags, &uss->ss_flags) |
3176                         __get_user(ss_size, &uss->ss_size);
3177                 if (error)
3178                         goto out;
3179 
3180                 error = -EPERM;
3181                 if (on_sig_stack(sp))
3182                         goto out;
3183 
3184                 error = -EINVAL;
3185                 /*
3186                  * Note - this code used to test ss_flags incorrectly:
3187                  *        old code may have been written using ss_flags==0
3188                  *        to mean ss_flags==SS_ONSTACK (as this was the only
3189                  *        way that worked) - this fix preserves that older
3190                  *        mechanism.
3191                  */
3192                 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3193                         goto out;
3194 
3195                 if (ss_flags == SS_DISABLE) {
3196                         ss_size = 0;
3197                         ss_sp = NULL;
3198                 } else {
3199                         error = -ENOMEM;
3200                         if (ss_size < MINSIGSTKSZ)
3201                                 goto out;
3202                 }
3203 
3204                 current->sas_ss_sp = (unsigned long) ss_sp;
3205                 current->sas_ss_size = ss_size;
3206         }
3207 
3208         error = 0;
3209         if (uoss) {
3210                 error = -EFAULT;
3211                 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3212                         goto out;
3213                 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3214                         __put_user(oss.ss_size, &uoss->ss_size) |
3215                         __put_user(oss.ss_flags, &uoss->ss_flags);
3216         }
3217 
3218 out:
3219         return error;
3220 }
3221 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3222 {
3223         return do_sigaltstack(uss, uoss, current_user_stack_pointer());
3224 }
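
A typical user of this syscall registers an alternate stack and then installs a handler with SA_ONSTACK, so that on_sig_stack()/sas_ss_flags() report SS_ONSTACK while the handler runs. A hedged userspace sketch (ss_flags == 0 is deliberate, matching the legacy-compatible flag check above):

/* Userspace sketch: register an alternate stack for SIGSEGV handling. */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void on_segv(int sig)
{
        /* Runs on the alternate stack registered below. */
        _Exit(1);
}

int main(void)
{
        stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ, .ss_flags = 0 };
        struct sigaction sa;

        if (!ss.ss_sp || sigaltstack(&ss, NULL) != 0) {
                perror("sigaltstack");   /* ENOMEM if too small, EPERM if already on it */
                return 1;
        }
        sa.sa_handler = on_segv;
        sa.sa_flags = SA_ONSTACK;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);
        /* A stack overflow in this process would now be handled on ss.ss_sp. */
        return 0;
}
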
3225 
3226 int restore_altstack(const stack_t __user *uss)
3227 {
3228         int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3229         /* squash all but EFAULT for now */
3230         return err == -EFAULT ? err : 0;
3231 }
3232 
3233 int __save_altstack(stack_t __user *uss, unsigned long sp)
3234 {
3235         struct task_struct *t = current;
3236         return  __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3237                 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3238                 __put_user(t->sas_ss_size, &uss->ss_size);
3239 }
3240 
3241 #ifdef CONFIG_COMPAT
3242 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3243                         const compat_stack_t __user *, uss_ptr,
3244                         compat_stack_t __user *, uoss_ptr)
3245 {
3246         stack_t uss, uoss;
3247         int ret;
3248         mm_segment_t seg;
3249 
3250         if (uss_ptr) {
3251                 compat_stack_t uss32;
3252 
3253                 memset(&uss, 0, sizeof(stack_t));
3254                 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3255                         return -EFAULT;
3256                 uss.ss_sp = compat_ptr(uss32.ss_sp);
3257                 uss.ss_flags = uss32.ss_flags;
3258                 uss.ss_size = uss32.ss_size;
3259         }
3260         seg = get_fs();
3261         set_fs(KERNEL_DS);
3262         ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3263                              (stack_t __force __user *) &uoss,
3264                              compat_user_stack_pointer());
3265         set_fs(seg);
3266         if (ret >= 0 && uoss_ptr)  {
3267                 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3268                     __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3269                     __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3270                     __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3271                         ret = -EFAULT;
3272         }
3273         return ret;
3274 }
3275 
3276 int compat_restore_altstack(const compat_stack_t __user *uss)
3277 {
3278         int err = compat_sys_sigaltstack(uss, NULL);
3279         /* squash all but -EFAULT for now */
3280         return err == -EFAULT ? err : 0;
3281 }
3282 
3283 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3284 {
3285         struct task_struct *t = current;
3286         return  __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
3287                 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3288                 __put_user(t->sas_ss_size, &uss->ss_size);
3289 }
3290 #endif
3291 
3292 #ifdef __ARCH_WANT_SYS_SIGPENDING
3293 
3294 /**
3295  *  sys_sigpending - examine pending signals
3296  *  @set: where the mask of pending signals is returned
3297  */
3298 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3299 {
3300         return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t)); 
3301 }
3302 
3303 #endif
3304 
3305 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3306 /**
3307  *  sys_sigprocmask - examine and change blocked signals
3308  *  @how: whether to add, remove, or set signals
3309  *  @nset: signals to add or remove (if non-null)
3310  *  @oset: previous value of signal mask if non-null
3311  *
3312  * Some platforms have their own version with special arguments;
3313  * others support only sys_rt_sigprocmask.
3314  */
3315 
3316 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3317                 old_sigset_t __user *, oset)
3318 {
3319         old_sigset_t old_set, new_set;
3320         sigset_t new_blocked;
3321 
3322         old_set = current->blocked.sig[0];
3323 
3324         if (nset) {
3325                 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3326                         return -EFAULT;
3327 
3328                 new_blocked = current->blocked;
3329 
3330                 switch (how) {
3331                 case SIG_BLOCK:
3332                         sigaddsetmask(&new_blocked, new_set);
3333                         break;
3334                 case SIG_UNBLOCK:
3335                         sigdelsetmask(&new_blocked, new_set);
3336                         break;
3337                 case SIG_SETMASK:
3338                         new_blocked.sig[0] = new_set;
3339                         break;
3340                 default:
3341                         return -EINVAL;
3342                 }
3343 
3344                 set_current_blocked(&new_blocked);
3345         }
3346 
3347         if (oset) {
3348                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3349                         return -EFAULT;
3350         }
3351 
3352         return 0;
3353 }
3354 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3355 
3356 #ifndef CONFIG_ODD_RT_SIGACTION
3357 /**
3358  *  sys_rt_sigaction - alter an action taken by a process
3359  *  @sig: signal to be sent
3360  *  @act: new sigaction
3361  *  @oact: used to save the previous sigaction
3362  *  @sigsetsize: size of sigset_t type
3363  */
3364 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3365                 const struct sigaction __user *, act,
3366                 struct sigaction __user *, oact,
3367                 size_t, sigsetsize)
3368 {
3369         struct k_sigaction new_sa, old_sa;
3370         int ret = -EINVAL;
3371 
3372         /* XXX: Don't preclude handling different sized sigset_t's.  */
3373         if (sigsetsize != sizeof(sigset_t))
3374                 goto out;
3375 
3376         if (act) {
3377                 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3378                         return -EFAULT;
3379         }
3380 
3381         ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3382 
3383         if (!ret && oact) {
3384                 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3385                         return -EFAULT;
3386         }
3387 out:
3388         return ret;
3389 }
3390 #ifdef CONFIG_COMPAT
3391 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3392                 const struct compat_sigaction __user *, act,
3393                 struct compat_sigaction __user *, oact,
3394                 compat_size_t, sigsetsize)
3395 {
3396         struct k_sigaction new_ka, old_ka;
3397         compat_sigset_t mask;
3398 #ifdef __ARCH_HAS_SA_RESTORER
3399         compat_uptr_t restorer;
3400 #endif
3401         int ret;
3402 
3403         /* XXX: Don't preclude handling different sized sigset_t's.  */
3404         if (sigsetsize != sizeof(compat_sigset_t))
3405                 return -EINVAL;
3406 
3407         if (act) {
3408                 compat_uptr_t handler;
3409                 ret = get_user(handler, &act->sa_handler);
3410                 new_ka.sa.sa_handler = compat_ptr(handler);
3411 #ifdef __ARCH_HAS_SA_RESTORER
3412                 ret |= get_user(restorer, &act->sa_restorer);
3413                 new_ka.sa.sa_restorer = compat_ptr(restorer);
3414 #endif
3415                 ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3416                 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3417                 if (ret)
3418                         return -EFAULT;
3419                 sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3420         }
3421 
3422         ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3423         if (!ret && oact) {
3424                 sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3425                 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), 
3426                                &oact->sa_handler);
3427                 ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3428                 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3429 #ifdef __ARCH_HAS_SA_RESTORER
3430                 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3431                                 &oact->sa_restorer);
3432 #endif
3433         }
3434         return ret;
3435 }
3436 #endif
3437 #endif /* !CONFIG_ODD_RT_SIGACTION */
3438 
3439 #ifdef CONFIG_OLD_SIGACTION
3440 SYSCALL_DEFINE3(sigaction, int, sig,
3441                 const struct old_sigaction __user *, act,
3442                 struct old_sigaction __user *, oact)
3443 {
3444         struct k_sigaction new_ka, old_ka;
3445         int ret;
3446 
3447         if (act) {
3448                 old_sigset_t mask;
3449                 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3450                     __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3451                     __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3452                     __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3453                     __get_user(mask, &act->sa_mask))
3454                         return -EFAULT;
3455 #ifdef __ARCH_HAS_KA_RESTORER
3456                 new_ka.ka_restorer = NULL;
3457 #endif
3458                 siginitset(&new_ka.sa.sa_mask, mask);
3459         }
3460 
3461         ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3462 
3463         if (!ret && oact) {
3464                 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3465                     __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3466                     __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3467                     __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3468                     __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3469                         return -EFAULT;
3470         }
3471 
3472         return ret;
3473 }
3474 #endif
3475 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3476 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3477                 const struct compat_old_sigaction __user *, act,
3478                 struct compat_old_sigaction __user *, oact)
3479 {
3480         struct k_sigaction new_ka, old_ka;
3481         int ret;
3482         compat_old_sigset_t mask;
3483         compat_uptr_t handler, restorer;
3484 
3485         if (act) {
3486                 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3487                     __get_user(handler, &act->sa_handler) ||
3488                     __get_user(restorer, &act->sa_restorer) ||
3489                     __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3490                     __get_user(mask, &act->sa_mask))
3491                         return -EFAULT;
3492 
3493 #ifdef __ARCH_HAS_KA_RESTORER
3494                 new_ka.ka_restorer = NULL;
3495 #endif
3496                 new_ka.sa.sa_handler = compat_ptr(handler);
3497                 new_ka.sa.sa_restorer = compat_ptr(restorer);
3498                 siginitset(&new_ka.sa.sa_mask, mask);
3499         }
3500 
3501         ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3502 
3503         if (!ret && oact) {
3504                 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3505                     __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3506                                &oact->sa_handler) ||
3507                     __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3508                                &oact->sa_restorer) ||
3509                     __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3510                     __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3511                         return -EFAULT;
3512         }
3513         return ret;
3514 }
3515 #endif
3516 
3517 #ifdef CONFIG_SGETMASK_SYSCALL
3518 
3519 /*
3520  * For backwards compatibility.  Functionality superseded by sigprocmask.
3521  */
3522 SYSCALL_DEFINE0(sgetmask)
3523 {
3524         /* SMP safe */
3525         return current->blocked.sig[0];
3526 }
3527 
3528 SYSCALL_DEFINE1(ssetmask, int, newmask)
3529 {
3530         int old = current->blocked.sig[0];
3531         sigset_t newset;
3532 
3533         siginitset(&newset, newmask);
3534         set_current_blocked(&newset);
3535 
3536         return old;
3537 }
3538 #endif /* CONFIG_SGETMASK_SYSCALL */
3539 
3540 #ifdef __ARCH_WANT_SYS_SIGNAL
3541 /*
3542  * For backwards compatibility.  Functionality superseded by sigaction.
3543  */
3544 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3545 {
3546         struct k_sigaction new_sa, old_sa;
3547         int ret;
3548 
3549         new_sa.sa.sa_handler = handler;
3550         new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3551         sigemptyset(&new_sa.sa.sa_mask);
3552 
3553         ret = do_sigaction(sig, &new_sa, &old_sa);
3554 
3555         return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3556 }
3557 #endif /* __ARCH_WANT_SYS_SIGNAL */
3558 
3559 #ifdef __ARCH_WANT_SYS_PAUSE
3560 
3561 SYSCALL_DEFINE0(pause)
3562 {
3563         while (!signal_pending(current)) {
3564                 __set_current_state(TASK_INTERRUPTIBLE);
3565                 schedule();
3566         }
3567         return -ERESTARTNOHAND;
3568 }
3569 
3570 #endif
3571 
3572 int sigsuspend(sigset_t *set)
3573 {
3574         current->saved_sigmask = current->blocked;
3575         set_current_blocked(set);
3576 
3577         __set_current_state(TASK_INTERRUPTIBLE);
3578         schedule();
3579         set_restore_sigmask();
3580         return -ERESTARTNOHAND;
3581 }
3582 
3583 /**
3584  *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
3585  *      until a signal is received
3586  *  @unewset: new signal mask value
3587  *  @sigsetsize: size of sigset_t type
3588  */
3589 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3590 {
3591         sigset_t newset;
3592 
3593         /* XXX: Don't preclude handling different sized sigset_t's.  */
3594         if (sigsetsize != sizeof(sigset_t))
3595                 return -EINVAL;
3596 
3597         if (copy_from_user(&newset, unewset, sizeof(newset)))
3598                 return -EFAULT;
3599         return sigsuspend(&newset);
3600 }
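
sigsuspend() exists to close the race between testing a flag and going to sleep: in the function above the mask swap and the sleep happen atomically with respect to signal delivery. The classic userspace pattern, as a hedged sketch using the standard wrappers:

/* Userspace sketch: race-free wait for SIGUSR1 with sigprocmask() + sigsuspend(). */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
        got_usr1 = 1;
}

int main(void)
{
        sigset_t block, orig;
        struct sigaction sa;

        sa.sa_handler = on_usr1;
        sa.sa_flags = 0;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &orig);   /* close the check/sleep race */

        while (!got_usr1)
                sigsuspend(&orig);               /* atomically unblock and sleep */

        sigprocmask(SIG_SETMASK, &orig, NULL);
        puts("got SIGUSR1");
        return 0;
}
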
3601  
3602 #ifdef CONFIG_COMPAT
3603 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3604 {
3605 #ifdef __BIG_ENDIAN
3606         sigset_t newset;
3607         compat_sigset_t newset32;
3608 
3609         /* XXX: Don't preclude handling different sized sigset_t's.  */
3610         if (sigsetsize != sizeof(sigset_t))
3611                 return -EINVAL;
3612 
3613         if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3614                 return -EFAULT;
3615         sigset_from_compat(&newset, &newset32);
3616         return sigsuspend(&newset);
3617 #else
3618         /* on little-endian bitmaps don't care about granularity */
3619         return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3620 #endif
3621 }
3622 #endif
3623 
3624 #ifdef CONFIG_OLD_SIGSUSPEND
3625 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3626 {
3627         sigset_t blocked;
3628         siginitset(&blocked, mask);
3629         return sigsuspend(&blocked);
3630 }
3631 #endif
3632 #ifdef CONFIG_OLD_SIGSUSPEND3
3633 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3634 {
3635         sigset_t blocked;
3636         siginitset(&blocked, mask);
3637         return sigsuspend(&blocked);
3638 }
3639 #endif
3640 
3641 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3642 {
3643         return NULL;
3644 }
3645 
3646 void __init signals_init(void)
3647 {
3648         sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3649 }
3650 
3651 #ifdef CONFIG_KGDB_KDB
3652 #include <linux/kdb.h>
3653 /*
3654  * kdb_send_sig_info - Allows kdb to send signals without exposing
3655  * signal internals.  This function checks if the required locks are
3656  * available before calling the main signal code, to avoid kdb
3657  * deadlocks.
3658  */
3659 void
3660 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3661 {
3662         static struct task_struct *kdb_prev_t;
3663         int sig, new_t;
3664         if (!spin_trylock(&t->sighand->siglock)) {
3665                 kdb_printf("Can't do kill command now.\n"
3666                            "The sigmask lock is held somewhere else in "
3667                            "kernel, try again later\n");
3668                 return;
3669         }
3670         spin_unlock(&t->sighand->siglock);
3671         new_t = kdb_prev_t != t;
3672         kdb_prev_t = t;
3673         if (t->state != TASK_RUNNING && new_t) {
3674                 kdb_printf("Process is not RUNNING, sending a signal from "
3675                            "kdb risks deadlock\n"
3676                            "on the run queue locks. "
3677                            "The signal has _not_ been sent.\n"
3678                            "Reissue the kill command if you want to risk "
3679                            "the deadlock.\n");
3680                 return;
3681         }
3682         sig = info->si_signo;
3683         if (send_sig_info(sig, info, t))
3684                 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3685                            sig, t->pid);
3686         else
3687                 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3688 }
3689 #endif  /* CONFIG_KGDB_KDB */
3690 
