~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/kernel/signal.c

Version: ~ [ linux-6.4-rc3 ] ~ [ linux-6.3.4 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.30 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.113 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.180 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.243 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.283 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.315 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  *  linux/kernel/signal.c
  3  *
  4  *  Copyright (C) 1991, 1992  Linus Torvalds
  5  *
  6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
  7  */
  8 
  9 #include <linux/config.h>
 10 #include <linux/slab.h>
 11 #include <linux/module.h>
 12 #include <linux/unistd.h>
 13 #include <linux/smp_lock.h>
 14 #include <linux/init.h>
 15 #include <linux/sched.h>
 16 
 17 #include <asm/uaccess.h>
 18 #include <linux/ccsecurity.h>
 19 
/*
 * SLAB caches for signal bits.
 */

/* Set to 1 for verbose signal debugging: printk tracing in the queue/
 * dequeue paths plus extra SLAB consistency checks on the cache. */
#define DEBUG_SIG 0

#if DEBUG_SIG
#define SIG_SLAB_DEBUG  (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else
#define SIG_SLAB_DEBUG  0
#endif

/* Cache for struct sigqueue entries carrying queued siginfo payloads. */
static kmem_cache_t *sigqueue_cachep;

/* System-wide count of currently allocated sigqueue entries. */
atomic_t nr_queued_signals;
/* Cap on queued entries; beyond it non-RT signals lose their siginfo
 * (see send_signal()) and RT signals fail with -EAGAIN. */
int max_queued_signals = 1024;
 37 void __init signals_init(void)
 38 {
 39         sigqueue_cachep =
 40                 kmem_cache_create("sigqueue",
 41                                   sizeof(struct sigqueue),
 42                                   __alignof__(struct sigqueue),
 43                                   SIG_SLAB_DEBUG, NULL, NULL);
 44         if (!sigqueue_cachep)
 45                 panic("signals_init(): cannot create sigqueue SLAB cache");
 46 }
 47 
 48 
/* Given the mask, find the first available signal that should be serviced. */

/*
 * next_signal - return the lowest-numbered signal pending on @tsk that
 * is not set in @mask, or 0 if none is deliverable.
 *
 * The switch on the compile-time constant _NSIG_WORDS lets the compiler
 * emit straight-line code for the common one- and two-word sigset
 * layouts instead of the generic loop.
 */
static int
next_signal(struct task_struct *tsk, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = tsk->pending.signal.sig;	/* pending-signal bitmap words */
	m = mask->sig;			/* blocked-signal bitmap words */
	switch (_NSIG_WORDS) {
	default:
		/* Generic: scan each word for a pending-and-unblocked bit. */
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				/* ffz(~x) == index of lowest set bit of x */
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
 84 
 85 static void flush_sigqueue(struct sigpending *queue)
 86 {
 87         struct sigqueue *q, *n;
 88 
 89         sigemptyset(&queue->signal);
 90         q = queue->head;
 91         queue->head = NULL;
 92         queue->tail = &queue->head;
 93 
 94         while (q) {
 95                 n = q->next;
 96                 kmem_cache_free(sigqueue_cachep, q);
 97                 atomic_dec(&nr_queued_signals);
 98                 q = n;
 99         }
100 }
101 
102 /*
103  * Flush all pending signals for a task.
104  */
105 
106 void
107 flush_signals(struct task_struct *t)
108 {
109         t->sigpending = 0;
110         flush_sigqueue(&t->pending);
111 }
112 
/*
 * exit_sighand - detach an exiting task from its signal handler table.
 *
 * Drops the task's reference on its signal_struct, freeing it when the
 * last sharer exits, and discards any still-pending signals.  All of
 * this runs under sigmask_lock with interrupts disabled.
 */
void exit_sighand(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->sig;

	spin_lock_irq(&tsk->sigmask_lock);
	if (sig) {
		tsk->sig = NULL;
		if (atomic_dec_and_test(&sig->count))
			kmem_cache_free(sigact_cachep, sig);
	}
	tsk->sigpending = 0;
	flush_sigqueue(&tsk->pending);
	spin_unlock_irq(&tsk->sigmask_lock);
}
127 
128 /*
129  * Flush all handlers for a task.
130  */
131 
132 void
133 flush_signal_handlers(struct task_struct *t)
134 {
135         int i;
136         struct k_sigaction *ka = &t->sig->action[0];
137         for (i = _NSIG ; i != 0 ; i--) {
138                 if (ka->sa.sa_handler != SIG_IGN)
139                         ka->sa.sa_handler = SIG_DFL;
140                 ka->sa.sa_flags = 0;
141                 sigemptyset(&ka->sa.sa_mask);
142                 ka++;
143         }
144 }
145 
/*
 * sig_exit - cause the current task to exit due to a signal.
 */

/*
 * Marks @sig pending on current, flags it PF_SIGNALED, and -- when a
 * real siginfo is present (not the magic kernel value 1) and the signal
 * did not come from tkill -- forces the fatal signal onto every other
 * task in the thread group before calling do_exit().  Never returns.
 */
void
sig_exit(int sig, int exit_code, struct siginfo *info)
{
	struct task_struct *t;

	sigaddset(&current->pending.signal, sig);
	recalc_sigpending(current);
	current->flags |= PF_SIGNALED;

	/* Propagate the signal to all the tasks in
	 *  our thread group
	 */
	if (info && (unsigned long)info != 1
	    && info->si_code != SI_TKILL) {
		read_lock(&tasklist_lock);
		for_each_thread(t) {
			force_sig_info(sig, info, t);
		}
		read_unlock(&tasklist_lock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}
174 
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sigmask_lock, flags);
	/* Install mask and data before the notifier itself; dequeue_signal()
	 * tests ->notifier first, so the others must already be valid. */
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
}
194 
/* Notify the system that blocking has ended. */

/*
 * Clears the notifier installed by block_all_signals() and recomputes
 * sigpending, since signals held back while the notifier was active may
 * now need delivery.  NOTE(review): notifier_mask is not cleared here;
 * a NULL ->notifier alone disables the check (see dequeue_signal()).
 */
void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sigmask_lock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
}
208 
/*
 * collect_signal - pull the queued siginfo for @sig off @list into @info.
 *
 * Returns 1 with @info filled if @sig was pending, 0 if it was not
 * pending at all.  If the pending bit was set but no sigqueue entry
 * exists (the queue overflowed at send time), a zeroed-out info is
 * synthesized instead.  The pending bit is cleared once no queued
 * instance of the signal remains.  Caller holds the owning task's
 * sigmask_lock.
 */
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	if (sigismember(&list->signal, sig)) {
		/* Collect the siginfo appropriate to this signal.  */
		struct sigqueue *q, **pp;
		pp = &list->head;
		while ((q = *pp) != NULL) {
			if (q->info.si_signo == sig)
				goto found_it;
			pp = &q->next;
		}

		/* Ok, it wasn't in the queue.  We must have
		   been out of queue space.  So zero out the
		   info.  */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
		return 1;

found_it:
		/* Unlink the entry, fixing the tail pointer if it was last. */
		if ((*pp = q->next) == NULL)
			list->tail = pp;

		/* Copy the sigqueue information and free the queue entry */
		copy_siginfo(info, &q->info);
		kmem_cache_free(sigqueue_cachep,q);
		atomic_dec(&nr_queued_signals);

		/* RT signals (>= SIGRTMIN) may be queued multiple times;
		 * if another instance remains, keep the pending bit set. */
		if (sig >= SIGRTMIN) {
			while ((q = *pp) != NULL) {
				if (q->info.si_signo == sig)
					goto found_another;
				pp = &q->next;
			}
		}

		sigdelset(&list->signal, sig);
found_another:
		return 1;
	}
	return 0;
}
256 
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers must be holding current->sigmask_lock.
 */

/*
 * Returns the dequeued signal number with its siginfo copied to @info,
 * or 0 if no deliverable signal (after @mask) is pending.  An installed
 * block_all_signals() notifier may veto delivery, in which case 0 is
 * returned with sigpending cleared.  NOTE(review): the veto path
 * returns without calling recalc_sigpending().
 */
int
dequeue_signal(sigset_t *mask, siginfo_t *info)
{
	int sig = 0;

#if DEBUG_SIG
printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
	signal_pending(current));
#endif

	sig = next_signal(current, mask);
	if (sig) {
		/* Give the driver notifier a chance to intercept. */
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					current->sigpending = 0;
					return 0;
				}
			}
		}

		if (!collect_signal(sig, &current->pending, info))
			sig = 0;

		/* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
		   we need to xchg out the timer overrun values.  */
	}
	/* Recompute the fast-path pending flag after removal. */
	recalc_sigpending(current);

#if DEBUG_SIG
printk(" %d -> %d\n", signal_pending(current), sig);
#endif

	return sig;
}
299 
300 static int rm_from_queue(int sig, struct sigpending *s)
301 {
302         struct sigqueue *q, **pp;
303 
304         if (!sigismember(&s->signal, sig))
305                 return 0;
306 
307         sigdelset(&s->signal, sig);
308 
309         pp = &s->head;
310 
311         while ((q = *pp) != NULL) {
312                 if (q->info.si_signo == sig) {
313                         if ((*pp = q->next) == NULL)
314                                 s->tail = pp;
315                         kmem_cache_free(sigqueue_cachep,q);
316                         atomic_dec(&nr_queued_signals);
317                         continue;
318                 }
319                 pp = &q->next;
320         }
321         return 1;
322 }
323 
324 /*
325  * Remove signal sig from t->pending.
326  * Returns 1 if sig was found.
327  *
328  * All callers must be holding t->sigmask_lock.
329  */
330 static int rm_sig_from_queue(int sig, struct task_struct *t)
331 {
332         return rm_from_queue(sig, &t->pending);
333 }
334 
335 /*
336  * Bad permissions for sending the signal
337  */
338 int bad_signal(int sig, struct siginfo *info, struct task_struct *t)
339 {
340         return (!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
341             && ((sig != SIGCONT) || (current->session != t->session))
342             && (current->euid ^ t->suid) && (current->euid ^ t->uid)
343             && (current->uid ^ t->suid) && (current->uid ^ t->uid)
344             && !capable(CAP_KILL);
345 }
346 
347 /*
348  * Signal type:
349  *    < 0 : global action (kill - spread to all non-blocked threads)
350  *    = 0 : ignored
351  *    > 0 : wake up.
352  */
353 static int signal_type(int sig, struct signal_struct *signals)
354 {
355         unsigned long handler;
356 
357         if (!signals)
358                 return 0;
359         
360         handler = (unsigned long) signals->action[sig-1].sa.sa_handler;
361         if (handler > 1)
362                 return 1;
363 
364         /* "Ignore" handler.. Illogical, but that has an implicit handler for SIGCHLD */
365         if (handler == 1)
366                 return sig == SIGCHLD;
367 
368         /* Default handler. Normally lethal, but.. */
369         switch (sig) {
370 
371         /* Ignored */
372         case SIGCONT: case SIGWINCH:
373         case SIGCHLD: case SIGURG:
374                 return 0;
375 
376         /* Implicit behaviour */
377         case SIGTSTP: case SIGTTIN: case SIGTTOU:
378                 return 1;
379 
380         /* Implicit actions (kill or do special stuff) */
381         default:
382                 return -1;
383         }
384 }
385                 
386 
387 /*
388  * Determine whether a signal should be posted or not.
389  *
390  * Signals with SIG_IGN can be ignored, except for the
391  * special case of a SIGCHLD. 
392  *
393  * Some signals with SIG_DFL default to a non-action.
394  */
395 static int ignored_signal(int sig, struct task_struct *t)
396 {
397         /* Don't ignore traced or blocked signals */
398         if ((t->ptrace & PT_PTRACED) || sigismember(&t->blocked, sig))
399                 return 0;
400 
401         return signal_type(sig, t->sig) == 0;
402 }
403 
/*
 * Handle TASK_STOPPED cases etc implicit behaviour
 * of certain magical signals.
 *
 * SIGKILL gets spread out to every thread.
 */

/*
 * Runs the implicit side effects of the job-control signals before the
 * signal itself is queued: SIGCONT/SIGKILL wake a stopped task and
 * cancel any pending stop signals; the stop signals cancel a pending
 * SIGCONT.  Caller holds t->sigmask_lock.
 */
static void handle_stop_signal(int sig, struct task_struct *t)
{
	switch (sig) {
	case SIGCONT:
		/* SIGCONT must not wake a task while it's being traced */
		if ((t->state == TASK_STOPPED) &&
		    ((t->ptrace & (PT_PTRACED|PT_TRACESYS)) ==
		     (PT_PTRACED|PT_TRACESYS)))
			return;
		/* fall through */
	case SIGKILL:
		/* Wake up the process if stopped.
		 * Note that if the process is being traced, waking it up
		 * will make it continue before being killed. This may end
		 * up unexpectedly completing whatever syscall is pending.
		 */
		if (t->state == TASK_STOPPED)
			wake_up_process(t);
		t->exit_code = 0;
		/* A continued/killed task must not re-stop on stale signals. */
		rm_sig_from_queue(SIGSTOP, t);
		rm_sig_from_queue(SIGTSTP, t);
		rm_sig_from_queue(SIGTTOU, t);
		rm_sig_from_queue(SIGTTIN, t);
		break;

	case SIGSTOP: case SIGTSTP:
	case SIGTTIN: case SIGTTOU:
		/* If we're stopping again, cancel SIGCONT */
		rm_sig_from_queue(SIGCONT, t);
		break;
	}
}
442 
/*
 * send_signal - post @sig (with optional @info) on the @signals queue.
 *
 * @info values: 0 = sent from user space on behalf of current,
 * 1 = kernel-generated with no details, otherwise a real siginfo to
 * copy.  Returns 0, or -EAGAIN only for user-queued RT signals when
 * the global queue limit is hit; in all other overflow cases the
 * signal is still marked pending, just without detailed info.
 */
static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
{
	struct sigqueue * q = NULL;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (atomic_read(&nr_queued_signals) < max_queued_signals) {
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
	}

	if (q) {
		atomic_inc(&nr_queued_signals);
		q->next = NULL;
		/* Append to the tail of the queue. */
		*signals->tail = q;
		signals->tail = &q->next;
		/* Synthesize or copy the siginfo (see @info convention above). */
		switch ((unsigned long) info) {
			case 0:
				q->info.si_signo = sig;
				q->info.si_errno = 0;
				q->info.si_code = SI_USER;
				q->info.si_pid = current->pid;
				q->info.si_uid = current->uid;
				break;
			case 1:
				q->info.si_signo = sig;
				q->info.si_errno = 0;
				q->info.si_code = SI_KERNEL;
				q->info.si_pid = 0;
				q->info.si_uid = 0;
				break;
			default:
				copy_siginfo(&q->info, info);
				break;
		}
	} else if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
		return -EAGAIN;
	}

	/* Always mark the signal pending, queued entry or not. */
	sigaddset(&signals->signal, sig);
	return 0;
}
495 
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "sigmask_lock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
static inline void signal_wake_up(struct task_struct *t)
{
	t->sigpending = 1;

#ifdef CONFIG_SMP
	/*
	 * If the task is running on a different CPU
	 * force a reschedule on the other CPU to make
	 * it notice the new signal quickly.
	 *
	 * The code below is a tad loose and might occasionally
	 * kick the wrong CPU if we catch the process in the
	 * process of changing - but no harm is done by that
	 * other than doing an extra (lightweight) IPI interrupt.
	 */
	spin_lock(&runqueue_lock);
	if (task_has_cpu(t) && t->processor != smp_processor_id())
		smp_send_reschedule(t->processor);
	spin_unlock(&runqueue_lock);
#endif /* CONFIG_SMP */

	/* Interruptible sleepers are woken so they can take the signal. */
	if (t->state & TASK_INTERRUPTIBLE) {
		wake_up_process(t);
		return;
	}
}
533 
534 static int deliver_signal(int sig, struct siginfo *info, struct task_struct *t)
535 {
536         int retval = send_signal(sig, info, &t->pending);
537 
538         if (!retval && !sigismember(&t->blocked, sig))
539                 signal_wake_up(t);
540 
541         return retval;
542 }
543 
/*
 * send_sig_info - main entry point for delivering @sig to @t.
 *
 * Validates the signal number, applies the permission check, and then
 * queues the signal unless it can be proven ignorable up front.  A sig
 * of 0 (or a target without a signal table, i.e. a zombie) acts as a
 * pure existence/permission probe.  Returns 0 or a negative errno.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long flags;
	int ret;


#if DEBUG_SIG
printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
#endif

	ret = -EINVAL;
	if (sig < 0 || sig > _NSIG)
		goto out_nolock;
	/* The somewhat baroque permissions check... */
	ret = -EPERM;
	if (bad_signal(sig, info, t))
		goto out_nolock;

	/* The null signal is a permissions and process existence probe.
	   No signal is actually delivered.  Same goes for zombies. */
	ret = 0;
	if (!sig || !t->sig)
		goto out_nolock;

	spin_lock_irqsave(&t->sigmask_lock, flags);
	/* Run implicit job-control side effects before queueing. */
	handle_stop_signal(sig, t);

	/* Optimize away the signal, if it's a signal that can be
	   handled immediately (ie non-blocked and untraced) and
	   that is ignored (either explicitly or by default).  */

	if (ignored_signal(sig, t))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (sig < SIGRTMIN && sigismember(&t->pending.signal, sig))
		goto out;

	ret = deliver_signal(sig, info, t);
out:
	spin_unlock_irqrestore(&t->sigmask_lock, flags);
out_nolock:
#if DEBUG_SIG
printk(" %d -> %d\n", signal_pending(t), ret);
#endif

	return ret;
}
595 
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

/*
 * Returns -ESRCH if @t has already released its signal table,
 * otherwise send_sig_info()'s result.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sigmask_lock, flags);
	if (t->sig == NULL) {
		/* No signal table left: the task is going away. */
		spin_unlock_irqrestore(&t->sigmask_lock, flags);
		return -ESRCH;
	}

	/* An ignoring handler would defeat the "force"; reset it. */
	if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending(t);
	spin_unlock_irqrestore(&t->sigmask_lock, flags);

	return send_sig_info(sig, info, t);
}
620 
/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

/*
 * Returns 0 if at least one delivery succeeded; otherwise the last
 * error seen (-ESRCH for an empty group, -EINVAL for pgrp <= 0).
 * NOTE(review): once retval becomes 0, later per-task errors are
 * ignored -- historical behaviour, confirm before changing.
 */
int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval = -EINVAL;
	if (pgrp > 0) {
		struct task_struct *p;

		retval = -ESRCH;
		read_lock(&tasklist_lock);
		for_each_task(p) {
			/* Only signal group leaders, one per thread group. */
			if (p->pgrp == pgrp && thread_group_leader(p)) {
				int err = send_sig_info(sig, info, p);
				if (retval)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
	}
	return retval;
}
646 
/*
 * kill_sl_info() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

/*
 * Returns 0 if at least one delivery succeeded; otherwise the last
 * error seen (-ESRCH when no leader found, -EINVAL for sess <= 0).
 * NOTE(review): same retval-latching behaviour as kill_pg_info().
 */
int
kill_sl_info(int sig, struct siginfo *info, pid_t sess)
{
	int retval = -EINVAL;
	if (sess > 0) {
		struct task_struct *p;

		retval = -ESRCH;
		read_lock(&tasklist_lock);
		for_each_task(p) {
			/* Match only the session leader(s) of @sess. */
			if (p->leader && p->session == sess) {
				int err = send_sig_info(sig, info, p);
				if (retval)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
	}
	return retval;
}
673 
674 inline int
675 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
676 {
677         int error;
678         struct task_struct *p;
679 
680         read_lock(&tasklist_lock);
681         p = find_task_by_pid(pid);
682         error = -ESRCH;
683         if (p) {
684                 if (!thread_group_leader(p)) {
685                        struct task_struct *tg;
686                        tg = find_task_by_pid(p->tgid);
687                        if (tg)
688                                p = tg;
689                 }
690                 error = send_sig_info(sig, info, p);
691         }
692         read_unlock(&tasklist_lock);
693         return error;
694 }
695 
696 
697 /*
698  * kill_something_info() interprets pid in interesting ways just like kill(2).
699  *
700  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
701  * is probably wrong.  Should make it like BSD or SYSV.
702  */
703 
704 static int kill_something_info(int sig, struct siginfo *info, int pid)
705 {
706         if (!pid) {
707                 return kill_pg_info(sig, info, current->pgrp);
708         } else if (pid == -1) {
709                 int retval = 0, count = 0;
710                 struct task_struct * p;
711 
712                 read_lock(&tasklist_lock);
713                 for_each_task(p) {
714                         if (p->pid > 1 && p != current && thread_group_leader(p)) {
715                                 int err = send_sig_info(sig, info, p);
716                                 ++count;
717                                 if (err != -EPERM)
718                                         retval = err;
719                         }
720                 }
721                 read_unlock(&tasklist_lock);
722                 return count ? retval : -ESRCH;
723         } else if (pid < 0) {
724                 return kill_pg_info(sig, info, -pid);
725         } else {
726                 return kill_proc_info(sig, info, pid);
727         }
728 }
729 
730 /*
731  * These are for backward compatibility with the rest of the kernel source.
732  */
733 
734 int
735 send_sig(int sig, struct task_struct *p, int priv)
736 {
737         return send_sig_info(sig, (void*)(long)(priv != 0), p);
738 }
739 
/*
 * force_sig - legacy interface: force @sig on @p as a kernel-generated
 * signal (magic info value 1), overriding SIG_IGN and blocking.
 */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void *)1L, p);
}
745 
746 int
747 kill_pg(pid_t pgrp, int sig, int priv)
748 {
749         return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
750 }
751 
752 int
753 kill_sl(pid_t sess, int sig, int priv)
754 {
755         return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
756 }
757 
758 int
759 kill_proc(pid_t pid, int sig, int priv)
760 {
761         return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
762 }
763 
764 /*
765  * Joy. Or not. Pthread wants us to wake up every thread
766  * in our parent group.
767  */
768 static void wake_up_parent(struct task_struct *parent)
769 {
770         struct task_struct *tsk = parent;
771 
772         do {
773                 wake_up_interruptible(&tsk->wait_chldexit);
774                 tsk = next_thread(tsk);
775         } while (tsk != parent);
776 }
777 
/*
 * Let a parent know about a status change of a child.
 */

/*
 * Builds a SIGCHLD-style siginfo describing @tsk's state change
 * (stopped / killed / dumped / exited) and sends @sig to the parent,
 * then wakes every thread of the parent group waiting in wait4().
 * Caller must hold the tasklist lock (see notify_parent()).
 */
void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	int why, status;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->times.tms_utime;
	info.si_stime = tsk->times.tms_stime;

	status = tsk->exit_code & 0x7f;
	why = SI_KERNEL;	/* shouldn't happen */
	switch (tsk->state) {
	case TASK_STOPPED:
		/* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
		if (tsk->ptrace & PT_PTRACED)
			why = CLD_TRAPPED;
		else
			why = CLD_STOPPED;
		break;

	default:
		/* Decode the wait()-style exit code: 0x80 = core dumped,
		 * low 7 bits = killing signal, high byte = exit status. */
		if (tsk->exit_code & 0x80)
			why = CLD_DUMPED;
		else if (tsk->exit_code & 0x7f)
			why = CLD_KILLED;
		else {
			why = CLD_EXITED;
			status = tsk->exit_code >> 8;
		}
		break;
	}
	info.si_code = why;
	info.si_status = status;

	send_sig_info(sig, &info, tsk->p_pptr);
	wake_up_parent(tsk->p_pptr);
}
824 
825 
/*
 * We need the tasklist lock because it's the only
 * thing that protects our "parent" pointer.
 *
 * exit.c calls "do_notify_parent()" directly, because
 * it already has the tasklist lock.
 */
void
notify_parent(struct task_struct *tsk, int sig)
{
	read_lock(&tasklist_lock);
	do_notify_parent(tsk, sig);
	read_unlock(&tasklist_lock);
}
840 
/* Symbols exported for use by loadable modules. */
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
857 
858 
/*
 * System call entry points.
 */

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * sys_rt_sigprocmask - examine and/or change the blocked-signal mask.
 *
 * @how: SIG_BLOCK / SIG_UNBLOCK / SIG_SETMASK (only used when @set).
 * @set: new mask from user space, or NULL to just read the old mask.
 * @oset: where to store the previous mask, or NULL.
 * @sigsetsize: must equal sizeof(sigset_t).
 *
 * SIGKILL and SIGSTOP are silently stripped from any new mask.  Note
 * the goto into the else-branch: when both @set and @oset are given,
 * control jumps to set_old to copy out the saved old mask.
 */
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		/* SIGKILL and SIGSTOP may never be blocked. */
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked;

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigorsets(&current->blocked, &old_set, &new_set);
			break;
		case SIG_UNBLOCK:
			signandsets(&current->blocked, &old_set, &new_set);
			break;
		case SIG_SETMASK:
			current->blocked = new_set;
			break;
		}

		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sigmask_lock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
924 
/*
 * do_sigpending - copy the set of signals that are both pending and
 * currently blocked out to user space (POSIX sigpending() semantics).
 * @sigsetsize may be smaller than sizeof(sigset_t) for compat callers;
 * larger sizes are rejected with -EINVAL.
 */
long do_sigpending(void *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	/* Snapshot blocked & pending atomically under the signal lock. */
	spin_lock_irq(&current->sigmask_lock);
	sigandsets(&pending, &current->blocked, &current->pending.signal);
	spin_unlock_irq(&current->sigmask_lock);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}
943 
/*
 * rt_sigpending() system call: copy the set of blocked, pending signals
 * for the current task out to userspace.  Thin wrapper around
 * do_sigpending(), which validates sigsetsize.
 */
asmlinkage long
sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
{
        return do_sigpending(set, sigsetsize);
}
949 
/*
 * rt_sigtimedwait() system call: wait until one of the signals in
 * *uthese becomes pending, optionally bounded by the timeout *uts.
 *
 * Returns the dequeued signal number on success (copying its siginfo to
 * *uinfo when supplied), -EAGAIN if the timeout expired with nothing
 * pending, -EINTR if the sleep was interrupted by a signal outside the
 * waited-for set, or -EINVAL / -EFAULT for bad arguments.
 */
asmlinkage long
sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
                    const struct timespec *uts, size_t sigsetsize)
{
        int ret, sig;
        sigset_t these;
        struct timespec ts;
        siginfo_t info;
        long timeout = 0;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&these, uthese, sizeof(these)))
                return -EFAULT;

        /*
         * Invert the set of allowed signals to get those we
         * want to block.  SIGKILL/SIGSTOP are removed first so the
         * inverted set always keeps them blockable-from-wait.
         */
        sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
        signotset(&these);

        if (uts) {
                if (copy_from_user(&ts, uts, sizeof(ts)))
                        return -EFAULT;
                /* Reject malformed timespecs up front. */
                if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
                    || ts.tv_sec < 0)
                        return -EINVAL;
        }

        spin_lock_irq(&current->sigmask_lock);
        sig = dequeue_signal(&these, &info);
        if (!sig) {
                /* No timeout argument means "wait forever". */
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (uts)
                        /* "+ (tv_sec || tv_nsec)" rounds any nonzero
                         * timeout up by one jiffy so we never sleep
                         * less than the caller asked for.  */
                        timeout = (timespec_to_jiffies(&ts)
                                   + (ts.tv_sec || ts.tv_nsec));

                if (timeout) {
                        /* None ready -- temporarily unblock those we're
                         * interested while we are sleeping in so that we'll
                         * be awakened when they arrive.  */
                        sigset_t oldblocked = current->blocked;
                        sigandsets(&current->blocked, &current->blocked, &these);
                        recalc_sigpending(current);
                        spin_unlock_irq(&current->sigmask_lock);

                        current->state = TASK_INTERRUPTIBLE;
                        timeout = schedule_timeout(timeout);

                        /* Retake the lock and retry the dequeue before
                         * restoring the original blocked mask.  */
                        spin_lock_irq(&current->sigmask_lock);
                        sig = dequeue_signal(&these, &info);
                        current->blocked = oldblocked;
                        recalc_sigpending(current);
                }
        }
        spin_unlock_irq(&current->sigmask_lock);

        if (sig) {
                ret = sig;
                if (uinfo) {
                        if (copy_siginfo_to_user(uinfo, &info))
                                ret = -EFAULT;
                }
        } else {
                ret = -EAGAIN;
                /* timeout != 0 here means we actually slept and were
                 * woken early, i.e. interrupted.  */
                if (timeout)
                        ret = -EINTR;
        }

        return ret;
}
1024 
1025 asmlinkage long
1026 sys_kill(int pid, int sig)
1027 {
1028         struct siginfo info;
1029         if (ccs_kill_permission(pid, sig))
1030                 return -EPERM;
1031 
1032         info.si_signo = sig;
1033         info.si_errno = 0;
1034         info.si_code = SI_USER;
1035         info.si_pid = current->pid;
1036         info.si_uid = current->uid;
1037 
1038         return kill_something_info(sig, &info, pid);
1039 }
1040 
1041 /*
1042  *  Kill only one task, even if it's a CLONE_THREAD task.
1043  */
1044 asmlinkage long
1045 sys_tkill(int pid, int sig)
1046 {
1047        struct siginfo info;
1048        int error;
1049        struct task_struct *p;
1050 
1051        /* This is only valid for single tasks */
1052        if (pid <= 0)
1053            return -EINVAL;
1054        if (ccs_tkill_permission(pid, sig))
1055                return -EPERM;
1056 
1057        info.si_signo = sig;
1058        info.si_errno = 0;
1059        info.si_code = SI_TKILL;
1060        info.si_pid = current->pid;
1061        info.si_uid = current->uid;
1062 
1063        read_lock(&tasklist_lock);
1064        p = find_task_by_pid(pid);
1065        error = -ESRCH;
1066        if (p) {
1067                error = send_sig_info(sig, &info, p);
1068        }
1069        read_unlock(&tasklist_lock);
1070        return error;
1071 }
1072 
1073 asmlinkage long
1074 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
1075 {
1076         siginfo_t info;
1077 
1078         if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
1079                 return -EFAULT;
1080 
1081         /* Not even root can pretend to send signals from the kernel.
1082            Nor can they impersonate a kill(), which adds source info.  */
1083         if (info.si_code >= 0)
1084                 return -EPERM;
1085         info.si_signo = sig;
1086         if (ccs_sigqueue_permission(pid, sig))
1087                 return -EPERM;
1088 
1089         /* POSIX.1b doesn't mention process groups.  */
1090         return kill_proc_info(sig, &info, pid);
1091 }
1092 
/*
 * Common implementation for the sigaction()-family system calls:
 * optionally report the current action for sig through *oact, and
 * install *act as the new action.  SIGKILL and SIGSTOP may be queried
 * but never changed.
 *
 * Returns 0 on success or -EINVAL for a bad signal number / an attempt
 * to change SIGKILL or SIGSTOP.
 */
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
        struct k_sigaction *k;

        if (sig < 1 || sig > _NSIG ||
            (act && (sig == SIGKILL || sig == SIGSTOP)))
                return -EINVAL;

        k = &current->sig->action[sig-1];

        spin_lock(&current->sig->siglock);

        if (oact)
                *oact = *k;

        if (act) {
                *k = *act;
                /* SIGKILL and SIGSTOP can never appear in a handler mask. */
                sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));

                /*
                 * POSIX 3.3.1.3:
                 *  "Setting a signal action to SIG_IGN for a signal that is
                 *   pending shall cause the pending signal to be discarded,
                 *   whether or not it is blocked."
                 *
                 *  "Setting a signal action to SIG_DFL for a signal that is
                 *   pending and whose default action is to ignore the signal
                 *   (for example, SIGCHLD), shall cause the pending signal to
                 *   be discarded, whether or not it is blocked"
                 *
                 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
                 * signal isn't actually ignored, but does automatic child
                 * reaping, while SIG_DFL is explicitly said by POSIX to force
                 * the signal to be ignored.
                 */

                if (k->sa.sa_handler == SIG_IGN
                    || (k->sa.sa_handler == SIG_DFL
                        && (sig == SIGCONT ||
                            sig == SIGCHLD ||
                            sig == SIGURG ||
                            sig == SIGWINCH))) {
                        /* Discard any queued instance, per POSIX above. */
                        spin_lock_irq(&current->sigmask_lock);
                        if (rm_sig_from_queue(sig, current))
                                recalc_sigpending(current);
                        spin_unlock_irq(&current->sigmask_lock);
                }
        }

        spin_unlock(&current->sig->siglock);
        return 0;
}
1146 
/*
 * Common implementation of sigaltstack(): optionally install the
 * alternate signal stack described by *uss and/or report the previous
 * one through *uoss.  sp is the caller's current stack pointer, used to
 * refuse changes while we are executing on the alternate stack.
 *
 * Returns 0 on success, or -EFAULT / -EPERM / -EINVAL / -ENOMEM.
 */
int 
do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
{
        stack_t oss;
        int error;

        /* Snapshot the old settings first, so *uoss reflects the state
         * before any change made below. */
        oss.ss_sp = (void *) current->sas_ss_sp;
        oss.ss_size = current->sas_ss_size;
        oss.ss_flags = sas_ss_flags(sp);

        if (uss) {
                void *ss_sp;
                size_t ss_size;
                int ss_flags;

                error = -EFAULT;
                if (verify_area(VERIFY_READ, uss, sizeof(*uss))
                    || __get_user(ss_sp, &uss->ss_sp)
                    || __get_user(ss_flags, &uss->ss_flags)
                    || __get_user(ss_size, &uss->ss_size))
                        goto out;

                /* POSIX: the alternate stack cannot be changed while the
                 * task is currently running on it. */
                error = -EPERM;
                if (on_sig_stack (sp))
                        goto out;

                error = -EINVAL;
                /*
                 *
                 * Note - this code used to test ss_flags incorrectly
                 *        old code may have been written using ss_flags==0
                 *        to mean ss_flags==SS_ONSTACK (as this was the only
                 *        way that worked) - this fix preserves that older
                 *        mechanism
                 */
                if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
                        goto out;

                if (ss_flags == SS_DISABLE) {
                        /* Disabling: forget the old stack entirely. */
                        ss_size = 0;
                        ss_sp = NULL;
                } else {
                        /* A usable stack must be at least MINSIGSTKSZ. */
                        error = -ENOMEM;
                        if (ss_size < MINSIGSTKSZ)
                                goto out;
                }

                current->sas_ss_sp = (unsigned long) ss_sp;
                current->sas_ss_size = ss_size;
        }

        error = 0;
        if (uoss) {
                error = -EFAULT;
                if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
                        goto out;
                error = __put_user(oss.ss_sp, &uoss->ss_sp) |
                        __put_user(oss.ss_size, &uoss->ss_size) |
                        __put_user(oss.ss_flags, &uoss->ss_flags);
        }

out:
        return error;
}
1211 
/*
 * Old-style sigpending(): like rt_sigpending, but reports only the
 * low word of the set (old_sigset_t).
 */
asmlinkage long
sys_sigpending(old_sigset_t *set)
{
        return do_sigpending(set, sizeof(*set));
}
1217 
1218 #if !defined(__alpha__)
1219 /* Alpha has its own versions with special arguments.  */
1220 
/*
 * Old-style sigprocmask(): manipulate only the low word of the blocked
 * mask.  how selects SIG_BLOCK / SIG_UNBLOCK / SIG_SETMASK; when oset
 * is non-NULL the previous mask word is returned through it.
 *
 * Control flow note: the "goto set_old" from the if-branch jumps into
 * the else-branch's label so both paths share the copy-out code.
 */
asmlinkage long
sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
{
        int error;
        old_sigset_t old_set, new_set;

        if (set) {
                error = -EFAULT;
                if (copy_from_user(&new_set, set, sizeof(*set)))
                        goto out;
                /* SIGKILL and SIGSTOP can never be blocked. */
                new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));

                spin_lock_irq(&current->sigmask_lock);
                old_set = current->blocked.sig[0];

                error = 0;
                switch (how) {
                default:
                        error = -EINVAL;
                        break;
                case SIG_BLOCK:
                        sigaddsetmask(&current->blocked, new_set);
                        break;
                case SIG_UNBLOCK:
                        sigdelsetmask(&current->blocked, new_set);
                        break;
                case SIG_SETMASK:
                        current->blocked.sig[0] = new_set;
                        break;
                }

                recalc_sigpending(current);
                spin_unlock_irq(&current->sigmask_lock);
                if (error)
                        goto out;
                if (oset)
                        goto set_old;
        } else if (oset) {
                /* NOTE(review): read without sigmask_lock, unlike
                 * sys_rt_sigprocmask; presumably safe because a
                 * single-word read is atomic (cf. sys_sgetmask's
                 * "SMP safe" comment) — confirm this is intentional. */
                old_set = current->blocked.sig[0];
        set_old:
                error = -EFAULT;
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
                        goto out;
        }
        error = 0;
out:
        return error;
}
1269 
1270 #ifndef __sparc__
1271 asmlinkage long
1272 sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
1273                  size_t sigsetsize)
1274 {
1275         struct k_sigaction new_sa, old_sa;
1276         int ret = -EINVAL;
1277 
1278         /* XXX: Don't preclude handling different sized sigset_t's.  */
1279         if (sigsetsize != sizeof(sigset_t))
1280                 goto out;
1281 
1282         if (act) {
1283                 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
1284                         return -EFAULT;
1285         }
1286 
1287         ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
1288 
1289         if (!ret && oact) {
1290                 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
1291                         return -EFAULT;
1292         }
1293 out:
1294         return ret;
1295 }
1296 #endif /* __sparc__ */
1297 #endif
1298 
1299 #if !defined(__alpha__) && !defined(__ia64__)
1300 /*
1301  * For backwards compatibility.  Functionality superseded by sigprocmask.
1302  */
/*
 * Old sgetmask() call: return the low word of the blocked-signal mask.
 */
asmlinkage long
sys_sgetmask(void)
{
        /* SMP safe */
        return current->blocked.sig[0];
}
1309 
1310 asmlinkage long
1311 sys_ssetmask(int newmask)
1312 {
1313         int old;
1314 
1315         spin_lock_irq(&current->sigmask_lock);
1316         old = current->blocked.sig[0];
1317 
1318         siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
1319                                                   sigmask(SIGSTOP)));
1320         recalc_sigpending(current);
1321         spin_unlock_irq(&current->sigmask_lock);
1322 
1323         return old;
1324 }
1325 #endif /* !defined(__alpha__) */
1326 
1327 #if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__)
1328 /*
1329  * For backwards compatibility.  Functionality superseded by sigaction.
1330  */
1331 asmlinkage unsigned long
1332 sys_signal(int sig, __sighandler_t handler)
1333 {
1334         struct k_sigaction new_sa, old_sa;
1335         int ret;
1336 
1337         new_sa.sa.sa_handler = handler;
1338         new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
1339 
1340         ret = do_sigaction(sig, &new_sa, &old_sa);
1341 
1342         return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
1343 }
1344 #endif /* !alpha && !__ia64__ && !defined(__mips__) */
1345 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp