TOMOYO Linux Cross Reference
Linux/include/linux/sched/signal.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
        refcount_t              count;
        struct k_sigaction      action[_NSIG];
        spinlock_t              siglock;
        wait_queue_head_t       signalfd_wqh;
};
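
/*
 * Usage sketch (illustrative, not part of the kernel API): ->action[]
 * is indexed by signal number minus one and must only be read or
 * written under ->siglock.  The function name and the local "ka" are
 * hypothetical.
 */
static inline void example_read_action(struct task_struct *task, int sig)
{
        struct k_sigaction ka;

        spin_lock_irq(&task->sighand->siglock);
        ka = task->sighand->action[sig - 1];
        spin_unlock_irq(&task->sighand->siglock);
        /* ka.sa.sa_handler is now SIG_DFL, SIG_IGN or a user handler. */
}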

/*
 * Per-process accounting stats:
 */
struct pacct_struct {
        int                     ac_flag;
        long                    ac_exitcode;
        unsigned long           ac_mem;
        u64                     ac_utime, ac_stime;
        unsigned long           ac_minflt, ac_majflt;
};

struct cpu_itimer {
        u64 expires;
        u64 incr;
};

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
        atomic64_t utime;
        atomic64_t stime;
        atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
        (struct task_cputime_atomic) {                          \
                .utime = ATOMIC64_INIT(0),                      \
                .stime = ATOMIC64_INIT(0),                      \
                .sum_exec_runtime = ATOMIC64_INIT(0),           \
        }

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:     atomic thread group interval timers.
 * @running:            true when there are timers running and
 *                      @cputime_atomic receives updates.
 * @checking_timer:     true when a thread in the group is in the
 *                      process of checking for thread group timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
        struct task_cputime_atomic cputime_atomic;
        bool running;
        bool checking_timer;
};
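
/*
 * Usage sketch for INIT_CPUTIME_ATOMIC, in the style of the static
 * INIT_SIGNALS() initializer in older init_task.h; "example_cputimer"
 * is a hypothetical object shown only to illustrate the macro.
 */
static struct thread_group_cputimer example_cputimer = {
        .cputime_atomic = INIT_CPUTIME_ATOMIC,
        .running        = false,
        .checking_timer = false,
};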

struct multiprocess_signals {
        sigset_t signal;
        struct hlist_node node;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
        refcount_t              sigcnt;
        atomic_t                live;
        int                     nr_threads;
        struct list_head        thread_head;

        wait_queue_head_t       wait_chldexit;  /* for wait4() */

        /* current thread group signal load-balancing target: */
        struct task_struct      *curr_target;

        /* shared signal handling: */
        struct sigpending       shared_pending;

        /* For collecting multiprocess signals during fork */
        struct hlist_head       multiprocess;

        /* thread group exit support */
        int                     group_exit_code;
        /* overloaded:
         * - notify group_exit_task when ->count is equal to notify_count
         * - everyone except group_exit_task is stopped during signal delivery
         *   of fatal signals, group_exit_task processes the signal.
         */
        int                     notify_count;
        struct task_struct      *group_exit_task;

        /* thread group stop support, overloads group_exit_code too */
        int                     group_stop_count;
        unsigned int            flags; /* see SIGNAL_* flags below */

        /*
         * PR_SET_CHILD_SUBREAPER marks a process, like a service
         * manager, to re-parent orphan (double-forking) child processes
         * to this process instead of 'init'. The service manager is
         * able to receive SIGCHLD signals and is able to investigate
         * the process until it calls wait(). All children of this
         * process will inherit a flag if they should look for a
         * child_subreaper process at exit.
         */
        unsigned int            is_child_subreaper:1;
        unsigned int            has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

        /* POSIX.1b Interval Timers */
        int                     posix_timer_id;
        struct list_head        posix_timers;

        /* ITIMER_REAL timer for the process */
        struct hrtimer real_timer;
        ktime_t it_real_incr;

        /*
         * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
         * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as
         * these values are defined to 0 and 1 respectively.
         */
        struct cpu_itimer it[2];

        /*
         * Thread group totals for process CPU timers.
         * See thread_group_cputimer(), et al, for details.
         */
        struct thread_group_cputimer cputimer;

        /* Earliest-expiration cache. */
        struct task_cputime cputime_expires;

        struct list_head cpu_timers[3];

#endif

        /* PID/PID hash table linkage. */
        struct pid *pids[PIDTYPE_MAX];

#ifdef CONFIG_NO_HZ_FULL
        atomic_t tick_dep_mask;
#endif

        struct pid *tty_old_pgrp;

        /* boolean value for session group leader */
        int leader;

        struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
        struct autogroup *autogroup;
#endif
        /*
         * Cumulative resource counters for dead threads in the group,
         * and for reaped dead child processes forked by this group.
         * Live threads maintain their own counters and add to these
         * in __exit_signal, except for the group leader.
         */
        seqlock_t stats_lock;
        u64 utime, stime, cutime, cstime;
        u64 gtime;
        u64 cgtime;
        struct prev_cputime prev_cputime;
        unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
        unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
        unsigned long inblock, oublock, cinblock, coublock;
        unsigned long maxrss, cmaxrss;
        struct task_io_accounting ioac;

        /*
         * Cumulative ns of scheduled CPU time of dead threads in the
         * group, not including a zombie group leader. (This only differs
         * from jiffies_to_ns(utime + stime) if sched_clock uses something
         * other than jiffies.)
         */
        unsigned long long sum_sched_runtime;

        /*
         * We don't bother to synchronize most readers of this at all,
         * because there is no reader checking a limit that actually needs
         * to get both rlim_cur and rlim_max atomically, and either one
         * alone is a single word that can safely be read normally.
         * getrlimit/setrlimit use task_lock(current->group_leader) to
         * protect this instead of the siglock, because they really
         * have no need to disable irqs.
         */
        struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
        struct pacct_struct pacct;      /* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
        struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
        unsigned audit_tty;
        struct tty_audit_buf *tty_audit_buf;
#endif

        /*
         * Thread is the potential origin of an oom condition; kill first on
         * oom.
         */
        bool oom_flag_origin;
        short oom_score_adj;            /* OOM kill score adjustment */
        short oom_score_adj_min;        /* OOM kill score adjustment min value.
                                         * Only settable by CAP_SYS_RESOURCE. */
        struct mm_struct *oom_mm;       /* recorded mm when the thread group got
                                         * killed by the oom killer */

        struct mutex cred_guard_mutex;  /* guard against foreign influences on
                                         * credential calculations
                                         * (notably ptrace) */
} __randomize_layout;

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED     0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED   0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT       0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP   0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED      0x00000010
#define SIGNAL_CLD_CONTINUED    0x00000020
#define SIGNAL_CLD_MASK         (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE       0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
                          SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
                                         unsigned int flags)
{
        WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
        sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
        return  (sig->flags & SIGNAL_GROUP_EXIT) ||
                (sig->group_exit_task != NULL);
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *task,
                          sigset_t *mask, kernel_siginfo_t *info);

static inline int kernel_dequeue_signal(void)
{
        struct task_struct *task = current;
        kernel_siginfo_t __info;
        int ret;

        spin_lock_irq(&task->sighand->siglock);
        ret = dequeue_signal(task, &task->blocked, &__info);
        spin_unlock_irq(&task->sighand->siglock);

        return ret;
}
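
/*
 * Usage sketch: a kernel thread that has opted in to a signal with
 * allow_signal() can drain it via kernel_dequeue_signal(), which
 * returns the dequeued signal number or 0 if none was pending.  The
 * function below is illustrative only and assumes <linux/kthread.h>;
 * "example_kthread" is a hypothetical name.
 */
static int example_kthread(void *unused)
{
        allow_signal(SIGKILL);

        while (!kthread_should_stop()) {
                schedule_timeout_interruptible(HZ);
                if (kernel_dequeue_signal() == SIGKILL)
                        break;  /* a fatal signal was queued for us */
        }
        return 0;
}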

static inline void kernel_signal_stop(void)
{
        spin_lock_irq(&current->sighand->siglock);
        if (current->jobctl & JOBCTL_STOP_DEQUEUED)
                set_special_state(TASK_STOPPED);
        spin_unlock_irq(&current->sighand->siglock);

        schedule();
}

#ifdef __ARCH_SI_TRAPNO
# define ___ARCH_SI_TRAPNO(_a1) , _a1
#else
# define ___ARCH_SI_TRAPNO(_a1)
#endif
#ifdef __ia64__
# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
#else
# define ___ARCH_SI_IA64(_a1, _a2, _a3)
#endif

int force_sig_fault(int sig, int code, void __user *addr
        ___ARCH_SI_TRAPNO(int trapno)
        ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
        , struct task_struct *t);
int send_sig_fault(int sig, int code, void __user *addr
        ___ARCH_SI_TRAPNO(int trapno)
        ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
        , struct task_struct *t);

int force_sig_mceerr(int code, void __user *, short, struct task_struct *);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);

int force_sig_ptrace_errno_trap(int errno, void __user *addr);

extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig, struct task_struct *p);
extern int force_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
                                const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline int restart_syscall(void)
{
        set_tsk_thread_flag(current, TIF_SIGPENDING);
        return -ERESTARTNOINTR;
}
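
/*
 * Usage sketch: a syscall implementation that must back out and be
 * transparently re-executed (for example after racing with a
 * concurrent event) can return restart_syscall(); the pending-signal
 * flag it sets forces the signal/restart path to run on the way back
 * to user mode.  The name and condition below are hypothetical.
 */
static inline long example_syscall(bool raced_with_event)
{
        if (raced_with_event)
                return restart_syscall();       /* -ERESTARTNOINTR */
        return 0;
}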

static inline int signal_pending(struct task_struct *p)
{
        return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
        return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
        return signal_pending(p) && __fatal_signal_pending(p);
}
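
/*
 * Usage sketch: long-running kernel loops commonly poll
 * fatal_signal_pending() so that SIGKILL can abort them promptly;
 * "example_process_chunks" is hypothetical and the work step is
 * elided.
 */
static inline int example_process_chunks(unsigned long nr_chunks)
{
        unsigned long i;

        for (i = 0; i < nr_chunks; i++) {
                if (fatal_signal_pending(current))
                        return -EINTR;
                /* ... process one chunk of work here ... */
                cond_resched();
        }
        return 0;
}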

static inline int signal_pending_state(long state, struct task_struct *p)
{
        if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
                return 0;
        if (!signal_pending(p))
                return 0;

        return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
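
/*
 * Sketch of the caller's side, mirroring the check the scheduler makes
 * before letting a task block: a TASK_INTERRUPTIBLE sleeper is woken
 * by any pending signal, while a TASK_KILLABLE one (TASK_WAKEKILL |
 * TASK_UNINTERRUPTIBLE) is woken only by a fatal signal.  The wrapper
 * name is hypothetical.
 */
static inline bool example_may_block(long state, struct task_struct *p)
{
        return !signal_pending_state(state, p);
}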

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
        signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}

static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
        signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

void task_join_group_stop(struct task_struct *task);

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors.  These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag.  For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it.  TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce.  TIF_SIGPENDING is expected to already be set here
 * (hence the WARN_ON() below), which ensures that the arch signal
 * code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
        set_thread_flag(TIF_RESTORE_SIGMASK);
        WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
        clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline void clear_restore_sigmask(void)
{
        clear_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
        return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline bool test_restore_sigmask(void)
{
        return test_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_and_clear_restore_sigmask(void)
{
        return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else   /* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
        current->restore_sigmask = true;
        WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
        task->restore_sigmask = false;
}

static inline void clear_restore_sigmask(void)
{
        current->restore_sigmask = false;
}

static inline bool test_restore_sigmask(void)
{
        return current->restore_sigmask;
}

static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
        return task->restore_sigmask;
}

static inline bool test_and_clear_restore_sigmask(void)
{
        if (!current->restore_sigmask)
                return false;
        current->restore_sigmask = false;
        return true;
}
#endif  /* TIF_RESTORE_SIGMASK */

static inline void restore_saved_sigmask(void)
{
        if (test_and_clear_restore_sigmask())
                __set_current_blocked(&current->saved_sigmask);
}
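
/*
 * Sketch of the flow these helpers support, modeled on sigsuspend() in
 * kernel/signal.c: install a temporary blocked mask, sleep until a
 * signal arrives, then arrange for the saved mask to be restored after
 * the handler has run.  The function name here is hypothetical.
 */
static inline int example_sigsuspend(sigset_t *set)
{
        current->saved_sigmask = current->blocked;
        set_current_blocked(set);

        while (!signal_pending(current)) {
                __set_current_state(TASK_INTERRUPTIBLE);
                schedule();
        }

        set_restore_sigmask();
        return -ERESTARTNOHAND;
}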

static inline sigset_t *sigmask_to_save(void)
{
        sigset_t *res = &current->blocked;

        if (unlikely(test_restore_sigmask()))
                res = &current->saved_sigmask;
        return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
        return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV   ((struct kernel_siginfo *) 1)

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
        /*
         * If the signal stack is SS_AUTODISARM then, by construction, we
         * can't be on the signal stack unless user code deliberately set
         * SS_AUTODISARM when we were already on it.
         *
         * This improves reliability: if user state gets corrupted such that
         * the stack pointer points very close to the end of the signal stack,
         * then this check will enable the signal to be handled anyway.
         */
        if (current->sas_ss_flags & SS_AUTODISARM)
                return 0;

#ifdef CONFIG_STACK_GROWSUP
        return sp >= current->sas_ss_sp &&
                sp - current->sas_ss_sp < current->sas_ss_size;
#else
        return sp > current->sas_ss_sp &&
                sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
        if (!current->sas_ss_size)
                return SS_DISABLE;

        return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
        p->sas_ss_sp = 0;
        p->sas_ss_size = 0;
        p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
        if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
                return current->sas_ss_sp;
#else
                return current->sas_ss_sp + current->sas_ss_size;
#endif
        return sp;
}
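
/*
 * Sketch of how arch signal-frame setup typically uses sigsp(): choose
 * the stack for the new frame (honoring SA_ONSTACK), then round down
 * for ABI alignment.  user_stack_pointer() is arch-provided, the
 * 16-byte alignment is only an example, and the function name is
 * hypothetical.
 */
static inline unsigned long example_frame_addr(struct pt_regs *regs,
                                               struct ksignal *ksig,
                                               size_t frame_size)
{
        unsigned long sp = sigsp(user_stack_pointer(regs), ksig);

        return round_down(sp - frame_size, 16);
}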

extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
        list_empty(&init_task.tasks)

#define next_task(p) \
        list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
        for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
        for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
        while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t)    \
        list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t)           \
        __for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t)   \
        for_each_process(p) for_each_thread(p, t)
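
/*
 * Usage sketch: the iterators above are RCU-safe, so a reader walking
 * every thread in the system typically brackets the loop with
 * rcu_read_lock()/rcu_read_unlock() (or holds tasklist_lock).  The
 * counting function below is illustrative only.
 */
static inline int example_count_threads(void)
{
        struct task_struct *p, *t;
        int n = 0;

        rcu_read_lock();
        for_each_process_thread(p, t)
                n++;
        rcu_read_unlock();
        return n;
}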

typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
        struct pid *pid;

        if (type == PIDTYPE_PID)
                pid = task_pid(task);
        else
                pid = task->signal->pids[type];
        return pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
        return task->signal->pids[PIDTYPE_TGID];
}

/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
        return task->signal->pids[PIDTYPE_PGID];
}

static inline struct pid *task_session(struct task_struct *task)
{
        return task->signal->pids[PIDTYPE_SID];
}

static inline int get_nr_threads(struct task_struct *task)
{
        return task->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
        return p->exit_signal >= 0;
}

/* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid; we don't actually care if we have the right task.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
        return task_pid(p) == task_tgid(p);
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
        return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
        return list_entry_rcu(p->thread_group.next,
                              struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
        return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
                (thread_group_leader(p) && !thread_group_empty(p))

extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
                                                        unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
                                                       unsigned long *flags)
{
        struct sighand_struct *ret;

        ret = __lock_task_sighand(task, flags);
        (void)__cond_lock(&task->sighand->siglock, ret);
        return ret;
}

static inline void unlock_task_sighand(struct task_struct *task,
                                                unsigned long *flags)
{
        spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
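
/*
 * Usage sketch of the canonical locking pattern: __lock_task_sighand()
 * returns NULL if the task is already exiting and ->sighand is gone,
 * so every caller must check the result before touching signal state.
 * The body here is illustrative only.
 */
static inline void example_with_siglock(struct task_struct *task)
{
        unsigned long flags;

        if (!lock_task_sighand(task, &flags))
                return;                 /* task is exiting */

        /* ->sighand and ->signal are stable and locked here */

        unlock_task_sighand(task, &flags);
}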

static inline unsigned long task_rlimit(const struct task_struct *task,
                unsigned int limit)
{
        return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *task,
                unsigned int limit)
{
        return READ_ONCE(task->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
        return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
        return task_rlimit_max(current, limit);
}
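
/*
 * Usage sketch: a typical soft-limit check against the current task,
 * similar in spirit to the file-descriptor checks in fs/file.c;
 * RLIMIT_NOFILE is just an example resource and the wrapper name is
 * hypothetical.
 */
static inline bool example_fd_in_limit(unsigned int fd)
{
        return fd < rlimit(RLIMIT_NOFILE);
}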

#endif /* _LINUX_SCHED_SIGNAL_H */
