/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

static void exit_mm(struct task_struct *tsk);

static void __unhash_process(struct task_struct *p, bool group_dead)
{
        nr_threads--;
        detach_pid(p, PIDTYPE_PID);
        if (group_dead) {
                detach_pid(p, PIDTYPE_PGID);
                detach_pid(p, PIDTYPE_SID);

                list_del_rcu(&p->tasks);
                list_del_init(&p->sibling);
                __this_cpu_dec(process_counts);
        }
        list_del_rcu(&p->thread_group);
        list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        bool group_dead = thread_group_leader(tsk);
        struct sighand_struct *sighand;
        struct tty_struct *uninitialized_var(tty);
        cputime_t utime, stime;

        sighand = rcu_dereference_check(tsk->sighand,
                                        lockdep_tasklist_lock_is_held());
        spin_lock(&sighand->siglock);

        posix_cpu_timers_exit(tsk);
        if (group_dead) {
                posix_cpu_timers_exit_group(tsk);
                tty = sig->tty;
                sig->tty = NULL;
        } else {
                /*
                 * This can only happen if the caller is de_thread().
                 * FIXME: this is a temporary hack; we should teach
                 * posix-cpu-timers to handle this case correctly.
                 */
                if (unlikely(has_group_leader_pid(tsk)))
                        posix_cpu_timers_exit_group(tsk);

                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
                 */
                if (sig->notify_count > 0 && !--sig->notify_count)
                        wake_up_process(sig->group_exit_task);

                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
        }

        /*
         * Accumulate here the counters for all threads as they die. We could
         * skip the group leader because it is the last user of signal_struct,
         * but we want to avoid the race with thread_group_cputime() which can
         * see the empty ->thread_head list.
         */
        task_cputime(tsk, &utime, &stime);
        write_seqlock(&sig->stats_lock);
        sig->utime += utime;
        sig->stime += stime;
        sig->gtime += task_gtime(tsk);
        sig->min_flt += tsk->min_flt;
        sig->maj_flt += tsk->maj_flt;
        sig->nvcsw += tsk->nvcsw;
        sig->nivcsw += tsk->nivcsw;
        sig->inblock += task_io_get_inblock(tsk);
        sig->oublock += task_io_get_oublock(tsk);
        task_io_accounting_add(&sig->ioac, &tsk->ioac);
        sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
        sig->nr_threads--;
        __unhash_process(tsk, group_dead);
        write_sequnlock(&sig->stats_lock);

        /*
         * Do this under ->siglock, we can race with another thread
         * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
         */
        flush_sigqueue(&tsk->pending);
        tsk->sighand = NULL;
        spin_unlock(&sighand->siglock);

        __cleanup_sighand(sighand);
        clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
        if (group_dead) {
                flush_sigqueue(&sig->shared_pending);
                tty_kref_put(tty);
        }
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

        perf_event_delayed_put(tsk);
        trace_sched_process_free(tsk);
        put_task_struct(tsk);
}


void release_task(struct task_struct *p)
{
        struct task_struct *leader;
        int zap_leader;
repeat:
        /* don't need to get the RCU readlock here - the process is dead and
         * can't be modifying its own credentials. But shut RCU-lockdep up */
        rcu_read_lock();
        atomic_dec(&__task_cred(p)->user->processes);
        rcu_read_unlock();

        proc_flush_task(p);

        write_lock_irq(&tasklist_lock);
        ptrace_release_task(p);
        __exit_signal(p);

        /*
         * If we are the last non-leader member of the thread
         * group, and the leader is zombie, then notify the
         * group leader's parent process. (if it wants notification.)
         */
        zap_leader = 0;
        leader = p->group_leader;
        if (leader != p && thread_group_empty(leader)
                        && leader->exit_state == EXIT_ZOMBIE) {
                /*
                 * If we were the last child thread and the leader has
                 * exited already, and the leader's parent ignores SIGCHLD,
                 * then we are the one who should release the leader.
                 */
                zap_leader = do_notify_parent(leader, leader->exit_signal);
                if (zap_leader)
                        leader->exit_state = EXIT_DEAD;
        }

        write_unlock_irq(&tasklist_lock);
        release_thread(p);
        call_rcu(&p->rcu, delayed_put_task_struct);

        p = leader;
        if (unlikely(zap_leader))
                goto repeat;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
                                        struct task_struct *ignored_task)
{
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if ((p == ignored_task) ||
                    (p->exit_state && thread_group_empty(p)) ||
                    is_global_init(p->real_parent))
                        continue;

                if (task_pgrp(p->real_parent) != pgrp &&
                    task_session(p->real_parent) == task_session(p))
                        return 0;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

        return 1;
}

int is_current_pgrp_orphaned(void)
{
        int retval;

        read_lock(&tasklist_lock);
        retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
        read_unlock(&tasklist_lock);

        return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
                        return true;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

        return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
        struct pid *pgrp = task_pgrp(tsk);
        struct task_struct *ignored_task = tsk;

        if (!parent)
                /* exit: our father is in a different pgrp than
                 * we are and we were the only connection outside.
                 */
                parent = tsk->real_parent;
        else
                /* reparent: our child is in a different pgrp than
                 * we are, and it was the only connection outside.
                 */
                ignored_task = NULL;

        if (task_pgrp(parent) != pgrp &&
            task_session(parent) == task_session(tsk) &&
            will_become_orphaned_pgrp(pgrp, ignored_task) &&
            has_stopped_jobs(pgrp)) {
                __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
                __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
        }
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting. If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
        struct task_struct *c, *g, *p = current;

retry:
        /*
         * If the exiting or execing task is not the owner, it's
         * someone else's problem.
         */
        if (mm->owner != p)
                return;
        /*
         * The current owner is exiting/execing and there are no other
         * candidates.  Do not leave the mm pointing to a possibly
         * freed task structure.
         */
        if (atomic_read(&mm->mm_users) <= 1) {
                mm->owner = NULL;
                return;
        }

        read_lock(&tasklist_lock);
        /*
         * Search in the children
         */
        list_for_each_entry(c, &p->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search in the siblings
         */
        list_for_each_entry(c, &p->real_parent->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search through everything else, we should not get here often.
         */
        for_each_process(g) {
                if (g->flags & PF_KTHREAD)
                        continue;
                for_each_thread(g, c) {
                        if (c->mm == mm)
                                goto assign_new_owner;
                        if (c->mm)
                                break;
                }
        }
        read_unlock(&tasklist_lock);
        /*
         * We found no owner yet mm_users > 1: this implies that we are
         * most likely racing with swapoff (try_to_unuse()) or /proc or
         * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
         */
        mm->owner = NULL;
        return;

assign_new_owner:
        BUG_ON(c == p);
        get_task_struct(c);
        /*
         * The task_lock protects c->mm from changing.
         * We always want mm->owner->mm == mm
         */
        task_lock(c);
        /*
         * Delay read_unlock() till we have the task_lock()
         * to ensure that c does not slip away underneath us
         */
        read_unlock(&tasklist_lock);
        if (c->mm != mm) {
                task_unlock(c);
                put_task_struct(c);
                goto retry;
        }
        mm->owner = c;
        task_unlock(c);
        put_task_struct(c);
}
#endif /* CONFIG_MEMCG */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct *tsk)
{
        struct mm_struct *mm = tsk->mm;
        struct core_state *core_state;

        mm_release(tsk, mm);
        if (!mm)
                return;
        sync_mm_rss(mm);
        /*
         * Serialize with any possible pending coredump.
         * We must hold mmap_sem around checking core_state
         * and clearing tsk->mm.  The core-inducing thread
         * will increment ->nr_threads for each thread in the
         * group with ->mm != NULL.
         */
        down_read(&mm->mmap_sem);
        core_state = mm->core_state;
        if (core_state) {
                struct core_thread self;

                up_read(&mm->mmap_sem);

                self.task = tsk;
                self.next = xchg(&core_state->dumper.next, &self);
                /*
                 * Implies mb(), the result of xchg() must be visible
                 * to core_state->dumper.
                 */
                if (atomic_dec_and_test(&core_state->nr_threads))
                        complete(&core_state->startup);

                for (;;) {
                        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                        if (!self.task) /* see coredump_finish() */
                                break;
                        freezable_schedule();
                }
                __set_task_state(tsk, TASK_RUNNING);
                down_read(&mm->mmap_sem);
        }
        atomic_inc(&mm->mm_count);
        BUG_ON(mm != tsk->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(tsk);
        tsk->mm = NULL;
        up_read(&mm->mmap_sem);
        enter_lazy_tlb(mm, current);
        task_unlock(tsk);
        mm_update_next_owner(mm);
        mmput(mm);
        if (test_thread_flag(TIF_MEMDIE))
                exit_oom_victim();
}

static struct task_struct *find_alive_thread(struct task_struct *p)
{
        struct task_struct *t;

        for_each_thread(p, t) {
                if (!(t->flags & PF_EXITING))
                        return t;
        }
        return NULL;
}

static struct task_struct *find_child_reaper(struct task_struct *father)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
{
        struct pid_namespace *pid_ns = task_active_pid_ns(father);
        struct task_struct *reaper = pid_ns->child_reaper;

        if (likely(reaper != father))
                return reaper;

        reaper = find_alive_thread(father);
        if (reaper) {
                pid_ns->child_reaper = reaper;
                return reaper;
        }

        write_unlock_irq(&tasklist_lock);
        if (unlikely(pid_ns == &init_pid_ns)) {
                panic("Attempted to kill init! exitcode=0x%08x\n",
                        father->signal->group_exit_code ?: father->exit_code);
        }
        zap_pid_ns_processes(pid_ns);
        write_lock_irq(&tasklist_lock);

        return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
                                           struct task_struct *child_reaper)
{
        struct task_struct *thread, *reaper;

        thread = find_alive_thread(father);
        if (thread)
                return thread;

        if (father->signal->has_child_subreaper) {
                /*
                 * Find the first ->is_child_subreaper ancestor in our pid_ns.
                 * We start from father to ensure we can not look into another
                 * namespace, this is safe because all its threads are dead.
                 */
                for (reaper = father;
                     !same_thread_group(reaper, child_reaper);
                     reaper = reaper->real_parent) {
                        /* call_usermodehelper() descendants need this check */
                        if (reaper == &init_task)
                                break;
                        if (!reaper->signal->is_child_subreaper)
                                continue;
                        thread = find_alive_thread(reaper);
                        if (thread)
                                return thread;
                }
        }

        return child_reaper;
}

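/*
 * Illustrative userspace sketch (not part of this file): the
 * has_child_subreaper path above is armed with prctl(2).  A service
 * manager can mark itself as a child subreaper so that orphaned
 * descendants are reparented to it by find_new_reaper() instead of
 * going to init:
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_CHILD_SUBREAPER, 1);
 *	// From now on, orphaned descendants reparent to this process,
 *	// which can then reap them with the wait(2) family.
 */
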
/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
                                struct list_head *dead)
{
        if (unlikely(p->exit_state == EXIT_DEAD))
                return;

        /* We don't want people slaying init. */
        p->exit_signal = SIGCHLD;

        /* If it has exited notify the new parent about this child's death. */
        if (!p->ptrace &&
            p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
                if (do_notify_parent(p, p->exit_signal)) {
                        p->exit_state = EXIT_DEAD;
                        list_add(&p->ptrace_entry, dead);
                }
        }

        kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *      as a result of our exiting, and if they have any stopped
 *      jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
                                        struct list_head *dead)
{
        struct task_struct *p, *t, *reaper;

        if (unlikely(!list_empty(&father->ptraced)))
                exit_ptrace(father, dead);

        /* Can drop and reacquire tasklist_lock */
        reaper = find_child_reaper(father);
        if (list_empty(&father->children))
                return;

        reaper = find_new_reaper(father, reaper);
        list_for_each_entry(p, &father->children, sibling) {
                for_each_thread(p, t) {
                        t->real_parent = reaper;
                        BUG_ON((!t->ptrace) != (t->parent == father));
                        if (likely(!t->ptrace))
                                t->parent = t->real_parent;
                        if (t->pdeath_signal)
                                group_send_sig_info(t->pdeath_signal,
                                                    SEND_SIG_NOINFO, t);
                }
                /*
                 * If this is a threaded reparent there is no need to
                 * notify anyone anything has happened.
                 */
                if (!same_thread_group(reaper, father))
                        reparent_leader(father, p, dead);
        }
        list_splice_tail_init(&father->children, &reaper->children);
}

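/*
 * Illustrative userspace sketch (not part of this file): the
 * t->pdeath_signal delivered during the reparenting loop above is
 * armed with prctl(2).  A child that must not outlive its parent can
 * ask for a signal when it gets reparented:
 *
 *	#include <sys/prctl.h>
 *	#include <signal.h>
 *
 *	prctl(PR_SET_PDEATHSIG, SIGTERM);
 *	// If the parent exits first, forget_original_parent() now
 *	// sends this process SIGTERM while reparenting it.
 */
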
/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
        bool autoreap;
        struct task_struct *p, *n;
        LIST_HEAD(dead);

        write_lock_irq(&tasklist_lock);
        forget_original_parent(tsk, &dead);

        if (group_dead)
                kill_orphaned_pgrp(tsk->group_leader, NULL);

        if (unlikely(tsk->ptrace)) {
                int sig = thread_group_leader(tsk) &&
                                thread_group_empty(tsk) &&
                                !ptrace_reparented(tsk) ?
                        tsk->exit_signal : SIGCHLD;
                autoreap = do_notify_parent(tsk, sig);
        } else if (thread_group_leader(tsk)) {
                autoreap = thread_group_empty(tsk) &&
                        do_notify_parent(tsk, tsk->exit_signal);
        } else {
                autoreap = true;
        }

        tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
        if (tsk->exit_state == EXIT_DEAD)
                list_add(&tsk->ptrace_entry, &dead);

        /* mt-exec, de_thread() is waiting for group leader */
        if (unlikely(tsk->signal->notify_count < 0))
                wake_up_process(tsk->signal->group_exit_task);
        write_unlock_irq(&tasklist_lock);

        list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);
        }
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
        static DEFINE_SPINLOCK(low_water_lock);
        static int lowest_to_date = THREAD_SIZE;
        unsigned long free;

        free = stack_not_used(current);

        if (free >= lowest_to_date)
                return;

        spin_lock(&low_water_lock);
        if (free < lowest_to_date) {
                pr_warn("%s (%d) used greatest stack depth: %lu bytes left\n",
                        current->comm, task_pid_nr(current), free);
                lowest_to_date = free;
        }
        spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

void do_exit(long code)
{
        struct task_struct *tsk = current;
        int group_dead;
        TASKS_RCU(int tasks_rcu_i);

        profile_task_exit(tsk);

        WARN_ON(blk_needs_flush_plug(tsk));

        if (unlikely(in_interrupt()))
                panic("Aiee, killing interrupt handler!");
        if (unlikely(!tsk->pid))
                panic("Attempted to kill the idle task!");

        /*
         * If do_exit is called because this process oopsed, it's possible
         * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
         * continuing. Amongst other possible reasons, this is to prevent
         * mm_release()->clear_child_tid() from writing to a user-controlled
         * kernel address.
         */
        set_fs(USER_DS);

        ptrace_event(PTRACE_EVENT_EXIT, code);

        validate_creds_for_do_exit(tsk);

        /*
         * We're taking recursive faults here in do_exit. Safest is to just
         * leave this task alone and wait for reboot.
         */
        if (unlikely(tsk->flags & PF_EXITING)) {
                pr_alert("Fixing recursive fault but reboot is needed!\n");
                /*
                 * We can do this unlocked here. The futex code uses
                 * this flag just to verify whether the pi state
                 * cleanup has been done or not. In the worst case it
                 * loops once more. We pretend that the cleanup was
                 * done as there is no way to return. Either the
                 * OWNER_DIED bit is set by now or we push the blocked
                 * task into the wait-forever nirvana as well.
                 */
                tsk->flags |= PF_EXITPIDONE;
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule();
        }

        exit_signals(tsk);  /* sets PF_EXITING */
        /*
         * tsk->flags are checked in the futex code to protect against
         * an exiting task cleaning up the robust pi futexes.
         */
        smp_mb();
        raw_spin_unlock_wait(&tsk->pi_lock);

        if (unlikely(in_atomic()))
                pr_info("note: %s[%d] exited with preempt_count %d\n",
                        current->comm, task_pid_nr(current),
                        preempt_count());

        /* sync mm's RSS info before statistics gathering */
        if (tsk->mm)
                sync_mm_rss(tsk->mm);
        acct_update_integrals(tsk);
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk->signal);
                if (tsk->mm)
                        setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
        }
        acct_collect(code, group_dead);
        if (group_dead)
                tty_audit_exit();
        audit_free(tsk);

        tsk->exit_code = code;
        taskstats_exit(tsk, group_dead);

        exit_mm(tsk);

        if (group_dead)
                acct_process();
        trace_sched_process_exit(tsk);

        exit_sem(tsk);
        exit_shm(tsk);
        exit_files(tsk);
        exit_fs(tsk);
        if (group_dead)
                disassociate_ctty(1);
        exit_task_namespaces(tsk);
        exit_task_work(tsk);
        exit_thread();

        /*
         * Flush inherited counters to the parent - before the parent
         * gets woken up by child-exit notifications.
         *
         * because of cgroup mode, must be called before cgroup_exit()
         */
        perf_event_exit_task(tsk);

        cgroup_exit(tsk);

        /*
         * FIXME: do that only when needed, using sched_exit tracepoint
         */
        flush_ptrace_hw_breakpoint(tsk);

        TASKS_RCU(tasks_rcu_i = __srcu_read_lock(&tasks_rcu_exit_srcu));
        exit_notify(tsk, group_dead);
        proc_exit_connector(tsk);
#ifdef CONFIG_NUMA
        task_lock(tsk);
        mpol_put(tsk->mempolicy);
        tsk->mempolicy = NULL;
        task_unlock(tsk);
#endif
#ifdef CONFIG_FUTEX
        if (unlikely(current->pi_state_cache))
                kfree(current->pi_state_cache);
#endif
        /*
         * Make sure we are holding no locks:
         */
        debug_check_no_locks_held();
        /*
         * We can do this unlocked here. The futex code uses this flag
         * just to verify whether the pi state cleanup has been done
         * or not. In the worst case it loops once more.
         */
        tsk->flags |= PF_EXITPIDONE;

        if (tsk->io_context)
                exit_io_context(tsk);

        if (tsk->splice_pipe)
                free_pipe_info(tsk->splice_pipe);

        if (tsk->task_frag.page)
                put_page(tsk->task_frag.page);

        validate_creds_for_do_exit(tsk);

        check_stack_usage();
        preempt_disable();
        if (tsk->nr_dirtied)
                __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
        exit_rcu();
        TASKS_RCU(__srcu_read_unlock(&tasks_rcu_exit_srcu, tasks_rcu_i));

        /*
         * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
         * when the following two conditions become true:
         *   - there is a race condition on mmap_sem (it is acquired by
         *     exit_mm()), and
         *   - an SMI occurs before TASK_RUNNING is set (or the hypervisor
         *     of a virtual machine switches to another guest).
         * As a result, we may become TASK_RUNNING after becoming TASK_DEAD.
         *
         * To avoid this, we have to wait until try_to_wake_up() releases
         * tsk->pi_lock.
         */
        smp_mb();
        raw_spin_unlock_wait(&tsk->pi_lock);

        /* causes final put_task_struct in finish_task_switch(). */
        tsk->state = TASK_DEAD;
        tsk->flags |= PF_NOFREEZE;      /* tell freezer to ignore us */
        schedule();
        BUG();
        /* Avoid "noreturn function does return".  */
        for (;;)
                cpu_relax();    /* For when BUG is null */
}
EXPORT_SYMBOL_GPL(do_exit);

void complete_and_exit(struct completion *comp, long code)
{
        if (comp)
                complete(comp);

        do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);

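/*
 * Illustrative sketch (not part of this file): complete_and_exit() is
 * the classic pattern for a module-owned kernel thread that must wake
 * the unload path and exit without running module text again.  The
 * names my_thread_done and my_thread here are hypothetical:
 *
 *	static DECLARE_COMPLETION(my_thread_done);
 *
 *	static int my_thread(void *unused)
 *	{
 *		// ... do work ...
 *		complete_and_exit(&my_thread_done, 0);
 *	}
 *
 *	// module unload path:
 *	wait_for_completion(&my_thread_done);
 */
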
SYSCALL_DEFINE1(exit, int, error_code)
{
        do_exit((error_code & 0xff) << 8);
}

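/*
 * Illustrative userspace sketch (not part of this file): the
 * (error_code & 0xff) << 8 encoding above is what the wait(2) family
 * reports, and the standard W* macros decode it:
 *
 *	exit(3);                // kernel stores (3 & 0xff) << 8
 *	...
 *	int status;
 *	wait(&status);
 *	WIFEXITED(status);      // (status & 0x7f) == 0, so true
 *	WEXITSTATUS(status);    // (status >> 8) & 0xff, so 3
 */
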
/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void
do_group_exit(int exit_code)
{
        struct signal_struct *sig = current->signal;

        BUG_ON(exit_code & 0x80); /* core dumps don't get here */

        if (signal_group_exit(sig))
                exit_code = sig->group_exit_code;
        else if (!thread_group_empty(current)) {
                struct sighand_struct *const sighand = current->sighand;

                spin_lock_irq(&sighand->siglock);
                if (signal_group_exit(sig))
                        /* Another thread got here before we took the lock.  */
                        exit_code = sig->group_exit_code;
                else {
                        sig->group_exit_code = exit_code;
                        sig->flags = SIGNAL_GROUP_EXIT;
                        zap_other_threads(current);
                }
                spin_unlock_irq(&sighand->siglock);
        }

        do_exit(exit_code);
        /* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
        do_group_exit((error_code & 0xff) << 8);
        /* NOTREACHED */
        return 0;
}

struct wait_opts {
        enum pid_type           wo_type;
        int                     wo_flags;
        struct pid              *wo_pid;

        struct siginfo __user   *wo_info;
        int __user              *wo_stat;
        struct rusage __user    *wo_rusage;

        wait_queue_t            child_wait;
        int                     notask_error;
};

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
        if (type != PIDTYPE_PID)
                task = task->group_leader;
        return task->pids[type].pid;
}

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
        return  wo->wo_type == PIDTYPE_MAX ||
                task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int eligible_child(struct wait_opts *wo, struct task_struct *p)
{
        if (!eligible_pid(wo, p))
                return 0;
        /* Wait for all children (clone and not) if __WALL is set;
         * otherwise, wait for clone children *only* if __WCLONE is
         * set; otherwise, wait for non-clone children *only*.  (Note:
         * A "clone" child here is one that reports to its parent
         * using a signal other than SIGCHLD.) */
        if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
            && !(wo->wo_flags & __WALL))
                return 0;

        return 1;
}

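/*
 * Illustrative userspace sketch (not part of this file): a "clone"
 * child in the sense of eligible_child() is one whose exit_signal is
 * not SIGCHLD, e.g. created with clone(2) and an exit signal of 0.
 * Such a child is invisible to a plain waitpid() call:
 *
 *	waitpid(pid, &status, 0);          // non-clone children only
 *	waitpid(pid, &status, __WCLONE);   // clone children only
 *	waitpid(pid, &status, __WALL);     // both kinds
 */
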
static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
                                pid_t pid, uid_t uid, int why, int status)
{
        struct siginfo __user *infop;
        int retval = wo->wo_rusage
                ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;

        put_task_struct(p);
        infop = wo->wo_info;
        if (infop) {
                if (!retval)
                        retval = put_user(SIGCHLD, &infop->si_signo);
                if (!retval)
                        retval = put_user(0, &infop->si_errno);
                if (!retval)
                        retval = put_user((short)why, &infop->si_code);
                if (!retval)
                        retval = put_user(pid, &infop->si_pid);
                if (!retval)
                        retval = put_user(uid, &infop->si_uid);
                if (!retval)
                        retval = put_user(status, &infop->si_status);
        }
        if (!retval)
                retval = pid;
        return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
        int state, retval, status;
        pid_t pid = task_pid_vnr(p);
        uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
        struct siginfo __user *infop;

        if (!likely(wo->wo_flags & WEXITED))
                return 0;

        if (unlikely(wo->wo_flags & WNOWAIT)) {
                int exit_code = p->exit_code;
                int why;

                get_task_struct(p);
                read_unlock(&tasklist_lock);
                sched_annotate_sleep();

                if ((exit_code & 0x7f) == 0) {
                        why = CLD_EXITED;
                        status = exit_code >> 8;
                } else {
                        why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        status = exit_code & 0x7f;
                }
                return wait_noreap_copyout(wo, p, pid, uid, why, status);
        }
        /*
         * Move the task's state to DEAD/TRACE, only one thread can do this.
         */
        state = (ptrace_reparented(p) && thread_group_leader(p)) ?
                EXIT_TRACE : EXIT_DEAD;
        if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
                return 0;
        /*
         * We own this thread, nobody else can reap it.
         */
        read_unlock(&tasklist_lock);
        sched_annotate_sleep();

        /*
         * Check thread_group_leader() to exclude the traced sub-threads.
         */
        if (state == EXIT_DEAD && thread_group_leader(p)) {
                struct signal_struct *sig = p->signal;
                struct signal_struct *psig = current->signal;
                unsigned long maxrss;
                cputime_t tgutime, tgstime;

                /*
                 * The resource counters for the group leader are in its
                 * own task_struct.  Those for dead threads in the group
                 * are in its signal_struct, as are those for the child
                 * processes it has previously reaped.  All these
                 * accumulate in the parent's signal_struct c* fields.
                 *
                 * We don't bother to take a lock here to protect these
                 * p->signal fields because the whole thread group is dead
                 * and nobody can change them.
                 *
                 * psig->stats_lock also protects us from our sub-threads
                 * which can reap other children at the same time. Until
                 * we change k_getrusage()-like users to rely on this lock
                 * we have to take ->siglock as well.
                 *
                 * We use thread_group_cputime_adjusted() to get times for
                 * the thread group, which consolidates times for all threads
                 * in the group including the group leader.
                 */
                thread_group_cputime_adjusted(p, &tgutime, &tgstime);
                spin_lock_irq(&current->sighand->siglock);
                write_seqlock(&psig->stats_lock);
                psig->cutime += tgutime + sig->cutime;
                psig->cstime += tgstime + sig->cstime;
                psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
                psig->cmin_flt +=
                        p->min_flt + sig->min_flt + sig->cmin_flt;
                psig->cmaj_flt +=
                        p->maj_flt + sig->maj_flt + sig->cmaj_flt;
                psig->cnvcsw +=
                        p->nvcsw + sig->nvcsw + sig->cnvcsw;
                psig->cnivcsw +=
                        p->nivcsw + sig->nivcsw + sig->cnivcsw;
                psig->cinblock +=
                        task_io_get_inblock(p) +
                        sig->inblock + sig->cinblock;
                psig->coublock +=
                        task_io_get_oublock(p) +
                        sig->oublock + sig->coublock;
                maxrss = max(sig->maxrss, sig->cmaxrss);
                if (psig->cmaxrss < maxrss)
                        psig->cmaxrss = maxrss;
                task_io_accounting_add(&psig->ioac, &p->ioac);
                task_io_accounting_add(&psig->ioac, &sig->ioac);
                write_sequnlock(&psig->stats_lock);
                spin_unlock_irq(&current->sighand->siglock);
        }

        retval = wo->wo_rusage
                ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
        status = (p->signal->flags & SIGNAL_GROUP_EXIT)
                ? p->signal->group_exit_code : p->exit_code;
        if (!retval && wo->wo_stat)
                retval = put_user(status, wo->wo_stat);

        infop = wo->wo_info;
        if (!retval && infop)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval && infop)
                retval = put_user(0, &infop->si_errno);
        if (!retval && infop) {
                int why;

                if ((status & 0x7f) == 0) {
                        why = CLD_EXITED;
                        status >>= 8;
                } else {
                        why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        status &= 0x7f;
                }
                retval = put_user((short)why, &infop->si_code);
                if (!retval)
                        retval = put_user(status, &infop->si_status);
        }
        if (!retval && infop)
                retval = put_user(pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(uid, &infop->si_uid);
        if (!retval)
                retval = pid;

        if (state == EXIT_TRACE) {
                write_lock_irq(&tasklist_lock);
                /* We dropped tasklist, ptracer could die and untrace */
                ptrace_unlink(p);

                /* If parent wants a zombie, don't release it now */
                state = EXIT_ZOMBIE;
                if (do_notify_parent(p, p->exit_signal))
                        state = EXIT_DEAD;
                p->exit_state = state;
                write_unlock_irq(&tasklist_lock);
        }
        if (state == EXIT_DEAD)
                release_task(p);

        return retval;
}

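/*
 * Illustrative sketch (not part of this file): the why/status split
 * computed above mirrors the userspace W* macros applied to the raw
 * exit code:
 *
 *	(code & 0x7f) == 0  ->  CLD_EXITED, status = code >> 8
 *	                        (WIFEXITED / WEXITSTATUS)
 *	(code & 0x80) != 0  ->  CLD_DUMPED, status = code & 0x7f
 *	                        (WCOREDUMP / WTERMSIG)
 *	otherwise           ->  CLD_KILLED, status = code & 0x7f
 *	                        (WIFSIGNALED / WTERMSIG)
 */
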
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
        if (ptrace) {
                if (task_is_stopped_or_traced(p) &&
                    !(p->jobctl & JOBCTL_LISTENING))
                        return &p->exit_code;
        } else {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
                        return &p->signal->group_exit_code;
        }
        return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
                                int ptrace, struct task_struct *p)
{
        struct siginfo __user *infop;
        int retval, exit_code, *p_code, why;
        uid_t uid = 0; /* unneeded, required by compiler */
        pid_t pid;

        /*
         * Traditionally we see ptrace'd stopped tasks regardless of options.
         */
        if (!ptrace && !(wo->wo_flags & WUNTRACED))
                return 0;

        if (!task_stopped_code(p, ptrace))
                return 0;

        exit_code = 0;
        spin_lock_irq(&p->sighand->siglock);

        p_code = task_stopped_code(p, ptrace);
        if (unlikely(!p_code))
                goto unlock_sig;

        exit_code = *p_code;
        if (!exit_code)
                goto unlock_sig;

        if (!unlikely(wo->wo_flags & WNOWAIT))
                *p_code = 0;

        uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
        spin_unlock_irq(&p->sighand->siglock);
        if (!exit_code)
                return 0;

        /*
         * Now we are pretty sure this task is interesting.
         * Make sure it doesn't get reaped out from under us while we
         * give up the lock and then examine it below.  We don't want to
         * keep holding onto the tasklist_lock while we call getrusage and
         * possibly take page faults for user memory.
         */
        get_task_struct(p);
        pid = task_pid_vnr(p);
        why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
        read_unlock(&tasklist_lock);
        sched_annotate_sleep();

        if (unlikely(wo->wo_flags & WNOWAIT))
                return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);

        retval = wo->wo_rusage
                ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
        if (!retval && wo->wo_stat)
                retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);

        infop = wo->wo_info;
        if (!retval && infop)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval && infop)
                retval = put_user(0, &infop->si_errno);
        if (!retval && infop)
                retval = put_user((short)why, &infop->si_code);
        if (!retval && infop)
                retval = put_user(exit_code, &infop->si_status);
        if (!retval && infop)
                retval = put_user(pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(uid, &infop->si_uid);
        if (!retval)
                retval = pid;
        put_task_struct(p);

        BUG_ON(!retval);
        return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
        int retval;
        pid_t pid;
        uid_t uid;

        if (!unlikely(wo->wo_flags & WCONTINUED))
                return 0;

        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
                return 0;

        spin_lock_irq(&p->sighand->siglock);
        /* Re-check with the lock held.  */
        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
                spin_unlock_irq(&p->sighand->siglock);
                return 0;
        }
        if (!unlikely(wo->wo_flags & WNOWAIT))
                p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
        uid = from_kuid_munged(current_user_ns(), task_uid(p));
        spin_unlock_irq(&p->sighand->siglock);

        pid = task_pid_vnr(p);
        get_task_struct(p);
        read_unlock(&tasklist_lock);
        sched_annotate_sleep();

        if (!wo->wo_info) {
                retval = wo->wo_rusage
                        ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
                put_task_struct(p);
                if (!retval && wo->wo_stat)
                        retval = put_user(0xffff, wo->wo_stat);
                if (!retval)
                        retval = pid;
        } else {
                retval = wait_noreap_copyout(wo, p, pid, uid,
                                             CLD_CONTINUED, SIGCONT);
                BUG_ON(retval == 0);
        }

        return retval;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
                                struct task_struct *p)
{
        /*
         * We can race with wait_task_zombie() from another thread.
         * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
         * can't confuse the checks below.
         */
        int exit_state = ACCESS_ONCE(p->exit_state);
        int ret;

        if (unlikely(exit_state == EXIT_DEAD))
                return 0;

        ret = eligible_child(wo, p);
        if (!ret)
                return ret;

        ret = security_task_wait(p);
        if (unlikely(ret < 0)) {
                /*
                 * If we have not yet seen any eligible child,
                 * then let this error code replace -ECHILD.
                 * A permission error will give the user a clue
                 * to look for security policy problems, rather
                 * than for mysterious wait bugs.
                 */
                if (wo->notask_error)
                        wo->notask_error = ret;
                return 0;
        }

        if (unlikely(exit_state == EXIT_TRACE)) {
                /*
                 * ptrace == 0 means we are the natural parent. In this case
                 * we should clear notask_error, debugger will notify us.
                 */
                if (likely(!ptrace))
                        wo->notask_error = 0;
                return 0;
        }

        if (likely(!ptrace) && unlikely(p->ptrace)) {
                /*
                 * If it is traced by its real parent's group, just pretend
                 * the caller is ptrace_do_wait() and reap this child if it
                 * is zombie.
                 *
                 * This also hides group stop state from real parent; otherwise
                 * a single stop can be reported twice as group and ptrace stop.
                 * If a ptracer wants to distinguish these two events for its
                 * own children it should create a separate process which takes
                 * the role of real parent.
                 */
                if (!ptrace_reparented(p))
                        ptrace = 1;
        }

        /* slay zombie? */
        if (exit_state == EXIT_ZOMBIE) {
                /* we don't reap group leaders with subthreads */
                if (!delay_group_leader(p)) {
                        /*
                         * A zombie ptracee is only visible to its ptracer.
                         * Notification and reaping will be cascaded to the
                         * real parent when the ptracer detaches.
                         */
                        if (unlikely(ptrace) || likely(!p->ptrace))
                                return wait_task_zombie(wo, p);
                }

                /*
                 * Allow access to stopped/continued state via zombie by
                 * falling through.  Clearing of notask_error is complex.
                 *
                 * When !@ptrace:
                 *
                 * If WEXITED is set, notask_error should naturally be
                 * cleared.  If not, a subset of WSTOPPED|WCONTINUED is set,
                 * so, if there are live subthreads, there are events to
                 * wait for.  If all subthreads are dead, it's still safe
                 * to clear - this function will be called again in a finite
                 * amount of time once all the subthreads are released and
                 * will then return without clearing.
                 *
                 * When @ptrace:
                 *
                 * Stopped state is per-task and thus can't change once the
                 * target task dies.  Only continued and exited can happen.
                 * Clear notask_error if WCONTINUED | WEXITED.
                 */
                if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
                        wo->notask_error = 0;
        } else {
                /*
                 * @p is alive and it's gonna stop, continue or exit, so
                 * there always is something to wait for.
                 */
                wo->notask_error = 0;
        }

        /*
         * Wait for stopped.  Depending on @ptrace, different stopped state
         * is used and the two don't interact with each other.
         */
        ret = wait_task_stopped(wo, ptrace, p);
        if (ret)
                return ret;

        /*
         * Wait for continued.  There's only one continued state and the
         * ptracer can consume it which can confuse the real parent.  Don't
         * use WCONTINUED from ptracer.  You don't need or want it.
         */
        return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->children, sibling) {
                int ret = wait_consider_task(wo, 0, p);

                if (ret)
                        return ret;
        }

        return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
                int ret = wait_consider_task(wo, 1, p);

                if (ret)
                        return ret;
        }

        return 0;
}

static int child_wait_callback(wait_queue_t *wait, unsigned mode,
                                int sync, void *key)
{
        struct wait_opts *wo = container_of(wait, struct wait_opts,
                                                child_wait);
        struct task_struct *p = key;

        if (!eligible_pid(wo, p))
                return 0;

        if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
                return 0;

        return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
        __wake_up_sync_key(&parent->signal->wait_chldexit,
                                TASK_INTERRUPTIBLE, 1, p);
}

static long do_wait(struct wait_opts *wo)
{
        struct task_struct *tsk;
        int retval;

        trace_sched_process_wait(wo->wo_pid);

        init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
        wo->child_wait.private = current;
        add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
        /*
         * If there is nothing that can match our criteria, just get out.
         * We will clear ->notask_error to zero if we see any child that
         * might later match our criteria, even if we are not able to reap
         * it yet.
         */
        wo->notask_error = -ECHILD;
        if ((wo->wo_type < PIDTYPE_MAX) &&
           (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
                goto notask;

        set_current_state(TASK_INTERRUPTIBLE);
        read_lock(&tasklist_lock);
        tsk = current;
        do {
                retval = do_wait_thread(wo, tsk);
                if (retval)
                        goto end;

                retval = ptrace_do_wait(wo, tsk);
                if (retval)
                        goto end;

                if (wo->wo_flags & __WNOTHREAD)
                        break;
        } while_each_thread(current, tsk);
        read_unlock(&tasklist_lock);

notask:
        retval = wo->notask_error;
        if (!retval && !(wo->wo_flags & WNOHANG)) {
                retval = -ERESTARTSYS;
                if (!signal_pending(current)) {
                        schedule();
                        goto repeat;
                }
        }
end:
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
        return retval;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
                infop, int, options, struct rusage __user *, ru)
{
        struct wait_opts wo;
        struct pid *pid = NULL;
        enum pid_type type;
        long ret;

        if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
                return -EINVAL;
        if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
                return -EINVAL;

        switch (which) {
        case P_ALL:
                type = PIDTYPE_MAX;
                break;
        case P_PID:
                type = PIDTYPE_PID;
                if (upid <= 0)
                        return -EINVAL;
                break;
        case P_PGID:
                type = PIDTYPE_PGID;
                if (upid <= 0)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        if (type < PIDTYPE_MAX)
                pid = find_get_pid(upid);

        wo.wo_type      = type;
        wo.wo_pid       = pid;
        wo.wo_flags     = options;
        wo.wo_info      = infop;
        wo.wo_stat      = NULL;
        wo.wo_rusage    = ru;
        ret = do_wait(&wo);

        if (ret > 0) {
                ret = 0;
        } else if (infop) {
                /*
                 * For a WNOHANG return, clear out all the fields
                 * we would set so the user can easily tell the
                 * difference.
                 */
                if (!ret)
                        ret = put_user(0, &infop->si_signo);
                if (!ret)
                        ret = put_user(0, &infop->si_errno);
                if (!ret)
                        ret = put_user(0, &infop->si_code);
                if (!ret)
                        ret = put_user(0, &infop->si_pid);
                if (!ret)
                        ret = put_user(0, &infop->si_uid);
                if (!ret)
                        ret = put_user(0, &infop->si_status);
        }

        put_pid(pid);
        return ret;
}

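/*
 * Illustrative userspace sketch (not part of this file): a minimal
 * waitid(2) call that passes the option validation above; P_PID plus
 * WEXITED reaps one specific child and receives the siginfo fields
 * that wait_task_zombie() copies out:
 *
 *	siginfo_t si = { 0 };
 *	if (waitid(P_PID, pid, &si, WEXITED) == 0) {
 *		// si.si_code is CLD_EXITED/CLD_KILLED/CLD_DUMPED and
 *		// si.si_status the exit code or signal number.
 *	}
 */
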
SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
                int, options, struct rusage __user *, ru)
{
        struct wait_opts wo;
        struct pid *pid = NULL;
        enum pid_type type;
        long ret;

        if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
                        __WNOTHREAD|__WCLONE|__WALL))
                return -EINVAL;

        if (upid == -1)
                type = PIDTYPE_MAX;
        else if (upid < 0) {
                type = PIDTYPE_PGID;
                pid = find_get_pid(-upid);
        } else if (upid == 0) {
                type = PIDTYPE_PGID;
                pid = get_task_pid(current, PIDTYPE_PGID);
        } else /* upid > 0 */ {
                type = PIDTYPE_PID;
                pid = find_get_pid(upid);
        }

        wo.wo_type      = type;
        wo.wo_pid       = pid;
        wo.wo_flags     = options | WEXITED;
        wo.wo_info      = NULL;
        wo.wo_stat      = stat_addr;
        wo.wo_rusage    = ru;
        ret = do_wait(&wo);
        put_pid(pid);

        return ret;
}

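/*
 * Illustrative userspace sketch (not part of this file): the upid
 * decoding above is what gives wait4(2)/waitpid(2) their familiar
 * pid conventions:
 *
 *	waitpid(-1, &st, 0);     // any child          (PIDTYPE_MAX)
 *	waitpid(0, &st, 0);      // caller's pgrp      (PIDTYPE_PGID)
 *	waitpid(-pg, &st, 0);    // process group pg   (PIDTYPE_PGID)
 *	waitpid(pid, &st, 0);    // exactly pid        (PIDTYPE_PID)
 */
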
#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
        return sys_wait4(pid, stat_addr, options, NULL);
}

#endif
