TOMOYO Linux Cross Reference
Linux/kernel/ptrace.c

  1 /*
  2  * linux/kernel/ptrace.c
  3  *
  4  * (C) Copyright 1999 Linus Torvalds
  5  *
  6  * Common interfaces for "ptrace()" which we do not want
  7  * to continually duplicate across every architecture.
  8  */
  9 
 10 #include <linux/capability.h>
 11 #include <linux/export.h>
 12 #include <linux/sched.h>
 13 #include <linux/errno.h>
 14 #include <linux/mm.h>
 15 #include <linux/highmem.h>
 16 #include <linux/pagemap.h>
 17 #include <linux/ptrace.h>
 18 #include <linux/security.h>
 19 #include <linux/signal.h>
 20 #include <linux/uio.h>
 21 #include <linux/audit.h>
 22 #include <linux/pid_namespace.h>
 23 #include <linux/syscalls.h>
 24 #include <linux/uaccess.h>
 25 #include <linux/regset.h>
 26 #include <linux/hw_breakpoint.h>
 27 #include <linux/cn_proc.h>
 28 #include <linux/compat.h>
 29 
 30 /*
 31  * Access another process' address space via ptrace.
  32  * Source/target buffer must be in kernel space.
  33  * Do not walk the page table directly; use get_user_pages().
 34  */
 35 int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
 36                      void *buf, int len, int write)
 37 {
 38         struct mm_struct *mm;
 39         int ret;
 40 
 41         mm = get_task_mm(tsk);
 42         if (!mm)
 43                 return 0;
 44 
 45         if (!tsk->ptrace ||
 46             (current != tsk->parent) ||
 47             ((get_dumpable(mm) != SUID_DUMP_USER) &&
 48              !ptracer_capable(tsk, mm->user_ns))) {
 49                 mmput(mm);
 50                 return 0;
 51         }
 52 
 53         ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
 54         mmput(mm);
 55 
 56         return ret;
 57 }
 58 
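The get_dumpable() test above is what makes prctl(PR_SET_DUMPABLE, 0) an effective shield against unprivileged tracers reading a process's memory. A minimal userspace sketch, illustrative only (assumes a Linux host with <sys/prctl.h>):

#include <stdio.h>
#include <sys/prctl.h>
#include <unistd.h>

int main(void)
{
        /* After this, only CAP_SYS_PTRACE-capable tracers may read us;
         * ptrace_access_vm() above fails its dumpability test. */
        if (prctl(PR_SET_DUMPABLE, 0L, 0L, 0L, 0L) != 0)
                perror("prctl");
        printf("pid %d is now nondumpable\n", getpid());
        pause();        /* linger so a tracer can try (and fail) */
        return 0;
}
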
 59 
 60 static int ptrace_trapping_sleep_fn(void *flags)
 61 {
 62         schedule();
 63         return 0;
 64 }
 65 
 66 void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
 67                    const struct cred *ptracer_cred)
 68 {
 69         BUG_ON(!list_empty(&child->ptrace_entry));
 70         list_add(&child->ptrace_entry, &new_parent->ptraced);
 71         child->parent = new_parent;
 72         child->ptracer_cred = get_cred(ptracer_cred);
 73 }
 74 
 75 /*
 76  * ptrace a task: make the debugger its new parent and
 77  * move it to the ptrace list.
 78  *
 79  * Must be called with the tasklist lock write-held.
 80  */
 81 static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 82 {
 83         __ptrace_link(child, new_parent, current_cred());
 84 }
 85 
 86 /**
 87  * __ptrace_unlink - unlink ptracee and restore its execution state
 88  * @child: ptracee to be unlinked
 89  *
 90  * Remove @child from the ptrace list, move it back to the original parent,
 91  * and restore the execution state so that it conforms to the group stop
 92  * state.
 93  *
 94  * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 95  * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 96  * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 97  * If the ptracer is exiting, the ptracee can be in any state.
 98  *
 99  * After detach, the ptracee should be in a state which conforms to the
100  * group stop.  If the group is stopped or in the process of stopping, the
101  * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
102  * up from TASK_TRACED.
103  *
104  * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
105  * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
106  * to but in the opposite direction of what happens while attaching to a
107  * stopped task.  However, in this direction, the intermediate RUNNING
108  * state is not hidden even from the current ptracer and if it immediately
109  * re-attaches and performs a WNOHANG wait(2), it may fail.
110  *
111  * CONTEXT:
112  * write_lock_irq(tasklist_lock)
113  */
114 void __ptrace_unlink(struct task_struct *child)
115 {
116         const struct cred *old_cred;
117         BUG_ON(!child->ptrace);
118 
119         child->parent = child->real_parent;
120         list_del_init(&child->ptrace_entry);
121         old_cred = child->ptracer_cred;
122         child->ptracer_cred = NULL;
123         put_cred(old_cred);
124 
125         spin_lock(&child->sighand->siglock);
126         child->ptrace = 0;
127         /*
128          * Clear all pending traps and TRAPPING.  TRAPPING should be
129          * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
130          */
131         task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
132         task_clear_jobctl_trapping(child);
133 
134         /*
135          * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
136          * @child isn't dead.
137          */
138         if (!(child->flags & PF_EXITING) &&
139             (child->signal->flags & SIGNAL_STOP_STOPPED ||
140              child->signal->group_stop_count)) {
141                 child->jobctl |= JOBCTL_STOP_PENDING;
142 
143                 /*
144                  * This is only possible if this thread was cloned by the
 145  * traced task running in the stopped group; set the signal
 146  * for future reports.
147                  * FIXME: we should change ptrace_init_task() to handle this
148                  * case.
149                  */
150                 if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
151                         child->jobctl |= SIGSTOP;
152         }
153 
154         /*
155          * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
156          * @child in the butt.  Note that @resume should be used iff @child
157          * is in TASK_TRACED; otherwise, we might unduly disrupt
158          * TASK_KILLABLE sleeps.
159          */
160         if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
161                 ptrace_signal_wake_up(child, true);
162 
163         spin_unlock(&child->sighand->siglock);
164 }
165 
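The transient RUNNING state described in the comment above is visible to userspace: right after a re-attach, a WNOHANG wait may report nothing even though the tracee is about to stop. A hedged sketch of the tolerant polling pattern (illustrative only; wait_for_stop is a hypothetical helper, not part of this file):

#include <errno.h>
#include <sched.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Poll until the attach-induced stop becomes reportable; a single
 * WNOHANG wait can legitimately return 0 during the transient
 * RUNNING window described above. */
static int wait_for_stop(pid_t pid)
{
        int status;
        pid_t w;

        for (;;) {
                w = waitpid(pid, &status, WNOHANG);
                if (w == pid && WIFSTOPPED(status))
                        return 0;       /* stop observed */
                if (w < 0 && errno != EINTR)
                        return -1;      /* tracee disappeared */
                sched_yield();          /* w == 0: not reportable yet */
        }
}
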
166 /* Ensure that nothing can wake it up, even SIGKILL */
167 static bool ptrace_freeze_traced(struct task_struct *task)
168 {
169         bool ret = false;
170 
171         /* Lockless, nobody but us can set this flag */
172         if (task->jobctl & JOBCTL_LISTENING)
173                 return ret;
174 
175         spin_lock_irq(&task->sighand->siglock);
176         if (task_is_traced(task) && !__fatal_signal_pending(task)) {
177                 task->state = __TASK_TRACED;
178                 ret = true;
179         }
180         spin_unlock_irq(&task->sighand->siglock);
181 
182         return ret;
183 }
184 
185 static void ptrace_unfreeze_traced(struct task_struct *task)
186 {
187         if (task->state != __TASK_TRACED)
188                 return;
189 
190         WARN_ON(!task->ptrace || task->parent != current);
191 
192         /*
193          * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
194          * Recheck state under the lock to close this race.
195          */
196         spin_lock_irq(&task->sighand->siglock);
197         if (task->state == __TASK_TRACED) {
198                 if (__fatal_signal_pending(task))
199                         wake_up_state(task, __TASK_TRACED);
200                 else
201                         task->state = TASK_TRACED;
202         }
203         spin_unlock_irq(&task->sighand->siglock);
204 }
205 
206 /**
207  * ptrace_check_attach - check whether ptracee is ready for ptrace operation
208  * @child: ptracee to check for
209  * @ignore_state: don't check whether @child is currently %TASK_TRACED
210  *
211  * Check whether @child is being ptraced by %current and ready for further
212  * ptrace operations.  If @ignore_state is %false, @child also should be in
213  * %TASK_TRACED state and on return the child is guaranteed to be traced
214  * and not executing.  If @ignore_state is %true, @child can be in any
215  * state.
216  *
217  * CONTEXT:
218  * Grabs and releases tasklist_lock and @child->sighand->siglock.
219  *
220  * RETURNS:
221  * 0 on success, -ESRCH if %child is not ready.
222  */
223 static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
224 {
225         int ret = -ESRCH;
226 
227         /*
228          * We take the read lock around doing both checks to close a
229          * possible race where someone else was tracing our child and
230          * detached between these two checks.  After this locked check,
231          * we are sure that this is our traced child and that can only
232          * be changed by us so it's not changing right after this.
233          */
234         read_lock(&tasklist_lock);
235         if (child->ptrace && child->parent == current) {
236                 WARN_ON(child->state == __TASK_TRACED);
237                 /*
238                  * child->sighand can't be NULL, release_task()
239                  * does ptrace_unlink() before __exit_signal().
240                  */
241                 if (ignore_state || ptrace_freeze_traced(child))
242                         ret = 0;
243         }
244         read_unlock(&tasklist_lock);
245 
246         if (!ret && !ignore_state) {
247                 if (!wait_task_inactive(child, __TASK_TRACED)) {
248                         /*
249                          * This can only happen if may_ptrace_stop() fails and
250                          * ptrace_stop() changes ->state back to TASK_RUNNING,
251                          * so we should not worry about leaking __TASK_TRACED.
252                          */
253                         WARN_ON(child->state == __TASK_TRACED);
254                         ret = -ESRCH;
255                 }
256         }
257 
258         return ret;
259 }
260 
261 static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
262 {
263         if (mode & PTRACE_MODE_SCHED)
264                 return false;
265 
266         if (mode & PTRACE_MODE_NOAUDIT)
267                 return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
268         else
269                 return has_ns_capability(current, ns, CAP_SYS_PTRACE);
270 }
271 
272 /* Returns 0 on success, -errno on denial. */
273 static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
274 {
275         const struct cred *cred = current_cred(), *tcred;
276         struct mm_struct *mm;
277         kuid_t caller_uid;
278         kgid_t caller_gid;
279 
280         if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
281                 WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
282                 return -EPERM;
283         }
284 
285         /* May we inspect the given task?
286          * This check is used both for attaching with ptrace
287          * and for allowing access to sensitive information in /proc.
288          *
289          * ptrace_attach denies several cases that /proc allows
290          * because setting up the necessary parent/child relationship
291          * or halting the specified task is impossible.
292          */
293 
294         /* Don't let security modules deny introspection */
295         if (same_thread_group(task, current))
296                 return 0;
297         rcu_read_lock();
298         if (mode & PTRACE_MODE_FSCREDS) {
299                 caller_uid = cred->fsuid;
300                 caller_gid = cred->fsgid;
301         } else {
302                 /*
303                  * Using the euid would make more sense here, but something
304                  * in userland might rely on the old behavior, and this
305                  * shouldn't be a security problem since
306                  * PTRACE_MODE_REALCREDS implies that the caller explicitly
307                  * used a syscall that requests access to another process
308                  * (and not a filesystem syscall to procfs).
309                  */
310                 caller_uid = cred->uid;
311                 caller_gid = cred->gid;
312         }
313         tcred = __task_cred(task);
314         if (uid_eq(caller_uid, tcred->euid) &&
315             uid_eq(caller_uid, tcred->suid) &&
316             uid_eq(caller_uid, tcred->uid)  &&
317             gid_eq(caller_gid, tcred->egid) &&
318             gid_eq(caller_gid, tcred->sgid) &&
319             gid_eq(caller_gid, tcred->gid))
320                 goto ok;
321         if (ptrace_has_cap(tcred->user_ns, mode))
322                 goto ok;
323         rcu_read_unlock();
324         return -EPERM;
325 ok:
326         rcu_read_unlock();
327         /*
328          * If a task drops privileges and becomes nondumpable (through a syscall
329          * like setresuid()) while we are trying to access it, we must ensure
330          * that the dumpability is read after the credentials; otherwise,
331          * we may be able to attach to a task that we shouldn't be able to
332          * attach to (as if the task had dropped privileges without becoming
333          * nondumpable).
334          * Pairs with a write barrier in commit_creds().
335          */
336         smp_rmb();
337         mm = task->mm;
338         if (mm &&
339             ((get_dumpable(mm) != SUID_DUMP_USER) &&
340              !ptrace_has_cap(mm->user_ns, mode)))
341             return -EPERM;
342 
343         if (mode & PTRACE_MODE_SCHED)
344                 return 0;
345         return security_ptrace_access_check(task, mode);
346 }
347 
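__ptrace_may_access() also gates /proc/<pid>/mem, where reads are checked with PTRACE_MODE_ATTACH. A sketch of the userspace side, assuming the caller has already attached with ptrace and stopped the target (read_remote is a hypothetical helper name):

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Illustrative only: read len bytes at a remote address through
 * /proc/<pid>/mem.  The open() and pread() succeed only if the
 * PTRACE_MODE_ATTACH checks implemented above would pass. */
static ssize_t read_remote(pid_t pid, unsigned long addr,
                           void *buf, size_t len)
{
        char path[64];
        int fd;
        ssize_t n;

        snprintf(path, sizeof(path), "/proc/%d/mem", (int)pid);
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return -1;
        n = pread(fd, buf, len, (off_t)addr);
        close(fd);
        return n;
}
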
348 bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode)
349 {
350         return __ptrace_may_access(task, mode | PTRACE_MODE_SCHED);
351 }
352 
353 bool ptrace_may_access(struct task_struct *task, unsigned int mode)
354 {
355         int err;
356         task_lock(task);
357         err = __ptrace_may_access(task, mode);
358         task_unlock(task);
359         return !err;
360 }
361 
362 static int ptrace_attach(struct task_struct *task, long request,
363                          unsigned long addr,
364                          unsigned long flags)
365 {
366         bool seize = (request == PTRACE_SEIZE);
367         int retval;
368 
369         retval = -EIO;
370         if (seize) {
371                 if (addr != 0)
372                         goto out;
373                 if (flags & ~(unsigned long)PTRACE_O_MASK)
374                         goto out;
375                 flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
376         } else {
377                 flags = PT_PTRACED;
378         }
379 
380         audit_ptrace(task);
381 
382         retval = -EPERM;
383         if (unlikely(task->flags & PF_KTHREAD))
384                 goto out;
385         if (same_thread_group(task, current))
386                 goto out;
387 
388         /*
389          * Protect exec's credential calculations against our interference;
390          * SUID, SGID and LSM creds get determined differently
391          * under ptrace.
392          */
393         retval = -ERESTARTNOINTR;
394         if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
395                 goto out;
396 
397         task_lock(task);
398         retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
399         task_unlock(task);
400         if (retval)
401                 goto unlock_creds;
402 
403         write_lock_irq(&tasklist_lock);
404         retval = -EPERM;
405         if (unlikely(task->exit_state))
406                 goto unlock_tasklist;
407         if (task->ptrace)
408                 goto unlock_tasklist;
409 
410         if (seize)
411                 flags |= PT_SEIZED;
412         task->ptrace = flags;
413 
414         ptrace_link(task, current);
415 
416         /* SEIZE doesn't trap tracee on attach */
417         if (!seize)
418                 send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
419 
420         spin_lock(&task->sighand->siglock);
421 
422         /*
423          * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
424          * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
425          * will be cleared if the child completes the transition or any
426          * event which clears the group stop states happens.  We'll wait
427          * for the transition to complete before returning from this
428          * function.
429          *
430          * This hides STOPPED -> RUNNING -> TRACED transition from the
431          * attaching thread but a different thread in the same group can
432          * still observe the transient RUNNING state.  IOW, if another
433          * thread's WNOHANG wait(2) on the stopped tracee races against
434          * ATTACH, the wait(2) may fail due to the transient RUNNING.
435          *
436          * The following task_is_stopped() test is safe as both transitions
437          * in and out of STOPPED are protected by siglock.
438          */
439         if (task_is_stopped(task) &&
440             task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
441                 signal_wake_up_state(task, __TASK_STOPPED);
442 
443         spin_unlock(&task->sighand->siglock);
444 
445         retval = 0;
446 unlock_tasklist:
447         write_unlock_irq(&tasklist_lock);
448 unlock_creds:
449         mutex_unlock(&task->signal->cred_guard_mutex);
450 out:
451         if (!retval) {
452                 wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
453                             ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
454                 proc_ptrace_connector(task, PTRACE_ATTACH);
455         }
456 
457         return retval;
458 }
459 
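Seen from userspace, ptrace_attach() serves the classic attach-and-wait idiom; the SIGSTOP sent above is what the first waitpid() reports. A minimal sketch with error handling trimmed (illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(int argc, char **argv)
{
        pid_t pid;
        int status;

        if (argc < 2)
                return 1;
        pid = (pid_t)atoi(argv[1]);

        /* Sends SIGSTOP as a side effect; see ptrace_attach() above. */
        if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
                perror("PTRACE_ATTACH");
                return 1;
        }
        /* The attach-induced stop is reported through waitpid(). */
        if (waitpid(pid, &status, 0) == pid && WIFSTOPPED(status))
                printf("attached, tracee stopped by signal %d\n",
                       WSTOPSIG(status));

        ptrace(PTRACE_DETACH, pid, NULL, NULL);
        return 0;
}
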
460 /**
461  * ptrace_traceme  --  helper for PTRACE_TRACEME
462  *
463  * Performs checks and sets PT_PTRACED.
464  * Should be used by all ptrace implementations for PTRACE_TRACEME.
465  */
466 static int ptrace_traceme(void)
467 {
468         int ret = -EPERM;
469 
470         write_lock_irq(&tasklist_lock);
471         /* Are we already being traced? */
472         if (!current->ptrace) {
473                 ret = security_ptrace_traceme(current->parent);
474                 /*
475                  * Check PF_EXITING to ensure ->real_parent has not passed
476                  * exit_ptrace(). Otherwise we don't report the error but
477                  * pretend ->real_parent untraces us right after return.
478                  */
479                 if (!ret && !(current->real_parent->flags & PF_EXITING)) {
480                         current->ptrace = PT_PTRACED;
481                         ptrace_link(current, current->real_parent);
482                 }
483         }
484         write_unlock_irq(&tasklist_lock);
485 
486         return ret;
487 }
488 
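The canonical caller of ptrace_traceme() is a debugger's fork-and-exec path: the child requests tracing, and the subsequent execve() traps back to the parent with SIGTRAP. A sketch, where "ls" is only a placeholder target:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t child = fork();
        int status;

        if (child == 0) {
                /* Ask our parent to trace us; see ptrace_traceme(). */
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                execlp("ls", "ls", (char *)NULL);   /* placeholder */
                _exit(127);
        }

        waitpid(child, &status, 0);     /* stops at exec with SIGTRAP */
        if (WIFSTOPPED(status))
                printf("child stopped at exec, sig %d\n", WSTOPSIG(status));
        ptrace(PTRACE_CONT, child, NULL, NULL);
        waitpid(child, &status, 0);
        return 0;
}
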
489 /*
 490  * Called with irqs disabled, returns true if children should reap themselves.
491  */
492 static int ignoring_children(struct sighand_struct *sigh)
493 {
494         int ret;
495         spin_lock(&sigh->siglock);
496         ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
497               (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
498         spin_unlock(&sigh->siglock);
499         return ret;
500 }
501 
502 /*
503  * Called with tasklist_lock held for writing.
504  * Unlink a traced task, and clean it up if it was a traced zombie.
505  * Return true if it needs to be reaped with release_task().
506  * (We can't call release_task() here because we already hold tasklist_lock.)
507  *
508  * If it's a zombie, our attachedness prevented normal parent notification
509  * or self-reaping.  Do notification now if it would have happened earlier.
510  * If it should reap itself, return true.
511  *
512  * If it's our own child, there is no notification to do. But if our normal
513  * children self-reap, then this child was prevented by ptrace and we must
 514  * reap it now; in that case we must also wake up sub-threads sleeping in
515  * do_wait().
516  */
517 static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
518 {
519         bool dead;
520 
521         __ptrace_unlink(p);
522 
523         if (p->exit_state != EXIT_ZOMBIE)
524                 return false;
525 
526         dead = !thread_group_leader(p);
527 
528         if (!dead && thread_group_empty(p)) {
529                 if (!same_thread_group(p->real_parent, tracer))
530                         dead = do_notify_parent(p, p->exit_signal);
531                 else if (ignoring_children(tracer->sighand)) {
532                         __wake_up_parent(p, tracer);
533                         dead = true;
534                 }
535         }
536         /* Mark it as in the process of being reaped. */
537         if (dead)
538                 p->exit_state = EXIT_DEAD;
539         return dead;
540 }
541 
542 static int ptrace_detach(struct task_struct *child, unsigned int data)
543 {
544         bool dead = false;
545 
546         if (!valid_signal(data))
547                 return -EIO;
548 
549         /* Architecture-specific hardware disable .. */
550         ptrace_disable(child);
551         clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
552 
553         write_lock_irq(&tasklist_lock);
554         /*
555          * This child can be already killed. Make sure de_thread() or
556          * our sub-thread doing do_wait() didn't do release_task() yet.
557          */
558         if (child->ptrace) {
559                 child->exit_code = data;
560                 dead = __ptrace_detach(current, child);
561         }
562         write_unlock_irq(&tasklist_lock);
563 
564         proc_ptrace_connector(child, PTRACE_DETACH);
565         if (unlikely(dead))
566                 release_task(child);
567 
568         return 0;
569 }
570 
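The data argument to ptrace_detach() is a signal to deliver on resume, filtered through valid_signal() above. A small illustrative wrapper (detach_with_signal is a hypothetical name):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Illustrative only: detach from a stopped tracee, delivering sig on
 * resume (0 means "no signal"); invalid numbers earn -EIO above. */
static void detach_with_signal(pid_t pid, int sig)
{
        if (ptrace(PTRACE_DETACH, pid, NULL, (void *)(long)sig) == -1)
                perror("PTRACE_DETACH");
}
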
571 /*
572  * Detach all tasks we were using ptrace on. Called with tasklist held
573  * for writing, and returns with it held too. But note it can release
574  * and reacquire the lock.
575  */
576 void exit_ptrace(struct task_struct *tracer)
577         __releases(&tasklist_lock)
578         __acquires(&tasklist_lock)
579 {
580         struct task_struct *p, *n;
581         LIST_HEAD(ptrace_dead);
582 
583         if (likely(list_empty(&tracer->ptraced)))
584                 return;
585 
586         list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
587                 if (unlikely(p->ptrace & PT_EXITKILL))
588                         send_sig_info(SIGKILL, SEND_SIG_FORCED, p);
589 
590                 if (__ptrace_detach(tracer, p))
591                         list_add(&p->ptrace_entry, &ptrace_dead);
592         }
593 
594         write_unlock_irq(&tasklist_lock);
595         BUG_ON(!list_empty(&tracer->ptraced));
596 
597         list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
598                 list_del_init(&p->ptrace_entry);
599                 release_task(p);
600         }
601 
602         write_lock_irq(&tasklist_lock);
603 }
604 
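The PT_EXITKILL test in exit_ptrace() is the kernel half of PTRACE_O_EXITKILL: such tracees get SIGKILL when their tracer exits without detaching. It is armed from userspace roughly as follows (a sketch; assumes a libc that defines PTRACE_O_EXITKILL, i.e. Linux 3.8+ headers):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Illustrative only: ask the kernel to SIGKILL the tracee if we (the
 * tracer) die without detaching; see the PT_EXITKILL check above. */
static void arm_exitkill(pid_t pid)
{
        if (ptrace(PTRACE_SETOPTIONS, pid, NULL,
                   (void *)PTRACE_O_EXITKILL) == -1)
                perror("PTRACE_SETOPTIONS");
}
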
605 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
606 {
607         int copied = 0;
608 
609         while (len > 0) {
610                 char buf[128];
611                 int this_len, retval;
612 
613                 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
614                 retval = ptrace_access_vm(tsk, src, buf, this_len, 0);
615 
616                 if (!retval) {
617                         if (copied)
618                                 break;
619                         return -EIO;
620                 }
621                 if (copy_to_user(dst, buf, retval))
622                         return -EFAULT;
623                 copied += retval;
624                 src += retval;
625                 dst += retval;
626                 len -= retval;
627         }
628         return copied;
629 }
630 
631 int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
632 {
633         int copied = 0;
634 
635         while (len > 0) {
636                 char buf[128];
637                 int this_len, retval;
638 
639                 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
640                 if (copy_from_user(buf, src, this_len))
641                         return -EFAULT;
642                 retval = ptrace_access_vm(tsk, dst, buf, this_len, 1);
643                 if (!retval) {
644                         if (copied)
645                                 break;
646                         return -EIO;
647                 }
648                 copied += retval;
649                 src += retval;
650                 dst += retval;
651                 len -= retval;
652         }
653         return copied;
654 }
655 
656 static int ptrace_setoptions(struct task_struct *child, unsigned long data)
657 {
658         unsigned flags;
659 
660         if (data & ~(unsigned long)PTRACE_O_MASK)
661                 return -EINVAL;
662 
663         /* Avoid intermediate state when all opts are cleared */
664         flags = child->ptrace;
665         flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
666         flags |= (data << PT_OPT_FLAG_SHIFT);
667         child->ptrace = flags;
668 
669         return 0;
670 }
671 
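ptrace_setoptions() stores the PTRACE_O_* bits shifted by PT_OPT_FLAG_SHIFT into task->ptrace. From userspace, a syscall tracer typically sets a combination like the sketch below; with PTRACE_O_TRACESYSGOOD, syscall stops report SIGTRAP|0x80 so they can be told apart from genuine SIGTRAPs:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Illustrative only: typical option set for a syscall tracer. */
static void set_trace_options(pid_t pid)
{
        long opts = PTRACE_O_TRACESYSGOOD   /* flag syscall stops */
                  | PTRACE_O_TRACEEXEC      /* stop at execve */
                  | PTRACE_O_TRACEFORK;     /* follow fork children */

        if (ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)opts) == -1)
                perror("PTRACE_SETOPTIONS");
}
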
672 static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
673 {
674         unsigned long flags;
675         int error = -ESRCH;
676 
677         if (lock_task_sighand(child, &flags)) {
678                 error = -EINVAL;
679                 if (likely(child->last_siginfo != NULL)) {
680                         *info = *child->last_siginfo;
681                         error = 0;
682                 }
683                 unlock_task_sighand(child, &flags);
684         }
685         return error;
686 }
687 
688 static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
689 {
690         unsigned long flags;
691         int error = -ESRCH;
692 
693         if (lock_task_sighand(child, &flags)) {
694                 error = -EINVAL;
695                 if (likely(child->last_siginfo != NULL)) {
696                         *child->last_siginfo = *info;
697                         error = 0;
698                 }
699                 unlock_task_sighand(child, &flags);
700         }
701         return error;
702 }
703 
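A tracer usually pairs these two calls at a signal-delivery stop: fetch the queued siginfo via the path served by ptrace_getsiginfo(), inspect or rewrite it, then choose which signal (possibly none) to inject on resume. An illustrative sketch (filter_signal is a hypothetical helper):

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Illustrative only: at a signal-delivery stop, inspect the queued
 * siginfo and suppress SIGINT while passing everything else through. */
static void filter_signal(pid_t pid, int stopsig)
{
        siginfo_t si;

        if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si) == -1) {
                perror("PTRACE_GETSIGINFO");
                return;
        }
        printf("signal %d, code %d\n", si.si_signo, si.si_code);

        /* Deliver the original signal, except swallow SIGINT. */
        ptrace(PTRACE_CONT, pid, NULL,
               (void *)(long)(stopsig == SIGINT ? 0 : stopsig));
}
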
704 static int ptrace_peek_siginfo(struct task_struct *child,
705                                 unsigned long addr,
706                                 unsigned long data)
707 {
708         struct ptrace_peeksiginfo_args arg;
709         struct sigpending *pending;
710         struct sigqueue *q;
711         int ret, i;
712 
713         ret = copy_from_user(&arg, (void __user *) addr,
714                                 sizeof(struct ptrace_peeksiginfo_args));
715         if (ret)
716                 return -EFAULT;
717 
718         if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
719                 return -EINVAL; /* unknown flags */
720 
721         if (arg.nr < 0)
722                 return -EINVAL;
723 
724         /* Ensure arg.off fits in an unsigned long */
725         if (arg.off > ULONG_MAX)
726                 return 0;
727 
728         if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
729                 pending = &child->signal->shared_pending;
730         else
731                 pending = &child->pending;
732 
733         for (i = 0; i < arg.nr; ) {
734                 siginfo_t info;
735                 unsigned long off = arg.off + i;
736                 bool found = false;
737 
738                 spin_lock_irq(&child->sighand->siglock);
739                 list_for_each_entry(q, &pending->list, list) {
740                         if (!off--) {
741                                 found = true;
742                                 copy_siginfo(&info, &q->info);
743                                 break;
744                         }
745                 }
746                 spin_unlock_irq(&child->sighand->siglock);
747 
748                 if (!found) /* beyond the end of the list */
749                         break;
750 
751 #ifdef CONFIG_COMPAT
752                 if (unlikely(is_compat_task())) {
753                         compat_siginfo_t __user *uinfo = compat_ptr(data);
754 
755                         if (copy_siginfo_to_user32(uinfo, &info) ||
756                             __put_user(info.si_code, &uinfo->si_code)) {
757                                 ret = -EFAULT;
758                                 break;
759                         }
760 
761                 } else
762 #endif
763                 {
764                         siginfo_t __user *uinfo = (siginfo_t __user *) data;
765 
766                         if (copy_siginfo_to_user(uinfo, &info) ||
767                             __put_user(info.si_code, &uinfo->si_code)) {
768                                 ret = -EFAULT;
769                                 break;
770                         }
771                 }
772 
773                 data += sizeof(siginfo_t);
774                 i++;
775 
776                 if (signal_pending(current))
777                         break;
778 
779                 cond_resched();
780         }
781 
782         if (i > 0)
783                 return i;
784 
785         return ret;
786 }
787 
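The argument block copied in at the top of ptrace_peek_siginfo() is struct ptrace_peeksiginfo_args. A hedged userspace sketch, with the struct mirrored locally in case the libc headers lack it (requires Linux 3.10+; dump_pending is a hypothetical helper):

#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Local mirror of the kernel's struct ptrace_peeksiginfo_args. */
struct peeksiginfo_args {
        uint64_t off;           /* queue offset to start from */
        uint32_t flags;         /* 0 or PTRACE_PEEKSIGINFO_SHARED */
        int32_t  nr;            /* how many siginfo_t to copy */
};

/* Illustrative only: dump up to 8 queued signals of a stopped tracee. */
static void dump_pending(pid_t pid)
{
        struct peeksiginfo_args args = { .off = 0, .flags = 0, .nr = 8 };
        siginfo_t si[8];
        long n, i;

        n = ptrace(PTRACE_PEEKSIGINFO, pid, &args, si);
        for (i = 0; i < n; i++)
                printf("pending: sig %d code %d\n",
                       si[i].si_signo, si[i].si_code);
}
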
788 #ifdef PTRACE_SINGLESTEP
789 #define is_singlestep(request)          ((request) == PTRACE_SINGLESTEP)
790 #else
791 #define is_singlestep(request)          0
792 #endif
793 
794 #ifdef PTRACE_SINGLEBLOCK
795 #define is_singleblock(request)         ((request) == PTRACE_SINGLEBLOCK)
796 #else
797 #define is_singleblock(request)         0
798 #endif
799 
800 #ifdef PTRACE_SYSEMU
801 #define is_sysemu_singlestep(request)   ((request) == PTRACE_SYSEMU_SINGLESTEP)
802 #else
803 #define is_sysemu_singlestep(request)   0
804 #endif
805 
806 static int ptrace_resume(struct task_struct *child, long request,
807                          unsigned long data)
808 {
809         bool need_siglock;
810 
811         if (!valid_signal(data))
812                 return -EIO;
813 
814         if (request == PTRACE_SYSCALL)
815                 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
816         else
817                 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
818 
819 #ifdef TIF_SYSCALL_EMU
820         if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
821                 set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
822         else
823                 clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
824 #endif
825 
826         if (is_singleblock(request)) {
827                 if (unlikely(!arch_has_block_step()))
828                         return -EIO;
829                 user_enable_block_step(child);
830         } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
831                 if (unlikely(!arch_has_single_step()))
832                         return -EIO;
833                 user_enable_single_step(child);
834         } else {
835                 user_disable_single_step(child);
836         }
837 
838         /*
839          * Change ->exit_code and ->state under siglock to avoid the race
840          * with wait_task_stopped() in between; a non-zero ->exit_code will
841          * wrongly look like another report from tracee.
842          *
843          * Note that we need siglock even if ->exit_code == data and/or this
844          * status was not reported yet, the new status must not be cleared by
845          * wait_task_stopped() after resume.
846          *
847          * If data == 0 we do not care if wait_task_stopped() reports the old
848          * status and clears the code too; this can't race with the tracee, it
849          * takes siglock after resume.
850          */
851         need_siglock = data && !thread_group_empty(current);
852         if (need_siglock)
853                 spin_lock_irq(&child->sighand->siglock);
854         child->exit_code = data;
855         wake_up_state(child, __TASK_TRACED);
856         if (need_siglock)
857                 spin_unlock_irq(&child->sighand->siglock);
858 
859         return 0;
860 }
861 
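With PTRACE_SYSCALL, ptrace_resume() sets TIF_SYSCALL_TRACE, so the tracee stops once at every syscall entry and once at every exit. A sketch of the usual entry/exit toggle, assuming PTRACE_O_TRACESYSGOOD was set as shown earlier so syscall stops appear as SIGTRAP|0x80:

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Illustrative only: count syscall entry/exit stops of a tracee. */
static void syscall_loop(pid_t pid)
{
        int status, entering = 1;

        while (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == 0) {
                if (waitpid(pid, &status, 0) != pid || WIFEXITED(status))
                        break;
                if (WIFSTOPPED(status) &&
                    WSTOPSIG(status) == (SIGTRAP | 0x80)) {
                        printf("syscall %s\n", entering ? "entry" : "exit");
                        entering = !entering;
                }
        }
}
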
862 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
863 
864 static const struct user_regset *
865 find_regset(const struct user_regset_view *view, unsigned int type)
866 {
867         const struct user_regset *regset;
868         int n;
869 
870         for (n = 0; n < view->n; ++n) {
871                 regset = view->regsets + n;
872                 if (regset->core_note_type == type)
873                         return regset;
874         }
875 
876         return NULL;
877 }
878 
879 static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
880                          struct iovec *kiov)
881 {
882         const struct user_regset_view *view = task_user_regset_view(task);
883         const struct user_regset *regset = find_regset(view, type);
884         int regset_no;
885 
886         if (!regset || (kiov->iov_len % regset->size) != 0)
887                 return -EINVAL;
888 
889         regset_no = regset - view->regsets;
890         kiov->iov_len = min(kiov->iov_len,
891                             (__kernel_size_t) (regset->n * regset->size));
892 
893         if (req == PTRACE_GETREGSET)
894                 return copy_regset_to_user(task, view, regset_no, 0,
895                                            kiov->iov_len, kiov->iov_base);
896         else
897                 return copy_regset_from_user(task, view, regset_no, 0,
898                                              kiov->iov_len, kiov->iov_base);
899 }
900 
901 /*
902  * This is declared in linux/regset.h and defined in machine-dependent
903  * code.  We put the export here, near the primary machine-neutral use,
904  * to ensure no machine forgets it.
905  */
906 EXPORT_SYMBOL_GPL(task_user_regset_view);
907 #endif
908 
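PTRACE_GETREGSET selects a regset by its ELF core note type; NT_PRSTATUS names the general-purpose registers, and the kernel clamps kiov->iov_len exactly as ptrace_regset() shows above. An x86-64-specific sketch (struct user_regs_struct differs per architecture; show_regs is a hypothetical helper):

#include <elf.h>                /* NT_PRSTATUS */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>            /* struct iovec */
#include <sys/user.h>           /* struct user_regs_struct */

/* Illustrative only, x86-64: fetch GP registers of a stopped tracee. */
static void show_regs(pid_t pid)
{
        struct user_regs_struct regs;
        struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

        if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) == -1) {
                perror("PTRACE_GETREGSET");
                return;
        }
        /* iov.iov_len now holds the size the kernel actually filled. */
        printf("rip=%#llx rsp=%#llx\n",
               (unsigned long long)regs.rip, (unsigned long long)regs.rsp);
}
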
909 int ptrace_request(struct task_struct *child, long request,
910                    unsigned long addr, unsigned long data)
911 {
912         bool seized = child->ptrace & PT_SEIZED;
913         int ret = -EIO;
914         siginfo_t siginfo, *si;
915         void __user *datavp = (void __user *) data;
916         unsigned long __user *datalp = datavp;
917         unsigned long flags;
918 
919         switch (request) {
920         case PTRACE_PEEKTEXT:
921         case PTRACE_PEEKDATA:
922                 return generic_ptrace_peekdata(child, addr, data);
923         case PTRACE_POKETEXT:
924         case PTRACE_POKEDATA:
925                 return generic_ptrace_pokedata(child, addr, data);
926 
927 #ifdef PTRACE_OLDSETOPTIONS
928         case PTRACE_OLDSETOPTIONS:
929 #endif
930         case PTRACE_SETOPTIONS:
931                 ret = ptrace_setoptions(child, data);
932                 break;
933         case PTRACE_GETEVENTMSG:
934                 ret = put_user(child->ptrace_message, datalp);
935                 break;
936 
937         case PTRACE_PEEKSIGINFO:
938                 ret = ptrace_peek_siginfo(child, addr, data);
939                 break;
940 
941         case PTRACE_GETSIGINFO:
942                 ret = ptrace_getsiginfo(child, &siginfo);
943                 if (!ret)
944                         ret = copy_siginfo_to_user(datavp, &siginfo);
945                 break;
946 
947         case PTRACE_SETSIGINFO:
948                 if (copy_from_user(&siginfo, datavp, sizeof siginfo))
949                         ret = -EFAULT;
950                 else
951                         ret = ptrace_setsiginfo(child, &siginfo);
952                 break;
953 
954         case PTRACE_GETSIGMASK:
955                 if (addr != sizeof(sigset_t)) {
956                         ret = -EINVAL;
957                         break;
958                 }
959 
960                 if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
961                         ret = -EFAULT;
962                 else
963                         ret = 0;
964 
965                 break;
966 
967         case PTRACE_SETSIGMASK: {
968                 sigset_t new_set;
969 
970                 if (addr != sizeof(sigset_t)) {
971                         ret = -EINVAL;
972                         break;
973                 }
974 
975                 if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
976                         ret = -EFAULT;
977                         break;
978                 }
979 
980                 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
981 
982                 /*
983                  * Every thread does recalc_sigpending() after resume, so
984                  * retarget_shared_pending() and recalc_sigpending() are not
985                  * called here.
986                  */
987                 spin_lock_irq(&child->sighand->siglock);
988                 child->blocked = new_set;
989                 spin_unlock_irq(&child->sighand->siglock);
990 
991                 ret = 0;
992                 break;
993         }
994 
995         case PTRACE_INTERRUPT:
996                 /*
997                  * Stop tracee without any side-effect on signal or job
998                  * control.  At least one trap is guaranteed to happen
999                  * after this request.  If @child is already trapped, the
1000                  * current trap is not disturbed and another trap will
1001                  * happen after the current trap is ended with PTRACE_CONT.
1002                  *
1003                  * The actual trap might not be PTRACE_EVENT_STOP trap but
1004                  * the pending condition is cleared regardless.
1005                  */
1006                 if (unlikely(!seized || !lock_task_sighand(child, &flags)))
1007                         break;
1008 
1009                 /*
1010                  * INTERRUPT doesn't disturb existing trap sans one
1011                  * exception.  If ptracer issued LISTEN for the current
1012                  * STOP, this INTERRUPT should clear LISTEN and re-trap
1013                  * tracee into STOP.
1014                  */
1015                 if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
1016                         ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
1017 
1018                 unlock_task_sighand(child, &flags);
1019                 ret = 0;
1020                 break;
1021 
1022         case PTRACE_LISTEN:
1023                 /*
1024                  * Listen for events.  Tracee must be in STOP.  It's not
1025                  * resumed per se but is not considered to be in TRACED by
1026                  * wait(2) or ptrace(2).  If an async event (e.g. group
1027                  * stop state change) happens, tracee will enter STOP trap
1028                  * again.  Alternatively, ptracer can issue INTERRUPT to
1029                  * finish listening and re-trap tracee into STOP.
1030                  */
1031                 if (unlikely(!seized || !lock_task_sighand(child, &flags)))
1032                         break;
1033 
1034                 si = child->last_siginfo;
1035                 if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
1036                         child->jobctl |= JOBCTL_LISTENING;
1037                         /*
1038                          * If NOTIFY is set, it means event happened between
1039                          * start of this trap and now.  Trigger re-trap.
1040                          */
1041                         if (child->jobctl & JOBCTL_TRAP_NOTIFY)
1042                                 ptrace_signal_wake_up(child, true);
1043                         ret = 0;
1044                 }
1045                 unlock_task_sighand(child, &flags);
1046                 break;
1047 
1048         case PTRACE_DETACH:      /* detach a process that was attached. */
1049                 ret = ptrace_detach(child, data);
1050                 break;
1051 
1052 #ifdef CONFIG_BINFMT_ELF_FDPIC
1053         case PTRACE_GETFDPIC: {
1054                 struct mm_struct *mm = get_task_mm(child);
1055                 unsigned long tmp = 0;
1056 
1057                 ret = -ESRCH;
1058                 if (!mm)
1059                         break;
1060 
1061                 switch (addr) {
1062                 case PTRACE_GETFDPIC_EXEC:
1063                         tmp = mm->context.exec_fdpic_loadmap;
1064                         break;
1065                 case PTRACE_GETFDPIC_INTERP:
1066                         tmp = mm->context.interp_fdpic_loadmap;
1067                         break;
1068                 default:
1069                         break;
1070                 }
1071                 mmput(mm);
1072 
1073                 ret = put_user(tmp, datalp);
1074                 break;
1075         }
1076 #endif
1077 
1078 #ifdef PTRACE_SINGLESTEP
1079         case PTRACE_SINGLESTEP:
1080 #endif
1081 #ifdef PTRACE_SINGLEBLOCK
1082         case PTRACE_SINGLEBLOCK:
1083 #endif
1084 #ifdef PTRACE_SYSEMU
1085         case PTRACE_SYSEMU:
1086         case PTRACE_SYSEMU_SINGLESTEP:
1087 #endif
1088         case PTRACE_SYSCALL:
1089         case PTRACE_CONT:
1090                 return ptrace_resume(child, request, data);
1091 
1092         case PTRACE_KILL:
1093                 if (child->exit_state)  /* already dead */
1094                         return 0;
1095                 return ptrace_resume(child, request, SIGKILL);
1096 
1097 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1098         case PTRACE_GETREGSET:
1099         case PTRACE_SETREGSET: {
1100                 struct iovec kiov;
1101                 struct iovec __user *uiov = datavp;
1102 
1103                 if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
1104                         return -EFAULT;
1105 
1106                 if (__get_user(kiov.iov_base, &uiov->iov_base) ||
1107                     __get_user(kiov.iov_len, &uiov->iov_len))
1108                         return -EFAULT;
1109 
1110                 ret = ptrace_regset(child, request, addr, &kiov);
1111                 if (!ret)
1112                         ret = __put_user(kiov.iov_len, &uiov->iov_len);
1113                 break;
1114         }
1115 #endif
1116         default:
1117                 break;
1118         }
1119 
1120         return ret;
1121 }
1122 
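The PTRACE_INTERRUPT and PTRACE_LISTEN branches above require a PT_SEIZED tracee. A hedged sketch of the seize-then-interrupt idiom, decoding the PTRACE_EVENT_STOP trap per the ptrace(2) convention (assumes a libc that defines the PTRACE_SEIZE family, Linux 3.4+):

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Illustrative only: stop a running process with no signal side effects. */
static int seize_and_stop(pid_t pid)
{
        int status;

        if (ptrace(PTRACE_SEIZE, pid, NULL, NULL) == -1)
                return -1;              /* addr must be 0; see above */
        if (ptrace(PTRACE_INTERRUPT, pid, NULL, NULL) == -1)
                return -1;
        if (waitpid(pid, &status, 0) != pid)
                return -1;

        /* At least one trap is guaranteed, per the comment above. */
        if ((status >> 8) == (SIGTRAP | (PTRACE_EVENT_STOP << 8)))
                printf("tracee stopped by PTRACE_INTERRUPT\n");
        return 0;
}
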
1123 static struct task_struct *ptrace_get_task_struct(pid_t pid)
1124 {
1125         struct task_struct *child;
1126 
1127         rcu_read_lock();
1128         child = find_task_by_vpid(pid);
1129         if (child)
1130                 get_task_struct(child);
1131         rcu_read_unlock();
1132 
1133         if (!child)
1134                 return ERR_PTR(-ESRCH);
1135         return child;
1136 }
1137 
1138 #ifndef arch_ptrace_attach
1139 #define arch_ptrace_attach(child)       do { } while (0)
1140 #endif
1141 
1142 SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
1143                 unsigned long, data)
1144 {
1145         struct task_struct *child;
1146         long ret;
1147         {
1148                 const int rc = ccs_ptrace_permission(request, pid);
1149                 if (rc)
1150                         return rc;
1151         }
1152 
1153         if (request == PTRACE_TRACEME) {
1154                 ret = ptrace_traceme();
1155                 if (!ret)
1156                         arch_ptrace_attach(current);
1157                 goto out;
1158         }
1159 
1160         child = ptrace_get_task_struct(pid);
1161         if (IS_ERR(child)) {
1162                 ret = PTR_ERR(child);
1163                 goto out;
1164         }
1165 
1166         if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1167                 ret = ptrace_attach(child, request, addr, data);
1168                 /*
1169                  * Some architectures need to do book-keeping after
1170                  * a ptrace attach.
1171                  */
1172                 if (!ret)
1173                         arch_ptrace_attach(child);
1174                 goto out_put_task_struct;
1175         }
1176 
1177         ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1178                                   request == PTRACE_INTERRUPT);
1179         if (ret < 0)
1180                 goto out_put_task_struct;
1181 
1182         ret = arch_ptrace(child, request, addr, data);
1183         if (ret || request != PTRACE_DETACH)
1184                 ptrace_unfreeze_traced(child);
1185 
1186  out_put_task_struct:
1187         put_task_struct(child);
1188  out:
1189         return ret;
1190 }
1191 
1192 int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
1193                             unsigned long data)
1194 {
1195         unsigned long tmp;
1196         int copied;
1197 
1198         copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), 0);
1199         if (copied != sizeof(tmp))
1200                 return -EIO;
1201         return put_user(tmp, (unsigned long __user *)data);
1202 }
1203 
1204 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
1205                             unsigned long data)
1206 {
1207         int copied;
1208 
1209         copied = ptrace_access_vm(tsk, addr, &data, sizeof(data), 1);
1210         return (copied == sizeof(data)) ? 0 : -EIO;
1211 }
1212 
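generic_ptrace_peekdata() put_user()s the word itself rather than an error code, which is why userspace must clear errno to distinguish a legitimate -1 word from a failure. A sketch (peek_word is a hypothetical helper):

#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Illustrative only: read one word of tracee memory.  PTRACE_PEEKDATA
 * returns the word in-band, so -1 is ambiguous and errno must be
 * checked to detect a real error such as the -EIO above. */
static int peek_word(pid_t pid, unsigned long addr, long *out)
{
        errno = 0;
        *out = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
        if (*out == -1 && errno != 0)
                return -1;      /* real failure */
        return 0;
}
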
1213 #if defined CONFIG_COMPAT
1214 #include <linux/compat.h>
1215 
1216 int compat_ptrace_request(struct task_struct *child, compat_long_t request,
1217                           compat_ulong_t addr, compat_ulong_t data)
1218 {
1219         compat_ulong_t __user *datap = compat_ptr(data);
1220         compat_ulong_t word;
1221         siginfo_t siginfo;
1222         int ret;
1223 
1224         switch (request) {
1225         case PTRACE_PEEKTEXT:
1226         case PTRACE_PEEKDATA:
1227                 ret = ptrace_access_vm(child, addr, &word, sizeof(word), 0);
1228                 if (ret != sizeof(word))
1229                         ret = -EIO;
1230                 else
1231                         ret = put_user(word, datap);
1232                 break;
1233 
1234         case PTRACE_POKETEXT:
1235         case PTRACE_POKEDATA:
1236                 ret = ptrace_access_vm(child, addr, &data, sizeof(data), 1);
1237                 ret = (ret != sizeof(data) ? -EIO : 0);
1238                 break;
1239 
1240         case PTRACE_GETEVENTMSG:
1241                 ret = put_user((compat_ulong_t) child->ptrace_message, datap);
1242                 break;
1243 
1244         case PTRACE_GETSIGINFO:
1245                 ret = ptrace_getsiginfo(child, &siginfo);
1246                 if (!ret)
1247                         ret = copy_siginfo_to_user32(
1248                                 (struct compat_siginfo __user *) datap,
1249                                 &siginfo);
1250                 break;
1251 
1252         case PTRACE_SETSIGINFO:
1253                 memset(&siginfo, 0, sizeof siginfo);
1254                 if (copy_siginfo_from_user32(
1255                             &siginfo, (struct compat_siginfo __user *) datap))
1256                         ret = -EFAULT;
1257                 else
1258                         ret = ptrace_setsiginfo(child, &siginfo);
1259                 break;
1260 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1261         case PTRACE_GETREGSET:
1262         case PTRACE_SETREGSET:
1263         {
1264                 struct iovec kiov;
1265                 struct compat_iovec __user *uiov =
1266                         (struct compat_iovec __user *) datap;
1267                 compat_uptr_t ptr;
1268                 compat_size_t len;
1269 
1270                 if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
1271                         return -EFAULT;
1272 
1273                 if (__get_user(ptr, &uiov->iov_base) ||
1274                     __get_user(len, &uiov->iov_len))
1275                         return -EFAULT;
1276 
1277                 kiov.iov_base = compat_ptr(ptr);
1278                 kiov.iov_len = len;
1279 
1280                 ret = ptrace_regset(child, request, addr, &kiov);
1281                 if (!ret)
1282                         ret = __put_user(kiov.iov_len, &uiov->iov_len);
1283                 break;
1284         }
1285 #endif
1286 
1287         default:
1288                 ret = ptrace_request(child, request, addr, data);
1289         }
1290 
1291         return ret;
1292 }
1293 
1294 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
1295                        compat_long_t, addr, compat_long_t, data)
1296 {
1297         struct task_struct *child;
1298         long ret;
1299         {
1300                 const int rc = ccs_ptrace_permission(request, pid);
1301                 if (rc)
1302                         return rc;
1303         }
1304 
1305         if (request == PTRACE_TRACEME) {
1306                 ret = ptrace_traceme();
1307                 goto out;
1308         }
1309 
1310         child = ptrace_get_task_struct(pid);
1311         if (IS_ERR(child)) {
1312                 ret = PTR_ERR(child);
1313                 goto out;
1314         }
1315 
1316         if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1317                 ret = ptrace_attach(child, request, addr, data);
1318                 /*
1319                  * Some architectures need to do book-keeping after
1320                  * a ptrace attach.
1321                  */
1322                 if (!ret)
1323                         arch_ptrace_attach(child);
1324                 goto out_put_task_struct;
1325         }
1326 
1327         ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1328                                   request == PTRACE_INTERRUPT);
1329         if (!ret) {
1330                 ret = compat_arch_ptrace(child, request, addr, data);
1331                 if (ret || request != PTRACE_DETACH)
1332                         ptrace_unfreeze_traced(child);
1333         }
1334 
1335  out_put_task_struct:
1336         put_task_struct(child);
1337  out:
1338         return ret;
1339 }
1340 #endif  /* CONFIG_COMPAT */
1341 
