~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/kernel/sys.c

Version: ~ [ linux-5.3-rc5 ] ~ [ linux-5.2.9 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.67 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.139 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.189 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.189 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.72 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  *  linux/kernel/sys.c
  3  *
  4  *  Copyright (C) 1991, 1992  Linus Torvalds
  5  */
  6 
  7 #include <linux/export.h>
  8 #include <linux/mm.h>
  9 #include <linux/utsname.h>
 10 #include <linux/mman.h>
 11 #include <linux/reboot.h>
 12 #include <linux/prctl.h>
 13 #include <linux/highuid.h>
 14 #include <linux/fs.h>
 15 #include <linux/kmod.h>
 16 #include <linux/perf_event.h>
 17 #include <linux/resource.h>
 18 #include <linux/kernel.h>
 19 #include <linux/kexec.h>
 20 #include <linux/workqueue.h>
 21 #include <linux/capability.h>
 22 #include <linux/device.h>
 23 #include <linux/key.h>
 24 #include <linux/times.h>
 25 #include <linux/posix-timers.h>
 26 #include <linux/security.h>
 27 #include <linux/dcookies.h>
 28 #include <linux/suspend.h>
 29 #include <linux/tty.h>
 30 #include <linux/signal.h>
 31 #include <linux/cn_proc.h>
 32 #include <linux/getcpu.h>
 33 #include <linux/task_io_accounting_ops.h>
 34 #include <linux/seccomp.h>
 35 #include <linux/cpu.h>
 36 #include <linux/personality.h>
 37 #include <linux/ptrace.h>
 38 #include <linux/fs_struct.h>
 39 #include <linux/file.h>
 40 #include <linux/mount.h>
 41 #include <linux/gfp.h>
 42 #include <linux/syscore_ops.h>
 43 #include <linux/version.h>
 44 #include <linux/ctype.h>
 45 
 46 #include <linux/compat.h>
 47 #include <linux/syscalls.h>
 48 #include <linux/kprobes.h>
 49 #include <linux/user_namespace.h>
 50 
 51 #include <linux/kmsg_dump.h>
 52 /* Move somewhere else to avoid recompiling? */
 53 #include <generated/utsrelease.h>
 54 
 55 #include <asm/uaccess.h>
 56 #include <asm/io.h>
 57 #include <asm/unistd.h>
 58 
/*
 * Per-architecture control hooks (unaligned-access, FP emulation, FP
 * exception mode, endianness, TSC access).  Architectures that support
 * them define the macros in their asm headers; everything else falls
 * back to -EINVAL so callers can invoke them unconditionally.
 */
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
 89 
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;	/* substituted where a UID cannot be represented */
int overflowgid = DEFAULT_OVERFLOWGID;	/* substituted where a GID cannot be represented */

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
100 
101 /*
102  * the same as above, but for filesystems which can only store a 16-bit
103  * UID and GID. as such, this is needed on all architectures
104  */
105 
106 int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
107 int fs_overflowgid = DEFAULT_FS_OVERFLOWUID;
108 
109 EXPORT_SYMBOL(fs_overflowuid);
110 EXPORT_SYMBOL(fs_overflowgid);
111 
/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
/* pid to signal (instead of rebooting) when C_A_D is 0; see ctrl_alt_del() */
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);
119 
/*
 * If set, this is used for preparing the system to power off.
 * Called from kernel_power_off() just before the CPUs are taken down.
 */

void (*pm_power_off_prepare)(void);
125 
126 /*
127  * Returns true if current's euid is same as p's uid or euid,
128  * or has CAP_SYS_NICE to p's user_ns.
129  *
130  * Called with rcu_read_lock, creds are safe
131  */
132 static bool set_one_prio_perm(struct task_struct *p)
133 {
134         const struct cred *cred = current_cred(), *pcred = __task_cred(p);
135 
136         if (uid_eq(pcred->uid,  cred->euid) ||
137             uid_eq(pcred->euid, cred->euid))
138                 return true;
139         if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
140                 return true;
141         return false;
142 }
143 
144 /*
145  * set the priority of a task
146  * - the caller must hold the RCU read lock
147  */
148 static int set_one_prio(struct task_struct *p, int niceval, int error)
149 {
150         int no_nice;
151 
152         if (!set_one_prio_perm(p)) {
153                 error = -EPERM;
154                 goto out;
155         }
156         if (niceval < task_nice(p) && !can_nice(p, niceval)) {
157                 error = -EACCES;
158                 goto out;
159         }
160         no_nice = security_task_setnice(p, niceval);
161         if (no_nice) {
162                 error = no_nice;
163                 goto out;
164         }
165         if (error == -ESRCH)
166                 error = 0;
167         set_user_nice(p, niceval);
168 out:
169         return error;
170 }
171 
/*
 * setpriority(2): set the nice value of a process, a process group, or
 * all processes owned by a user.
 *
 * @which:   PRIO_PROCESS, PRIO_PGRP or PRIO_USER - how @who is interpreted.
 * @who:     target pid / pgid / uid; 0 means the current task, its process
 *           group, or its uid respectively.
 * @niceval: requested nice value; clamped to the -20..19 range below.
 *
 * Returns 0 on success, -EINVAL for a bad @which, -ESRCH when nothing
 * matched, -EPERM when denied, or the last error from set_one_prio().
 */
SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;
	/* TOMOYO/CCSecurity policy hook: may deny nice changes outright. */
	if (!ccs_capable(CCS_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (who)
				p = find_task_by_vpid(who);
			else
				p = current;
			if (p)
				error = set_one_prio(p, niceval, error);
			break;
		case PRIO_PGRP:
			if (who)
				pgrp = find_vpid(who);
			else
				pgrp = task_pgrp(current);
			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
				error = set_one_prio(p, niceval, error);
			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			uid = make_kuid(cred->user_ns, who);
			user = cred->user;
			if (!who)
				uid = cred->uid;
			else if (!uid_eq(uid, cred->uid) &&
				 !(user = find_user(uid)))
				goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p) {
				if (uid_eq(task_uid(p), uid))
					error = set_one_prio(p, niceval, error);
			} while_each_thread(g, p);
			/* Drop the reference taken by find_user() above. */
			if (!uid_eq(uid, cred->uid))
				free_uid(user);		/* For find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
out:
	return error;
}
238 
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 *
 * For PGRP/USER targets the result is the highest (20 - nice) value
 * among all matching tasks; -ESRCH if nothing matched.
 */
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (who)
				p = find_task_by_vpid(who);
			else
				p = current;
			if (p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
			break;
		case PRIO_PGRP:
			if (who)
				pgrp = find_vpid(who);
			else
				pgrp = task_pgrp(current);
			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			uid = make_kuid(cred->user_ns, who);
			user = cred->user;
			if (!who)
				uid = cred->uid;
			else if (!uid_eq(uid, cred->uid) &&
				 !(user = find_user(uid)))
				goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p) {
				if (uid_eq(task_uid(p), uid)) {
					niceval = 20 - task_nice(p);
					if (niceval > retval)
						retval = niceval;
				}
			} while_each_thread(g, p);
			/* Drop the reference taken by find_user() above. */
			if (!uid_eq(uid, cred->uid))
				free_uid(user);		/* for find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return retval;
}
308 
/**
 *	emergency_restart - reboot the system
 *
 *	Without shutting down any hardware or taking any locks
 *	reboot the system.  This is called when we know we are in
 *	trouble so this is our best effort to reboot.  This is
 *	safe to call in interrupt context.
 */
void emergency_restart(void)
{
	/* Hand the log buffer to any registered kmsg dumpers first. */
	kmsg_dump(KMSG_DUMP_EMERG);
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
323 
/*
 * Common preparation for a clean restart: notify the reboot chain,
 * flip system_state, stop usermode helper spawning, then shut down
 * devices.  The order of these steps is significant.
 */
void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	usermodehelper_disable();
	device_shutdown();
}
331 
/**
 *	register_reboot_notifier - Register function to be called at reboot time
 *	@nb: Info about notifier function to be called
 *
 *	Registers a function with the list of functions
 *	to be called at reboot time.
 *
 *	Currently always returns zero, as blocking_notifier_chain_register()
 *	always returns zero.
 */
int register_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(register_reboot_notifier);
347 
/**
 *	unregister_reboot_notifier - Unregister previously registered reboot notifier
 *	@nb: Hook to be unregistered
 *
 *	Unregisters a previously registered reboot
 *	notifier function.
 *
 *	Returns zero on success, or %-ENOENT on failure.
 */
int unregister_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(unregister_reboot_notifier);
362 
363 /**
364  *      kernel_restart - reboot the system
365  *      @cmd: pointer to buffer containing command to execute for restart
366  *              or %NULL
367  *
368  *      Shutdown everything and perform a clean reboot.
369  *      This is not safe to call in interrupt context.
370  */
371 void kernel_restart(char *cmd)
372 {
373         kernel_restart_prepare(cmd);
374         disable_nonboot_cpus();
375         syscore_shutdown();
376         if (!cmd)
377                 printk(KERN_EMERG "Restarting system.\n");
378         else
379                 printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
380         kmsg_dump(KMSG_DUMP_RESTART);
381         machine_restart(cmd);
382 }
383 EXPORT_SYMBOL_GPL(kernel_restart);
384 
385 static void kernel_shutdown_prepare(enum system_states state)
386 {
387         blocking_notifier_call_chain(&reboot_notifier_list,
388                 (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
389         system_state = state;
390         usermodehelper_disable();
391         device_shutdown();
392 }
/**
 *	kernel_halt - halt the system
 *
 *	Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	/* Take secondary CPUs and syscore devices down before halting. */
	disable_nonboot_cpus();
	syscore_shutdown();
	printk(KERN_EMERG "System halted.\n");
	kmsg_dump(KMSG_DUMP_HALT);
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);
409 
/**
 *	kernel_power_off - power_off the system
 *
 *	Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	/* Give the platform a chance to prepare (e.g. ACPI) if it set a hook. */
	if (pm_power_off_prepare)
		pm_power_off_prepare();
	disable_nonboot_cpus();
	syscore_shutdown();
	printk(KERN_EMERG "Power down.\n");
	kmsg_dump(KMSG_DUMP_POWEROFF);
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
427 
/* Serializes the command-dispatch section of sys_reboot() below. */
static DEFINE_MUTEX(reboot_mutex);
429 
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
		void __user *, arg)
{
	char buffer[256];	/* holds the RESTART2 command string */
	int ret = 0;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
			magic2 != LINUX_REBOOT_MAGIC2A &&
			magic2 != LINUX_REBOOT_MAGIC2B &&
			magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;
	/* TOMOYO/CCSecurity policy hook: may deny reboot entirely. */
	if (!ccs_capable(CCS_SYS_REBOOT))
		return -EPERM;

	/*
	 * If pid namespaces are enabled and the current task is in a child
	 * pid_namespace, the command is handled by reboot_pid_ns() which will
	 * call do_exit().
	 */
	ret = reboot_pid_ns(task_active_pid_ns(current), cmd);
	if (ret)
		return ret;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	mutex_lock(&reboot_mutex);
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		do_exit(0);
		panic("cannot halt");

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		do_exit(0);
		break;	/* not reached: do_exit() does not return */

	case LINUX_REBOOT_CMD_RESTART2:
		/* Copy the user-supplied command; always NUL-terminate. */
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			ret = -EFAULT;
			break;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

#ifdef CONFIG_KEXEC
	case LINUX_REBOOT_CMD_KEXEC:
		ret = kernel_kexec();
		break;
#endif

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		ret = hibernate();
		break;
#endif

	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&reboot_mutex);
	return ret;
}
526 
/*
 * Work handler queued by ctrl_alt_del(): performs the actual restart
 * from process context (ctrl_alt_del() itself runs in interrupt
 * context, where kernel_restart() must not be called).
 */
static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}
531 
532 /*
533  * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
534  * As it's called within an interrupt, it may NOT sync: the only choice
535  * is whether to reboot at once, or just ignore the ctrl-alt-del.
536  */
537 void ctrl_alt_del(void)
538 {
539         static DECLARE_WORK(cad_work, deferred_cad);
540 
541         if (C_A_D)
542                 schedule_work(&cad_work);
543         else
544                 kill_cad_pid(SIGINT, 1);
545 }
546         
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	/* Map the userspace gids into the caller's namespace. */
	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	/* (gid_t)-1 means "leave unchanged" and is allowed to be unmapped. */
	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		/* rgid may be set to the old real or effective gid, or with CAP_SETGID. */
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    nsown_capable(CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		/* egid may additionally be set to the old saved gid. */
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    nsown_capable(CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	/* Per the rule above: update sgid when rgid was set or egid != old rgid. */
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
616 
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * A privileged (CAP_SETGID) caller sets all four gids; an unprivileged
 * caller may only switch egid/fsgid to the current real or saved gid.
 *
 * SMP: Same implicit races as above.
 */
SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (nsown_capable(CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
653 
/*
 * change the user struct in a credentials set to match the new UID
 *
 * Takes a reference on the new uid's user_struct (allocating it if
 * needed) and drops the reference on the old one.  Returns 0 or
 * -EAGAIN if the user_struct could not be allocated.
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root.  We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
			new_user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}
682 
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	/* Map the userspace uids into the caller's namespace. */
	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	/* (uid_t)-1 means "leave unchanged" and is allowed to be unmapped. */
	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		/* ruid may become the old real or effective uid, or with CAP_SETUID. */
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !nsown_capable(CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		/* euid may additionally become the old saved uid. */
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !nsown_capable(CAP_SETUID))
			goto error;
	}

	/* Real uid changed: switch the accounted user_struct too. */
	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	/* Per the rule above: update suid when ruid was set or euid != old ruid. */
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
757                 
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (nsown_capable(CAP_SETUID)) {
		/* Privileged: set real and saved uid too, switching user_struct. */
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		/* Unprivileged: only the current real or saved uid is allowed. */
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
810 
811 
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 *
 * Each of @ruid/@euid/@suid may be (uid_t)-1 to leave that id unchanged.
 * Without CAP_SETUID, each new id must equal one of the caller's current
 * real, effective or saved uids.
 */
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!nsown_capable(CAP_SETUID)) {
		/* Each requested id must match one of the caller's current uids. */
		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		/* Real uid changed: switch the accounted user_struct too. */
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
880 
881 SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
882 {
883         const struct cred *cred = current_cred();
884         int retval;
885         uid_t ruid, euid, suid;
886 
887         ruid = from_kuid_munged(cred->user_ns, cred->uid);
888         euid = from_kuid_munged(cred->user_ns, cred->euid);
889         suid = from_kuid_munged(cred->user_ns, cred->suid);
890 
891         if (!(retval   = put_user(ruid, ruidp)) &&
892             !(retval   = put_user(euid, euidp)))
893                 retval = put_user(suid, suidp);
894 
895         return retval;
896 }
897 
898 /*
899  * Same as above, but for rgid, egid, sgid.
900  */
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
        struct user_namespace *ns = current_user_ns();
        const struct cred *old;
        struct cred *new;
        int retval;
        kgid_t krgid, kegid, ksgid;

        /* Map the caller-supplied ids into kernel-internal gids. */
        krgid = make_kgid(ns, rgid);
        kegid = make_kgid(ns, egid);
        ksgid = make_kgid(ns, sgid);

        /* -1 means "leave this id unchanged"; anything else must map cleanly. */
        if ((rgid != (gid_t) -1) && !gid_valid(krgid))
                return -EINVAL;
        if ((egid != (gid_t) -1) && !gid_valid(kegid))
                return -EINVAL;
        if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
                return -EINVAL;

        new = prepare_creds();
        if (!new)
                return -ENOMEM;
        old = current_cred();

        retval = -EPERM;
        if (!nsown_capable(CAP_SETGID)) {
                /*
                 * Without CAP_SETGID each requested id must equal one of
                 * the caller's current real, effective or saved gids.
                 */
                if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
                    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
                        goto error;
                if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
                    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
                        goto error;
                if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
                    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
                        goto error;
        }

        if (rgid != (gid_t) -1)
                new->gid = krgid;
        if (egid != (gid_t) -1)
                new->egid = kegid;
        if (sgid != (gid_t) -1)
                new->sgid = ksgid;
        /* fsgid always tracks the (possibly updated) effective gid. */
        new->fsgid = new->egid;

        return commit_creds(new);

error:
        abort_creds(new);
        return retval;
}
952 
953 SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
954 {
955         const struct cred *cred = current_cred();
956         int retval;
957         gid_t rgid, egid, sgid;
958 
959         rgid = from_kgid_munged(cred->user_ns, cred->gid);
960         egid = from_kgid_munged(cred->user_ns, cred->egid);
961         sgid = from_kgid_munged(cred->user_ns, cred->sgid);
962 
963         if (!(retval   = put_user(rgid, rgidp)) &&
964             !(retval   = put_user(egid, egidp)))
965                 retval = put_user(sgid, sgidp);
966 
967         return retval;
968 }
969 
970 
971 /*
972  * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
973  * is used for "access()" and for the NFS daemon (letting nfsd stay at
974  * whatever uid it wants to). It normally shadows "euid", except when
975  * explicitly set by setfsuid() or for access..
976  */
SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
        const struct cred *old;
        struct cred *new;
        uid_t old_fsuid;
        kuid_t kuid;

        old = current_cred();
        old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

        /* An unmappable uid is reported as "no change" rather than an error. */
        kuid = make_kuid(old->user_ns, uid);
        if (!uid_valid(kuid))
                return old_fsuid;

        new = prepare_creds();
        if (!new)
                return old_fsuid;

        /*
         * Allowed when the new fsuid matches any of the caller's current
         * uids, or when the caller has CAP_SETUID in its user namespace.
         */
        if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
            uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
            nsown_capable(CAP_SETUID)) {
                if (!uid_eq(kuid, old->fsuid)) {
                        new->fsuid = kuid;
                        /* The LSM gets a veto before the change commits. */
                        if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
                                goto change_okay;
                }
        }

        /* Historical ABI: the old fsuid is returned whether or not it changed. */
        abort_creds(new);
        return old_fsuid;

change_okay:
        commit_creds(new);
        return old_fsuid;
}
1012 
1013 /*
1014  * Samma på svenska..
1015  */
SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
        const struct cred *old;
        struct cred *new;
        gid_t old_fsgid;
        kgid_t kgid;

        old = current_cred();
        old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

        /* An unmappable gid is reported as "no change" rather than an error. */
        kgid = make_kgid(old->user_ns, gid);
        if (!gid_valid(kgid))
                return old_fsgid;

        new = prepare_creds();
        if (!new)
                return old_fsgid;

        /*
         * Allowed when the new fsgid matches any of the caller's current
         * gids, or when the caller has CAP_SETGID in its user namespace.
         * (Unlike setfsuid() above, no LSM fixup hook is consulted here.)
         */
        if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
            gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
            nsown_capable(CAP_SETGID)) {
                if (!gid_eq(kgid, old->fsgid)) {
                        new->fsgid = kgid;
                        goto change_okay;
                }
        }

        /* Historical ABI: the old fsgid is returned whether or not it changed. */
        abort_creds(new);
        return old_fsgid;

change_okay:
        commit_creds(new);
        return old_fsgid;
}
1050 
1051 void do_sys_times(struct tms *tms)
1052 {
1053         cputime_t tgutime, tgstime, cutime, cstime;
1054 
1055         spin_lock_irq(&current->sighand->siglock);
1056         thread_group_cputime_adjusted(current, &tgutime, &tgstime);
1057         cutime = current->signal->cutime;
1058         cstime = current->signal->cstime;
1059         spin_unlock_irq(&current->sighand->siglock);
1060         tms->tms_utime = cputime_to_clock_t(tgutime);
1061         tms->tms_stime = cputime_to_clock_t(tgstime);
1062         tms->tms_cutime = cputime_to_clock_t(cutime);
1063         tms->tms_cstime = cputime_to_clock_t(cstime);
1064 }
1065 
1066 SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
1067 {
1068         if (tbuf) {
1069                 struct tms tmp;
1070 
1071                 do_sys_times(&tmp);
1072                 if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
1073                         return -EFAULT;
1074         }
1075         force_successful_syscall_return();
1076         return (long) jiffies_64_to_clock_t(get_jiffies_64());
1077 }
1078 
1079 /*
1080  * This needs some heavy checking ...
1081  * I just haven't the stomach for it. I also don't fully
1082  * understand sessions/pgrp etc. Let somebody who does explain it.
1083  *
1084  * OK, I think I have the protection semantics right.... this is really
1085  * only important on a multi-user system anyway, to make sure one user
1086  * can't send a signal to a process owned by another.  -TYT, 12/12/91
1087  *
1088  * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
1089  * LBT 04.03.94
1090  */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
        struct task_struct *p;
        struct task_struct *group_leader = current->group_leader;
        struct pid *pgrp;
        int err;

        /* pid == 0 means the caller; pgid == 0 means "same as pid". */
        if (!pid)
                pid = task_pid_vnr(group_leader);
        if (!pgid)
                pgid = pid;
        if (pgid < 0)
                return -EINVAL;
        rcu_read_lock();

        /* From this point forward we keep holding onto the tasklist lock
         * so that our parent does not change from under us. -DaveM
         */
        write_lock_irq(&tasklist_lock);

        err = -ESRCH;
        p = find_task_by_vpid(pid);
        if (!p)
                goto out;

        /* Only whole processes (thread-group leaders) have a pgid. */
        err = -EINVAL;
        if (!thread_group_leader(p))
                goto out;

        if (same_thread_group(p->real_parent, group_leader)) {
                /* Moving a child: it must share our session and must not
                 * have exec'ed yet (POSIX EACCES). */
                err = -EPERM;
                if (task_session(p) != task_session(group_leader))
                        goto out;
                err = -EACCES;
                if (p->did_exec)
                        goto out;
        } else {
                /* Not our child: a process may only move itself. */
                err = -ESRCH;
                if (p != group_leader)
                        goto out;
        }

        /* Session leaders may not change their process group. */
        err = -EPERM;
        if (p->signal->leader)
                goto out;

        pgrp = task_pid(p);
        if (pgid != pid) {
                struct task_struct *g;

                /* Joining an existing group: it must live in our session. */
                pgrp = find_vpid(pgid);
                g = pid_task(pgrp, PIDTYPE_PGID);
                if (!g || task_session(g) != task_session(group_leader))
                        goto out;
        }

        err = security_task_setpgid(p, pgid);
        if (err)
                goto out;

        if (task_pgrp(p) != pgrp)
                change_pid(p, PIDTYPE_PGID, pgrp);

        err = 0;
out:
        /* All paths lead to here, thus we are safe. -DaveM */
        write_unlock_irq(&tasklist_lock);
        rcu_read_unlock();
        return err;
}
1161 
1162 SYSCALL_DEFINE1(getpgid, pid_t, pid)
1163 {
1164         struct task_struct *p;
1165         struct pid *grp;
1166         int retval;
1167 
1168         rcu_read_lock();
1169         if (!pid)
1170                 grp = task_pgrp(current);
1171         else {
1172                 retval = -ESRCH;
1173                 p = find_task_by_vpid(pid);
1174                 if (!p)
1175                         goto out;
1176                 grp = task_pgrp(p);
1177                 if (!grp)
1178                         goto out;
1179 
1180                 retval = security_task_getpgid(p);
1181                 if (retval)
1182                         goto out;
1183         }
1184         retval = pid_vnr(grp);
1185 out:
1186         rcu_read_unlock();
1187         return retval;
1188 }
1189 
1190 #ifdef __ARCH_WANT_SYS_GETPGRP
1191 
/* getpgrp() is simply getpgid(0): the caller's own process group id. */
SYSCALL_DEFINE0(getpgrp)
{
        return sys_getpgid(0);
}
1196 
1197 #endif
1198 
1199 SYSCALL_DEFINE1(getsid, pid_t, pid)
1200 {
1201         struct task_struct *p;
1202         struct pid *sid;
1203         int retval;
1204 
1205         rcu_read_lock();
1206         if (!pid)
1207                 sid = task_session(current);
1208         else {
1209                 retval = -ESRCH;
1210                 p = find_task_by_vpid(pid);
1211                 if (!p)
1212                         goto out;
1213                 sid = task_session(p);
1214                 if (!sid)
1215                         goto out;
1216 
1217                 retval = security_task_getsid(p);
1218                 if (retval)
1219                         goto out;
1220         }
1221         retval = pid_vnr(sid);
1222 out:
1223         rcu_read_unlock();
1224         return retval;
1225 }
1226 
SYSCALL_DEFINE0(setsid)
{
        struct task_struct *group_leader = current->group_leader;
        struct pid *sid = task_pid(group_leader);
        pid_t session = pid_vnr(sid);
        int err = -EPERM;

        write_lock_irq(&tasklist_lock);
        /* Fail if I am already a session leader */
        if (group_leader->signal->leader)
                goto out;

        /* Fail if a process group id already exists that equals the
         * proposed session id.
         */
        if (pid_task(sid, PIDTYPE_PGID))
                goto out;

        group_leader->signal->leader = 1;
        /* Become leader of a new session and new process group. */
        __set_special_pids(sid);

        /* A new session starts with no controlling terminal. */
        proc_clear_tty(group_leader);

        /* On success, return the new session id. */
        err = session;
out:
        write_unlock_irq(&tasklist_lock);
        if (err > 0) {
                /* Notifications and autogroup setup must happen outside
                 * the tasklist lock. */
                proc_sid_connector(group_leader);
                sched_autogroup_create_attach(group_leader);
        }
        return err;
}
1259 
/* Serializes readers and writers of the global utsname() data. */
DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
/*
 * When the task runs with the PER_LINUX32 personality, report the compat
 * machine string instead of the native one.  Evaluates to non-zero if
 * the copy to userspace faults.
 */
#define override_architecture(name) \
        (personality(current->personality) == PER_LINUX32 && \
         copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
                      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)     0
#endif
1270 
1271 /*
1272  * Work around broken programs that cannot handle "Linux 3.0".
1273  * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
1274  */
static int override_release(char __user *release, size_t len)
{
        int ret = 0;

        if (current->personality & UNAME26) {
                const char *rest = UTS_RELEASE;
                char buf[65] = { 0 };
                int ndots = 0;
                unsigned v;
                size_t copy;

                /*
                 * Advance 'rest' past the numeric "major.minor" prefix so
                 * it points at the remainder (e.g. ".1-rc2"): stop at the
                 * third dot or the first character that is neither a digit
                 * nor a dot.
                 */
                while (*rest) {
                        if (*rest == '.' && ++ndots >= 3)
                                break;
                        if (!isdigit(*rest) && *rest != '.')
                                break;
                        rest++;
                }
                /* Map 3.x to a fake 2.6.(40+x) minor number. */
                v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
                /* Bound formatting by both the scratch buffer and the
                 * caller-visible field length. */
                copy = clamp_t(size_t, len, 1, sizeof(buf));
                copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
                /* +1 so the NUL terminator is copied as well. */
                ret = copy_to_user(release, buf, copy + 1);
        }
        return ret;
}
1300 
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
        int errno = 0;

        down_read(&uts_sem);
        if (copy_to_user(name, utsname(), sizeof *name))
                errno = -EFAULT;
        up_read(&uts_sem);

        /* Patch up the userspace copy for UNAME26 / PER_LINUX32 tasks. */
        if (!errno && override_release(name->release, sizeof(name->release)))
                errno = -EFAULT;
        if (!errno && override_architecture(name))
                errno = -EFAULT;
        return errno;
}
1316 
1317 #ifdef __ARCH_WANT_SYS_OLD_UNAME
1318 /*
1319  * Old cruft
1320  */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
        int error = 0;

        if (!name)
                return -EFAULT;

        down_read(&uts_sem);
        if (copy_to_user(name, utsname(), sizeof(*name)))
                error = -EFAULT;
        up_read(&uts_sem);

        /* Patch up the userspace copy for UNAME26 / PER_LINUX32 tasks. */
        if (!error && override_release(name->release, sizeof(name->release)))
                error = -EFAULT;
        if (!error && override_architecture(name))
                error = -EFAULT;
        return error;
}
1339 
SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
        int error;

        if (!name)
                return -EFAULT;
        if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
                return -EFAULT;

        /*
         * Copy each field truncated to __OLD_UTS_LEN and force a NUL
         * terminator.  The unchecked __copy_to_user/__put_user variants
         * are safe here because of the access_ok() above.
         */
        down_read(&uts_sem);
        error = __copy_to_user(&name->sysname, &utsname()->sysname,
                               __OLD_UTS_LEN);
        error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
        error |= __copy_to_user(&name->nodename, &utsname()->nodename,
                                __OLD_UTS_LEN);
        error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
        error |= __copy_to_user(&name->release, &utsname()->release,
                                __OLD_UTS_LEN);
        error |= __put_user(0, name->release + __OLD_UTS_LEN);
        error |= __copy_to_user(&name->version, &utsname()->version,
                                __OLD_UTS_LEN);
        error |= __put_user(0, name->version + __OLD_UTS_LEN);
        error |= __copy_to_user(&name->machine, &utsname()->machine,
                                __OLD_UTS_LEN);
        error |= __put_user(0, name->machine + __OLD_UTS_LEN);
        up_read(&uts_sem);

        /* Patch up the userspace copy for UNAME26 / PER_LINUX32 tasks. */
        if (!error && override_architecture(name))
                error = -EFAULT;
        if (!error && override_release(name->release, sizeof(name->release)))
                error = -EFAULT;
        /* Collapse any accumulated fault into a single -EFAULT. */
        return error ? -EFAULT : 0;
}
1373 #endif
1374 
SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
        int errno;
        char tmp[__NEW_UTS_LEN];

        /* Needs CAP_SYS_ADMIN over the uts namespace's owning user ns. */
        if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
                return -EPERM;

        if (len < 0 || len > __NEW_UTS_LEN)
                return -EINVAL;
        /* TOMOYO/CCS mandatory access control check (out-of-tree hook). */
        if (!ccs_capable(CCS_SYS_SETHOSTNAME))
                return -EPERM;
        down_write(&uts_sem);
        errno = -EFAULT;
        if (!copy_from_user(tmp, name, len)) {
                struct new_utsname *u = utsname();

                /* The memset pads with NULs, so nodename stays terminated
                 * (its field is __NEW_UTS_LEN + 1 bytes). */
                memcpy(u->nodename, tmp, len);
                memset(u->nodename + len, 0, sizeof(u->nodename) - len);
                errno = 0;
                uts_proc_notify(UTS_PROC_HOSTNAME);
        }
        up_write(&uts_sem);
        return errno;
}
1400 
1401 #ifdef __ARCH_WANT_SYS_GETHOSTNAME
1402 
1403 SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1404 {
1405         int i, errno;
1406         struct new_utsname *u;
1407 
1408         if (len < 0)
1409                 return -EINVAL;
1410         down_read(&uts_sem);
1411         u = utsname();
1412         i = 1 + strlen(u->nodename);
1413         if (i > len)
1414                 i = len;
1415         errno = 0;
1416         if (copy_to_user(name, u->nodename, i))
1417                 errno = -EFAULT;
1418         up_read(&uts_sem);
1419         return errno;
1420 }
1421 
1422 #endif
1423 
1424 /*
1425  * Only setdomainname; getdomainname can be implemented by calling
1426  * uname()
1427  */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
        int errno;
        char tmp[__NEW_UTS_LEN];

        /* Needs CAP_SYS_ADMIN over the uts namespace's owning user ns. */
        if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
                return -EPERM;
        if (len < 0 || len > __NEW_UTS_LEN)
                return -EINVAL;
        /* TOMOYO/CCS check; note it reuses the SETHOSTNAME capability,
         * matching sethostname() above. */
        if (!ccs_capable(CCS_SYS_SETHOSTNAME))
                return -EPERM;

        down_write(&uts_sem);
        errno = -EFAULT;
        if (!copy_from_user(tmp, name, len)) {
                struct new_utsname *u = utsname();

                /* The memset pads with NULs, so domainname stays terminated
                 * (its field is __NEW_UTS_LEN + 1 bytes). */
                memcpy(u->domainname, tmp, len);
                memset(u->domainname + len, 0, sizeof(u->domainname) - len);
                errno = 0;
                uts_proc_notify(UTS_PROC_DOMAINNAME);
        }
        up_write(&uts_sem);
        return errno;
}
1453 
1454 SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1455 {
1456         struct rlimit value;
1457         int ret;
1458 
1459         ret = do_prlimit(current, resource, NULL, &value);
1460         if (!ret)
1461                 ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1462 
1463         return ret;
1464 }
1465 
1466 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1467 
1468 /*
1469  *      Back compatibility for getrlimit. Needed for some apps.
1470  */
1471  
1472 SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1473                 struct rlimit __user *, rlim)
1474 {
1475         struct rlimit x;
1476         if (resource >= RLIM_NLIMITS)
1477                 return -EINVAL;
1478 
1479         task_lock(current->group_leader);
1480         x = current->signal->rlim[resource];
1481         task_unlock(current->group_leader);
1482         if (x.rlim_cur > 0x7FFFFFFF)
1483                 x.rlim_cur = 0x7FFFFFFF;
1484         if (x.rlim_max > 0x7FFFFFFF)
1485                 x.rlim_max = 0x7FFFFFFF;
1486         return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
1487 }
1488 
1489 #endif
1490 
/* Does this 64-bit ABI value mean "no limit" on the running kernel? */
static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
        /* Anything that cannot fit in an unsigned long acts as infinity. */
        return rlim64 >= ULONG_MAX;
#else
        return rlim64 == RLIM64_INFINITY;
#endif
}
1499 
1500 static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1501 {
1502         if (rlim->rlim_cur == RLIM_INFINITY)
1503                 rlim64->rlim_cur = RLIM64_INFINITY;
1504         else
1505                 rlim64->rlim_cur = rlim->rlim_cur;
1506         if (rlim->rlim_max == RLIM_INFINITY)
1507                 rlim64->rlim_max = RLIM64_INFINITY;
1508         else
1509                 rlim64->rlim_max = rlim->rlim_max;
1510 }
1511 
1512 static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1513 {
1514         if (rlim64_is_infinity(rlim64->rlim_cur))
1515                 rlim->rlim_cur = RLIM_INFINITY;
1516         else
1517                 rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1518         if (rlim64_is_infinity(rlim64->rlim_max))
1519                 rlim->rlim_max = RLIM_INFINITY;
1520         else
1521                 rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1522 }
1523 
/*
 * do_prlimit - read and/or write one resource limit of a task
 * @tsk:      target task
 * @resource: RLIMIT_* index to operate on
 * @new_rlim: new limit to install, or NULL to only read
 * @old_rlim: where to return the previous limit, or NULL
 *
 * make sure you are allowed to change @tsk limits before calling this
 */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
                struct rlimit *new_rlim, struct rlimit *old_rlim)
{
        struct rlimit *rlim;
        int retval = 0;

        if (resource >= RLIM_NLIMITS)
                return -EINVAL;
        if (new_rlim) {
                if (new_rlim->rlim_cur > new_rlim->rlim_max)
                        return -EINVAL;
                if (resource == RLIMIT_NOFILE &&
                                new_rlim->rlim_max > sysctl_nr_open)
                        return -EPERM;
        }

        /* protect tsk->signal and tsk->sighand from disappearing */
        read_lock(&tasklist_lock);
        if (!tsk->sighand) {
                retval = -ESRCH;
                goto out;
        }

        rlim = tsk->signal->rlim + resource;
        task_lock(tsk->group_leader);
        if (new_rlim) {
                /* Keep the capable check against init_user_ns until
                   cgroups can contain all limits */
                if (new_rlim->rlim_max > rlim->rlim_max &&
                                !capable(CAP_SYS_RESOURCE))
                        retval = -EPERM;
                if (!retval)
                        retval = security_task_setrlimit(tsk->group_leader,
                                        resource, new_rlim);
                if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
                        /*
                         * The caller is asking for an immediate RLIMIT_CPU
                         * expiry.  But we use the zero value to mean "it was
                         * never set".  So let's cheat and make it one second
                         * instead
                         */
                        new_rlim->rlim_cur = 1;
                }
        }
        /* Only read and/or write the limit once all checks passed. */
        if (!retval) {
                if (old_rlim)
                        *old_rlim = *rlim;
                if (new_rlim)
                        *rlim = *new_rlim;
        }
        task_unlock(tsk->group_leader);

        /*
         * RLIMIT_CPU handling.   Note that the kernel fails to return an error
         * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
         * very long-standing error, and fixing it now risks breakage of
         * applications, so we live with it
         */
         if (!retval && new_rlim && resource == RLIMIT_CPU &&
                         new_rlim->rlim_cur != RLIM_INFINITY)
                update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
        read_unlock(&tasklist_lock);
        return retval;
}
1590 
1591 /* rcu lock must be held */
1592 static int check_prlimit_permission(struct task_struct *task)
1593 {
1594         const struct cred *cred = current_cred(), *tcred;
1595 
1596         if (current == task)
1597                 return 0;
1598 
1599         tcred = __task_cred(task);
1600         if (uid_eq(cred->uid, tcred->euid) &&
1601             uid_eq(cred->uid, tcred->suid) &&
1602             uid_eq(cred->uid, tcred->uid)  &&
1603             gid_eq(cred->gid, tcred->egid) &&
1604             gid_eq(cred->gid, tcred->sgid) &&
1605             gid_eq(cred->gid, tcred->gid))
1606                 return 0;
1607         if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1608                 return 0;
1609 
1610         return -EPERM;
1611 }
1612 
SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
                const struct rlimit64 __user *, new_rlim,
                struct rlimit64 __user *, old_rlim)
{
        struct rlimit64 old64, new64;
        struct rlimit old, new;
        struct task_struct *tsk;
        int ret;

        /* NULL new_rlim means "read only"; NULL old_rlim means "set only". */
        if (new_rlim) {
                if (copy_from_user(&new64, new_rlim, sizeof(new64)))
                        return -EFAULT;
                rlim64_to_rlim(&new64, &new);
        }

        rcu_read_lock();
        tsk = pid ? find_task_by_vpid(pid) : current;
        if (!tsk) {
                rcu_read_unlock();
                return -ESRCH;
        }
        ret = check_prlimit_permission(tsk);
        if (ret) {
                rcu_read_unlock();
                return ret;
        }
        /* Pin the task so it cannot go away once we drop the RCU lock. */
        get_task_struct(tsk);
        rcu_read_unlock();

        ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
                        old_rlim ? &old : NULL);

        if (!ret && old_rlim) {
                rlim_to_rlim64(&old, &old64);
                if (copy_to_user(old_rlim, &old64, sizeof(old64)))
                        ret = -EFAULT;
        }

        put_task_struct(tsk);
        return ret;
}
1654 
1655 SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1656 {
1657         struct rlimit new_rlim;
1658 
1659         if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1660                 return -EFAULT;
1661         return do_prlimit(current, resource, &new_rlim, NULL);
1662 }
1663 
1664 /*
1665  * It would make sense to put struct rusage in the task_struct,
1666  * except that would make the task_struct be *really big*.  After
1667  * task_struct gets moved into malloc'ed memory, it would
1668  * make sense to do this.  It will make moving the rest of the information
1669  * a lot simpler!  (Which we're not doing right now because we're not
1670  * measuring them yet).
1671  *
1672  * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1673  * races with threads incrementing their own counters.  But since word
1674  * reads are atomic, we either get new values or old values and we don't
1675  * care which for the sums.  We always take the siglock to protect reading
1676  * the c* fields from p->signal from races with exit.c updating those
1677  * fields when reaping, so a sample either gets all the additions of a
1678  * given child after it's reaped, or none so this sample is before reaping.
1679  *
1680  * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
1682  * for  the cases current multithreaded, non-current single threaded
1683  * non-current multithreaded.  Thread traversal is now safe with
1684  * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
1686  * single threaded,  as no one else can take our signal_struct away, no one
1687  * else can  reap the  children to update signal->c* counters, and no one else
1688  * can race with the signal-> fields. If we do not take any lock, the
1689  * signal-> fields could be read out of order while another thread was just
1690  * exiting. So we should  place a read memory barrier when we avoid the lock.
1691  * On the writer side,  write memory barrier is implied in  __exit_signal
1692  * as __exit_signal releases  the siglock spinlock after updating the signal->
1693  * fields. But we don't do this yet to keep things simple.
1694  *
1695  */
1696 
1697 static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1698 {
1699         r->ru_nvcsw += t->nvcsw;
1700         r->ru_nivcsw += t->nivcsw;
1701         r->ru_minflt += t->min_flt;
1702         r->ru_majflt += t->maj_flt;
1703         r->ru_inblock += task_io_get_inblock(t);
1704         r->ru_oublock += task_io_get_oublock(t);
1705 }
1706 
/*
 * Collect resource-usage statistics for @p into @r.  @who selects scope:
 * RUSAGE_THREAD (caller's thread only), RUSAGE_SELF (whole thread group),
 * RUSAGE_CHILDREN (reaped children), RUSAGE_BOTH (self plus children).
 */
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
        struct task_struct *t;
        unsigned long flags;
        cputime_t tgutime, tgstime, utime, stime;
        unsigned long maxrss = 0;

        memset((char *) r, 0, sizeof *r);
        utime = stime = 0;

        if (who == RUSAGE_THREAD) {
                task_cputime_adjusted(current, &utime, &stime);
                accumulate_thread_rusage(p, r);
                maxrss = p->signal->maxrss;
                goto out;
        }

        /* If the target's sighand is already gone it is exiting; there is
         * nothing coherent left to report. */
        if (!lock_task_sighand(p, &flags))
                return;

        switch (who) {
                case RUSAGE_BOTH:
                case RUSAGE_CHILDREN:
                        /* c* fields accumulate stats of reaped children. */
                        utime = p->signal->cutime;
                        stime = p->signal->cstime;
                        r->ru_nvcsw = p->signal->cnvcsw;
                        r->ru_nivcsw = p->signal->cnivcsw;
                        r->ru_minflt = p->signal->cmin_flt;
                        r->ru_majflt = p->signal->cmaj_flt;
                        r->ru_inblock = p->signal->cinblock;
                        r->ru_oublock = p->signal->coublock;
                        maxrss = p->signal->cmaxrss;

                        if (who == RUSAGE_CHILDREN)
                                break;
                        /* fall through: RUSAGE_BOTH adds self on top */

                case RUSAGE_SELF:
                        thread_group_cputime_adjusted(p, &tgutime, &tgstime);
                        utime += tgutime;
                        stime += tgstime;
                        /* signal-> fields hold totals of dead threads; live
                         * threads are walked below. */
                        r->ru_nvcsw += p->signal->nvcsw;
                        r->ru_nivcsw += p->signal->nivcsw;
                        r->ru_minflt += p->signal->min_flt;
                        r->ru_majflt += p->signal->maj_flt;
                        r->ru_inblock += p->signal->inblock;
                        r->ru_oublock += p->signal->oublock;
                        if (maxrss < p->signal->maxrss)
                                maxrss = p->signal->maxrss;
                        t = p;
                        do {
                                accumulate_thread_rusage(t, r);
                                t = next_thread(t);
                        } while (t != p);
                        break;

                default:
                        BUG();
        }
        unlock_task_sighand(p, &flags);

out:
        cputime_to_timeval(utime, &r->ru_utime);
        cputime_to_timeval(stime, &r->ru_stime);

        if (who != RUSAGE_CHILDREN) {
                struct mm_struct *mm = get_task_mm(p);
                if (mm) {
                        setmax_mm_hiwater_rss(&maxrss, mm);
                        mmput(mm);
                }
        }
        r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}
1780 
1781 int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
1782 {
1783         struct rusage r;
1784         k_getrusage(p, who, &r);
1785         return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1786 }
1787 
1788 SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1789 {
1790         if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1791             who != RUSAGE_THREAD)
1792                 return -EINVAL;
1793         return getrusage(current, who, ru);
1794 }
1795 
1796 SYSCALL_DEFINE1(umask, int, mask)
1797 {
1798         mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1799         return mask;
1800 }
1801 
1802 #ifdef CONFIG_CHECKPOINT_RESTORE
/*
 * Replace mm->exe_file (the target of /proc/pid/exe) with the file behind
 * @fd, for checkpoint/restore.  The replacement must be a regular,
 * executable file on a non-MNT_NOEXEC mount, the old exe file must no
 * longer be mapped, and the swap is permitted at most once per mm
 * (guarded by MMF_EXE_FILE_CHANGED).  Returns 0 or a negative errno.
 */
static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct dentry *dentry;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	dentry = exe.file->f_path.dentry;

	/*
	 * Because the original mm->exe_file points to executable file, make
	 * sure that this one is executable as well, to avoid breaking an
	 * overall picture.
	 */
	err = -EACCES;
	if (!S_ISREG(dentry->d_inode->i_mode)	||
	    exe.file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	err = inode_permission(dentry->d_inode, MAY_EXEC);
	if (err)
		goto exit;

	/* mmap_sem held for write: we walk and then mutate mm state below. */
	down_write(&mm->mmap_sem);

	/*
	 * Forbid mm->exe_file change if old file still mapped.
	 */
	err = -EBUSY;
	if (mm->exe_file) {
		struct vm_area_struct *vma;

		/* Any VMA still backed by the old exe path blocks the swap. */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if (vma->vm_file &&
			    path_equal(&vma->vm_file->f_path,
				       &mm->exe_file->f_path))
				goto exit_unlock;
	}

	/*
	 * The symlink can be changed only once, just to disallow arbitrary
	 * transitions malicious software might bring in. This means one
	 * could make a snapshot over all processes running and monitor
	 * /proc/pid/exe changes to notice unusual activity if needed.
	 */
	err = -EPERM;
	if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
		goto exit_unlock;

	err = 0;
	set_mm_exe_file(mm, exe.file);	/* this grabs a reference to exe.file */
exit_unlock:
	up_write(&mm->mmap_sem);

exit:
	fdput(exe);
	return err;
}
1864 
/*
 * prctl(PR_SET_MM, opt, addr, arg4, arg5): let a CAP_SYS_RESOURCE task
 * rewrite the layout bookkeeping of its own mm (code/data/brk/stack
 * bounds, arg/env ranges, the saved auxv, or the exe file), primarily
 * for checkpoint/restore.  Returns 0 on success or a negative errno.
 */
static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	unsigned long rlim = rlimit(RLIMIT_DATA);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int error;

	/* arg4 is only meaningful as the buffer length for PR_SET_MM_AUXV. */
	if (arg5 || (arg4 && opt != PR_SET_MM_AUXV))
		return -EINVAL;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	/* For PR_SET_MM_EXE_FILE, @addr carries an fd, not an address. */
	if (opt == PR_SET_MM_EXE_FILE)
		return prctl_set_mm_exe_file(mm, (unsigned int)addr);

	/* Every remaining option takes a user-space address in @addr. */
	if (addr >= TASK_SIZE || addr < mmap_min_addr)
		return -EINVAL;

	error = -EINVAL;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);

	switch (opt) {
	case PR_SET_MM_START_CODE:
		mm->start_code = addr;
		break;
	case PR_SET_MM_END_CODE:
		mm->end_code = addr;
		break;
	case PR_SET_MM_START_DATA:
		mm->start_data = addr;
		break;
	case PR_SET_MM_END_DATA:
		mm->end_data = addr;
		break;

	case PR_SET_MM_START_BRK:
		/* Heap must start above the data segment ... */
		if (addr <= mm->end_data)
			goto out;

		/* ... and the resulting data+brk span must fit RLIMIT_DATA. */
		if (rlim < RLIM_INFINITY &&
		    (mm->brk - addr) +
		    (mm->end_data - mm->start_data) > rlim)
			goto out;

		mm->start_brk = addr;
		break;

	case PR_SET_MM_BRK:
		/* Same constraints as above, moving the top of the heap. */
		if (addr <= mm->end_data)
			goto out;

		if (rlim < RLIM_INFINITY &&
		    (addr - mm->start_brk) +
		    (mm->end_data - mm->start_data) > rlim)
			goto out;

		mm->brk = addr;
		break;

	/*
	 * If command line arguments and environment
	 * are placed somewhere else on stack, we can
	 * set them up here, ARG_START/END to setup
	 * command line argumets and ENV_START/END
	 * for environment.
	 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		/* These addresses must lie inside some existing mapping. */
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
		if (opt == PR_SET_MM_START_STACK)
			mm->start_stack = addr;
		else if (opt == PR_SET_MM_ARG_START)
			mm->arg_start = addr;
		else if (opt == PR_SET_MM_ARG_END)
			mm->arg_end = addr;
		else if (opt == PR_SET_MM_ENV_START)
			mm->env_start = addr;
		else if (opt == PR_SET_MM_ENV_END)
			mm->env_end = addr;
		break;

	/*
	 * This doesn't move auxiliary vector itself
	 * since it's pinned to mm_struct, but allow
	 * to fill vector with new values. It's up
	 * to a caller to provide sane values here
	 * otherwise user space tools which use this
	 * vector might be unhappy.
	 */
	case PR_SET_MM_AUXV: {
		unsigned long user_auxv[AT_VECTOR_SIZE];

		if (arg4 > sizeof(user_auxv))
			goto out;
		/* Drop mmap_sem before copy_from_user(), which may fault. */
		up_read(&mm->mmap_sem);

		if (copy_from_user(user_auxv, (const void __user *)addr, arg4))
			return -EFAULT;

		/* Make sure the last entry is always AT_NULL */
		user_auxv[AT_VECTOR_SIZE - 2] = 0;
		user_auxv[AT_VECTOR_SIZE - 1] = 0;

		BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

		task_lock(current);
		memcpy(mm->saved_auxv, user_auxv, arg4);
		task_unlock(current);

		/* mmap_sem already released above; return directly. */
		return 0;
	}
	default:
		goto out;
	}

	error = 0;
out:
	up_read(&mm->mmap_sem);
	return error;
}
1995 
/*
 * Copy @me->clear_child_tid out to user space.  NOTE(review): this is
 * presumably the address registered via set_tid_address()/
 * CLONE_CHILD_CLEARTID — confirm against clone()/fork paths.
 */
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
2000 
2001 #else /* CONFIG_CHECKPOINT_RESTORE */
/* Stub: PR_SET_MM is unsupported without CONFIG_CHECKPOINT_RESTORE. */
static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	return -EINVAL;
}
/* Stub: PR_GET_TID_ADDRESS is likewise gated on checkpoint/restore here. */
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return -EINVAL;
}
2011 #endif
2012 
2013 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2014                 unsigned long, arg4, unsigned long, arg5)
2015 {
2016         struct task_struct *me = current;
2017         unsigned char comm[sizeof(me->comm)];
2018         long error;
2019 
2020         error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2021         if (error != -ENOSYS)
2022                 return error;
2023 
2024         error = 0;
2025         switch (option) {
2026                 case PR_SET_PDEATHSIG:
2027                         if (!valid_signal(arg2)) {
2028                                 error = -EINVAL;
2029                                 break;
2030                         }
2031                         me->pdeath_signal = arg2;
2032                         break;
2033                 case PR_GET_PDEATHSIG:
2034                         error = put_user(me->pdeath_signal, (int __user *)arg2);
2035                         break;
2036                 case PR_GET_DUMPABLE:
2037                         error = get_dumpable(me->mm);
2038                         break;
2039                 case PR_SET_DUMPABLE:
2040                         if (arg2 < 0 || arg2 > 1) {
2041                                 error = -EINVAL;
2042                                 break;
2043                         }
2044                         set_dumpable(me->mm, arg2);
2045                         break;
2046 
2047                 case PR_SET_UNALIGN:
2048                         error = SET_UNALIGN_CTL(me, arg2);
2049                         break;
2050                 case PR_GET_UNALIGN:
2051                         error = GET_UNALIGN_CTL(me, arg2);
2052                         break;
2053                 case PR_SET_FPEMU:
2054                         error = SET_FPEMU_CTL(me, arg2);
2055                         break;
2056                 case PR_GET_FPEMU:
2057                         error = GET_FPEMU_CTL(me, arg2);
2058                         break;
2059                 case PR_SET_FPEXC:
2060                         error = SET_FPEXC_CTL(me, arg2);
2061                         break;
2062                 case PR_GET_FPEXC:
2063                         error = GET_FPEXC_CTL(me, arg2);
2064                         break;
2065                 case PR_GET_TIMING:
2066                         error = PR_TIMING_STATISTICAL;
2067                         break;
2068                 case PR_SET_TIMING:
2069                         if (arg2 != PR_TIMING_STATISTICAL)
2070                                 error = -EINVAL;
2071                         break;
2072                 case PR_SET_NAME:
2073                         comm[sizeof(me->comm)-1] = 0;
2074                         if (strncpy_from_user(comm, (char __user *)arg2,
2075                                               sizeof(me->comm) - 1) < 0)
2076                                 return -EFAULT;
2077                         set_task_comm(me, comm);
2078                         proc_comm_connector(me);
2079                         break;
2080                 case PR_GET_NAME:
2081                         get_task_comm(comm, me);
2082                         if (copy_to_user((char __user *)arg2, comm,
2083                                          sizeof(comm)))
2084                                 return -EFAULT;
2085                         break;
2086                 case PR_GET_ENDIAN:
2087                         error = GET_ENDIAN(me, arg2);
2088                         break;
2089                 case PR_SET_ENDIAN:
2090                         error = SET_ENDIAN(me, arg2);
2091                         break;
2092                 case PR_GET_SECCOMP:
2093                         error = prctl_get_seccomp();
2094                         break;
2095                 case PR_SET_SECCOMP:
2096                         error = prctl_set_seccomp(arg2, (char __user *)arg3);
2097                         break;
2098                 case PR_GET_TSC:
2099                         error = GET_TSC_CTL(arg2);
2100                         break;
2101                 case PR_SET_TSC:
2102                         error = SET_TSC_CTL(arg2);
2103                         break;
2104                 case PR_TASK_PERF_EVENTS_DISABLE:
2105                         error = perf_event_task_disable();
2106                         break;
2107                 case PR_TASK_PERF_EVENTS_ENABLE:
2108                         error = perf_event_task_enable();
2109                         break;
2110                 case PR_GET_TIMERSLACK:
2111                         error = current->timer_slack_ns;
2112                         break;
2113                 case PR_SET_TIMERSLACK:
2114                         if (arg2 <= 0)
2115                                 current->timer_slack_ns =
2116                                         current->default_timer_slack_ns;
2117                         else
2118                                 current->timer_slack_ns = arg2;
2119                         break;
2120                 case PR_MCE_KILL:
2121                         if (arg4 | arg5)
2122                                 return -EINVAL;
2123                         switch (arg2) {
2124                         case PR_MCE_KILL_CLEAR:
2125                                 if (arg3 != 0)
2126                                         return -EINVAL;
2127                                 current->flags &= ~PF_MCE_PROCESS;
2128                                 break;
2129                         case PR_MCE_KILL_SET:
2130                                 current->flags |= PF_MCE_PROCESS;
2131                                 if (arg3 == PR_MCE_KILL_EARLY)
2132                                         current->flags |= PF_MCE_EARLY;
2133                                 else if (arg3 == PR_MCE_KILL_LATE)
2134                                         current->flags &= ~PF_MCE_EARLY;
2135                                 else if (arg3 == PR_MCE_KILL_DEFAULT)
2136                                         current->flags &=
2137                                                 ~(PF_MCE_EARLY|PF_MCE_PROCESS);
2138                                 else
2139                                         return -EINVAL;
2140                                 break;
2141                         default:
2142                                 return -EINVAL;
2143                         }
2144                         break;
2145                 case PR_MCE_KILL_GET:
2146                         if (arg2 | arg3 | arg4 | arg5)
2147                                 return -EINVAL;
2148                         if (current->flags & PF_MCE_PROCESS)
2149                                 error = (current->flags & PF_MCE_EARLY) ?
2150                                         PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2151                         else
2152                                 error = PR_MCE_KILL_DEFAULT;
2153                         break;
2154                 case PR_SET_MM:
2155                         error = prctl_set_mm(arg2, arg3, arg4, arg5);
2156                         break;
2157                 case PR_GET_TID_ADDRESS:
2158                         error = prctl_get_tid_address(me, (int __user **)arg2);
2159                         break;
2160                 case PR_SET_CHILD_SUBREAPER:
2161                         me->signal->is_child_subreaper = !!arg2;
2162                         break;
2163                 case PR_GET_CHILD_SUBREAPER:
2164                         error = put_user(me->signal->is_child_subreaper,
2165                                          (int __user *) arg2);
2166                         break;
2167                 case PR_SET_NO_NEW_PRIVS:
2168                         if (arg2 != 1 || arg3 || arg4 || arg5)
2169                                 return -EINVAL;
2170 
2171                         current->no_new_privs = 1;
2172                         break;
2173                 case PR_GET_NO_NEW_PRIVS:
2174                         if (arg2 || arg3 || arg4 || arg5)
2175                                 return -EINVAL;
2176                         return current->no_new_privs ? 1 : 0;
2177                 default:
2178                         error = -EINVAL;
2179                         break;
2180         }
2181         return error;
2182 }
2183 
2184 SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2185                 struct getcpu_cache __user *, unused)
2186 {
2187         int err = 0;
2188         int cpu = raw_smp_processor_id();
2189         if (cpup)
2190                 err |= put_user(cpu, cpup);
2191         if (nodep)
2192                 err |= put_user(cpu_to_node(cpu), nodep);
2193         return err ? -EFAULT : 0;
2194 }
2195 
/*
 * Userspace command spawned by __orderly_poweroff().  NOTE(review):
 * presumably overridable at runtime (e.g. via a sysctl) since it is a
 * writable global — confirm where else it is referenced.
 */
char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
2197 
/*
 * Cleanup callback handed to call_usermodehelper_fns(): frees the argv
 * array that __orderly_poweroff() built with argv_split().
 */
static void argv_cleanup(struct subprocess_info *info)
{
	argv_free(info->argv);
}
2202 
2203 static int __orderly_poweroff(void)
2204 {
2205         int argc;
2206         char **argv;
2207         static char *envp[] = {
2208                 "HOME=/",
2209                 "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
2210                 NULL
2211         };
2212         int ret;
2213 
2214         argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
2215         if (argv == NULL) {
2216                 printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
2217                        __func__, poweroff_cmd);
2218                 return -ENOMEM;
2219         }
2220 
2221         ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC,
2222                                       NULL, argv_cleanup, NULL);
2223         if (ret == -ENOMEM)
2224                 argv_free(argv);
2225 
2226         return ret;
2227 }
2228 
2229 /**
2230  * orderly_poweroff - Trigger an orderly system poweroff
2231  * @force: force poweroff if command execution fails
2232  *
2233  * This may be called from any context to trigger a system shutdown.
2234  * If the orderly shutdown fails, it will force an immediate shutdown.
2235  */
2236 int orderly_poweroff(bool force)
2237 {
2238         int ret = __orderly_poweroff();
2239 
2240         if (ret && force) {
2241                 printk(KERN_WARNING "Failed to start orderly shutdown: "
2242                        "forcing the issue\n");
2243 
2244                 /*
2245                  * I guess this should try to kick off some daemon to sync and
2246                  * poweroff asap.  Or not even bother syncing if we're doing an
2247                  * emergency shutdown?
2248                  */
2249                 emergency_sync();
2250                 kernel_power_off();
2251         }
2252 
2253         return ret;
2254 }
2255 EXPORT_SYMBOL_GPL(orderly_poweroff);
2256 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp