/*
 *  linux/fs/binfmt_aout.c
 *
 *  Copyright (C) 1991, 1992, 1996  Linus Torvalds
 */

#include <linux/module.h>

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/a.out-core.h>

static int load_aout_binary(struct linux_binprm *, struct pt_regs *regs);
static int load_aout_library(struct file *);
static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);

static struct linux_binfmt aout_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_aout_binary,
	.load_shlib	= load_aout_library,
	.core_dump	= aout_core_dump,
	.min_coredump	= PAGE_SIZE
};

#define BAD_ADDR(x)	((unsigned long)(x) >= TASK_SIZE)

static int set_brk(unsigned long start, unsigned long end)
{
	start = PAGE_ALIGN(start);
	end = PAGE_ALIGN(end);
	if (end > start) {
		unsigned long addr;
		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	return 0;
}

/*
 * These are the only things you should do on a core-file: use only these
 * macros to write out all the necessary info.
 */

static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

#define DUMP_WRITE(addr, nr)	\
	if (!dump_write(file, (void *)(addr), (nr))) \
		goto end_coredump;

#define DUMP_SEEK(offset) \
	if (file->f_op->llseek) { \
		if (file->f_op->llseek(file, (offset), 0) != (offset)) \
			goto end_coredump; \
	} else file->f_pos = (offset)

/*
 * Routine writes a core dump image in the current directory.
 * Currently only a stub-function.
 *
 * Note that setuid/setgid files won't make a core-dump if the uid/gid
 * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable"
 * field, which also makes sure the core-dumps won't be recursive if the
 * dumping of the process results in another error.
 */
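/*
 * Layout of the resulting dump file: the first page holds the
 * "struct user" header written below, followed by the data segment
 * pages, the stack pages and finally a copy of the kernel's
 * task_struct.
 */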
static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
{
	mm_segment_t fs;
	int has_dumped = 0;
	unsigned long dump_start, dump_size;
	struct user dump;
#ifdef __alpha__
# define START_DATA(u)	(u.start_data)
#else
# define START_DATA(u)	((u.u_tsize << PAGE_SHIFT) + u.start_code)
#endif
# define START_STACK(u)	(u.start_stack)

	fs = get_fs();
	set_fs(KERNEL_DS);
	has_dumped = 1;
	current->flags |= PF_DUMPCORE;
	strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm));
	dump.u_ar0 = offsetof(struct user, regs);
	dump.signal = signr;
	aout_dump_thread(regs, &dump);

	/* If the size of the dump file exceeds the rlimit, then see what would happen
	   if we wrote the stack, but not the data area. */
	if ((dump.u_dsize + dump.u_ssize + 1) * PAGE_SIZE > limit)
		dump.u_dsize = 0;

	/* Make sure we have enough room to write the stack and data areas. */
	if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
		dump.u_ssize = 0;

	/* make sure we actually have a data and stack area to dump */
	set_fs(USER_DS);
	if (!access_ok(VERIFY_READ, (void __user *)START_DATA(dump), dump.u_dsize << PAGE_SHIFT))
		dump.u_dsize = 0;
	if (!access_ok(VERIFY_READ, (void __user *)START_STACK(dump), dump.u_ssize << PAGE_SHIFT))
		dump.u_ssize = 0;

	set_fs(KERNEL_DS);
	/* struct user */
	DUMP_WRITE(&dump, sizeof(dump));
	/* Now dump all of the user data.  Include malloced stuff as well */
	DUMP_SEEK(PAGE_SIZE);
	/* now we start writing out the user space info */
	set_fs(USER_DS);
	/* Dump the data area */
	if (dump.u_dsize != 0) {
		dump_start = START_DATA(dump);
		dump_size = dump.u_dsize << PAGE_SHIFT;
		DUMP_WRITE(dump_start, dump_size);
	}
	/* Now prepare to dump the stack area */
	if (dump.u_ssize != 0) {
		dump_start = START_STACK(dump);
		dump_size = dump.u_ssize << PAGE_SHIFT;
		DUMP_WRITE(dump_start, dump_size);
	}
	/* Finally dump the task struct.  Not used by gdb, but could be useful */
	set_fs(KERNEL_DS);
	DUMP_WRITE(current, sizeof(*current));
end_coredump:
	set_fs(fs);
	return has_dumped;
}

/*
 * create_aout_tables() parses the env- and arg-strings in new user
 * memory and creates the pointer tables from them, and puts their
 * addresses on the "stack", returning the new stack pointer value.
 */
static unsigned long __user *create_aout_tables(char __user *p, struct linux_binprm *bprm)
{
	char __user * __user *argv;
	char __user * __user *envp;
	unsigned long __user *sp;
	int argc = bprm->argc;
	int envc = bprm->envc;

	sp = (void __user *)((-(unsigned long)sizeof(char *)) & (unsigned long) p);
#ifdef __alpha__
	/* whee.. test-programs are so much fun. */
	put_user(0, --sp);
	put_user(0, --sp);
	if (bprm->loader) {
		put_user(0, --sp);
		put_user(1003, --sp);
		put_user(bprm->loader, --sp);
		put_user(1002, --sp);
	}
	put_user(bprm->exec, --sp);
	put_user(1001, --sp);
#endif
	sp -= envc + 1;
	envp = (char __user * __user *) sp;
	sp -= argc + 1;
	argv = (char __user * __user *) sp;
#ifndef __alpha__
	put_user((unsigned long) envp, --sp);
	put_user((unsigned long) argv, --sp);
#endif
	put_user(argc, --sp);
	current->mm->arg_start = (unsigned long) p;
	while (argc-- > 0) {
		char c;
		put_user(p, argv++);
		do {
			get_user(c, p++);
		} while (c);
	}
	put_user(NULL, argv);
	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
	while (envc-- > 0) {
		char c;
		put_user(p, envp++);
		do {
			get_user(c, p++);
		} while (c);
	}
	put_user(NULL, envp);
	current->mm->env_end = (unsigned long) p;
	return sp;
}

/*
 * These are the functions used to load a.out style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */
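/*
 * load_aout_binary() accepts the four classic a.out flavours (OMAGIC,
 * NMAGIC, ZMAGIC and QMAGIC).  OMAGIC images are read() into an
 * anonymous brk-style mapping; the other flavours are mmap()ed
 * directly from the file when their file offsets are page aligned,
 * and otherwise fall back to the read() path with a rate-limited
 * warning.
 */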
static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct exec ex;
	unsigned long error;
	unsigned long fd_offset;
	unsigned long rlim;
	int retval;

	ex = *((struct exec *) bprm->buf);		/* exec-header */
	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
	     N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
	    N_TRSIZE(ex) || N_DRSIZE(ex) ||
	    i_size_read(bprm->file->f_path.dentry->d_inode) < ex.a_text + ex.a_data + N_SYMSIZE(ex) + N_TXTOFF(ex)) {
		return -ENOEXEC;
	}

	/*
	 * Requires a mmap handler. This prevents people from using a.out
	 * as part of an exploit attack against /proc-related vulnerabilities.
	 */
	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
		return -ENOEXEC;

	fd_offset = N_TXTOFF(ex);

	/* Check initial limits. This avoids letting people circumvent
	 * size limits imposed on them by creating programs with large
	 * arrays in the data or bss.
	 */
	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim >= RLIM_INFINITY)
		rlim = ~0;
	if (ex.a_data + ex.a_bss > rlim)
		return -ENOMEM;

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		return retval;

	/* OK, this is the point of no return */
#ifdef __alpha__
	SET_AOUT_PERSONALITY(bprm, ex);
#else
	set_personality(PER_LINUX);
#endif
	setup_new_exec(bprm);

	current->mm->end_code = ex.a_text +
		(current->mm->start_code = N_TXTADDR(ex));
	current->mm->end_data = ex.a_data +
		(current->mm->start_data = N_DATADDR(ex));
	current->mm->brk = ex.a_bss +
		(current->mm->start_brk = N_BSSADDR(ex));
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;

	install_exec_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;

	if (N_MAGIC(ex) == OMAGIC) {
		unsigned long text_addr, map_size;
		loff_t pos;

		text_addr = N_TXTADDR(ex);

#ifdef __alpha__
		pos = fd_offset;
		map_size = ex.a_text + ex.a_data + PAGE_SIZE - 1;
#else
		pos = 32;
		map_size = ex.a_text + ex.a_data;
#endif
		down_write(&current->mm->mmap_sem);
		error = do_brk(text_addr & PAGE_MASK, map_size);
		up_write(&current->mm->mmap_sem);
		if (error != (text_addr & PAGE_MASK)) {
			send_sig(SIGKILL, current, 0);
			return error;
		}

		error = bprm->file->f_op->read(bprm->file,
					(char __user *)text_addr,
					ex.a_text + ex.a_data, &pos);
		if ((signed long)error < 0) {
			send_sig(SIGKILL, current, 0);
			return error;
		}

		flush_icache_range(text_addr, text_addr + ex.a_text + ex.a_data);
	} else {
		if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
		    (N_MAGIC(ex) != NMAGIC) && printk_ratelimit())
		{
			printk(KERN_NOTICE "executable not page aligned\n");
		}
		if ((fd_offset & ~PAGE_MASK) != 0 && printk_ratelimit())
		{
			printk(KERN_WARNING
			       "fd_offset is not page aligned. Please convert program: %s\n",
			       bprm->file->f_path.dentry->d_name.name);
		}

		if (!bprm->file->f_op->mmap || ((fd_offset & ~PAGE_MASK) != 0)) {
			loff_t pos = fd_offset;
			down_write(&current->mm->mmap_sem);
			do_brk(N_TXTADDR(ex), ex.a_text + ex.a_data);
			up_write(&current->mm->mmap_sem);
			bprm->file->f_op->read(bprm->file,
					(char __user *)N_TXTADDR(ex),
					ex.a_text + ex.a_data, &pos);
			flush_icache_range((unsigned long) N_TXTADDR(ex),
					   (unsigned long) N_TXTADDR(ex) +
					   ex.a_text + ex.a_data);
			goto beyond_if;
		}

		down_write(&current->mm->mmap_sem);
		error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
				PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
				fd_offset);
		up_write(&current->mm->mmap_sem);

		if (error != N_TXTADDR(ex)) {
			send_sig(SIGKILL, current, 0);
			return error;
		}

		down_write(&current->mm->mmap_sem);
		error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
				PROT_READ | PROT_WRITE | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
				fd_offset + ex.a_text);
		up_write(&current->mm->mmap_sem);
		if (error != N_DATADDR(ex)) {
			send_sig(SIGKILL, current, 0);
			return error;
		}
	}
beyond_if:
	set_binfmt(&aout_format);

	retval = set_brk(current->mm->start_brk, current->mm->brk);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		return retval;
	}

	retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
	if (retval < 0) {
		/* Someone check-me: is this error path enough? */
		send_sig(SIGKILL, current, 0);
		return retval;
	}

	current->mm->start_stack =
		(unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
#ifdef __alpha__
	regs->gp = ex.a_gpvalue;
#endif
	start_thread(regs, ex.a_entry, current->mm->start_stack);
	return 0;
}
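/*
 * load_aout_library() maps a ZMAGIC/QMAGIC shared library at the page
 * containing its entry point: text and data come from one private file
 * mapping (or a plain read() when N_TXTOFF is not page aligned), and
 * any bss beyond the file-backed pages is set up with do_brk().
 */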
static int load_aout_library(struct file *file)
{
	struct inode *inode;
	unsigned long bss, start_addr, len;
	unsigned long error;
	int retval;
	struct exec ex;

	inode = file->f_path.dentry->d_inode;

	retval = -ENOEXEC;
	error = kernel_read(file, 0, (char *) &ex, sizeof(ex));
	if (error != sizeof(ex))
		goto out;

	/* We come in here for the regular a.out style of shared libraries */
	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
	    N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
	    i_size_read(inode) < ex.a_text + ex.a_data + N_SYMSIZE(ex) + N_TXTOFF(ex)) {
		goto out;
	}

	/*
	 * Requires a mmap handler. This prevents people from using a.out
	 * as part of an exploit attack against /proc-related vulnerabilities.
	 */
	if (!file->f_op || !file->f_op->mmap)
		goto out;

	if (N_FLAGS(ex))
		goto out;

	/* For QMAGIC, the starting address is 0x20 into the page.  We mask
	   this off to get the starting address for the page */

	start_addr = ex.a_entry & 0xfffff000;

	if ((N_TXTOFF(ex) & ~PAGE_MASK) != 0) {
		loff_t pos = N_TXTOFF(ex);

		if (printk_ratelimit())
		{
			printk(KERN_WARNING
			       "N_TXTOFF is not page aligned. Please convert library: %s\n",
			       file->f_path.dentry->d_name.name);
		}
		down_write(&current->mm->mmap_sem);
		do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
		up_write(&current->mm->mmap_sem);

		file->f_op->read(file, (char __user *)start_addr,
				 ex.a_text + ex.a_data, &pos);
		flush_icache_range((unsigned long) start_addr,
				   (unsigned long) start_addr + ex.a_text + ex.a_data);

		retval = 0;
		goto out;
	}
	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			N_TXTOFF(ex));
	up_write(&current->mm->mmap_sem);
	retval = error;
	if (error != start_addr)
		goto out;

	len = PAGE_ALIGN(ex.a_text + ex.a_data);
	bss = ex.a_text + ex.a_data + ex.a_bss;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(start_addr + len, bss - len);
		up_write(&current->mm->mmap_sem);
		retval = error;
		if (error != start_addr + len)
			goto out;
	}
	retval = 0;
out:
	return retval;
}

static int __init init_aout_binfmt(void)
{
	return register_binfmt(&aout_format);
}

static void __exit exit_aout_binfmt(void)
{
	unregister_binfmt(&aout_format);
}

core_initcall(init_aout_binfmt);
module_exit(exit_aout_binfmt);
MODULE_LICENSE("GPL");