
TOMOYO Linux Cross Reference
Linux/arch/arm/mm/fault.c


/*
 *  linux/arch/arm/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>

#include <asm/exception.h>
#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>

#include "fault.h"

#ifdef CONFIG_MMU

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
        int ret = 0;

        if (!user_mode(regs)) {
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, fsr))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
        return 0;
}
#endif

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;

        if (!mm)
                mm = &init_mm;

        printk(KERN_ALERT "pgd = %p\n", mm->pgd);
        pgd = pgd_offset(mm, addr);
        printk(KERN_ALERT "[%08lx] *pgd=%08llx",
                        addr, (long long)pgd_val(*pgd));

        do {
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                if (pgd_none(*pgd))
                        break;

                if (pgd_bad(*pgd)) {
                        printk("(bad)");
                        break;
                }

                pud = pud_offset(pgd, addr);
                if (PTRS_PER_PUD != 1)
                        printk(", *pud=%08llx", (long long)pud_val(*pud));

                if (pud_none(*pud))
                        break;

                if (pud_bad(*pud)) {
                        printk("(bad)");
                        break;
                }

                pmd = pmd_offset(pud, addr);
                if (PTRS_PER_PMD != 1)
                        printk(", *pmd=%08llx", (long long)pmd_val(*pmd));

                if (pmd_none(*pmd))
                        break;

                if (pmd_bad(*pmd)) {
                        printk("(bad)");
                        break;
                }

                /* We must not map this if we have highmem enabled */
                if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
                        break;

                pte = pte_offset_map(pmd, addr);
                printk(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
                printk(", *ppte=%08llx",
                       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
                pte_unmap(pte);
        } while (0);

        printk("\n");
}
#else                                   /* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif                                  /* CONFIG_MMU */
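
/*
 * Usage sketch (not part of this file): show_pte() is called from the
 * fault paths below, but a hypothetical debug helper could invoke it
 * directly.  Passing a NULL mm makes it walk init_mm, i.e. the kernel's
 * own page tables:
 *
 *        static void debug_dump_mapping(unsigned long addr)
 *        {
 *                show_pte(NULL, addr);        // dump pgd/pud/pmd/pte for addr
 *        }
 */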

/*
 * Oops.  The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
                  struct pt_regs *regs)
{
        /*
         * Are we prepared to handle this kernel fault?
         */
        if (fixup_exception(regs))
                return;

        /*
         * No handler, we'll have to terminate things with extreme prejudice.
         */
        bust_spinlocks(1);
        printk(KERN_ALERT
                "Unable to handle kernel %s at virtual address %08lx\n",
                (addr < PAGE_SIZE) ? "NULL pointer dereference" :
                "paging request", addr);

        show_pte(mm, addr);
        die("Oops", regs, fsr);
        bust_spinlocks(0);
        do_exit(SIGKILL);
}
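
/*
 * For reference: the "prepared" case above covers faults raised by uaccess
 * helpers such as get_user()/copy_from_user().  Those helpers record the
 * address of their faulting instruction in the exception table together
 * with a fixup address, so a recoverable kernel access needs no special
 * syntax at the call site (user_ptr below is hypothetical):
 *
 *        long val;
 *
 *        if (get_user(val, (long __user *)user_ptr))
 *                return -EFAULT;        // fault was fixed up, no oops
 *
 * Only when fixup_exception() finds no entry for the faulting PC do we
 * fall through to the oops path.
 */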

/*
 * Something tried to access memory that isn't in our memory map.
 * User mode accesses just cause a SIGSEGV.
 */
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
                unsigned int fsr, unsigned int sig, int code,
                struct pt_regs *regs)
{
        struct siginfo si;

#ifdef CONFIG_DEBUG_USER
        if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
            ((user_debug & UDBG_BUS)  && (sig == SIGBUS))) {
                printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
                       tsk->comm, sig, addr, fsr);
                show_pte(tsk->mm, addr);
                show_regs(regs);
        }
#endif

        tsk->thread.address = addr;
        tsk->thread.error_code = fsr;
        tsk->thread.trap_no = 14;
        si.si_signo = sig;
        si.si_errno = 0;
        si.si_code = code;
        si.si_addr = (void __user *)addr;
        force_sig_info(sig, &si, tsk);
}
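
/*
 * From the other side: the process receives the signal queued above with
 * si_addr set to the faulting address.  A minimal userspace handler that
 * inspects it might look like this (illustration only; fprintf() is not
 * async-signal-safe):
 *
 *        #include <signal.h>
 *        #include <stdio.h>
 *
 *        static void segv_handler(int sig, siginfo_t *si, void *ctx)
 *        {
 *                fprintf(stderr, "fault at %p, si_code %d\n",
 *                        si->si_addr, si->si_code);
 *        }
 *
 * installed via sigaction() with SA_SIGINFO set in sa_flags.
 */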

void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->active_mm;

        /*
         * If we are in kernel mode at this point, we
         * have no context to handle this fault with.
         */
        if (user_mode(regs))
                __do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
        else
                __do_kernel_fault(mm, addr, fsr, regs);
}

#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP         0x010000
#define VM_FAULT_BADACCESS      0x020000

/*
 * Check that the permissions on the VMA allow for the fault which occurred.
 * If we encountered a write fault, we must have write permission, otherwise
 * we allow any permission.
 */
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
        unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

        if (fsr & FSR_WRITE)
                mask = VM_WRITE;
        if (fsr & FSR_LNX_PF)
                mask = VM_EXEC;

        return vma->vm_flags & mask ? false : true;
}
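
/*
 * Concretely: a write to a present but read-only mapping takes this path
 * and is reported as SIGSEGV/SEGV_ACCERR rather than SEGV_MAPERR.  A
 * userspace sketch that triggers it (illustration only):
 *
 *        #include <sys/mman.h>
 *
 *        char *p = mmap(NULL, 4096, PROT_READ,
 *                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *        p[0] = 1;        // FSR_WRITE set, vma lacks VM_WRITE -> BADACCESS
 */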

static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
                unsigned int flags, struct task_struct *tsk)
{
        struct vm_area_struct *vma;
        int fault;

        vma = find_vma(mm, addr);
        fault = VM_FAULT_BADMAP;
        if (unlikely(!vma))
                goto out;
        if (unlikely(vma->vm_start > addr))
                goto check_stack;

        /*
         * Ok, we have a good vm_area for this
         * memory access, so we can handle it.
         */
good_area:
        if (access_error(fsr, vma)) {
                fault = VM_FAULT_BADACCESS;
                goto out;
        }

        return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);

check_stack:
        /* Don't allow expansion below FIRST_USER_ADDRESS */
        if (vma->vm_flags & VM_GROWSDOWN &&
            addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
                goto good_area;
out:
        return fault;
}
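
/*
 * VM_FAULT_BADMAP and VM_FAULT_BADACCESS (defined above) deliberately sit
 * above the generic VM_FAULT_* bits so they can travel through the same
 * int return value as handle_mm_fault()'s result.  do_page_fault() decodes
 * them below when choosing the signal:
 *
 *        code = fault == VM_FAULT_BADACCESS ?
 *                SEGV_ACCERR : SEGV_MAPERR;
 */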

static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, sig, code;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        if (notify_page_fault(regs, fsr))
                return 0;

        tsk = current;
        mm  = tsk->mm;

        /* Enable interrupts if they were enabled in the parent context. */
        if (interrupts_enabled(regs))
                local_irq_enable();

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault.
         */
        if (in_atomic() || !mm)
                goto no_context;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (fsr & FSR_WRITE)
                flags |= FAULT_FLAG_WRITE;

        /*
         * As per x86, we may deadlock here.  However, since the kernel only
         * validly references user space from well defined areas of the code,
         * we can bug out early if this is from code which shouldn't.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
                        goto no_context;
retry:
                down_read(&mm->mmap_sem);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded, in
                 * which case we'll have missed the might_sleep() from
                 * down_read().
                 */
                might_sleep();
#ifdef CONFIG_DEBUG_VM
                if (!user_mode(regs) &&
                    !search_exception_tables(regs->ARM_pc))
                        goto no_context;
#endif
        }

        fault = __do_page_fault(mm, addr, fsr, flags, tsk);

        /* If we need to retry but a fatal signal is pending, handle the
         * signal first. We do not need to release the mmap_sem because
         * it would already be released in __lock_page_or_retry in
         * mm/filemap.c. */
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return 0;

        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
         * likely that the page will be found in page cache at that point.
         */
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
        if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                        regs, addr);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                        regs, addr);
                }
                if (fault & VM_FAULT_RETRY) {
                        /*
                         * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                         * of starvation.
                         */
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;
                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);

        /*
         * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
         */
        if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
                return 0;

        /*
         * If we are in kernel mode at this point, we
         * have no context to handle this fault with.
         */
        if (!user_mode(regs))
                goto no_context;

        if (fault & VM_FAULT_OOM) {
                /*
                 * We ran out of memory, call the OOM killer, and return to
                 * userspace (which will retry the fault, or kill us if we
                 * got oom-killed).
                 */
                pagefault_out_of_memory();
                return 0;
        }

        if (fault & VM_FAULT_SIGBUS) {
                /*
                 * We had some memory, but were unable to
                 * successfully fix up this page fault.
                 */
                sig = SIGBUS;
                code = BUS_ADRERR;
        } else {
                /*
                 * Something tried to access memory that
                 * isn't in our memory map.
                 */
                sig = SIGSEGV;
                code = fault == VM_FAULT_BADACCESS ?
                        SEGV_ACCERR : SEGV_MAPERR;
        }

        __do_user_fault(tsk, addr, fsr, sig, code, regs);
        return 0;

no_context:
        __do_kernel_fault(mm, addr, fsr, regs);
        return 0;
}
#else                                   /* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        return 0;
}
#endif                                  /* CONFIG_MMU */

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fixup the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
                     struct pt_regs *regs)
{
        unsigned int index;
        pgd_t *pgd, *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        if (addr < TASK_SIZE)
                return do_page_fault(addr, fsr, regs);

        if (user_mode(regs))
                goto bad_area;

        index = pgd_index(addr);

        pgd = cpu_get_pgd() + index;
        pgd_k = init_mm.pgd + index;

        if (pgd_none(*pgd_k))
                goto bad_area;
        if (!pgd_present(*pgd))
                set_pgd(pgd, *pgd_k);

        pud = pud_offset(pgd, addr);
        pud_k = pud_offset(pgd_k, addr);

        if (pud_none(*pud_k))
                goto bad_area;
        if (!pud_present(*pud))
                set_pud(pud, *pud_k);

        pmd = pmd_offset(pud, addr);
        pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
        /*
         * Only one hardware entry per PMD with LPAE.
         */
        index = 0;
#else
        /*
         * On ARM one Linux PGD entry contains two hardware entries (see page
         * tables layout in pgtable.h). We normally guarantee that we always
         * fill both L1 entries. But create_mapping() doesn't follow the rule.
         * It can create individual L1 entries, so here we have to check
         * pmd_none() for the entry that really corresponds to the address,
         * not just for the first entry of the pair.
         */
        index = (addr >> SECTION_SHIFT) & 1;
#endif
        if (pmd_none(pmd_k[index]))
                goto bad_area;

        copy_pmd(pmd, pmd_k);
        return 0;

bad_area:
        do_bad_area(addr, fsr, regs);
        return 0;
}
#else                                   /* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
                     struct pt_regs *regs)
{
        return 0;
}
#endif                                  /* CONFIG_MMU */
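
/*
 * Worked example of the path above: vmalloc() installs new mappings only in
 * init_mm.  If a task whose pgd was populated before that vmalloc() call
 * later dereferences the new address from kernel mode, the first access
 * faults here and the missing first-level entry is copied across:
 *
 *        void *p = vmalloc(PAGE_SIZE);        // updates init_mm.pgd only
 *        memset(p, 0, PAGE_SIZE);        // may fault once per task pgd; the
 *                                        // handler copies the pmd and the
 *                                        // access is transparently retried
 */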

/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
#ifndef CONFIG_ARM_LPAE
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        do_bad_area(addr, fsr, regs);
        return 0;
}
#endif /* CONFIG_ARM_LPAE */

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        return 1;
}

struct fsr_info {
        int     (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
        int     sig;
        int     code;
        const char *name;
};

/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif
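
/*
 * Each entry of the included fsr_info[] table binds one fault-status value
 * to a handler, plus the signal and si_code delivered if that handler
 * returns nonzero.  A representative entry has this shape (see
 * fsr-2level.c / fsr-3level.c for the actual tables):
 *
 *        { do_translation_fault, SIGSEGV, SEGV_MAPERR,
 *          "section translation fault" },
 */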

void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
                int sig, int code, const char *name)
{
        if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
                BUG();

        fsr_info[nr].fn   = fn;
        fsr_info[nr].sig  = sig;
        fsr_info[nr].code = code;
        fsr_info[nr].name = name;
}
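
/*
 * Usage sketch: platform code can claim an implementation-defined fault
 * status during boot (the function is __init).  The fault number and
 * handler below are hypothetical; exceptions_init() at the bottom of this
 * file is an in-tree caller:
 *
 *        static int board_ext_abort(unsigned long addr, unsigned int fsr,
 *                                   struct pt_regs *regs)
 *        {
 *                return 0;        // abort handled, suppress the signal
 *        }
 *
 *        hook_fault_code(22, board_ext_abort, SIGBUS, BUS_ADRERR,
 *                        "external abort");
 */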

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
        struct siginfo info;

        if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
                return;

        printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
                inf->name, fsr, addr);

        info.si_signo = inf->sig;
        info.si_errno = 0;
        info.si_code  = inf->code;
        info.si_addr  = (void __user *)addr;
        arm_notify_die("", regs, &info, fsr, 0);
}
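
/*
 * Note the FSR_LNX_PF trick: the hardware never sets this bit in a real
 * fault status.  do_DataAbort() clears it before dispatch, while
 * do_PrefetchAbort() below ors it in, so shared handlers can tell
 * instruction fetches apart, as access_error() does:
 *
 *        if (fsr & FSR_LNX_PF)        // prefetch abort: the vma needs VM_EXEC
 *                mask = VM_EXEC;
 */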

void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
                 int sig, int code, const char *name)
{
        if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
                BUG();

        ifsr_info[nr].fn   = fn;
        ifsr_info[nr].sig  = sig;
        ifsr_info[nr].code = code;
        ifsr_info[nr].name = name;
}

asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
        const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
        struct siginfo info;

        if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
                return;

        printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
                inf->name, ifsr, addr);

        info.si_signo = inf->sig;
        info.si_errno = 0;
        info.si_code  = inf->code;
        info.si_addr  = (void __user *)addr;
        arm_notify_die("", regs, &info, ifsr, 0);
}

#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
        if (cpu_architecture() >= CPU_ARCH_ARMv6) {
                hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
                                "I-cache maintenance fault");
        }

        if (cpu_architecture() >= CPU_ARCH_ARMv7) {
                /*
                 * TODO: Access flag faults were introduced in ARMv6K;
                 * a runtime check for the 'K' extension is needed.
                 */
                hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
                                "section access flag fault");
                hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
                                "section access flag fault");
        }

        return 0;
}

arch_initcall(exceptions_init);
#endif
