Linux/arch/ia64/mm/fault.c


/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/uaccess.h>

extern int die(char *, struct pt_regs *, long);

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        int ret = 0;

        if (!user_mode(regs)) {
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, trap))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        return 0;
}
#endif

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset_k(address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || pud_bad(*pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        ptep = pte_offset_kernel(pmd, address);
        if (!ptep)
                return 0;

        pte = *ptep;
        return pte_present(pte);
}

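/*
 * Local bit positions mirroring VM_READ/VM_WRITE/VM_EXEC in <linux/mm.h>;
 * the compile-time check in ia64_do_page_fault() below verifies that the
 * two stay in sync.
 */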
#       define VM_READ_BIT      0
#       define VM_WRITE_BIT     1
#       define VM_EXEC_BIT      2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
        int signal = SIGSEGV, code = SEGV_MAPERR;
        struct vm_area_struct *vma, *prev_vma;
        struct mm_struct *mm = current->mm;
        struct siginfo si;
        unsigned long mask;
        int fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

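        /*
         * Translate the fault's ISR execute (X) and write (W) bits into the
         * matching VM_EXEC/VM_WRITE bit positions so they can be compared
         * directly against vma->vm_flags below.
         */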
        mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
                | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

        flags |= ((mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);

        /* mmap_sem is performance critical... */
        prefetchw(&mm->mmap_sem);

        /*
         * If we're in an interrupt or have no user context, we must not take the fault.
         */
        if (in_atomic() || !mm)
                goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
        /*
         * If the fault is in region 5 and we are in the kernel, we may already
         * hold the mmap_sem (the pfn_valid macro is called during mmap). There
         * is no vma for region 5 addresses anyway, so skip taking the semaphore
         * and go directly to the exception handling code.
         */
        if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
                goto bad_area_no_up;
#endif

        /*
         * This handles kprobes on user-space access instructions.
         */
        if (notify_page_fault(regs, TRAP_BRKPT))
                return;

retry:
        down_read(&mm->mmap_sem);

        vma = find_vma_prev(mm, address, &prev_vma);
        if (!vma && !prev_vma)
                goto bad_area;

        /*
         * find_vma_prev() returns vma such that address < vma->vm_end or NULL.
         *
         * It may find no vma: the last vm area could be the register backing
         * store that needs to expand upwards, in which case vma will be NULL
         * but prev_vma will be non-NULL.
         */
        if ((!vma && prev_vma) || (address < vma->vm_start))
                goto check_expansion;

  good_area:
        code = SEGV_ACCERR;

        /* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#       if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
            || (1 << VM_EXEC_BIT) != VM_EXEC)
#               error File is out of sync with <linux/mm.h>.  Please update.
#       endif

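        /*
         * A read fault (ISR.r) is rejected only if the vma permits neither
         * reading nor writing; all other access types demanded by the fault
         * must be present in vma->vm_flags.
         */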
        if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
                goto bad_area;

        if ((vma->vm_flags & mask) != mask)
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);

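        /*
         * If a fatal signal interrupted the fault while it was retrying,
         * mmap_sem has already been released (see __lock_page_or_retry()
         * in mm/filemap.c), so just return.
         */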
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                /*
                 * We ran out of memory, or some other thing happened
                 * to us that made us unable to handle the page fault
                 * gracefully.
                 */
                if (fault & VM_FAULT_OOM) {
                        goto out_of_memory;
                } else if (fault & VM_FAULT_SIGBUS) {
                        signal = SIGBUS;
                        goto bad_area;
                }
                BUG();
        }

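        /*
         * Account the fault as major or minor; if the first attempt had to
         * drop mmap_sem, retry once with FAULT_FLAG_TRIED set and further
         * retries disallowed.
         */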
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR)
                        current->maj_flt++;
                else
                        current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

                        /* No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        return;

  check_expansion:
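        /*
         * Two growable mappings may absorb the faulting address: a stack
         * vma (VM_GROWSDOWN) expanding down toward it, or the register
         * backing store (prev_vma, VM_GROWSUP) expanding up to meet it.
         */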
        if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
                if (!vma)
                        goto bad_area;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto bad_area;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                if (expand_stack(vma, address))
                        goto bad_area;
        } else {
                vma = prev_vma;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                /*
                 * Since the register backing store is accessed sequentially,
                 * we disallow growing it by more than a page at a time.
                 */
                if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
                        goto bad_area;
                if (expand_upwards(vma, address))
                        goto bad_area;
        }
        goto good_area;

  bad_area:
        up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault; set the "ed"
                 * bit in the psr to ensure forward progress.  (The target register will
                 * get a NaT for ld.s; lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }
        if (user_mode(regs)) {
                si.si_signo = signal;
                si.si_errno = 0;
                si.si_code = code;
                si.si_addr = (void __user *) address;
                si.si_isr = isr;
                si.si_flags = __ISR_VALID;
                force_sig_info(signal, &si, current);
                return;
        }

  no_context:
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault; set the "ed"
                 * bit in the psr to ensure forward progress.  (The target register will
                 * get a NaT for ld.s; lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

        /*
         * Since we have no vma's for region 5, we might get here even if the address is
         * valid, due to the VHPT walker inserting a non-present translation that becomes
         * stale. If that happens, the non-present fault handler already purged the stale
         * translation, which fixed the problem. So we check to see if the translation is
         * valid, and return if it is.
         */
        if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
                return;

        if (ia64_done_with_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to terminate things
         * with extreme prejudice.
         */
        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request at "
                       "virtual address %016lx\n", address);
        if (die("Oops", regs, isr))
                regs = NULL;
        bust_spinlocks(0);
        if (regs)
                do_exit(SIGKILL);
        return;

  out_of_memory:
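        /*
         * For a user-mode fault, hand the situation to the OOM killer via
         * pagefault_out_of_memory(); a kernel-mode fault takes the
         * no_context oops path instead.
         */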
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
}

