TOMOYO Linux Cross Reference
Linux/arch/tile/mm/fault.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * From i386 code copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>              /* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/traps.h>
#include <asm/syscalls.h>

#include <arch/interrupts.h>

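/*
 * Build a siginfo and force-deliver the signal for a fault.  Faults
 * that would kill init or the idle task are fatal, so panic instead.
 */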
static noinline void force_sig_info_fault(const char *type, int si_signo,
                                          int si_code, unsigned long address,
                                          int fault_num,
                                          struct task_struct *tsk,
                                          struct pt_regs *regs)
{
        siginfo_t info;

        if (unlikely(tsk->pid < 2)) {
                panic("Signal %d (code %d) at %#lx sent to %s!",
                      si_signo, si_code & 0xffff, address,
                      is_idle_task(tsk) ? "the idle task" : "init");
        }

        info.si_signo = si_signo;
        info.si_errno = 0;
        info.si_code = si_code;
        info.si_addr = (void __user *)address;
        info.si_trapno = fault_num;
        trace_unhandled_signal(type, regs, address, si_signo);
        force_sig_info(si_signo, &info, tsk);
}

#ifndef __tilegx__
/*
 * Synthesize the fault a PL0 process would get by doing a word-load of
 * an unaligned address or a high kernel address.
 */
SYSCALL_DEFINE1(cmpxchg_badaddr, unsigned long, address)
{
        struct pt_regs *regs = current_pt_regs();

        if (address >= PAGE_OFFSET)
                force_sig_info_fault("atomic segfault", SIGSEGV, SEGV_MAPERR,
                                     address, INT_DTLB_MISS, current, regs);
        else
                force_sig_info_fault("atomic alignment fault", SIGBUS,
                                     BUS_ADRALN, address,
                                     INT_UNALIGN_DATA, current, regs);

        /*
         * Adjust pc to point at the actual instruction, which is unusual
         * for syscalls, but appropriate here since we are claiming that
         * a syscall swint1 caused a page fault or bus error.
         */
        regs->pc -= 8;

        /*
         * Mark this as a caller-save interrupt, like a normal page fault,
         * so that when we go through the signal handler path we will
         * properly restore r0, r1, and r2 for the signal handler arguments.
         */
        regs->flags |= PT_FLAGS_CALLER_SAVES;

        return 0;
}
#endif

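/*
 * Copy the kernel pmd entry covering "address" from the reference
 * page table (init_mm.pgd) into the given pgd, returning NULL if the
 * kernel has no mapping there either.
 */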
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
        unsigned index = pgd_index(address);
        pgd_t *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd += index;
        pgd_k = init_mm.pgd + index;

        if (!pgd_present(*pgd_k))
                return NULL;

        pud = pud_offset(pgd, address);
        pud_k = pud_offset(pgd_k, address);
        if (!pud_present(*pud_k))
                return NULL;

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;
        if (!pmd_present(*pmd))
                set_pmd(pmd, *pmd_k);
        else
                BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
        return pmd_k;
}

/*
 * Handle a fault on the vmalloc area.
 */
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
        pmd_t *pmd_k;
        pte_t *pte_k;

        /* Make sure we are in vmalloc area */
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;

        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         */
        pmd_k = vmalloc_sync_one(pgd, address);
        if (!pmd_k)
                return -1;
        pte_k = pte_offset_kernel(pmd_k, address);
        if (!pte_present(*pte_k))
                return -1;
        return 0;
}

/* Wait until this PTE has completed migration. */
static void wait_for_migration(pte_t *pte)
{
        if (pte_migrating(*pte)) {
                /*
                 * Wait until the migrator fixes up this pte.
                 * We scale the loop count by the clock rate so we'll wait for
                 * a few seconds here.
                 */
                int retries = 0;
                int bound = get_clock_rate();
                while (pte_migrating(*pte)) {
                        barrier();
                        if (++retries > bound)
                                panic("Hit migrating PTE (%#llx) and page PFN %#lx still migrating",
                                      pte->val, pte_pfn(*pte));
                }
        }
}

/*
 * It's not generally safe to use "current" to get the page table pointer,
 * since we might be running an oprofile interrupt in the middle of a
 * task switch.
 */
static pgd_t *get_current_pgd(void)
{
        HV_Context ctx = hv_inquire_context();
        unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
        struct page *pgd_page = pfn_to_page(pgd_pfn);
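        /* The pgd must be in the direct map for the __va() below to be valid. */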
        BUG_ON(PageHighMem(pgd_page));
        return (pgd_t *) __va(ctx.page_table);
}

/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 *
 * If we find a migrating PTE while we're in an NMI context, and we're
 * at a PC that has a registered exception handler, we don't wait,
 * since this thread may (e.g.) have been interrupted while migrating
 * its own stack, which would then cause us to self-deadlock.
 */
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
                                unsigned long address, unsigned long pc,
                                int is_kernel_mode, int write)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t pteval;

        if (pgd_addr_invalid(address))
                return 0;

        pgd += pgd_index(address);
        pud = pud_offset(pgd, address);
        if (!pud || !pud_present(*pud))
                return 0;
        pmd = pmd_offset(pud, address);
        if (!pmd || !pmd_present(*pmd))
                return 0;
        pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
                pte_offset_kernel(pmd, address);
        pteval = *pte;
        if (pte_migrating(pteval)) {
                if (in_nmi() && search_exception_tables(pc))
                        return 0;
                wait_for_migration(pte);
                return 1;
        }

        if (!is_kernel_mode || !pte_present(pteval))
                return 0;
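        /*
         * A present kernel PTE: report the fault as handled (so the
         * access is simply retried) if the PTE already permits the
         * attempted access, i.e. the downcall raced with the PTE fixup.
         */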
        if (fault_num == INT_ITLB_MISS) {
                if (pte_exec(pteval))
                        return 1;
        } else if (write) {
                if (pte_write(pteval))
                        return 1;
        } else {
                if (pte_read(pteval))
                        return 1;
        }

        return 0;
}

/*
 * This routine is responsible for faulting in user pages.
 * It passes the work off to one of the appropriate routines.
 * It returns true if the fault was successfully handled.
 */
static int handle_page_fault(struct pt_regs *regs,
                             int fault_num,
                             int is_page_fault,
                             unsigned long address,
                             int write)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long stack_offset;
        int fault;
        int si_code;
        int is_kernel_mode;
        pgd_t *pgd;
        unsigned int flags;

        /* on TILE, protection faults are always writes */
        if (!is_page_fault)
                write = 1;

        flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        is_kernel_mode = !user_mode(regs);

        tsk = validate_current();

        /*
         * Check to see if we might be overwriting the stack, and bail
         * out if so.  The page fault code is a relatively likely
         * place to get trapped in an infinite regress, and once we
         * overwrite the whole stack, it becomes very hard to recover.
         */
        stack_offset = stack_pointer & (THREAD_SIZE-1);
        if (stack_offset < THREAD_SIZE / 8) {
                pr_alert("Potential stack overrun: sp %#lx\n", stack_pointer);
                show_regs(regs);
                pr_alert("Killing current process %d/%s\n",
                         tsk->pid, tsk->comm);
                do_group_exit(SIGKILL);
        }

        /*
         * Early on, we need to check for migrating PTE entries;
         * see homecache.c.  If we find a migrating PTE, we wait until
         * the backing page claims to be done migrating, then we proceed.
         * For kernel PTEs, we rewrite the PTE and return and retry.
         * Otherwise, we treat the fault like a normal "no PTE" fault,
         * rather than trying to patch up the existing PTE.
         */
        pgd = get_current_pgd();
        if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
                                 is_kernel_mode, write))
                return 1;

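        /* Assume a missing or bad mapping until we find the vma. */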
        si_code = SEGV_MAPERR;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happens in kernel space
         * and that the fault was not a protection fault.
         */
        if (unlikely(address >= TASK_SIZE &&
                     !is_arch_mappable_range(address, 0))) {
                if (is_kernel_mode && is_page_fault &&
                    vmalloc_fault(pgd, address) >= 0)
                        return 1;
                /*
                 * Don't take the mm semaphore here. If we fixup a prefetch
                 * fault we could otherwise deadlock.
                 */
                mm = NULL;  /* happy compiler */
                vma = NULL;
                goto bad_area_nosemaphore;
        }

        /*
         * If we're trying to touch user-space addresses, we must
         * be either at PL0, or else with interrupts enabled in the
         * kernel, so either way we can re-enable interrupts here
         * unless we are doing atomic access to user space with
         * interrupts disabled.
         */
        if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
                local_irq_enable();

        mm = tsk->mm;

        /*
         * If we're in an interrupt, have no user context, or are running
         * in a region with pagefaults disabled, then we must not take the fault.
         */
        if (pagefault_disabled() || !mm) {
                vma = NULL;  /* happy compiler */
                goto bad_area_nosemaphore;
        }

        if (!is_kernel_mode)
                flags |= FAULT_FLAG_USER;

        /*
         * When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
         * erroneous fault occurring in a code path which already holds mmap_sem
         * we will deadlock attempting to validate the fault against the
         * address space.  Luckily the kernel only validly references user
         * space from well defined areas of code, which are listed in the
         * exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a deadlock.
         * Attempt to lock the address space, if we cannot we then validate the
         * source.  If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (is_kernel_mode &&
                    !search_exception_tables(regs->pc)) {
                        vma = NULL;  /* happy compiler */
                        goto bad_area_nosemaphore;
                }

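        /*
         * The retry path lands here so that the second attempt takes
         * mmap_sem with a blocking down_read().
         */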
retry:
                down_read(&mm->mmap_sem);
        }

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (regs->sp < PAGE_OFFSET) {
                /*
                 * accessing the stack below sp is always a bug.
                 */
                if (address < regs->sp)
                        goto bad_area;
        }
        if (expand_stack(vma, address))
                goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it.
 */
good_area:
        si_code = SEGV_ACCERR;
        if (fault_num == INT_ITLB_MISS) {
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
        } else if (write) {
#ifdef TEST_VERIFY_AREA
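                /*
                 * Vestigial debug check inherited from i386 (cs/eip are
                 * not tile registers); TEST_VERIFY_AREA is not defined here.
                 */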
                if (!is_page_fault && regs->cs == KERNEL_CS)
                        pr_err("WP fault at " REGFMT "\n", regs->eip);
#endif
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
        } else {
                if (!is_page_fault || !(vma->vm_flags & VM_READ))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);

        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return 0;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR)
                        tsk->maj_flt++;
                else
                        tsk->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

                        /*
                         * No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
                        goto retry;
                }
        }

#if CHIP_HAS_TILE_DMA()
        /* If this was a DMA TLB fault, restart the DMA engine. */
        switch (fault_num) {
        case INT_DMATLB_MISS:
        case INT_DMATLB_MISS_DWNCL:
        case INT_DMATLB_ACCESS:
        case INT_DMATLB_ACCESS_DWNCL:
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
                break;
        }
#endif

        up_read(&mm->mmap_sem);
        return 1;

/*
 * Something tried to access memory that isn't in our memory map.
 * Fix it, but check if it's kernel or user first.
 */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (!is_kernel_mode) {
                /*
                 * It's possible to have interrupts off here.
                 */
                local_irq_enable();

                force_sig_info_fault("segfault", SIGSEGV, si_code, address,
                                     fault_num, tsk, regs);
                return 0;
        }

no_context:
        /* Are we prepared to handle this kernel fault?  */
        if (fixup_exception(regs))
                return 0;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

        bust_spinlocks(1);

        /* FIXME: no lookup_address() yet */
#ifdef SUPPORT_LOOKUP_ADDRESS
        if (fault_num == INT_ITLB_MISS) {
                pte_t *pte = lookup_address(address);

                if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
                        pr_crit("kernel tried to execute non-executable page - exploit attempt? (uid: %d)\n",
                                current->uid);
        }
#endif
        if (address < PAGE_SIZE)
                pr_alert("Unable to handle kernel NULL pointer dereference\n");
        else
                pr_alert("Unable to handle kernel paging request\n");
        pr_alert(" at virtual address " REGFMT ", pc " REGFMT "\n",
                 address, regs->pc);

        show_regs(regs);

        if (unlikely(tsk->pid < 2)) {
                panic("Kernel page fault running %s!",
                      is_idle_task(tsk) ? "the idle task" : "init");
        }

        /*
         * More FIXME: we should probably copy the i386 code here and
         * implement a generic die() routine.  Not today.
         */
#ifdef SUPPORT_DIE
        die("Oops", regs);
#endif
        bust_spinlocks(0);

        do_group_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_kernel_mode)
                goto no_context;
        pagefault_out_of_memory();
        return 0;

do_sigbus:
        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exceptions or die */
        if (is_kernel_mode)
                goto no_context;

        force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
                             fault_num, tsk, regs);
        return 0;
}

#ifndef __tilegx__

/* We must release ICS before panicking or we won't get anywhere. */
#define ics_panic(fmt, ...)                                     \
do {                                                            \
        __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);        \
        panic(fmt, ##__VA_ARGS__);                              \
} while (0)

/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_K_x registers,
 * since that might indicate we have not yet squirreled the SPR
 * contents away and thus cannot safely take a recursive interrupt.
 * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
 *
 * Note that this routine is called before homecache_tlb_defer_enter(),
 * which means that we can properly unlock any atomics that might
 * be used there (good), but also means we must be very sensitive
 * to not touch any data structures that might be located in memory
 * that could migrate, as we could be entering the kernel on a dataplane
 * cpu that has been deferring kernel TLB updates.  This means, for
 * example, that we can't migrate init_mm or its pgd.
 */
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
                                      unsigned long address,
                                      unsigned long info)
{
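        /* "info" encodes the faulting PC with the write flag in its low bit. */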
        unsigned long pc = info & ~1;
        int write = info & 1;
        pgd_t *pgd = get_current_pgd();

        /* Retval is 1 at first since we will handle the fault fully. */
        struct intvec_state state = {
                do_page_fault, fault_num, address, write, 1
        };

        /* Validate that we are plausibly in the right routine. */
        if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
            (fault_num != INT_DTLB_MISS &&
             fault_num != INT_DTLB_ACCESS)) {
                unsigned long old_pc = regs->pc;
                regs->pc = pc;
                ics_panic("Bad ICS page fault args: old PC %#lx, fault %d/%d at %#lx",
                          old_pc, fault_num, write, address);
        }

        /* We might be faulting on a vmalloc page, so check that first. */
        if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
                return state;

        /*
         * If we faulted with ICS set in sys_cmpxchg, we are providing
         * a user syscall service that should generate a signal on
         * fault.  We didn't set up a kernel stack on initial entry to
         * sys_cmpxchg, but instead had one set up by the fault, which
         * (because sys_cmpxchg never releases ICS) came to us via the
         * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
         * still referencing the original user code.  We release the
         * atomic lock and rewrite pt_regs so that it appears that we
         * came from user-space directly, and after we finish the
         * fault we'll go back to user space and re-issue the swint.
         * This way the backtrace information is correct if we need to
         * emit a stack dump at any point while handling this.
         *
         * Must match register use in sys_cmpxchg().
         */
        if (pc >= (unsigned long) sys_cmpxchg &&
            pc < (unsigned long) __sys_cmpxchg_end) {
#ifdef CONFIG_SMP
                /* Don't unlock before we could have locked. */
                if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
                        int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
                        __atomic_fault_unlock(lock_ptr);
                }
#endif
                regs->sp = regs->regs[27];
        }

        /*
         * We can also fault in the atomic assembly, in which
         * case we use the exception table to do the first-level fixup.
         * We may re-fixup again in the real fault handler if it
         * turns out the faulting address is just bad, and not,
         * for example, migrating.
         */
        else if (pc >= (unsigned long) __start_atomic_asm_code &&
                 pc < (unsigned long) __end_atomic_asm_code) {
                const struct exception_table_entry *fixup;
#ifdef CONFIG_SMP
                /* Unlock the atomic lock. */
                int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
                __atomic_fault_unlock(lock_ptr);
#endif
                fixup = search_exception_tables(pc);
                if (!fixup)
                        ics_panic("ICS atomic fault not in table: PC %#lx, fault %d",
                                  pc, fault_num);
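                /* Resume at the fixup landing pad with kernel PL and ICS cleared. */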
                regs->pc = fixup->fixup;
                regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
        }

        /*
         * Now that we have released the atomic lock (if necessary),
         * it's safe to spin if the PTE that caused the fault was migrating.
         */
        if (fault_num == INT_DTLB_ACCESS)
                write = 1;
        if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
                return state;

        /* Return zero so that we continue on with normal fault handling. */
        state.retval = 0;
        return state;
}

#endif /* !__tilegx__ */

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it to handle_page_fault() for normal DTLB and
 * ITLB issues, and for DMA or SN processor faults when we are in user
 * space.  For the latter, if we're in kernel mode, we just save the
 * interrupt away appropriately and return immediately.  We can't do
 * page faults for user code while in kernel mode.
 */
static inline void __do_page_fault(struct pt_regs *regs, int fault_num,
                                   unsigned long address, unsigned long write)
{
        int is_page_fault;

#ifdef CONFIG_KPROBES
        /*
         * This is to notify the fault handler of the kprobes.  The
         * exception code is redundant as it is also carried in REGS,
         * but we pass it anyhow.
         */
        if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
                       regs->faultnum, SIGSEGV) == NOTIFY_STOP)
                return;
#endif

#ifdef __tilegx__
        /*
         * We don't need early do_page_fault_ics() support, since unlike
         * Pro we don't need to worry about unlocking the atomic locks.
         * There is only one current case in GX where we touch any memory
         * under ICS other than our own kernel stack, and we handle that
         * here.  (If we crash due to trying to touch our own stack,
         * we're in too much trouble for C code to help out anyway.)
         */
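        /*
         * If ICS was set, "write" encodes the faulting PC with the
         * write flag in its low bit (the tilegx analogue of the "info"
         * argument to do_page_fault_ics() above).
         */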
        if (write & ~1) {
                unsigned long pc = write & ~1;
                if (pc >= (unsigned long) __start_unalign_asm_code &&
                    pc < (unsigned long) __end_unalign_asm_code) {
                        struct thread_info *ti = current_thread_info();
                        /*
                         * Our EX_CONTEXT is still what it was from the
                         * initial unalign exception, but now we've faulted
                         * on the JIT page.  We would like to complete the
                         * page fault however is appropriate, and then retry
                         * the instruction that caused the unalign exception.
                         * Our state has been "corrupted" by setting the low
                         * bit in "sp", and stashing r0..r3 in the
                         * thread_info area, so we revert all of that, then
                         * continue as if this were a normal page fault.
                         */
                        regs->sp &= ~1UL;
                        regs->regs[0] = ti->unalign_jit_tmp[0];
                        regs->regs[1] = ti->unalign_jit_tmp[1];
                        regs->regs[2] = ti->unalign_jit_tmp[2];
                        regs->regs[3] = ti->unalign_jit_tmp[3];
                        write &= 1;
                } else {
                        pr_alert("%s/%d: ICS set at page fault at %#lx: %#lx\n",
                                 current->comm, current->pid, pc, address);
                        show_regs(regs);
                        do_group_exit(SIGKILL);
                }
        }
#else
        /* This case should have been handled by do_page_fault_ics(). */
        BUG_ON(write & ~1);
#endif

#if CHIP_HAS_TILE_DMA()
        /*
         * If it's a DMA fault, suspend the transfer while we're
         * handling the miss; we'll restart after it's handled.  If we
         * don't suspend, it's possible that this process could swap
         * out and back in, and restart the engine since the DMA is
         * still 'running'.
         */
        if (fault_num == INT_DMATLB_MISS ||
            fault_num == INT_DMATLB_ACCESS ||
            fault_num == INT_DMATLB_MISS_DWNCL ||
            fault_num == INT_DMATLB_ACCESS_DWNCL) {
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
                while (__insn_mfspr(SPR_DMA_USER_STATUS) &
                       SPR_DMA_STATUS__BUSY_MASK)
                        ;
        }
#endif

        /* Validate fault num and decide if this is a first-time page fault. */
        switch (fault_num) {
        case INT_ITLB_MISS:
        case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
        case INT_DMATLB_MISS:
        case INT_DMATLB_MISS_DWNCL:
#endif
                is_page_fault = 1;
                break;

        case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
        case INT_DMATLB_ACCESS:
        case INT_DMATLB_ACCESS_DWNCL:
#endif
                is_page_fault = 0;
                break;

        default:
                panic("Bad fault number %d in do_page_fault", fault_num);
        }

#if CHIP_HAS_TILE_DMA()
        if (!user_mode(regs)) {
                struct async_tlb *async;
                switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
                case INT_DMATLB_MISS:
                case INT_DMATLB_ACCESS:
                case INT_DMATLB_MISS_DWNCL:
                case INT_DMATLB_ACCESS_DWNCL:
                        async = &current->thread.dma_async_tlb;
                        break;
#endif
                default:
                        async = NULL;
                }
                if (async) {
                        /*
                         * No vmalloc check required, so we can allow
                         * interrupts immediately at this point.
                         */
                        local_irq_enable();

                        set_thread_flag(TIF_ASYNC_TLB);
                        if (async->fault_num != 0) {
                                panic("Second async fault %d; old fault was %d (%#lx/%ld)",
                                      fault_num, async->fault_num,
                                      address, write);
                        }
                        BUG_ON(fault_num == 0);
                        async->fault_num = fault_num;
                        async->is_fault = is_page_fault;
                        async->is_write = write;
                        async->address = address;
                        return;
                }
        }
#endif

        handle_page_fault(regs, fault_num, is_page_fault, address, write);
}

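/*
 * C-level entry point for page faults, called from the interrupt
 * vectors; it just forwards to the inlined worker above.
 */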
void do_page_fault(struct pt_regs *regs, int fault_num,
                   unsigned long address, unsigned long write)
{
        __do_page_fault(regs, fault_num, address, write);
}

#if CHIP_HAS_TILE_DMA()
/*
 * This routine effectively re-issues asynchronous page faults
 * when we are returning to user space.
 */
void do_async_page_fault(struct pt_regs *regs)
{
        struct async_tlb *async = &current->thread.dma_async_tlb;

        /*
         * Clear thread flag early.  If we re-interrupt while processing
         * code here, we will reset it and recall this routine before
         * returning to user space.
         */
        clear_thread_flag(TIF_ASYNC_TLB);

        if (async->fault_num) {
                /*
                 * Clear async->fault_num before calling the page-fault
                 * handler so that if we re-interrupt before returning
                 * from the function we have somewhere to put the
                 * information from the new interrupt.
                 */
                int fault_num = async->fault_num;
                async->fault_num = 0;
                handle_page_fault(regs, fault_num, async->is_fault,
                                  async->address, async->is_write);
        }
}
#endif /* CHIP_HAS_TILE_DMA() */

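/*
 * Make kernel (vmalloc-area) mappings visible in every page table.
 * On tilegx all top-level kernel pmds are shared, so this reduces to
 * a build-time check; on tilepro we walk pgd_list and sync each entry.
 */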
void vmalloc_sync_all(void)
{
#ifdef __tilegx__
        /* Currently all L1 kernel pmd's are static and shared. */
        BUILD_BUG_ON(pgd_index(VMALLOC_END - PAGE_SIZE) !=
                     pgd_index(VMALLOC_START));
#else
        /*
         * Note that races in the updates of insync and start aren't
         * problematic: insync can only get set bits added, and updates to
         * start are only improving performance (without affecting correctness
         * if undone).
         */
        static DECLARE_BITMAP(insync, PTRS_PER_PGD);
        static unsigned long start = PAGE_OFFSET;
        unsigned long address;

        BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK);
        for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) {
                if (!test_bit(pgd_index(address), insync)) {
                        unsigned long flags;
                        struct list_head *pos;

                        spin_lock_irqsave(&pgd_lock, flags);
                        list_for_each(pos, &pgd_list)
                                if (!vmalloc_sync_one(list_to_pgd(pos),
                                                      address)) {
                                        /* Must be at first entry in list. */
                                        BUG_ON(pos != pgd_list.next);
                                        break;
                                }
                        spin_unlock_irqrestore(&pgd_lock, flags);
                        if (pos != pgd_list.next)
                                set_bit(pgd_index(address), insync);
                }
                if (address == start && test_bit(pgd_index(address), insync))
                        start = address + PGDIR_SIZE;
        }
#endif
}
