TOMOYO Linux Cross Reference
Linux/arch/m32r/mm/fault.c

/*
 *  linux/arch/m32r/mm/fault.c
 *
 *  Copyright (c) 2001, 2002  Hitoshi Yamamoto, and H. Kondo
 *  Copyright (c) 2004  Naoto Sugai, NIIBE Yutaka
 *
 *  Some code taken from i386 version.
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>              /* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/m32r.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

extern void die(const char *, struct pt_regs *, long);

#ifndef CONFIG_SMP
asmlinkage unsigned int tlb_entry_i_dat;
asmlinkage unsigned int tlb_entry_d_dat;
#define tlb_entry_i tlb_entry_i_dat
#define tlb_entry_d tlb_entry_d_dat
#else
unsigned int tlb_entry_i_dat[NR_CPUS];
unsigned int tlb_entry_d_dat[NR_CPUS];
#define tlb_entry_i tlb_entry_i_dat[smp_processor_id()]
#define tlb_entry_d tlb_entry_d_dat[smp_processor_id()]
#endif
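
/*
 * tlb_entry_i / tlb_entry_d are the round-robin replacement indices
 * used by update_mmu_cache() below when it has to pick a victim TLB
 * slot; on SMP each CPU keeps its own index, since each CPU has its
 * own TLB.
 */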

extern void init_tlb(void);

/*======================================================================*
 * do_page_fault()
 *======================================================================*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * ARGUMENT:
 *  regs       : M32R SP reg.
 *  error_code : See below
 *  address    : M32R MMU MDEVA reg. (Operand ACE)
 *             : M32R BPC reg. (Instruction ACE)
 *
 * error_code :
 *  bit 0 == 0 means no page found, 1 means protection fault
 *  bit 1 == 0 means read, 1 means write
 *  bit 2 == 0 means kernel, 1 means user-mode
 *  bit 3 == 0 means data, 1 means instruction
 *======================================================================*/
#define ACE_PROTECTION          1
#define ACE_WRITE               2
#define ACE_USERMODE            4
#define ACE_INSTRUCTION         8
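
/*
 * Example: error_code == (ACE_USERMODE | ACE_WRITE) == 6 describes a
 * user-mode write to a not-present data page; with ACE_PROTECTION
 * also set (== 7) it would be a user-mode write protection fault.
 */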

asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
  unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long page, addr;
        unsigned long flags = 0;
        int fault;
        siginfo_t info;

        /*
         * If the IE bit was set in BPSW (i.e. interrupts were enabled
         * when the fault was taken), re-enable interrupts by setting
         * the PSW IE bit.
         */
        if (regs->psw & M32R_PSW_BIE)
                local_irq_enable();

        tsk = current;

        info.si_code = SEGV_MAPERR;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happens in kernel space
         * (error_code & ACE_USERMODE) == 0, and that the fault was not a
         * protection error (error_code & ACE_PROTECTION) == 0.
         */
        if (address >= TASK_SIZE && !(error_code & ACE_USERMODE))
                goto vmalloc_fault;

        mm = tsk->mm;

        /*
         * If we're in an interrupt or have no user context or are
         * running in an atomic region then we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;

        if (error_code & ACE_USERMODE)
                flags |= FAULT_FLAG_USER;

        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
         * erroneous fault occurring in a code path which already holds mmap_sem
         * we will deadlock attempting to validate the fault against the
         * address space.  Luckily the kernel only validly references user
         * space from well defined areas of code, which are listed in the
         * exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a deadlock.
         * Attempt to lock the address space, if we cannot we then validate the
         * source.  If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                /* bpc holds the address of the faulting instruction */
                if ((error_code & ACE_USERMODE) == 0 &&
                    !search_exception_tables(regs->bpc))
                        goto bad_area_nosemaphore;
                down_read(&mm->mmap_sem);
        }

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;

        if (error_code & ACE_USERMODE) {
                /*
                 * Accessing the stack below "spu" is always a bug.
                 * The "+ 4" is there due to the push instruction
                 * doing pre-decrement on the stack and that
                 * doesn't show up until later..
                 */
                if (address + 4 < regs->spu)
                        goto bad_area;
        }

        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        info.si_code = SEGV_ACCERR;
        switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
                default:        /* 3: write, present */
                        /* fall through */
                case ACE_WRITE: /* write, not present */
                        if (!(vma->vm_flags & VM_WRITE))
                                goto bad_area;
                        flags |= FAULT_FLAG_WRITE;
                        break;
                case ACE_PROTECTION:    /* read, present */
                case 0:         /* read, not present */
                        if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                                goto bad_area;
        }

        /*
         * For an instruction access exception, check that the area is
         * executable.
         */
        if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC))
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        addr = (address & PAGE_MASK);
        set_thread_fault_code(error_code);
        fault = handle_mm_fault(mm, vma, addr, flags);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                tsk->maj_flt++;
        else
                tsk->min_flt++;
        set_thread_fault_code(0);
        up_read(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (error_code & ACE_USERMODE) {
                tsk->thread.address = address;
                tsk->thread.error_code = error_code | (address >= TASK_SIZE);
                tsk->thread.trap_no = 14;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void __user *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault?  */
        if (fixup_exception(regs))
                return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        printk(KERN_ALERT " printing bpc:\n");
        printk("%08lx\n", regs->bpc);
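        /*
         * Dump the page-table entries mapping the faulting address by
         * hand: the word at MPTB holds the current page-table base,
         * from which we read the pde and, if it is present, the pte.
         */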
        page = *(unsigned long *)MPTB;
        page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
        printk(KERN_ALERT "*pde = %08lx\n", page);
        if (page & _PAGE_PRESENT) {
                page &= PAGE_MASK;
                address &= 0x003ff000;
                page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
                printk(KERN_ALERT "*pte = %08lx\n", page);
        }
        die("Oops", regs, error_code);
        bust_spinlocks(0);
        do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (!(error_code & ACE_USERMODE))
                goto no_context;
        pagefault_out_of_memory();
        return;

do_sigbus:
        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exception or die */
        if (!(error_code & ACE_USERMODE))
                goto no_context;

        tsk->thread.address = address;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 14;
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGBUS, &info, tsk);
        return;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

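                /*
                 * The word at MPTB is the page-table base the hardware
                 * is currently using; use it, not tsk->mm, to locate
                 * the pgd entry to synchronize.
                 */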
                pgd = (pgd_t *)*(unsigned long *)MPTB;
                pgd = offset + (pgd_t *)pgd;
                pgd_k = init_mm.pgd + offset;

                if (!pgd_present(*pgd_k))
                        goto no_context;

                /*
                 * set_pgd(pgd, *pgd_k); here would be useless on PAE
                 * and redundant with the set_pmd() on non-PAE.
                 */

                pmd = pmd_offset(pgd, address);
                pmd_k = pmd_offset(pgd_k, address);
                if (!pmd_present(*pmd_k))
                        goto no_context;
                set_pmd(pmd, *pmd_k);

                pte_k = pte_offset_kernel(pmd_k, address);
                if (!pte_present(*pte_k))
                        goto no_context;

                addr = (address & PAGE_MASK);
                set_thread_fault_code(error_code);
                update_mmu_cache(NULL, addr, pte_k);
                set_thread_fault_code(0);
                return;
        }
}

/*======================================================================*
 * update_mmu_cache()
 *======================================================================*/
#define TLB_MASK        (NR_TLB_ENTRIES - 1)
#define ITLB_END        (unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8))
#define DTLB_END        (unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8))
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
        pte_t *ptep)
{
        volatile unsigned long *entry1, *entry2;
        unsigned long pte_data, flags;
        unsigned int *entry_dat;
        int inst = get_thread_fault_code() & ACE_INSTRUCTION;
        int i;

        /* Ptrace may call this routine. */
        if (vma && current->active_mm != vma->vm_mm)
                return;

        local_irq_save(flags);

        vaddr = (vaddr & PAGE_MASK) | get_asid();

        pte_data = pte_val(*ptep);

#ifdef CONFIG_CHIP_OPSP
        entry1 = (unsigned long *)ITLB_BASE;
        for (i = 0; i < NR_TLB_ENTRIES; i++) {
                if (*entry1++ == vaddr) {
                        set_tlb_data(entry1, pte_data);
                        break;
                }
                entry1++;
        }
        entry2 = (unsigned long *)DTLB_BASE;
        for (i = 0; i < NR_TLB_ENTRIES; i++) {
                if (*entry2++ == vaddr) {
                        set_tlb_data(entry2, pte_data);
                        break;
                }
                entry2++;
        }
#else
        /*
         * Update TLB entries
         *  entry1: ITLB entry address
         *  entry2: DTLB entry address
         */
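        /*
         * Rough reading of the asm below: write vaddr to the MSVA
         * register and start a TLB search via MTOP; the loop at "1:"
         * polls MTOP until the search completes, then the matching
         * ITLB/DTLB entry addresses are read back starting at MIDXI
         * into entry1/entry2, and pte_data is stored into each entry's
         * data word.
         */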
        __asm__ __volatile__ (
                "seth   %0, #high(%4)   \n\t"
                "st     %2, @(%5, %0)   \n\t"
                "ldi    %1, #1          \n\t"
                "st     %1, @(%6, %0)   \n\t"
                "add3   r4, %0, %7      \n\t"
                ".fillinsn              \n"
                "1:                     \n\t"
                "ld     %1, @(%6, %0)   \n\t"
                "bnez   %1, 1b          \n\t"
                "ld     %0, @r4+        \n\t"
                "ld     %1, @r4         \n\t"
                "st     %3, @+%0        \n\t"
                "st     %3, @+%1        \n\t"
                : "=&r" (entry1), "=&r" (entry2)
                : "r" (vaddr), "r" (pte_data), "i" (MMU_REG_BASE),
                "i" (MSVA_offset), "i" (MTOP_offset), "i" (MIDXI_offset)
                : "r4", "memory"
        );
#endif

        if ((!inst && entry2 >= DTLB_END) || (inst && entry1 >= ITLB_END))
                goto notfound;

found:
        local_irq_restore(flags);

        return;

        /* Valid entry not found */
notfound:
        /*
         * Update ITLB or DTLB entry
         *  entry1: TLB entry address
         *  entry2: TLB base address
         */
        if (!inst) {
                entry2 = (unsigned long *)DTLB_BASE;
                entry_dat = &tlb_entry_d;
        } else {
                entry2 = (unsigned long *)ITLB_BASE;
                entry_dat = &tlb_entry_i;
        }
        entry1 = entry2 + (((*entry_dat - 1) & TLB_MASK) << 1);

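        /*
         * Scan backwards from the slot just below the round-robin
         * allocation index, wrapping at the TLB base, looking for an
         * entry whose valid bit is clear.
         */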
        for (i = 0 ; i < NR_TLB_ENTRIES ; i++) {
                if (!(entry1[1] & 2))   /* Valid bit check */
                        break;

                if (entry1 != entry2)
                        entry1 -= 2;
                else
                        entry1 += TLB_MASK << 1;
        }

        if (i >= NR_TLB_ENTRIES) {      /* Empty entry not found */
                entry1 = entry2 + (*entry_dat << 1);
                *entry_dat = (*entry_dat + 1) & TLB_MASK;
        }
        *entry1++ = vaddr;      /* Set TLB tag */
        set_tlb_data(entry1, pte_data);

        goto found;
}

/*======================================================================*
 * flush_tlb_page() : flushes one page
 *======================================================================*/
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        if (vma->vm_mm && mm_context(vma->vm_mm) != NO_CONTEXT) {
                unsigned long flags;

                local_irq_save(flags);
                page &= PAGE_MASK;
                page |= (mm_context(vma->vm_mm) & MMU_CONTEXT_ASID_MASK);
                __flush_tlb_page(page);
                local_irq_restore(flags);
        }
}

/*======================================================================*
 * flush_tlb_range() : flushes a range of pages
 *======================================================================*/
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm;

        mm = vma->vm_mm;
        if (mm_context(mm) != NO_CONTEXT) {
                unsigned long flags;
                int size;

                local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
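                /*
                 * If the range covers more than a quarter of the TLB,
                 * dropping the whole context (a new ASID is allocated
                 * on the next activation) is cheaper than flushing the
                 * range page by page.
                 */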
                if (size > (NR_TLB_ENTRIES / 4)) { /* Too many TLB entries to flush */
                        mm_context(mm) = NO_CONTEXT;
                        if (mm == current->mm)
                                activate_context(mm);
                } else {
                        unsigned long asid;

                        asid = mm_context(mm) & MMU_CONTEXT_ASID_MASK;
                        start &= PAGE_MASK;
                        end += (PAGE_SIZE - 1);
                        end &= PAGE_MASK;

                        start |= asid;
                        end   |= asid;
                        while (start < end) {
                                __flush_tlb_page(start);
                                start += PAGE_SIZE;
                        }
                }
                local_irq_restore(flags);
        }
}

/*======================================================================*
 * flush_tlb_mm() : flushes the specified mm context's TLB entries
 *======================================================================*/
void local_flush_tlb_mm(struct mm_struct *mm)
{
        /*
         * Invalidate all TLB entries of this process.  Instead of
         * invalidating each entry, we get a new MMU context.
         */
        if (mm_context(mm) != NO_CONTEXT) {
                unsigned long flags;

                local_irq_save(flags);
                mm_context(mm) = NO_CONTEXT;
                if (mm == current->mm)
                        activate_context(mm);
                local_irq_restore(flags);
        }
}

/*======================================================================*
 * flush_tlb_all() : flushes all processes' TLB entries
 *======================================================================*/
void local_flush_tlb_all(void)
{
        unsigned long flags;

        local_irq_save(flags);
        __flush_tlb_all();
        local_irq_restore(flags);
}

/*======================================================================*
 * init_mmu()
 *======================================================================*/
void __init init_mmu(void)
{
        tlb_entry_i = 0;
        tlb_entry_d = 0;
        mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
        set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
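        /* Point the MMU page-table base register (MPTB) at swapper_pg_dir. */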
        *(volatile unsigned long *)MPTB = (unsigned long)swapper_pg_dir;
}