TOMOYO Linux Cross Reference
Linux/arch/csky/mm/fault.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/signal.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/version.h>
#include <linux/vt_kern.h>
#include <linux/extable.h>
#include <linux/uaccess.h>

#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/traps.h>
#include <asm/page.h>

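/*
 * Search the kernel exception tables for a fixup entry covering the
 * faulting instruction; if one exists, resume execution at its fixup
 * address instead of oopsing.
 */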
int fixup_exception(struct pt_regs *regs)
{
        const struct exception_table_entry *fixup;

        fixup = search_exception_tables(instruction_pointer(regs));
        if (fixup) {
                regs->pc = fixup->nextinsn;

                return 1;
        }

        return 0;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
                              unsigned long mmu_meh)
{
        struct vm_area_struct *vma = NULL;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        int si_code;
        int fault;
        unsigned long address = mmu_meh & PAGE_MASK;

        si_code = SEGV_MAPERR;

#ifndef CONFIG_CPU_HAS_TLBI
        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        if (unlikely(address >= VMALLOC_START) &&
            unlikely(address <= VMALLOC_END)) {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
                int offset = __pgd_offset(address);
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

                unsigned long pgd_base;

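                /* Read the pgd base the MMU is actually using, not tsk's. */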
                pgd_base = tlb_get_pgd();
                pgd = (pgd_t *)pgd_base + offset;
                pgd_k = init_mm.pgd + offset;

                if (!pgd_present(*pgd_k))
                        goto no_context;
                set_pgd(pgd, *pgd_k);

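                /* The pud level is folded here, so the pgd entry is reused directly. */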
                pud = (pud_t *)pgd;
                pud_k = (pud_t *)pgd_k;
                if (!pud_present(*pud_k))
                        goto no_context;

                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);
                if (!pmd_present(*pmd_k))
                        goto no_context;
                set_pmd(pmd, *pmd_k);

                pte_k = pte_offset_kernel(pmd_k, address);
                if (!pte_present(*pte_k))
                        goto no_context;
                return;
        }
#endif
        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;

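        /* Hold mmap_sem for read while we look up and validate the VMA. */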
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        si_code = SEGV_ACCERR;

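        /* Check that the VMA's protection bits actually permit this access. */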
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
                        goto bad_area;
        }
141 
142         /*
143          * If for any reason at all we couldn't handle the fault,
144          * make sure we exit gracefully rather than endlessly redo
145          * the fault.
146          */
147         fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0);
148         if (unlikely(fault & VM_FAULT_ERROR)) {
149                 if (fault & VM_FAULT_OOM)
150                         goto out_of_memory;
151                 else if (fault & VM_FAULT_SIGBUS)
152                         goto do_sigbus;
153                 else if (fault & VM_FAULT_SIGSEGV)
154                         goto bad_area;
155                 BUG();
156         }
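        /* A major fault required I/O; a minor fault was served from memory. */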
        if (fault & VM_FAULT_MAJOR)
                tsk->maj_flt++;
        else
                tsk->min_flt++;

        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                force_sig_fault(SIGSEGV, si_code, (void __user *)address, current);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        bust_spinlocks(1);
        pr_alert("Unable to handle kernel paging request at virtual "
                 "address 0x%08lx, pc: 0x%08lx\n", address, regs->pc);
        die_if_kernel("Oops", regs, write);

out_of_memory:
        /*
         * We ran out of memory: release mmap_sem (we arrive here holding
         * it), call the OOM killer, and return to userspace (which will
         * retry the fault, or kill us if we got oom-killed).
         */
        up_read(&mm->mmap_sem);
        pagefault_out_of_memory();
        return;

do_sigbus:
        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;

        force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current);
}
