
TOMOYO Linux Cross Reference
Linux/arch/csky/mm/fault.c

Version: linux-5.17-rc1

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/perf_event.h>

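/*
 * Look up the faulting instruction in the kernel exception table and,
 * if a fixup entry exists, redirect execution to its continuation.
 */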
int fixup_exception(struct pt_regs *regs)
{
        const struct exception_table_entry *fixup;

        fixup = search_exception_tables(instruction_pointer(regs));
        if (fixup) {
                regs->pc = fixup->nextinsn;

                return 1;
        }

        return 0;
}

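/*
 * C-SKY encodes the access type in the trap vector: both a store to an
 * invalid TLB entry (VEC_TLBINVALIDS) and a store to a write-protected
 * page (VEC_TLBMODIFIED) indicate a write access.
 */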
static inline bool is_write(struct pt_regs *regs)
{
        switch (trap_no(regs)) {
        case VEC_TLBINVALIDS:
                return true;
        case VEC_TLBMODIFIED:
                return true;
        }

        return false;
}

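/*
 * Without hardware ldex/stex, cmpxchg is emulated with a plain ldw/stw
 * pair.  If the stw half takes a TLB-modified fault, rewind the PC to
 * the ldw so the whole sequence is replayed once the fault is handled.
 */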
#ifdef CONFIG_CPU_HAS_LDSTEX
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
        return;
}
#else
extern unsigned long csky_cmpxchg_ldw;
extern unsigned long csky_cmpxchg_stw;
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
        if (trap_no(regs) != VEC_TLBMODIFIED)
                return;

        if (instruction_pointer(regs) == csky_cmpxchg_stw)
                instruction_pointer_set(regs, csky_cmpxchg_ldw);
        return;
}
#endif

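/*
 * Fault in kernel context: use an exception-table fixup if one exists,
 * otherwise oops and kill the task.
 */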
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
        current->thread.trap_no = trap_no(regs);

        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        bust_spinlocks(1);
        pr_alert("Unable to handle kernel paging request at virtual "
                 "addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc);
        die(regs, "Oops");
        do_exit(SIGKILL);
}

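/* Turn a VM_FAULT_ERROR result into an OOM kill, a SIGBUS, or an oops. */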
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
        current->thread.trap_no = trap_no(regs);

        if (fault & VM_FAULT_OOM) {
                /*
                 * We ran out of memory, call the OOM killer, and return to
                 * userspace (which will retry the fault, or kill us if we
                 * got oom-killed).
                 */
                if (!user_mode(regs)) {
                        no_context(regs, addr);
                        return;
                }
                pagefault_out_of_memory();
                return;
        } else if (fault & VM_FAULT_SIGBUS) {
                /* Kernel mode? Handle exceptions or die */
                if (!user_mode(regs)) {
                        no_context(regs, addr);
                        return;
                }
                do_trap(regs, SIGBUS, BUS_ADRERR, addr);
                return;
        }
        BUG();
}

static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
        /*
         * Something tried to access memory that isn't in our memory map.
         * Fix it, but check if it's kernel or user first.
         */
        mmap_read_unlock(mm);
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                do_trap(regs, SIGSEGV, code, addr);
                return;
        }

        no_context(regs, addr);
}

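/*
 * Handle a fault in the vmalloc area by copying the affected top-level
 * entries from the init_mm reference page table into the active page
 * table.  Runs without mmap_lock; see the NOTE in do_page_fault().
 */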
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
        pgd_t *pgd, *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;
        pte_t *pte_k;
        int offset;

        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                do_trap(regs, SIGSEGV, code, addr);
                return;
        }

        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         *
         * Do _not_ use "tsk" here. We might be inside
         * an interrupt in the middle of a task switch..
         */
        offset = pgd_index(addr);

        pgd = get_pgd() + offset;
        pgd_k = init_mm.pgd + offset;

        if (!pgd_present(*pgd_k)) {
                no_context(regs, addr);
                return;
        }
        set_pgd(pgd, *pgd_k);

        pud = (pud_t *)pgd;
        pud_k = (pud_t *)pgd_k;
        if (!pud_present(*pud_k)) {
                no_context(regs, addr);
                return;
        }

        pmd = pmd_offset(pud, addr);
        pmd_k = pmd_offset(pud_k, addr);
        if (!pmd_present(*pmd_k)) {
                no_context(regs, addr);
                return;
        }
        set_pmd(pmd, *pmd_k);

        pte_k = pte_offset_kernel(pmd_k, addr);
        if (!pte_present(*pte_k)) {
                no_context(regs, addr);
                return;
        }

        flush_tlb_one(addr);
}

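/* Reject the access if the vma's protection flags do not permit it. */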
static inline bool access_error(struct pt_regs *regs, struct vm_area_struct *vma)
{
        if (is_write(regs)) {
                if (!(vma->vm_flags & VM_WRITE))
                        return true;
        } else {
                if (unlikely(!vma_is_accessible(vma)))
                        return true;
        }
        return false;
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
        struct task_struct *tsk;
        struct vm_area_struct *vma;
        struct mm_struct *mm;
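        /* The faulting virtual address is latched in the MMU entryhi register. */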
        unsigned long addr = read_mmu_entryhi() & PAGE_MASK;
        unsigned int flags = FAULT_FLAG_DEFAULT;
        int code = SEGV_MAPERR;
        vm_fault_t fault;

        tsk = current;
        mm = tsk->mm;

        csky_cmpxchg_fixup(regs);

        if (kprobe_page_fault(regs, tsk->thread.trap_no))
                return;

        /*
         * Fault-in kernel-space virtual memory on-demand.
         * The 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
                vmalloc_fault(regs, code, addr);
                return;
        }

        /* Enable interrupts if they were enabled in the parent context. */
        if (likely(regs->sr & BIT(6)))
                local_irq_enable();

        /*
         * If we're in an interrupt, have no user context, or are running
         * in an atomic region, then we must not take the fault.
         */
        if (unlikely(faulthandler_disabled() || !mm)) {
                no_context(regs, addr);
                return;
        }

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;

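        /* Account this fault to perf's software page-fault counter. */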
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

        if (is_write(regs))
                flags |= FAULT_FLAG_WRITE;
retry:
        mmap_read_lock(mm);
        vma = find_vma(mm, addr);
        if (unlikely(!vma)) {
                bad_area(regs, mm, code, addr);
                return;
        }
        if (likely(vma->vm_start <= addr))
                goto good_area;
        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
                bad_area(regs, mm, code, addr);
                return;
        }
        if (unlikely(expand_stack(vma, addr))) {
                bad_area(regs, mm, code, addr);
                return;
        }

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it.
         */
good_area:
        code = SEGV_ACCERR;

        if (unlikely(access_error(regs, vma))) {
                bad_area(regs, mm, code, addr);
                return;
        }

        /*
         * If for any reason at all we could not handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, addr, flags, regs);

        /*
         * If we need to retry but a fatal signal is pending, handle the
         * signal first. We do not need to release the mmap_lock because it
         * would already be released in __lock_page_or_retry in mm/filemap.c.
         */
        if (fault_signal_pending(fault, regs)) {
                if (!user_mode(regs))
                        no_context(regs, addr);
                return;
        }

        if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
                flags |= FAULT_FLAG_TRIED;

                /*
                 * No need to mmap_read_unlock(mm) as we would
                 * have already released it in __lock_page_or_retry
                 * in mm/filemap.c.
                 */
                goto retry;
        }

        mmap_read_unlock(mm);

        if (unlikely(fault & VM_FAULT_ERROR)) {
                mm_fault_error(regs, addr, fault);
                return;
        }
        return;
}
