TOMOYO Linux Cross Reference
Linux/arch/mips/kernel/traps.c

  1 /*
  2  * This file is subject to the terms and conditions of the GNU General Public
  3  * License.  See the file "COPYING" in the main directory of this archive
  4  * for more details.
  5  *
  6  * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
  7  * Copyright (C) 1995, 1996 Paul M. Antoine
  8  * Copyright (C) 1998 Ulf Carlsson
  9  * Copyright (C) 1999 Silicon Graphics, Inc.
 10  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 11  * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
 12  * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 13  */
 14 #include <linux/bug.h>
 15 #include <linux/compiler.h>
 16 #include <linux/kexec.h>
 17 #include <linux/init.h>
 18 #include <linux/kernel.h>
 19 #include <linux/module.h>
 20 #include <linux/mm.h>
 21 #include <linux/sched.h>
 22 #include <linux/smp.h>
 23 #include <linux/spinlock.h>
 24 #include <linux/kallsyms.h>
 25 #include <linux/bootmem.h>
 26 #include <linux/interrupt.h>
 27 #include <linux/ptrace.h>
 28 #include <linux/kgdb.h>
 29 #include <linux/kdebug.h>
 30 #include <linux/kprobes.h>
 31 #include <linux/notifier.h>
 32 #include <linux/kdb.h>
 33 #include <linux/irq.h>
 34 #include <linux/perf_event.h>
 35 
 36 #include <asm/bootinfo.h>
 37 #include <asm/branch.h>
 38 #include <asm/break.h>
 39 #include <asm/cop2.h>
 40 #include <asm/cpu.h>
 41 #include <asm/dsp.h>
 42 #include <asm/fpu.h>
 43 #include <asm/fpu_emulator.h>
 44 #include <asm/idle.h>
 45 #include <asm/mipsregs.h>
 46 #include <asm/mipsmtregs.h>
 47 #include <asm/module.h>
 48 #include <asm/pgtable.h>
 49 #include <asm/ptrace.h>
 50 #include <asm/sections.h>
 51 #include <asm/tlbdebug.h>
 52 #include <asm/traps.h>
 53 #include <asm/uaccess.h>
 54 #include <asm/watch.h>
 55 #include <asm/mmu_context.h>
 56 #include <asm/types.h>
 57 #include <asm/stacktrace.h>
 58 #include <asm/uasm.h>
 59 
 60 extern void check_wait(void);
 61 extern asmlinkage void rollback_handle_int(void);
 62 extern asmlinkage void handle_int(void);
 63 extern u32 handle_tlbl[];
 64 extern u32 handle_tlbs[];
 65 extern u32 handle_tlbm[];
 66 extern asmlinkage void handle_adel(void);
 67 extern asmlinkage void handle_ades(void);
 68 extern asmlinkage void handle_ibe(void);
 69 extern asmlinkage void handle_dbe(void);
 70 extern asmlinkage void handle_sys(void);
 71 extern asmlinkage void handle_bp(void);
 72 extern asmlinkage void handle_ri(void);
 73 extern asmlinkage void handle_ri_rdhwr_vivt(void);
 74 extern asmlinkage void handle_ri_rdhwr(void);
 75 extern asmlinkage void handle_cpu(void);
 76 extern asmlinkage void handle_ov(void);
 77 extern asmlinkage void handle_tr(void);
 78 extern asmlinkage void handle_fpe(void);
 79 extern asmlinkage void handle_mdmx(void);
 80 extern asmlinkage void handle_watch(void);
 81 extern asmlinkage void handle_mt(void);
 82 extern asmlinkage void handle_dsp(void);
 83 extern asmlinkage void handle_mcheck(void);
 84 extern asmlinkage void handle_reserved(void);
 85 
 86 void (*board_be_init)(void);
 87 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 88 void (*board_nmi_handler_setup)(void);
 89 void (*board_ejtag_handler_setup)(void);
 90 void (*board_bind_eic_interrupt)(int irq, int regset);
 91 void (*board_ebase_setup)(void);
 92 void __cpuinitdata(*board_cache_error_setup)(void);
 93 
 94 static void show_raw_backtrace(unsigned long reg29)
 95 {
 96         unsigned long *sp = (unsigned long *)(reg29 & ~3);
 97         unsigned long addr;
 98 
 99         printk("Call Trace:");
100 #ifdef CONFIG_KALLSYMS
101         printk("\n");
102 #endif
103         while (!kstack_end(sp)) {
104                 unsigned long __user *p =
105                         (unsigned long __user *)(unsigned long)sp++;
106                 if (__get_user(addr, p)) {
107                         printk(" (Bad stack address)");
108                         break;
109                 }
110                 if (__kernel_text_address(addr))
111                         print_ip_sym(addr);
112         }
113         printk("\n");
114 }
115 
116 #ifdef CONFIG_KALLSYMS
117 int raw_show_trace;
118 static int __init set_raw_show_trace(char *str)
119 {
120         raw_show_trace = 1;
121         return 1;
122 }
123 __setup("raw_show_trace", set_raw_show_trace);
124 #endif
125 
126 static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
127 {
128         unsigned long sp = regs->regs[29];
129         unsigned long ra = regs->regs[31];
130         unsigned long pc = regs->cp0_epc;
131 
132         if (!task)
133                 task = current;
134 
135         if (raw_show_trace || !__kernel_text_address(pc)) {
136                 show_raw_backtrace(sp);
137                 return;
138         }
139         printk("Call Trace:\n");
140         do {
141                 print_ip_sym(pc);
142                 pc = unwind_stack(task, &sp, pc, &ra);
143         } while (pc);
144         printk("\n");
145 }
146 
147 /*
148  * This routine abuses get_user()/put_user() to reference pointers
149  * with at least a bit of error checking ...
150  */
151 static void show_stacktrace(struct task_struct *task,
152         const struct pt_regs *regs)
153 {
154         const int field = 2 * sizeof(unsigned long);
155         long stackdata;
156         int i;
157         unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
158 
159         printk("Stack :");
160         i = 0;
161         while ((unsigned long) sp & (PAGE_SIZE - 1)) {
162                 if (i && ((i % (64 / field)) == 0))
163                         printk("\n       ");
164                 if (i > 39) {
165                         printk(" ...");
166                         break;
167                 }
168 
169                 if (__get_user(stackdata, sp++)) {
170                         printk(" (Bad stack address)");
171                         break;
172                 }
173 
174                 printk(" %0*lx", field, stackdata);
175                 i++;
176         }
177         printk("\n");
178         show_backtrace(task, regs);
179 }
180 
181 void show_stack(struct task_struct *task, unsigned long *sp)
182 {
183         struct pt_regs regs;
184         if (sp) {
185                 regs.regs[29] = (unsigned long)sp;
186                 regs.regs[31] = 0;
187                 regs.cp0_epc = 0;
188         } else {
189                 if (task && task != current) {
190                         regs.regs[29] = task->thread.reg29;
191                         regs.regs[31] = 0;
192                         regs.cp0_epc = task->thread.reg31;
193 #ifdef CONFIG_KGDB_KDB
194                 } else if (atomic_read(&kgdb_active) != -1 &&
195                            kdb_current_regs) {
196                         memcpy(&regs, kdb_current_regs, sizeof(regs));
197 #endif /* CONFIG_KGDB_KDB */
198                 } else {
199                         prepare_frametrace(&regs);
200                 }
201         }
202         show_stacktrace(task, &regs);
203 }
204 
205 static void show_code(unsigned int __user *pc)
206 {
207         long i;
208         unsigned short __user *pc16 = NULL;
209 
210         printk("\nCode:");
211 
212         if ((unsigned long)pc & 1)
213                 pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
214         for(i = -3 ; i < 6 ; i++) {
215                 unsigned int insn;
216                 if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
217                         printk(" (Bad address in epc)\n");
218                         break;
219                 }
220                 printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
221         }
222 }
223 
224 static void __show_regs(const struct pt_regs *regs)
225 {
226         const int field = 2 * sizeof(unsigned long);
227         unsigned int cause = regs->cp0_cause;
228         int i;
229 
230         show_regs_print_info(KERN_DEFAULT);
231 
232         /*
233          * Saved main processor registers
234          */
235         for (i = 0; i < 32; ) {
236                 if ((i % 4) == 0)
237                         printk("$%2d   :", i);
238                 if (i == 0)
239                         printk(" %0*lx", field, 0UL);
240                 else if (i == 26 || i == 27)
241                         printk(" %*s", field, "");
242                 else
243                         printk(" %0*lx", field, regs->regs[i]);
244 
245                 i++;
246                 if ((i % 4) == 0)
247                         printk("\n");
248         }
249 
250 #ifdef CONFIG_CPU_HAS_SMARTMIPS
251         printk("Acx    : %0*lx\n", field, regs->acx);
252 #endif
253         printk("Hi    : %0*lx\n", field, regs->hi);
254         printk("Lo    : %0*lx\n", field, regs->lo);
255 
256         /*
257          * Saved cp0 registers
258          */
259         printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
260                (void *) regs->cp0_epc);
261         printk("    %s\n", print_tainted());
262         printk("ra    : %0*lx %pS\n", field, regs->regs[31],
263                (void *) regs->regs[31]);
264 
265         printk("Status: %08x    ", (uint32_t) regs->cp0_status);
266 
267         if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
268                 if (regs->cp0_status & ST0_KUO)
269                         printk("KUo ");
270                 if (regs->cp0_status & ST0_IEO)
271                         printk("IEo ");
272                 if (regs->cp0_status & ST0_KUP)
273                         printk("KUp ");
274                 if (regs->cp0_status & ST0_IEP)
275                         printk("IEp ");
276                 if (regs->cp0_status & ST0_KUC)
277                         printk("KUc ");
278                 if (regs->cp0_status & ST0_IEC)
279                         printk("IEc ");
280         } else {
281                 if (regs->cp0_status & ST0_KX)
282                         printk("KX ");
283                 if (regs->cp0_status & ST0_SX)
284                         printk("SX ");
285                 if (regs->cp0_status & ST0_UX)
286                         printk("UX ");
287                 switch (regs->cp0_status & ST0_KSU) {
288                 case KSU_USER:
289                         printk("USER ");
290                         break;
291                 case KSU_SUPERVISOR:
292                         printk("SUPERVISOR ");
293                         break;
294                 case KSU_KERNEL:
295                         printk("KERNEL ");
296                         break;
297                 default:
298                         printk("BAD_MODE ");
299                         break;
300                 }
301                 if (regs->cp0_status & ST0_ERL)
302                         printk("ERL ");
303                 if (regs->cp0_status & ST0_EXL)
304                         printk("EXL ");
305                 if (regs->cp0_status & ST0_IE)
306                         printk("IE ");
307         }
308         printk("\n");
309 
310         printk("Cause : %08x\n", cause);
311 
312         cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
313         if (1 <= cause && cause <= 5)
314                 printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
315 
316         printk("PrId  : %08x (%s)\n", read_c0_prid(),
317                cpu_name_string());
318 }
319 
320 /*
321  * FIXME: really the generic show_regs should take a const pointer argument.
322  */
323 void show_regs(struct pt_regs *regs)
324 {
325         __show_regs((struct pt_regs *)regs);
326 }
327 
328 void show_registers(struct pt_regs *regs)
329 {
330         const int field = 2 * sizeof(unsigned long);
331 
332         __show_regs(regs);
333         print_modules();
334         printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
335                current->comm, current->pid, current_thread_info(), current,
336               field, current_thread_info()->tp_value);
337         if (cpu_has_userlocal) {
338                 unsigned long tls;
339 
340                 tls = read_c0_userlocal();
341                 if (tls != current_thread_info()->tp_value)
342                         printk("*HwTLS: %0*lx\n", field, tls);
343         }
344 
345         show_stacktrace(current, regs);
346         show_code((unsigned int __user *) regs->cp0_epc);
347         printk("\n");
348 }
349 
350 static int regs_to_trapnr(struct pt_regs *regs)
351 {
352         return (regs->cp0_cause >> 2) & 0x1f;
353 }
354 
355 static DEFINE_RAW_SPINLOCK(die_lock);
356 
357 void __noreturn die(const char *str, struct pt_regs *regs)
358 {
359         static int die_counter;
360         int sig = SIGSEGV;
361 #ifdef CONFIG_MIPS_MT_SMTC
362         unsigned long dvpret;
363 #endif /* CONFIG_MIPS_MT_SMTC */
364 
365         oops_enter();
366 
367         if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
368                 sig = 0;
369 
370         console_verbose();
371         raw_spin_lock_irq(&die_lock);
372 #ifdef CONFIG_MIPS_MT_SMTC
373         dvpret = dvpe();
374 #endif /* CONFIG_MIPS_MT_SMTC */
375         bust_spinlocks(1);
376 #ifdef CONFIG_MIPS_MT_SMTC
377         mips_mt_regdump(dvpret);
378 #endif /* CONFIG_MIPS_MT_SMTC */
379 
380         printk("%s[#%d]:\n", str, ++die_counter);
381         show_registers(regs);
382         add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
383         raw_spin_unlock_irq(&die_lock);
384 
385         oops_exit();
386 
387         if (in_interrupt())
388                 panic("Fatal exception in interrupt");
389 
390         if (panic_on_oops) {
391                 printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
392                 ssleep(5);
393                 panic("Fatal exception");
394         }
395 
396         if (regs && kexec_should_crash(current))
397                 crash_kexec(regs);
398 
399         do_exit(sig);
400 }
401 
402 extern struct exception_table_entry __start___dbe_table[];
403 extern struct exception_table_entry __stop___dbe_table[];
404 
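/*
 * The empty inline asm below appears to serve only to ensure that a
 * "__dbe_table" section exists, so that the linker-generated
 * __start___dbe_table/__stop___dbe_table symbols above always resolve,
 * even when no bus-error fixup entries are present.
 */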
405 __asm__(
406 "       .section        __dbe_table, \"a\"\n"
407 "       .previous                       \n");
408 
409 /* Given an address, look for it in the exception tables. */
410 static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
411 {
412         const struct exception_table_entry *e;
413 
414         e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
415         if (!e)
416                 e = search_module_dbetables(addr);
417         return e;
418 }
419 
420 asmlinkage void do_be(struct pt_regs *regs)
421 {
422         const int field = 2 * sizeof(unsigned long);
423         const struct exception_table_entry *fixup = NULL;
424         int data = regs->cp0_cause & 4;
425         int action = MIPS_BE_FATAL;
426 
427         /* XXX For now.  Fixme, this searches the wrong table ...  */
428         if (data && !user_mode(regs))
429                 fixup = search_dbe_tables(exception_epc(regs));
430 
431         if (fixup)
432                 action = MIPS_BE_FIXUP;
433 
434         if (board_be_handler)
435                 action = board_be_handler(regs, fixup != NULL);
436 
437         switch (action) {
438         case MIPS_BE_DISCARD:
439                 return;
440         case MIPS_BE_FIXUP:
441                 if (fixup) {
442                         regs->cp0_epc = fixup->nextinsn;
443                         return;
444                 }
445                 break;
446         default:
447                 break;
448         }
449 
450         /*
451          * Assume it would be too dangerous to continue ...
452          */
453         printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
454                data ? "Data" : "Instruction",
455                field, regs->cp0_epc, field, regs->regs[31]);
456         if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS)
457             == NOTIFY_STOP)
458                 return;
459 
460         die_if_kernel("Oops", regs);
461         force_sig(SIGBUS, current);
462 }
463 
464 /*
465  * ll/sc, rdhwr, sync emulation
466  */
467 
468 #define OPCODE 0xfc000000
469 #define BASE   0x03e00000
470 #define RT     0x001f0000
471 #define OFFSET 0x0000ffff
472 #define LL     0xc0000000
473 #define SC     0xe0000000
474 #define SPEC0  0x00000000
475 #define SPEC3  0x7c000000
476 #define RD     0x0000f800
477 #define FUNC   0x0000003f
478 #define SYNC   0x0000000f
479 #define RDHWR  0x0000003b
480 
481 /*  microMIPS definitions   */
482 #define MM_POOL32A_FUNC 0xfc00ffff
483 #define MM_RDHWR        0x00006b3c
484 #define MM_RS           0x001f0000
485 #define MM_RT           0x03e00000
486 
487 /*
488  * The ll_bit is cleared by r*_switch.S
489  */
490 
491 unsigned int ll_bit;
492 struct task_struct *ll_task;
493 
494 static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
495 {
496         unsigned long value, __user *vaddr;
497         long offset;
498 
499         /*
 500          * Analyse the ll instruction that just caused an RI exception
 501          * and compute the referenced address in vaddr.
502          */
503 
504         /* sign extend offset */
505         offset = opcode & OFFSET;
506         offset <<= 16;
507         offset >>= 16;
508 
509         vaddr = (unsigned long __user *)
510                 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
511 
512         if ((unsigned long)vaddr & 3)
513                 return SIGBUS;
514         if (get_user(value, vaddr))
515                 return SIGSEGV;
516 
517         preempt_disable();
518 
519         if (ll_task == NULL || ll_task == current) {
520                 ll_bit = 1;
521         } else {
522                 ll_bit = 0;
523         }
524         ll_task = current;
525 
526         preempt_enable();
527 
528         regs->regs[(opcode & RT) >> 16] = value;
529 
530         return 0;
531 }
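/*
 * Worked example of the decoding above (illustrative): "ll $t0, 4($a0)"
 * assembles to 0xc0880004.  With the masks defined earlier,
 * (opcode & OPCODE) == LL, (opcode & BASE) >> 21 == 4 ($a0),
 * (opcode & RT) >> 16 == 8 ($t0) and (opcode & OFFSET) == 4.  The
 * "offset <<= 16; offset >>= 16" pair sign-extends the 16-bit offset
 * field when long is 32 bits wide (the case on the ll/sc-less CPUs this
 * path serves), e.g. an offset field of 0xffff becomes -1.
 */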
532 
533 static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
534 {
535         unsigned long __user *vaddr;
536         unsigned long reg;
537         long offset;
538 
539         /*
 540          * Analyse the sc instruction that just caused an RI exception
 541          * and compute the referenced address in vaddr.
542          */
543 
544         /* sign extend offset */
545         offset = opcode & OFFSET;
546         offset <<= 16;
547         offset >>= 16;
548 
549         vaddr = (unsigned long __user *)
550                 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
551         reg = (opcode & RT) >> 16;
552 
553         if ((unsigned long)vaddr & 3)
554                 return SIGBUS;
555 
556         preempt_disable();
557 
558         if (ll_bit == 0 || ll_task != current) {
559                 regs->regs[reg] = 0;
560                 preempt_enable();
561                 return 0;
562         }
563 
564         preempt_enable();
565 
566         if (put_user(regs->regs[reg], vaddr))
567                 return SIGSEGV;
568 
569         regs->regs[reg] = 1;
570 
571         return 0;
572 }
573 
574 /*
 575  * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is, both
 576  * opcodes are supposed to result in coprocessor unusable exceptions if
577  * executed on ll/sc-less processors.  That's the theory.  In practice a
578  * few processors such as NEC's VR4100 throw reserved instruction exceptions
579  * instead, so we're doing the emulation thing in both exception handlers.
580  */
581 static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
582 {
583         if ((opcode & OPCODE) == LL) {
584                 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
585                                 1, regs, 0);
586                 return simulate_ll(regs, opcode);
587         }
588         if ((opcode & OPCODE) == SC) {
589                 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
590                                 1, regs, 0);
591                 return simulate_sc(regs, opcode);
592         }
593 
594         return -1;                      /* Must be something else ... */
595 }
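/*
 * Illustrative (hypothetical) user-space sequence that ends up in
 * simulate_ll()/simulate_sc() on an ll/sc-less CPU: a conventional
 * load-linked/store-conditional retry loop, sketched here as GCC
 * inline asm:
 *
 *	static inline void atomic_inc_sketch(volatile int *p)
 *	{
 *		int tmp;
 *
 *		__asm__ __volatile__(
 *		"1:	ll	%0, %1\n"
 *		"	addiu	%0, %0, 1\n"
 *		"	sc	%0, %1\n"
 *		"	beqz	%0, 1b\n"
 *		: "=&r" (tmp), "+m" (*p));
 *	}
 *
 * Each ll and sc traps individually (as a coprocessor unusable or
 * reserved instruction exception) and is emulated by the code above.
 */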
596 
597 /*
598  * Simulate trapping 'rdhwr' instructions to provide user accessible
599  * registers not implemented in hardware.
600  */
601 static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
602 {
603         struct thread_info *ti = task_thread_info(current);
604 
605         perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
606                         1, regs, 0);
607         switch (rd) {
608         case 0:         /* CPU number */
609                 regs->regs[rt] = smp_processor_id();
610                 return 0;
611         case 1:         /* SYNCI length */
612                 regs->regs[rt] = min(current_cpu_data.dcache.linesz,
613                                      current_cpu_data.icache.linesz);
614                 return 0;
615         case 2:         /* Read count register */
616                 regs->regs[rt] = read_c0_count();
617                 return 0;
618         case 3:         /* Count register resolution */
619                 switch (current_cpu_data.cputype) {
620                 case CPU_20KC:
621                 case CPU_25KF:
622                         regs->regs[rt] = 1;
623                         break;
624                 default:
625                         regs->regs[rt] = 2;
626                 }
627                 return 0;
628         case 29:
629                 regs->regs[rt] = ti->tp_value;
630                 return 0;
631         default:
632                 return -1;
633         }
634 }
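/*
 * Illustrative (hypothetical) example: the main consumer of this
 * emulation is the TLS pointer read used by userland on MIPS, roughly
 *
 *	void *tp;
 *	__asm__ ("rdhwr %0, $29" : "=r" (tp));
 *
 * On cores that lack RDHWR or the UserLocal register the instruction
 * traps, and case 29 above supplies ti->tp_value instead.
 */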
635 
636 static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
637 {
638         if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
639                 int rd = (opcode & RD) >> 11;
640                 int rt = (opcode & RT) >> 16;
641 
642                 simulate_rdhwr(regs, rd, rt);
643                 return 0;
644         }
645 
646         /* Not ours.  */
647         return -1;
648 }
649 
650 static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
651 {
652         if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
653                 int rd = (opcode & MM_RS) >> 16;
654                 int rt = (opcode & MM_RT) >> 21;
655                 simulate_rdhwr(regs, rd, rt);
656                 return 0;
657         }
658 
659         /* Not ours.  */
660         return -1;
661 }
662 
663 static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
664 {
665         if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
666                 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
667                                 1, regs, 0);
668                 return 0;
669         }
670 
671         return -1;                      /* Must be something else ... */
672 }
673 
674 asmlinkage void do_ov(struct pt_regs *regs)
675 {
676         siginfo_t info;
677 
678         die_if_kernel("Integer overflow", regs);
679 
680         info.si_code = FPE_INTOVF;
681         info.si_signo = SIGFPE;
682         info.si_errno = 0;
683         info.si_addr = (void __user *) regs->cp0_epc;
684         force_sig_info(SIGFPE, &info, current);
685 }
686 
687 int process_fpemu_return(int sig, void __user *fault_addr)
688 {
689         if (sig == SIGSEGV || sig == SIGBUS) {
690                 struct siginfo si = {0};
691                 si.si_addr = fault_addr;
692                 si.si_signo = sig;
693                 if (sig == SIGSEGV) {
694                         if (find_vma(current->mm, (unsigned long)fault_addr))
695                                 si.si_code = SEGV_ACCERR;
696                         else
697                                 si.si_code = SEGV_MAPERR;
698                 } else {
699                         si.si_code = BUS_ADRERR;
700                 }
701                 force_sig_info(sig, &si, current);
702                 return 1;
703         } else if (sig) {
704                 force_sig(sig, current);
705                 return 1;
706         } else {
707                 return 0;
708         }
709 }
710 
711 /*
712  * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
713  */
714 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
715 {
716         siginfo_t info = {0};
717 
718         if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
719             == NOTIFY_STOP)
720                 return;
721         die_if_kernel("FP exception in kernel code", regs);
722 
723         if (fcr31 & FPU_CSR_UNI_X) {
724                 int sig;
725                 void __user *fault_addr = NULL;
726 
727                 /*
728                  * Unimplemented operation exception.  If we've got the full
729                  * software emulator on-board, let's use it...
730                  *
731                  * Force FPU to dump state into task/thread context.  We're
732                  * moving a lot of data here for what is probably a single
733                  * instruction, but the alternative is to pre-decode the FP
734                  * register operands before invoking the emulator, which seems
735                  * a bit extreme for what should be an infrequent event.
736                  */
 737                 /* Ensure 'resume' does not overwrite the saved FP context again. */
738                 lose_fpu(1);
739 
740                 /* Run the emulator */
741                 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
742                                                &fault_addr);
743 
744                 /*
745                  * We can't allow the emulated instruction to leave any of
 746                  * the cause bits set in $fcr31.
747                  */
748                 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
749 
750                 /* Restore the hardware register state */
751                 own_fpu(1);     /* Using the FPU again.  */
752 
753                 /* If something went wrong, signal */
754                 process_fpemu_return(sig, fault_addr);
755 
756                 return;
757         } else if (fcr31 & FPU_CSR_INV_X)
758                 info.si_code = FPE_FLTINV;
759         else if (fcr31 & FPU_CSR_DIV_X)
760                 info.si_code = FPE_FLTDIV;
761         else if (fcr31 & FPU_CSR_OVF_X)
762                 info.si_code = FPE_FLTOVF;
763         else if (fcr31 & FPU_CSR_UDF_X)
764                 info.si_code = FPE_FLTUND;
765         else if (fcr31 & FPU_CSR_INE_X)
766                 info.si_code = FPE_FLTRES;
767         else
768                 info.si_code = __SI_FAULT;
769         info.si_signo = SIGFPE;
770         info.si_errno = 0;
771         info.si_addr = (void __user *) regs->cp0_epc;
772         force_sig_info(SIGFPE, &info, current);
773 }
774 
775 static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
776         const char *str)
777 {
778         siginfo_t info;
779         char b[40];
780 
781 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
782         if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
783                 return;
784 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
785 
786         if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
787                 return;
788 
789         /*
790          * A short test says that IRIX 5.3 sends SIGTRAP for all trap
791          * insns, even for trap and break codes that indicate arithmetic
792          * failures.  Weird ...
793          * But should we continue the brokenness???  --macro
794          */
795         switch (code) {
796         case BRK_OVERFLOW:
797         case BRK_DIVZERO:
798                 scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
799                 die_if_kernel(b, regs);
800                 if (code == BRK_DIVZERO)
801                         info.si_code = FPE_INTDIV;
802                 else
803                         info.si_code = FPE_INTOVF;
804                 info.si_signo = SIGFPE;
805                 info.si_errno = 0;
806                 info.si_addr = (void __user *) regs->cp0_epc;
807                 force_sig_info(SIGFPE, &info, current);
808                 break;
809         case BRK_BUG:
810                 die_if_kernel("Kernel bug detected", regs);
811                 force_sig(SIGTRAP, current);
812                 break;
813         case BRK_MEMU:
814                 /*
815                  * Address errors may be deliberately induced by the FPU
816                  * emulator to retake control of the CPU after executing the
817                  * instruction in the delay slot of an emulated branch.
818                  *
 819                  * Terminate if the exception was recognized as a delay slot return;
 820                  * otherwise handle as normal.
821                  */
822                 if (do_dsemulret(regs))
823                         return;
824 
825                 die_if_kernel("Math emu break/trap", regs);
826                 force_sig(SIGTRAP, current);
827                 break;
828         default:
829                 scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
830                 die_if_kernel(b, regs);
831                 force_sig(SIGTRAP, current);
832         }
833 }
834 
835 asmlinkage void do_bp(struct pt_regs *regs)
836 {
837         unsigned int opcode, bcode;
838         unsigned long epc;
839         u16 instr[2];
840 
841         if (get_isa16_mode(regs->cp0_epc)) {
842                 /* Calculate EPC. */
843                 epc = exception_epc(regs);
844                 if (cpu_has_mmips) {
845                         if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
846                             (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
847                                 goto out_sigsegv;
 848                         opcode = (instr[0] << 16) | instr[1];
 849                 } else {
 850                         /* MIPS16e mode */
 851                         if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
 852                                 goto out_sigsegv;
 853                         bcode = (instr[0] >> 6) & 0x3f;
 854                         do_trap_or_bp(regs, bcode, "Break");
 855                         return;
856                 }
857         } else {
858                 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
859                         goto out_sigsegv;
860         }
861 
862         /*
 863          * There is an ancient bug in MIPS assemblers: the break
 864          * code is placed starting at bit 16 instead of bit 6 in the opcode.
 865          * Gas is bug-compatible, but not always, grrr...
 866          * We handle both cases with a simple heuristic.  --macro
867          */
868         bcode = ((opcode >> 6) & ((1 << 20) - 1));
869         if (bcode >= (1 << 10))
870                 bcode >>= 10;
871 
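        /*
         * Worked example of the heuristic above: a canonical "break 7"
         * assembles to 0x000001cd (code at bit 6), so bcode == 7 directly.
         * A bug-compatible assembler emits 0x0007000d (code at bit 16),
         * giving bcode == 0x1c00, which the >= 1 << 10 test folds back
         * down to 7.  The cost is that genuine codes >= 1024 encoded at
         * bit 6 are folded down as well.
         */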
872         /*
873          * notify the kprobe handlers, if instruction is likely to
874          * pertain to them.
875          */
876         switch (bcode) {
877         case BRK_KPROBE_BP:
878                 if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
879                         return;
880                 else
881                         break;
882         case BRK_KPROBE_SSTEPBP:
883                 if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
884                         return;
885                 else
886                         break;
887         default:
888                 break;
889         }
890 
891         do_trap_or_bp(regs, bcode, "Break");
892         return;
893 
894 out_sigsegv:
895         force_sig(SIGSEGV, current);
896 }
897 
898 asmlinkage void do_tr(struct pt_regs *regs)
899 {
900         u32 opcode, tcode = 0;
901         u16 instr[2];
902         unsigned long epc = msk_isa16_mode(exception_epc(regs));
903 
904         if (get_isa16_mode(regs->cp0_epc)) {
905                 if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
906                     __get_user(instr[1], (u16 __user *)(epc + 2)))
907                         goto out_sigsegv;
908                 opcode = (instr[0] << 16) | instr[1];
909                 /* Immediate versions don't provide a code.  */
910                 if (!(opcode & OPCODE))
911                         tcode = (opcode >> 12) & ((1 << 4) - 1);
912         } else {
913                 if (__get_user(opcode, (u32 __user *)epc))
914                         goto out_sigsegv;
915                 /* Immediate versions don't provide a code.  */
916                 if (!(opcode & OPCODE))
917                         tcode = (opcode >> 6) & ((1 << 10) - 1);
918         }
919 
920         do_trap_or_bp(regs, tcode, "Trap");
921         return;
922 
923 out_sigsegv:
924         force_sig(SIGSEGV, current);
925 }
926 
927 asmlinkage void do_ri(struct pt_regs *regs)
928 {
929         unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
930         unsigned long old_epc = regs->cp0_epc;
931         unsigned long old31 = regs->regs[31];
932         unsigned int opcode = 0;
933         int status = -1;
934 
935         if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL)
936             == NOTIFY_STOP)
937                 return;
938 
939         die_if_kernel("Reserved instruction in kernel code", regs);
940 
941         if (unlikely(compute_return_epc(regs) < 0))
942                 return;
943 
944         if (get_isa16_mode(regs->cp0_epc)) {
945                 unsigned short mmop[2] = { 0 };
946 
947                 if (unlikely(get_user(mmop[0], epc) < 0))
948                         status = SIGSEGV;
949                 if (unlikely(get_user(mmop[1], epc) < 0))
950                         status = SIGSEGV;
951                 opcode = (mmop[0] << 16) | mmop[1];
952 
953                 if (status < 0)
954                         status = simulate_rdhwr_mm(regs, opcode);
955         } else {
956                 if (unlikely(get_user(opcode, epc) < 0))
957                         status = SIGSEGV;
958 
959                 if (!cpu_has_llsc && status < 0)
960                         status = simulate_llsc(regs, opcode);
961 
962                 if (status < 0)
963                         status = simulate_rdhwr_normal(regs, opcode);
964 
965                 if (status < 0)
966                         status = simulate_sync(regs, opcode);
967         }
968 
969         if (status < 0)
970                 status = SIGILL;
971 
972         if (unlikely(status > 0)) {
973                 regs->cp0_epc = old_epc;                /* Undo skip-over.  */
974                 regs->regs[31] = old31;
975                 force_sig(status, current);
976         }
977 }
978 
979 /*
980  * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
981  * emulated more than some threshold number of instructions, force migration to
982  * a "CPU" that has FP support.
983  */
984 static void mt_ase_fp_affinity(void)
985 {
986 #ifdef CONFIG_MIPS_MT_FPAFF
987         if (mt_fpemul_threshold > 0 &&
988              ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
989                 /*
990                  * If there's no FPU present, or if the application has already
991                  * restricted the allowed set to exclude any CPUs with FPUs,
992                  * we'll skip the procedure.
993                  */
994                 if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
995                         cpumask_t tmask;
996 
997                         current->thread.user_cpus_allowed
998                                 = current->cpus_allowed;
999                         cpus_and(tmask, current->cpus_allowed,
1000                                 mt_fpu_cpumask);
1001                         set_cpus_allowed_ptr(current, &tmask);
1002                         set_thread_flag(TIF_FPUBOUND);
1003                 }
1004         }
1005 #endif /* CONFIG_MIPS_MT_FPAFF */
1006 }
1007 
1008 /*
1009  * No lock; only written during early bootup by CPU 0.
1010  */
1011 static RAW_NOTIFIER_HEAD(cu2_chain);
1012 
1013 int __ref register_cu2_notifier(struct notifier_block *nb)
1014 {
1015         return raw_notifier_chain_register(&cu2_chain, nb);
1016 }
1017 
1018 int cu2_notifier_call_chain(unsigned long val, void *v)
1019 {
1020         return raw_notifier_call_chain(&cu2_chain, val, v);
1021 }
1022 
1023 static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1024         void *data)
1025 {
1026         struct pt_regs *regs = data;
1027 
1028         switch (action) {
1029         default:
1030                 die_if_kernel("Unhandled kernel unaligned access or invalid "
1031                               "instruction", regs);
1032                 /* Fall through  */
1033 
1034         case CU2_EXCEPTION:
1035                 force_sig(SIGILL, current);
1036         }
1037 
1038         return NOTIFY_OK;
1039 }
1040 
1041 asmlinkage void do_cpu(struct pt_regs *regs)
1042 {
1043         unsigned int __user *epc;
1044         unsigned long old_epc, old31;
1045         unsigned int opcode;
1046         unsigned int cpid;
1047         int status;
1048         unsigned long __maybe_unused flags;
1049 
1050         die_if_kernel("do_cpu invoked from kernel context!", regs);
1051 
1052         cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1053 
1054         switch (cpid) {
1055         case 0:
1056                 epc = (unsigned int __user *)exception_epc(regs);
1057                 old_epc = regs->cp0_epc;
1058                 old31 = regs->regs[31];
1059                 opcode = 0;
1060                 status = -1;
1061 
1062                 if (unlikely(compute_return_epc(regs) < 0))
1063                         return;
1064 
1065                 if (get_isa16_mode(regs->cp0_epc)) {
1066                         unsigned short mmop[2] = { 0 };
1067 
1068                         if (unlikely(get_user(mmop[0], epc) < 0))
1069                                 status = SIGSEGV;
1070                         if (unlikely(get_user(mmop[1], epc) < 0))
1071                                 status = SIGSEGV;
1072                         opcode = (mmop[0] << 16) | mmop[1];
1073 
1074                         if (status < 0)
1075                                 status = simulate_rdhwr_mm(regs, opcode);
1076                 } else {
1077                         if (unlikely(get_user(opcode, epc) < 0))
1078                                 status = SIGSEGV;
1079 
1080                         if (!cpu_has_llsc && status < 0)
1081                                 status = simulate_llsc(regs, opcode);
1082 
1083                         if (status < 0)
1084                                 status = simulate_rdhwr_normal(regs, opcode);
1085                 }
1086 
1087                 if (status < 0)
1088                         status = SIGILL;
1089 
1090                 if (unlikely(status > 0)) {
1091                         regs->cp0_epc = old_epc;        /* Undo skip-over.  */
1092                         regs->regs[31] = old31;
1093                         force_sig(status, current);
1094                 }
1095 
1096                 return;
1097 
1098         case 3:
1099                 /*
1100                  * Old (MIPS I and MIPS II) processors will set this code
1101                  * for COP1X opcode instructions that replaced the original
1102                  * COP3 space.  We don't limit COP1 space instructions in
1103                  * the emulator according to the CPU ISA, so we want to
1104                  * treat COP1X instructions consistently regardless of which
1105                  * code the CPU chose.  Therefore we redirect this trap to
1106                  * the FP emulator too.
1107                  *
1108                  * Then some newer FPU-less processors use this code
1109                  * erroneously too, so they are covered by this choice
1110                  * as well.
1111                  */
1112                 if (raw_cpu_has_fpu)
1113                         break;
1114                 /* Fall through.  */
1115 
1116         case 1:
1117                 if (used_math())        /* Using the FPU again.  */
1118                         own_fpu(1);
1119                 else {                  /* First time FPU user.  */
1120                         init_fpu();
1121                         set_used_math();
1122                 }
1123 
1124                 if (!raw_cpu_has_fpu) {
1125                         int sig;
1126                         void __user *fault_addr = NULL;
1127                         sig = fpu_emulator_cop1Handler(regs,
1128                                                        &current->thread.fpu,
1129                                                        0, &fault_addr);
1130                         if (!process_fpemu_return(sig, fault_addr))
1131                                 mt_ase_fp_affinity();
1132                 }
1133 
1134                 return;
1135 
1136         case 2:
1137                 raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1138                 return;
1139         }
1140 
1141         force_sig(SIGILL, current);
1142 }
1143 
1144 asmlinkage void do_mdmx(struct pt_regs *regs)
1145 {
1146         force_sig(SIGILL, current);
1147 }
1148 
1149 /*
1150  * Called with interrupts disabled.
1151  */
1152 asmlinkage void do_watch(struct pt_regs *regs)
1153 {
1154         u32 cause;
1155 
1156         /*
1157          * Clear WP (bit 22) bit of cause register so we don't loop
1158          * forever.
1159          */
1160         cause = read_c0_cause();
1161         cause &= ~(1 << 22);
1162         write_c0_cause(cause);
1163 
1164         /*
1165          * If the current thread has the watch registers loaded, save
1166          * their values and send SIGTRAP.  Otherwise another thread
1167          * left the registers set, clear them and continue.
1168          */
1169         if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1170                 mips_read_watch_registers();
1171                 local_irq_enable();
1172                 force_sig(SIGTRAP, current);
1173         } else {
1174                 mips_clear_watch_registers();
1175                 local_irq_enable();
1176         }
1177 }
1178 
1179 asmlinkage void do_mcheck(struct pt_regs *regs)
1180 {
1181         const int field = 2 * sizeof(unsigned long);
1182         int multi_match = regs->cp0_status & ST0_TS;
1183 
1184         show_regs(regs);
1185 
1186         if (multi_match) {
1187                 printk("Index   : %0x\n", read_c0_index());
1188                 printk("Pagemask: %0x\n", read_c0_pagemask());
1189                 printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
1190                 printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
1191                 printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
1192                 printk("\n");
1193                 dump_tlb_all();
1194         }
1195 
1196         show_code((unsigned int __user *) regs->cp0_epc);
1197 
1198         /*
1199          * Some chips may have other causes of machine check (e.g. SB1
1200          * graduation timer)
1201          */
1202         panic("Caught Machine Check exception - %scaused by multiple "
1203               "matching entries in the TLB.",
1204               (multi_match) ? "" : "not ");
1205 }
1206 
1207 asmlinkage void do_mt(struct pt_regs *regs)
1208 {
1209         int subcode;
1210 
1211         subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1212                         >> VPECONTROL_EXCPT_SHIFT;
1213         switch (subcode) {
1214         case 0:
1215                 printk(KERN_DEBUG "Thread Underflow\n");
1216                 break;
1217         case 1:
1218                 printk(KERN_DEBUG "Thread Overflow\n");
1219                 break;
1220         case 2:
1221                 printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1222                 break;
1223         case 3:
1224                 printk(KERN_DEBUG "Gating Storage Exception\n");
1225                 break;
1226         case 4:
1227                 printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1228                 break;
1229         case 5:
1230                 printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1231                 break;
1232         default:
1233                 printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1234                         subcode);
1235                 break;
1236         }
1237         die_if_kernel("MIPS MT Thread exception in kernel", regs);
1238 
1239         force_sig(SIGILL, current);
1240 }
1241 
1242 
1243 asmlinkage void do_dsp(struct pt_regs *regs)
1244 {
1245         if (cpu_has_dsp)
1246                 panic("Unexpected DSP exception");
1247 
1248         force_sig(SIGILL, current);
1249 }
1250 
1251 asmlinkage void do_reserved(struct pt_regs *regs)
1252 {
1253         /*
1254          * Game over - no way to handle this if it ever occurs.  Most probably
1255          * caused by a new unknown cpu type or after another deadly
1256          * hard/software error.
1257          */
1258         show_regs(regs);
1259         panic("Caught reserved exception %ld - should not happen.",
1260               (regs->cp0_cause & 0x7f) >> 2);
1261 }
1262 
1263 static int __initdata l1parity = 1;
1264 static int __init nol1parity(char *s)
1265 {
1266         l1parity = 0;
1267         return 1;
1268 }
1269 __setup("nol1par", nol1parity);
1270 static int __initdata l2parity = 1;
1271 static int __init nol2parity(char *s)
1272 {
1273         l2parity = 0;
1274         return 1;
1275 }
1276 __setup("nol2par", nol2parity);
1277 
1278 /*
1279  * Some MIPS CPUs can enable/disable cache parity detection, but they
1280  * do it in different ways.
1281  */
1282 static inline void parity_protection_init(void)
1283 {
1284         switch (current_cpu_type()) {
1285         case CPU_24K:
1286         case CPU_34K:
1287         case CPU_74K:
1288         case CPU_1004K:
1289                 {
1290 #define ERRCTL_PE       0x80000000
1291 #define ERRCTL_L2P      0x00800000
1292                         unsigned long errctl;
1293                         unsigned int l1parity_present, l2parity_present;
1294 
1295                         errctl = read_c0_ecc();
1296                         errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
1297 
1298                         /* probe L1 parity support */
1299                         write_c0_ecc(errctl | ERRCTL_PE);
1300                         back_to_back_c0_hazard();
1301                         l1parity_present = (read_c0_ecc() & ERRCTL_PE);
1302 
1303                         /* probe L2 parity support */
1304                         write_c0_ecc(errctl|ERRCTL_L2P);
1305                         back_to_back_c0_hazard();
1306                         l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
1307 
1308                         if (l1parity_present && l2parity_present) {
1309                                 if (l1parity)
1310                                         errctl |= ERRCTL_PE;
1311                                 if (l1parity ^ l2parity)
1312                                         errctl |= ERRCTL_L2P;
1313                         } else if (l1parity_present) {
1314                                 if (l1parity)
1315                                         errctl |= ERRCTL_PE;
1316                         } else if (l2parity_present) {
1317                                 if (l2parity)
1318                                         errctl |= ERRCTL_L2P;
1319                         } else {
1320                                 /* No parity available */
1321                         }
1322 
1323                         printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
1324 
1325                         write_c0_ecc(errctl);
1326                         back_to_back_c0_hazard();
1327                         errctl = read_c0_ecc();
1328                         printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
1329 
1330                         if (l1parity_present)
1331                                 printk(KERN_INFO "Cache parity protection %sabled\n",
1332                                        (errctl & ERRCTL_PE) ? "en" : "dis");
1333 
1334                         if (l2parity_present) {
1335                                 if (l1parity_present && l1parity)
1336                                         errctl ^= ERRCTL_L2P;
1337                                 printk(KERN_INFO "L2 cache parity protection %sabled\n",
1338                                        (errctl & ERRCTL_L2P) ? "en" : "dis");
1339                         }
1340                 }
1341                 break;
1342 
1343         case CPU_5KC:
1344         case CPU_5KE:
1345         case CPU_LOONGSON1:
1346                 write_c0_ecc(0x80000000);
1347                 back_to_back_c0_hazard();
1348                 /* Set the PE bit (bit 31) in the c0_errctl register. */
1349                 printk(KERN_INFO "Cache parity protection %sabled\n",
1350                        (read_c0_ecc() & 0x80000000) ? "en" : "dis");
1351                 break;
1352         case CPU_20KC:
1353         case CPU_25KF:
1354                 /* Clear the DE bit (bit 16) in the c0_status register. */
1355                 printk(KERN_INFO "Enable cache parity protection for "
1356                        "MIPS 20KC/25KF CPUs.\n");
1357                 clear_c0_status(ST0_DE);
1358                 break;
1359         default:
1360                 break;
1361         }
1362 }
1363 
1364 asmlinkage void cache_parity_error(void)
1365 {
1366         const int field = 2 * sizeof(unsigned long);
1367         unsigned int reg_val;
1368 
1369         /* For the moment, report the problem and hang. */
1370         printk("Cache error exception:\n");
1371         printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1372         reg_val = read_c0_cacheerr();
1373         printk("c0_cacheerr == %08x\n", reg_val);
1374 
1375         printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1376                reg_val & (1<<30) ? "secondary" : "primary",
1377                reg_val & (1<<31) ? "data" : "insn");
1378         printk("Error bits: %s%s%s%s%s%s%s\n",
1379                reg_val & (1<<29) ? "ED " : "",
1380                reg_val & (1<<28) ? "ET " : "",
1381                reg_val & (1<<26) ? "EE " : "",
1382                reg_val & (1<<25) ? "EB " : "",
1383                reg_val & (1<<24) ? "EI " : "",
1384                reg_val & (1<<23) ? "E1 " : "",
1385                reg_val & (1<<22) ? "E0 " : "");
1386         printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1387 
1388 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1389         if (reg_val & (1<<22))
1390                 printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1391 
1392         if (reg_val & (1<<23))
1393                 printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1394 #endif
1395 
1396         panic("Can't handle the cache error!");
1397 }
1398 
1399 /*
1400  * SDBBP EJTAG debug exception handler.
1401  * We skip the instruction and return to the next instruction.
1402  */
1403 void ejtag_exception_handler(struct pt_regs *regs)
1404 {
1405         const int field = 2 * sizeof(unsigned long);
1406         unsigned long depc, old_epc, old_ra;
1407         unsigned int debug;
1408 
1409         printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1410         depc = read_c0_depc();
1411         debug = read_c0_debug();
1412         printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1413         if (debug & 0x80000000) {
1414                 /*
1415                  * In branch delay slot.
1416                  * We cheat a little bit here and use EPC to calculate the
1417                  * debug return address (DEPC). EPC is restored after the
1418                  * calculation.
1419                  */
1420                 old_epc = regs->cp0_epc;
1421                 old_ra = regs->regs[31];
1422                 regs->cp0_epc = depc;
1423                 compute_return_epc(regs);
1424                 depc = regs->cp0_epc;
1425                 regs->cp0_epc = old_epc;
1426                 regs->regs[31] = old_ra;
1427         } else
1428                 depc += 4;
1429         write_c0_depc(depc);
1430 
1431 #if 0
1432         printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1433         write_c0_debug(debug | 0x100);
1434 #endif
1435 }
1436 
1437 /*
1438  * NMI exception handler.
1439  * No lock; only written during early bootup by CPU 0.
1440  */
1441 static RAW_NOTIFIER_HEAD(nmi_chain);
1442 
1443 int register_nmi_notifier(struct notifier_block *nb)
1444 {
1445         return raw_notifier_chain_register(&nmi_chain, nb);
1446 }
1447 
1448 void __noreturn nmi_exception_handler(struct pt_regs *regs)
1449 {
1450         raw_notifier_call_chain(&nmi_chain, 0, regs);
1451         bust_spinlocks(1);
1452         printk("NMI taken!!!!\n");
1453         die("NMI", regs);
1454 }
1455 
1456 #define VECTORSPACING 0x100     /* for EI/VI mode */
1457 
1458 unsigned long ebase;
1459 unsigned long exception_handlers[32];
1460 unsigned long vi_handlers[64];
1461 
1462 void __init *set_except_vector(int n, void *addr)
1463 {
1464         unsigned long handler = (unsigned long) addr;
1465         unsigned long old_handler;
1466 
1467 #ifdef CONFIG_CPU_MICROMIPS
1468         /*
1469          * Only the TLB handlers are cache aligned with an even
1470          * address. All other handlers are on an odd address and
1471          * require no modification. Otherwise, MIPS32 mode will
1472          * be entered when handling any TLB exceptions. That
1473          * would be bad...since we must stay in microMIPS mode.
1474          */
1475         if (!(handler & 0x1))
1476                 handler |= 1;
1477 #endif
1478         old_handler = xchg(&exception_handlers[n], handler);
1479 
1480         if (n == 0 && cpu_has_divec) {
1481 #ifdef CONFIG_CPU_MICROMIPS
1482                 unsigned long jump_mask = ~((1 << 27) - 1);
1483 #else
1484                 unsigned long jump_mask = ~((1 << 28) - 1);
1485 #endif
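                /*
                 * The masks above reflect the reach of a direct jump: a MIPS32
                 * "j" can only target the current 256 MB (2^28-byte) region,
                 * and microMIPS halves that to 2^27 bytes.  If the handler
                 * lies in the same region as ebase + 0x200, a direct j is
                 * emitted below; otherwise its address is loaded into $k0
                 * (register 26) and reached with jr.
                 */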
1486                 u32 *buf = (u32 *)(ebase + 0x200);
1487                 unsigned int k0 = 26;
1488                 if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
1489                         uasm_i_j(&buf, handler & ~jump_mask);
1490                         uasm_i_nop(&buf);
1491                 } else {
1492                         UASM_i_LA(&buf, k0, handler);
1493                         uasm_i_jr(&buf, k0);
1494                         uasm_i_nop(&buf);
1495                 }
1496                 local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
1497         }
1498         return (void *)old_handler;
1499 }
1500 
1501 static void do_default_vi(void)
1502 {
1503         show_regs(get_irq_regs());
1504         panic("Caught unexpected vectored interrupt.");
1505 }
1506 
1507 static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1508 {
1509         unsigned long handler;
1510         unsigned long old_handler = vi_handlers[n];
1511         int srssets = current_cpu_data.srsets;
1512         u16 *h;
1513         unsigned char *b;
1514 
1515         BUG_ON(!cpu_has_veic && !cpu_has_vint);
1516         BUG_ON((n < 0) || (n > 9));
1517 
1518         if (addr == NULL) {
1519                 handler = (unsigned long) do_default_vi;
1520                 srs = 0;
1521         } else
1522                 handler = (unsigned long) addr;
1523         vi_handlers[n] = handler;
1524 
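             /*
              * Each vectored interrupt gets its own VECTORSPACING-byte slot,
              * starting at offset 0x200 from the exception base.
              */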
1525         b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1526 
1527         if (srs >= srssets)
1528                 panic("Shadow register set %d not supported", srs);
1529 
1530         if (cpu_has_veic) {
1531                 if (board_bind_eic_interrupt)
1532                         board_bind_eic_interrupt(n, srs);
1533         } else if (cpu_has_vint) {
1534                 /* SRSMap is only defined if shadow sets are implemented */
1535                 if (srssets > 1)
1536                         change_c0_srsmap(0xf << n*4, srs << n*4);
1537         }
1538 
1539         if (srs == 0) {
1540                 /*
1541                  * If no shadow set is selected then use the default handler
1542                  * that does normal register saving and standard interrupt exit
1543                  */
1544                 extern char except_vec_vi, except_vec_vi_lui;
1545                 extern char except_vec_vi_ori, except_vec_vi_end;
1546                 extern char rollback_except_vec_vi;
1547                 char *vec_start = using_rollback_handler() ?
1548                         &rollback_except_vec_vi : &except_vec_vi;
1549 #ifdef CONFIG_MIPS_MT_SMTC
1550                 /*
1551                  * We need to provide the SMTC vectored interrupt handler
1552                  * not only with the address of the handler, but with the
1553                  * Status.IM bit to be masked before going there.
1554                  */
1555                 extern char except_vec_vi_mori;
1556 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1557                 const int mori_offset = &except_vec_vi_mori - vec_start + 2;
1558 #else
1559                 const int mori_offset = &except_vec_vi_mori - vec_start;
1560 #endif
1561 #endif /* CONFIG_MIPS_MT_SMTC */
1562 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1563                 const int lui_offset = &except_vec_vi_lui - vec_start + 2;
1564                 const int ori_offset = &except_vec_vi_ori - vec_start + 2;
1565 #else
1566                 const int lui_offset = &except_vec_vi_lui - vec_start;
1567                 const int ori_offset = &except_vec_vi_ori - vec_start;
1568 #endif
1569                 const int handler_len = &except_vec_vi_end - vec_start;
1570 
1571                 if (handler_len > VECTORSPACING) {
1572                         /*
1573                  * Sigh... panicking won't help as the console
1574                          * is probably not configured :(
1575                          */
1576                         panic("VECTORSPACING too small");
1577                 }
1578 
1579                 set_handler(((unsigned long)b - ebase), vec_start,
1580 #ifdef CONFIG_CPU_MICROMIPS
1581                                 (handler_len - 1));
1582 #else
1583                                 handler_len);
1584 #endif
1585 #ifdef CONFIG_MIPS_MT_SMTC
1586                 BUG_ON(n > 7);  /* Vector index exceeds SMTC maximum. */
1587 
1588                 h = (u16 *)(b + mori_offset);
1589                 *h = (0x100 << n);
1590 #endif /* CONFIG_MIPS_MT_SMTC */
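                     /*
                      * Patch the handler address into the immediate fields of
                      * the template's lui/ori pair (high and low halfwords
                      * respectively).
                      */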
1591                 h = (u16 *)(b + lui_offset);
1592                 *h = (handler >> 16) & 0xffff;
1593                 h = (u16 *)(b + ori_offset);
1594                 *h = (handler & 0xffff);
1595                 local_flush_icache_range((unsigned long)b,
1596                                          (unsigned long)(b+handler_len));
1597         }
1598         else {
1599                 /*
1600                  * In other cases jump directly to the interrupt handler. It
1601                  * is the handler's responsibility to save registers if required
1602                  * (eg hi/lo) and return from the exception using "eret".
1603                  */
1604                 u32 insn;
1605 
1606                 h = (u16 *)b;
1607                 /* j handler */
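                     /*
                      * Build the jump by hand: major opcode in the top six
                      * bits, 26-bit target index taken from the handler
                      * address (word-scaled for MIPS32, halfword-scaled for
                      * microMIPS).
                      */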
1608 #ifdef CONFIG_CPU_MICROMIPS
1609                 insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
1610 #else
1611                 insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
1612 #endif
1613                 h[0] = (insn >> 16) & 0xffff;
1614                 h[1] = insn & 0xffff;
1615                 h[2] = 0;
1616                 h[3] = 0;
1617                 local_flush_icache_range((unsigned long)b,
1618                                          (unsigned long)(b+8));
1619         }
1620 
1621         return (void *)old_handler;
1622 }
1623 
1624 void *set_vi_handler(int n, vi_handler_t addr)
1625 {
1626         return set_vi_srs_handler(n, addr, 0);
1627 }
1628 
1629 extern void tlb_init(void);
1630 extern void flush_tlb_handlers(void);
1631 
1632 /*
1633  * Timer interrupt
1634  */
1635 int cp0_compare_irq;
1636 EXPORT_SYMBOL_GPL(cp0_compare_irq);
1637 int cp0_compare_irq_shift;
1638 
1639 /*
1640  * Performance counter IRQ or -1 if shared with timer
1641  */
1642 int cp0_perfcount_irq;
1643 EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
1644 
1645 static int __cpuinitdata noulri;
1646 
1647 static int __init ulri_disable(char *s)
1648 {
1649         pr_info("Disabling ulri\n");
1650         noulri = 1;
1651 
1652         return 1;
1653 }
1654 __setup("noulri", ulri_disable);
1655 
1656 void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1657 {
1658         unsigned int cpu = smp_processor_id();
1659         unsigned int status_set = ST0_CU0;
1660         unsigned int hwrena = cpu_hwrena_impl_bits;
1661 #ifdef CONFIG_MIPS_MT_SMTC
1662         int secondaryTC = 0;
1663         int bootTC = (cpu == 0);
1664 
1665         /*
1666          * Only do per_cpu_trap_init() for the first TC of each VPE.
1667          * Note that this hack assumes that the SMTC init code
1668          * assigns TCs consecutively and in ascending order.
1669          */
1670 
1671         if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
1672             ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
1673                 secondaryTC = 1;
1674 #endif /* CONFIG_MIPS_MT_SMTC */
1675 
1676         /*
1677          * Disable coprocessors and select 32-bit or 64-bit addressing
1678          * and the 16/32 or 32/32 FPR register model.  Reset the BEV
1679          * flag that some firmware may have left set and the TS bit (for
1680          * IP27).  Set XX for ISA IV code to work.
1681          */
1682 #ifdef CONFIG_64BIT
1683         status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
1684 #endif
1685         if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
1686                 status_set |= ST0_XX;
1687         if (cpu_has_dsp)
1688                 status_set |= ST0_MX;
1689 
1690         change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
1691                          status_set);
1692 
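             /*
              * On R2 cores expose the low four RDHWR registers (CPUNum,
              * SYNCI_Step, CC, CCRes) to user mode; HWREna bit 29 (ULR)
              * additionally allows RDHWR of the UserLocal register,
              * typically used for fast TLS access.
              */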
1693         if (cpu_has_mips_r2)
1694                 hwrena |= 0x0000000f;
1695 
1696         if (!noulri && cpu_has_userlocal)
1697                 hwrena |= (1 << 29);
1698 
1699         if (hwrena)
1700                 write_c0_hwrena(hwrena);
1701 
1702 #ifdef CONFIG_MIPS_MT_SMTC
1703         if (!secondaryTC) {
1704 #endif /* CONFIG_MIPS_MT_SMTC */
1705 
1706         if (cpu_has_veic || cpu_has_vint) {
1707                 unsigned long sr = set_c0_status(ST0_BEV);
1708                 write_c0_ebase(ebase);
1709                 write_c0_status(sr);
1710                 /* Setting vector spacing enables EI/VI mode  */
1711                 change_c0_intctl(0x3e0, VECTORSPACING);
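                     /*
                      * IntCtl.VS lives in bits 9:5, so this sets VS = 8,
                      * i.e. 256-byte spacing, matching VECTORSPACING.
                      */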
1712         }
1713         if (cpu_has_divec) {
1714                 if (cpu_has_mipsmt) {
1715                         unsigned int vpflags = dvpe();
1716                         set_c0_cause(CAUSEF_IV);
1717                         evpe(vpflags);
1718                 } else
1719                         set_c0_cause(CAUSEF_IV);
1720         }
1721 
1722         /*
1723          * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
1724          *
1725          *  o read IntCtl.IPTI to determine the timer interrupt
1726          *  o read IntCtl.IPPCI to determine the performance counter interrupt
1727          */
1728         if (cpu_has_mips_r2) {
1729                 cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
1730                 cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
1731                 cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
1732                 if (cp0_perfcount_irq == cp0_compare_irq)
1733                         cp0_perfcount_irq = -1;
1734         } else {
1735                 cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
1736                 cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
1737                 cp0_perfcount_irq = -1;
1738         }
1739 
1740 #ifdef CONFIG_MIPS_MT_SMTC
1741         }
1742 #endif /* CONFIG_MIPS_MT_SMTC */
1743 
1744         if (!cpu_data[cpu].asid_cache)
1745                 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
1746 
1747         atomic_inc(&init_mm.mm_count);
1748         current->active_mm = &init_mm;
1749         BUG_ON(current->mm);
1750         enter_lazy_tlb(&init_mm, current);
1751 
1752 #ifdef CONFIG_MIPS_MT_SMTC
1753         if (bootTC) {
1754 #endif /* CONFIG_MIPS_MT_SMTC */
1755                 /* Boot CPU's cache setup in setup_arch(). */
1756                 if (!is_boot_cpu)
1757                         cpu_cache_init();
1758                 tlb_init();
1759 #ifdef CONFIG_MIPS_MT_SMTC
1760         } else if (!secondaryTC) {
1761                 /*
1762                  * First TC in non-boot VPE must do subset of tlb_init()
1763                  * for MMU control registers.
1764                  */
1765                 write_c0_pagemask(PM_DEFAULT_MASK);
1766                 write_c0_wired(0);
1767         }
1768 #endif /* CONFIG_MIPS_MT_SMTC */
1769         TLBMISS_HANDLER_SETUP();
1770 }
1771 
1772 /* Install CPU exception handler */
1773 void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
1774 {
1775 #ifdef CONFIG_CPU_MICROMIPS
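             /*
              * microMIPS handler addresses carry the ISA-mode bit in bit 0;
              * copy from the real (even) start of the code.
              */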
1776         memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
1777 #else
1778         memcpy((void *)(ebase + offset), addr, size);
1779 #endif
1780         local_flush_icache_range(ebase + offset, ebase + offset + size);
1781 }
1782 
1783 static char panic_null_cerr[] __cpuinitdata =
1784         "Trying to set NULL cache error exception handler";
1785 
1786 /*
1787  * Install uncached CPU exception handler.
1788  * This is suitable only for the cache error exception which is the only
1789  * exception handler that is being run uncached.
1790  */
1791 void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
1792         unsigned long size)
1793 {
1794         unsigned long uncached_ebase = CKSEG1ADDR(ebase);
1795 
1796         if (!addr)
1797                 panic(panic_null_cerr);
1798 
1799         memcpy((void *)(uncached_ebase + offset), addr, size);
1800 }
1801 
1802 static int __initdata rdhwr_noopt;
1803 static int __init set_rdhwr_noopt(char *str)
1804 {
1805         rdhwr_noopt = 1;
1806         return 1;
1807 }
1808 
1809 __setup("rdhwr_noopt", set_rdhwr_noopt);
1810 
1811 void __init trap_init(void)
1812 {
1813         extern char except_vec3_generic;
1814         extern char except_vec4;
1815         extern char except_vec3_r4000;
1816         unsigned long i;
1817 
1818         check_wait();
1819 
1820 #if defined(CONFIG_KGDB)
1821         if (kgdb_early_setup)
1822                 return; /* Already done */
1823 #endif
1824 
1825         if (cpu_has_veic || cpu_has_vint) {
1826                 unsigned long size = 0x200 + VECTORSPACING*64;
1827                 ebase = (unsigned long)
1828                         __alloc_bootmem(size, 1 << fls(size), 0);
1829         } else {
1830 #ifdef CONFIG_KVM_GUEST
1831 #define KVM_GUEST_KSEG0     0x40000000
1832         ebase = KVM_GUEST_KSEG0;
1833 #else
1834         ebase = CKSEG0;
1835 #endif
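                     /*
                      * On R2 cores, CP0 EBase bits 29:12 relocate the
                      * exception base within the kseg0 region.
                      */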
1836                 if (cpu_has_mips_r2)
1837                         ebase += (read_c0_ebase() & 0x3ffff000);
1838         }
1839 
1840         if (board_ebase_setup)
1841                 board_ebase_setup();
1842         per_cpu_trap_init(true);
1843 
1844         /*
1845          * Copy the generic exception handlers to their final destination.
1846          * This will be overridden later as suitable for a particular
1847          * configuration.
1848          */
1849         set_handler(0x180, &except_vec3_generic, 0x80);
1850 
1851         /*
1852          * Setup default vectors
1853          */
1854         for (i = 0; i <= 31; i++)
1855                 set_except_vector(i, handle_reserved);
1856 
1857         /*
1858          * Copy the EJTAG debug exception vector handler code to its final
1859          * destination.
1860          */
1861         if (cpu_has_ejtag && board_ejtag_handler_setup)
1862                 board_ejtag_handler_setup();
1863 
1864         /*
1865          * Only some CPUs have the watch exceptions.
1866          */
1867         if (cpu_has_watch)
1868                 set_except_vector(23, handle_watch);
1869 
1870         /*
1871          * Initialise interrupt handlers
1872          */
1873         if (cpu_has_veic || cpu_has_vint) {
1874                 int nvec = cpu_has_veic ? 64 : 8;
1875                 for (i = 0; i < nvec; i++)
1876                         set_vi_handler(i, NULL);
1877         }
1878         else if (cpu_has_divec)
1879                 set_handler(0x200, &except_vec4, 0x8);
1880 
1881         /*
1882          * Some CPUs can enable/disable cache parity detection, but they do
1883          * it in different ways.
1884          */
1885         parity_protection_init();
1886 
1887         /*
1888          * The Data Bus Errors / Instruction Bus Errors are signaled
1889          * by external hardware.  Therefore these two exceptions
1890          * may have board specific handlers.
1891          */
1892         if (board_be_init)
1893                 board_be_init();
1894 
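             /*
              * Wire the handlers up by Cause.ExcCode: 0 interrupt, 1 TLB
              * modified, 2/3 TLB load/store miss, 4/5 address error on
              * load/store, 6/7 instruction/data bus error, 8 syscall,
              * 9 breakpoint, 10 reserved instruction, 11 coprocessor
              * unusable, 12 overflow, 13 trap.
              */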
1895         set_except_vector(0, using_rollback_handler() ? rollback_handle_int
1896                                                       : handle_int);
1897         set_except_vector(1, handle_tlbm);
1898         set_except_vector(2, handle_tlbl);
1899         set_except_vector(3, handle_tlbs);
1900 
1901         set_except_vector(4, handle_adel);
1902         set_except_vector(5, handle_ades);
1903 
1904         set_except_vector(6, handle_ibe);
1905         set_except_vector(7, handle_dbe);
1906 
1907         set_except_vector(8, handle_sys);
1908         set_except_vector(9, handle_bp);
1909         set_except_vector(10, rdhwr_noopt ? handle_ri :
1910                           (cpu_has_vtag_icache ?
1911                            handle_ri_rdhwr_vivt : handle_ri_rdhwr));
1912         set_except_vector(11, handle_cpu);
1913         set_except_vector(12, handle_ov);
1914         set_except_vector(13, handle_tr);
1915 
1916         if (current_cpu_type() == CPU_R6000 ||
1917             current_cpu_type() == CPU_R6000A) {
1918                 /*
1919                  * The R6000 is the only R-series CPU that features a machine
1920                  * check exception (similar to the R4000 cache error) and
1921                  * unaligned ldc1/sdc1 exception.  The handlers have not been
1922                  * written yet.  Well, anyway there is no R6000 machine on the
1923                  * current list of targets for Linux/MIPS.
1924                  * (Duh, crap, there is someone with a triple R6k machine)
1925                  */
1926                 //set_except_vector(14, handle_mc);
1927                 //set_except_vector(15, handle_ndc);
1928         }
1929 
1930 
1931         if (board_nmi_handler_setup)
1932                 board_nmi_handler_setup();
1933 
1934         if (cpu_has_fpu && !cpu_has_nofpuex)
1935                 set_except_vector(15, handle_fpe);
1936 
1937         set_except_vector(22, handle_mdmx);
1938 
1939         if (cpu_has_mcheck)
1940                 set_except_vector(24, handle_mcheck);
1941 
1942         if (cpu_has_mipsmt)
1943                 set_except_vector(25, handle_mt);
1944 
1945         set_except_vector(26, handle_dsp);
1946 
1947         if (board_cache_error_setup)
1948                 board_cache_error_setup();
1949 
1950         if (cpu_has_vce)
1951                 /* Special exception: R4[04]00 also uses the divec space. */
1952                 set_handler(0x180, &except_vec3_r4000, 0x100);
1953         else if (cpu_has_4kex)
1954                 set_handler(0x180, &except_vec3_generic, 0x80);
1955         else
1956                 set_handler(0x080, &except_vec3_generic, 0x80);
1957 
1958         local_flush_icache_range(ebase, ebase + 0x400);
1959         flush_tlb_handlers();
1960 
1961         sort_extable(__start___dbe_table, __stop___dbe_table);
1962 
1963         cu2_notifier(default_cu2_call, 0x80000000);     /* Run last  */
1964 }
1965 
