~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/sparc64/kernel/traps.c

Version: ~ [ linux-5.9 ] ~ [ linux-5.8.14 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.70 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.150 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.200 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.238 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.238 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.85 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
  2  * arch/sparc64/kernel/traps.c
  3  *
  4  * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
  5  * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
  6  */
  7 
  8 /*
  9  * I like traps on v9, :))))
 10  */
 11 
 12 #include <linux/config.h>
 13 #include <linux/module.h>
 14 #include <linux/sched.h>  /* for jiffies */
 15 #include <linux/kernel.h>
 16 #include <linux/kallsyms.h>
 17 #include <linux/signal.h>
 18 #include <linux/smp.h>
 19 #include <linux/smp_lock.h>
 20 #include <linux/mm.h>
 21 #include <linux/init.h>
 22 
 23 #include <asm/delay.h>
 24 #include <asm/system.h>
 25 #include <asm/ptrace.h>
 26 #include <asm/oplib.h>
 27 #include <asm/page.h>
 28 #include <asm/pgtable.h>
 29 #include <asm/unistd.h>
 30 #include <asm/uaccess.h>
 31 #include <asm/fpumacro.h>
 32 #include <asm/lsu.h>
 33 #include <asm/dcu.h>
 34 #include <asm/estate.h>
 35 #include <asm/chafsr.h>
 36 #include <asm/psrcompat.h>
 37 #include <asm/processor.h>
 38 #include <asm/timer.h>
 39 #ifdef CONFIG_KMOD
 40 #include <linux/kmod.h>
 41 #endif
 42 
 43 /* When an irrecoverable trap occurs at tl > 0, the trap entry
 44  * code logs the trap state registers at every level in the trap
 45  * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 46  * is as follows:
 47  */
struct tl1_traplog {
	struct {
		unsigned long tstate;	/* saved %tstate at this level */
		unsigned long tpc;	/* saved trap PC */
		unsigned long tnpc;	/* saved trap next-PC */
		unsigned long tt;	/* saved trap type register */
	} trapstack[4];			/* one entry per trap level (printed as TL 1..4) */
	unsigned long tl;		/* trap level at the time of the error */
};
 57 
 58 static void dump_tl1_traplog(struct tl1_traplog *p)
 59 {
 60         int i;
 61 
 62         printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n",
 63                p->tl);
 64         for (i = 0; i < 4; i++) {
 65                 printk(KERN_CRIT
 66                        "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
 67                        "TNPC[%016lx] TT[%lx]\n",
 68                        i + 1,
 69                        p->trapstack[i].tstate, p->trapstack[i].tpc,
 70                        p->trapstack[i].tnpc, p->trapstack[i].tt);
 71         }
 72 }
 73 
 74 void bad_trap (struct pt_regs *regs, long lvl)
 75 {
 76         char buffer[32];
 77         siginfo_t info;
 78 
 79         if (lvl < 0x100) {
 80                 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
 81                 die_if_kernel(buffer, regs);
 82         }
 83 
 84         lvl -= 0x100;
 85         if (regs->tstate & TSTATE_PRIV) {
 86                 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
 87                 die_if_kernel (buffer, regs);
 88         }
 89         if (test_thread_flag(TIF_32BIT)) {
 90                 regs->tpc &= 0xffffffff;
 91                 regs->tnpc &= 0xffffffff;
 92         }
 93         info.si_signo = SIGILL;
 94         info.si_errno = 0;
 95         info.si_code = ILL_ILLTRP;
 96         info.si_addr = (void *)regs->tpc;
 97         info.si_trapno = lvl;
 98         force_sig_info(SIGILL, &info, current);
 99 }
100 
101 void bad_trap_tl1 (struct pt_regs *regs, long lvl)
102 {
103         char buffer[32];
104         
105         dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
106 
107         sprintf (buffer, "Bad trap %lx at tl>0", lvl);
108         die_if_kernel (buffer, regs);
109 }
110 
111 #ifdef CONFIG_DEBUG_BUGVERBOSE
/* Report the file/line of a BUG() before the trap machinery takes
 * the cpu down.
 */
void do_BUG(const char *file, int line)
{
	/* bust_spinlocks(1): make sure the message reaches the console
	 * even mid-oops.
	 */
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
117 #endif
118 
119 void instruction_access_exception(struct pt_regs *regs,
120                                   unsigned long sfsr, unsigned long sfar)
121 {
122         siginfo_t info;
123 
124         if (regs->tstate & TSTATE_PRIV) {
125                 printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
126                        sfsr, sfar);
127                 die_if_kernel("Iax", regs);
128         }
129         if (test_thread_flag(TIF_32BIT)) {
130                 regs->tpc &= 0xffffffff;
131                 regs->tnpc &= 0xffffffff;
132         }
133         info.si_signo = SIGSEGV;
134         info.si_errno = 0;
135         info.si_code = SEGV_MAPERR;
136         info.si_addr = (void *)regs->tpc;
137         info.si_trapno = 0;
138         force_sig_info(SIGSEGV, &info, current);
139 }
140 
/* Same fault taken at trap level > 0: dump the trap stack saved by
 * the trap entry code (located right after pt_regs), then handle as
 * at TL=0.
 */
void instruction_access_exception_tl1(struct pt_regs *regs,
                                      unsigned long sfsr, unsigned long sfar)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	instruction_access_exception(regs, sfsr, sfar);
}
147 
/* MMU data access exception at TL=0.
 *
 * Kernel-mode faults are first checked against the exception tables
 * (uaccess fixup entries): on a match, execution resumes at the fixup
 * address with %g2 updated and the fault is swallowed.  Unfixable
 * kernel faults are fatal.  User faults raise SIGSEGV/SEGV_MAPERR at
 * the fault address (SFAR).
 */
void data_access_exception (struct pt_regs *regs,
                            unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		unsigned long fixup;
		unsigned long g2 = regs->u_regs[UREG_G2];

		if ((fixup = search_extables_range(regs->tpc, &g2))) {
			/* Ouch, somebody is trying ugly VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
			       "g2<%016lx>\n", regs->tpc, fixup, g2);
#endif
			/* Redirect to the fixup handler and skip the
			 * faulting instruction's delay semantics.
			 */
			regs->tpc = fixup;
			regs->tnpc = regs->tpc + 4;
			regs->u_regs[UREG_G2] = g2;
			return;
		}
		/* No fixup entry: unrecoverable kernel fault. */
		printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
		       sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
183 
184 #ifdef CONFIG_PCI
185 /* This is really pathetic... */
186 extern volatile int pci_poke_in_progress;
187 extern volatile int pci_poke_cpu;
188 extern volatile int pci_poke_faulted;
189 #endif
190 
/* When access exceptions happen, we must do this: invalidate both L1
 * caches and turn them back on via the LSU control register.
 * Spitfire-family only (enforced by the BUG() below).
 */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em.  Zero the I- and D-cache tags for every 32-byte
	 * line across (2 * PAGE_SIZE) of index space.
	 */
	for (va =  0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU: set the IC/DC (cache enable) and IM/DM
	 * bits in the LSU control register, with flush + membars
	 * ordering the store against surrounding accesses.
	 */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			     "i" (ASI_LSU_CONTROL)
			     : "memory");
}
216 
217 void do_iae(struct pt_regs *regs)
218 {
219         siginfo_t info;
220 
221         spitfire_clean_and_reenable_l1_caches();
222 
223         info.si_signo = SIGBUS;
224         info.si_errno = 0;
225         info.si_code = BUS_OBJERR;
226         info.si_addr = (void *)0;
227         info.si_trapno = 0;
228         force_sig_info(SIGBUS, &info, current);
229 }
230 
/* Data access error.  If a PCI probe "poke" is in flight on this cpu,
 * the fault is expected: record it and step past the faulting
 * instruction.  Otherwise handle exactly like an instruction access
 * error (cache scrub + SIGBUS).
 */
void do_dae(struct pt_regs *regs)
{
#ifdef CONFIG_PCI
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();

		pci_poke_faulted = 1;

		/* On cheetah/cheetah_plus the reported trap PC must be
		 * advanced by one instruction here, unlike spitfire.
		 */
		if (tlb_type == cheetah || tlb_type == cheetah_plus)
			regs->tpc += 4;

		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif
	do_iae(regs);
}
249 
/* Decode table indexed by the 8-bit ECC syndrome taken from a UDB
 * error status register (see cee_log()).  NOTE(review): the values
 * appear to identify the failing bit, with the 0x48-0x4c entries
 * marking multi-bit/unusual syndromes — confirm against the
 * UltraSPARC User's Manual before relying on the encoding.
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
284 
285 /* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status
286  * in the following format.  The AFAR is left as is, with
287  * reserved bits cleared, and is a raw 40-bit physical
288  * address.
289  */
/* UDBH (upper data buffer) status: 10 bits packed at bit 43. */
#define CE_STATUS_UDBH_UE               (1UL << (43 + 9))
#define CE_STATUS_UDBH_CE               (1UL << (43 + 8))
#define CE_STATUS_UDBH_ESYNDR           (0xffUL << 43)
#define CE_STATUS_UDBH_SHIFT            43
/* UDBL (lower data buffer) status: 10 bits packed at bit 33. */
#define CE_STATUS_UDBL_UE               (1UL << (33 + 9))
#define CE_STATUS_UDBL_CE               (1UL << (33 + 8))
#define CE_STATUS_UDBL_ESYNDR           (0xffUL << 33)
#define CE_STATUS_UDBL_SHIFT            33
/* Raw AFSR contents occupy the low 33 bits. */
#define CE_STATUS_AFSR_MASK             (0x1ffffffffUL)
#define CE_STATUS_AFSR_ME               (1UL << 32)
#define CE_STATUS_AFSR_PRIV             (1UL << 31)
#define CE_STATUS_AFSR_ISAP             (1UL << 30)
#define CE_STATUS_AFSR_ETP              (1UL << 29)
#define CE_STATUS_AFSR_IVUE             (1UL << 28)
#define CE_STATUS_AFSR_TO               (1UL << 27)
#define CE_STATUS_AFSR_BERR             (1UL << 26)
#define CE_STATUS_AFSR_LDP              (1UL << 25)
#define CE_STATUS_AFSR_CP               (1UL << 24)
#define CE_STATUS_AFSR_WP               (1UL << 23)
#define CE_STATUS_AFSR_EDP              (1UL << 22)
#define CE_STATUS_AFSR_UE               (1UL << 21)
#define CE_STATUS_AFSR_CE               (1UL << 20)
#define CE_STATUS_AFSR_ETS              (0xfUL << 16)
#define CE_STATUS_AFSR_ETS_SHIFT        16
#define CE_STATUS_AFSR_PSYND            (0xffffUL << 0)
#define CE_STATUS_AFSR_PSYND_SHIFT      0
316 
317 /* Layout of Ecache TAG Parity Syndrome of AFSR */
318 #define AFSR_ETSYNDROME_7_0             0x1UL /* E$-tag bus bits  <7:0> */
319 #define AFSR_ETSYNDROME_15_8            0x2UL /* E$-tag bus bits <15:8> */
320 #define AFSR_ETSYNDROME_21_16           0x4UL /* E$-tag bus bits <21:16> */
321 #define AFSR_ETSYNDROME_24_22           0x8UL /* E$-tag bus bits <24:22> */
322 
323 static char *syndrome_unknown = "<Unknown>";
324 
325 asmlinkage void cee_log(unsigned long ce_status,
326                         unsigned long afar,
327                         struct pt_regs *regs)
328 {
329         char memmod_str[64];
330         char *p;
331         unsigned short scode, udb_reg;
332 
333         printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
334                "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n",
335                smp_processor_id(),
336                (ce_status & CE_STATUS_AFSR_MASK),
337                afar,
338                ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL),
339                ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL));
340 
341         udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL);
342         if (udb_reg & (1 << 8)) {
343                 scode = ecc_syndrome_table[udb_reg & 0xff];
344                 if (prom_getunumber(scode, afar,
345                                     memmod_str, sizeof(memmod_str)) == -1)
346                         p = syndrome_unknown;
347                 else
348                         p = memmod_str;
349                 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
350                        "Memory Module \"%s\"\n",
351                        smp_processor_id(), scode, p);
352         }
353 
354         udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL);
355         if (udb_reg & (1 << 8)) {
356                 scode = ecc_syndrome_table[udb_reg & 0xff];
357                 if (prom_getunumber(scode, afar,
358                                     memmod_str, sizeof(memmod_str)) == -1)
359                         p = syndrome_unknown;
360                 else
361                         p = memmod_str;
362                 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
363                        "Memory Module \"%s\"\n",
364                        smp_processor_id(), scode, p);
365         }
366 }
367 
368 /* Cheetah error trap handling. */
369 static unsigned long ecache_flush_physbase;
370 static unsigned long ecache_flush_linesize;
371 static unsigned long ecache_flush_size;
372 
373 /* WARNING: The error trap handlers in assembly know the precise
374  *          layout of the following structure.
375  *
376  * C-level handlers below use this information to log the error
377  * and then determine how to recover (if possible).
378  */
/* NOTE: the byte offsets in the comments are load-bearing — the
 * assembly error trap handlers store directly at these offsets.
 * Do not reorder or resize fields without updating entry.S.
 */
struct cheetah_err_info {
/*0x00*/u64 afsr;
/*0x08*/u64 afar;

	/* D-cache state */
/*0x10*/u64 dcache_data[4];	/* The actual data      */
/*0x30*/u64 dcache_index;	/* D-cache index        */
/*0x38*/u64 dcache_tag;		/* D-cache tag/valid    */
/*0x40*/u64 dcache_utag;	/* D-cache microtag     */
/*0x48*/u64 dcache_stag;	/* D-cache snooptag     */

	/* I-cache state */
/*0x50*/u64 icache_data[8];	/* The actual insns + predecode */
/*0x90*/u64 icache_index;	/* I-cache index        */
/*0x98*/u64 icache_tag;		/* I-cache phys tag     */
/*0xa0*/u64 icache_utag;	/* I-cache microtag     */
/*0xa8*/u64 icache_stag;	/* I-cache snooptag     */
/*0xb0*/u64 icache_upper;	/* I-cache upper-tag    */
/*0xb8*/u64 icache_lower;	/* I-cache lower-tag    */

	/* E-cache state */
/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
/*0xe0*/u64 ecache_index;	/* E-cache index        */
/*0xe8*/u64 ecache_tag;		/* E-cache tag/state    */

/*0xf0*/u64 __pad[32 - 30];	/* pad total size to 0x100 bytes */
};
/* AFSR value marking a log slot as empty/consumed. */
#define CHAFSR_INVALID		((u64)-1L)
407 
/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */

struct afsr_error_table {
	unsigned long mask;	/* AFSR bit(s) identifying this error */
	const char *name;	/* human-readable description */
};
416 
417 static const char CHAFSR_PERR_msg[] =
418         "System interface protocol error";
419 static const char CHAFSR_IERR_msg[] =
420         "Internal processor error";
421 static const char CHAFSR_ISAP_msg[] =
422         "System request parity error on incoming addresss";
423 static const char CHAFSR_UCU_msg[] =
424         "Uncorrectable E-cache ECC error for ifetch/data";
425 static const char CHAFSR_UCC_msg[] =
426         "SW Correctable E-cache ECC error for ifetch/data";
427 static const char CHAFSR_UE_msg[] =
428         "Uncorrectable system bus data ECC error for read";
429 static const char CHAFSR_EDU_msg[] =
430         "Uncorrectable E-cache ECC error for stmerge/blkld";
431 static const char CHAFSR_EMU_msg[] =
432         "Uncorrectable system bus MTAG error";
433 static const char CHAFSR_WDU_msg[] =
434         "Uncorrectable E-cache ECC error for writeback";
435 static const char CHAFSR_CPU_msg[] =
436         "Uncorrectable ECC error for copyout";
437 static const char CHAFSR_CE_msg[] =
438         "HW corrected system bus data ECC error for read";
439 static const char CHAFSR_EDC_msg[] =
440         "HW corrected E-cache ECC error for stmerge/blkld";
441 static const char CHAFSR_EMC_msg[] =
442         "HW corrected system bus MTAG ECC error";
443 static const char CHAFSR_WDC_msg[] =
444         "HW corrected E-cache ECC error for writeback";
445 static const char CHAFSR_CPC_msg[] =
446         "HW corrected ECC error for copyout";
447 static const char CHAFSR_TO_msg[] =
448         "Unmapped error from system bus";
449 static const char CHAFSR_BERR_msg[] =
450         "Bus error response from system bus";
451 static const char CHAFSR_IVC_msg[] =
452         "HW corrected system bus data ECC error for ivec read";
453 static const char CHAFSR_IVU_msg[] =
454         "Uncorrectable system bus data ECC error for ivec read";
455 static struct afsr_error_table __cheetah_error_table[] = {
456         {       CHAFSR_PERR,    CHAFSR_PERR_msg         },
457         {       CHAFSR_IERR,    CHAFSR_IERR_msg         },
458         {       CHAFSR_ISAP,    CHAFSR_ISAP_msg         },
459         {       CHAFSR_UCU,     CHAFSR_UCU_msg          },
460         {       CHAFSR_UCC,     CHAFSR_UCC_msg          },
461         {       CHAFSR_UE,      CHAFSR_UE_msg           },
462         {       CHAFSR_EDU,     CHAFSR_EDU_msg          },
463         {       CHAFSR_EMU,     CHAFSR_EMU_msg          },
464         {       CHAFSR_WDU,     CHAFSR_WDU_msg          },
465         {       CHAFSR_CPU,     CHAFSR_CPU_msg          },
466         {       CHAFSR_CE,      CHAFSR_CE_msg           },
467         {       CHAFSR_EDC,     CHAFSR_EDC_msg          },
468         {       CHAFSR_EMC,     CHAFSR_EMC_msg          },
469         {       CHAFSR_WDC,     CHAFSR_WDC_msg          },
470         {       CHAFSR_CPC,     CHAFSR_CPC_msg          },
471         {       CHAFSR_TO,      CHAFSR_TO_msg           },
472         {       CHAFSR_BERR,    CHAFSR_BERR_msg         },
473         /* These two do not update the AFAR. */
474         {       CHAFSR_IVC,     CHAFSR_IVC_msg          },
475         {       CHAFSR_IVU,     CHAFSR_IVU_msg          },
476         {       0,              NULL                    },
477 };
/* Cheetah+ (UltraSPARC-III+) variant: adds E-cache tag ECC and
 * prefetch/store-queue error classes on top of the base table.
 */
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Jalapeno (JBUS-based) variant: JBUS protocol/parity and remote
 * cache/memory error classes interleaved by priority with the base
 * Cheetah entries.
 */
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	JPAFSR_OM,	JPAFSR_OM_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
	{	JPAFSR_BP,	JPAFSR_BP_msg		},
	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
	/* This one does not update the AFAR (unlike the other tables,
	 * IVC is absent here).
	 */
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
578 static struct afsr_error_table *cheetah_error_table;
579 static unsigned long cheetah_afsr_errors;
580 
581 /* This is allocated at boot time based upon the largest hardware
582  * cpu ID in the system.  We allocate two entries per cpu, one for
583  * TL==0 logging and one for TL >= 1 logging.
584  */
585 struct cheetah_err_info *cheetah_error_log;
586 
587 static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
588 {
589         struct cheetah_err_info *p;
590         int cpu = smp_processor_id();
591 
592         if (!cheetah_error_log)
593                 return NULL;
594 
595         p = cheetah_error_log + (cpu * 2);
596         if ((afsr & CHAFSR_TL1) != 0UL)
597                 p++;
598 
599         return p;
600 }
601 
602 extern unsigned int tl0_icpe[], tl1_icpe[];
603 extern unsigned int tl0_dcpe[], tl1_dcpe[];
604 extern unsigned int tl0_fecc[], tl1_fecc[];
605 extern unsigned int tl0_cee[], tl1_cee[];
606 extern unsigned int tl0_iae[], tl1_iae[];
607 extern unsigned int tl0_dae[], tl1_dae[];
608 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
609 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
610 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
611 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
612 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
613 
614 void __init cheetah_ecache_flush_init(void)
615 {
616         unsigned long largest_size, smallest_linesize, order, ver;
617         int node, i, instance;
618 
619         /* Scan all cpu device tree nodes, note two values:
620          * 1) largest E-cache size
621          * 2) smallest E-cache line size
622          */
623         largest_size = 0UL;
624         smallest_linesize = ~0UL;
625 
626         instance = 0;
627         while (!cpu_find_by_instance(instance, &node, NULL)) {
628                 unsigned long val;
629 
630                 val = prom_getintdefault(node, "ecache-size",
631                                          (2 * 1024 * 1024));
632                 if (val > largest_size)
633                         largest_size = val;
634                 val = prom_getintdefault(node, "ecache-line-size", 64);
635                 if (val < smallest_linesize)
636                         smallest_linesize = val;
637                 instance++;
638         }
639 
640         if (largest_size == 0UL || smallest_linesize == ~0UL) {
641                 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
642                             "parameters.\n");
643                 prom_halt();
644         }
645 
646         ecache_flush_size = (2 * largest_size);
647         ecache_flush_linesize = smallest_linesize;
648 
649         /* Discover a physically contiguous chunk of physical
650          * memory in 'sp_banks' of size ecache_flush_size calculated
651          * above.  Store the physical base of this area at
652          * ecache_flush_physbase.
653          */
654         for (node = 0; ; node++) {
655                 if (sp_banks[node].num_bytes == 0)
656                         break;
657                 if (sp_banks[node].num_bytes >= ecache_flush_size) {
658                         ecache_flush_physbase = sp_banks[node].base_addr;
659                         break;
660                 }
661         }
662 
663         /* Note: Zero would be a valid value of ecache_flush_physbase so
664          * don't use that as the success test. :-)
665          */
666         if (sp_banks[node].num_bytes == 0) {
667                 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
668                             "contiguous physical memory.\n", ecache_flush_size);
669                 prom_halt();
670         }
671 
672         /* Now allocate error trap reporting scoreboard. */
673         node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
674         for (order = 0; order < MAX_ORDER; order++) {
675                 if ((PAGE_SIZE << order) >= node)
676                         break;
677         }
678         cheetah_error_log = (struct cheetah_err_info *)
679                 __get_free_pages(GFP_KERNEL, order);
680         if (!cheetah_error_log) {
681                 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
682                             "error logging scoreboard (%d bytes).\n", node);
683                 prom_halt();
684         }
685         memset(cheetah_error_log, 0, PAGE_SIZE << order);
686 
687         /* Mark all AFSRs as invalid so that the trap handler will
688          * log new new information there.
689          */
690         for (i = 0; i < 2 * NR_CPUS; i++)
691                 cheetah_error_log[i].afsr = CHAFSR_INVALID;
692 
693         __asm__ ("rdpr %%ver, %0" : "=r" (ver));
694         if ((ver >> 32) == 0x003e0016) {
695                 cheetah_error_table = &__jalapeno_error_table[0];
696                 cheetah_afsr_errors = JPAFSR_ERRORS;
697         } else if ((ver >> 32) == 0x003e0015) {
698                 cheetah_error_table = &__cheetah_plus_error_table[0];
699                 cheetah_afsr_errors = CHPAFSR_ERRORS;
700         } else {
701                 cheetah_error_table = &__cheetah_error_table[0];
702                 cheetah_afsr_errors = CHAFSR_ERRORS;
703         }
704 
705         /* Now patch trap tables. */
706         memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
707         memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
708         memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
709         memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
710         memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
711         memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
712         memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
713         memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
714         if (tlb_type == cheetah_plus) {
715                 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
716                 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
717                 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
718                 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
719         }
720         flushi(PAGE_OFFSET);
721 }
722 
723 static void cheetah_flush_ecache(void)
724 {
725         unsigned long flush_base = ecache_flush_physbase;
726         unsigned long flush_linesize = ecache_flush_linesize;
727         unsigned long flush_size = ecache_flush_size;
728 
729         __asm__ __volatile__("1: subcc  %0, %4, %0\n\t"
730                              "   bne,pt %%xcc, 1b\n\t"
731                              "    ldxa  [%2 + %0] %3, %%g0\n\t"
732                              : "=&r" (flush_size)
733                              : "" (flush_size), "r" (flush_base),
734                                "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
735 }
736 
/* Displace the single E-cache line that caches PHYSADDR, by loading
 * from the two addresses in the flush area that map to the same
 * E-cache index (the flush area is 2 X E-cache size; see
 * cheetah_tune_scheduling's comment).
 */
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
        unsigned long alias;

        /* Align to the 8-byte ldxa access size. */
        physaddr &= ~(8UL - 1UL);
        /* Fold the address into the first half of the flush area... */
        physaddr = (ecache_flush_physbase +
                    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
        /* ...and compute its alias one half-size further up. */
        alias = physaddr + (ecache_flush_size >> 1UL);
        /* Two physical-address loads via ASI_PHYS_USE_EC push the
         * victim line out of the E-cache; membar #Sync makes the
         * accesses complete before we return.
         */
        __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
                             "ldxa [%1] %2, %%g0\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (physaddr), "r" (alias),
                               "i" (ASI_PHYS_USE_EC));
}
752 
753 #ifdef CONFIG_SMP
754 unsigned long __init cheetah_tune_scheduling(void)
755 {
756         unsigned long tick1, tick2, raw;
757         unsigned long flush_base = ecache_flush_physbase;
758         unsigned long flush_linesize = ecache_flush_linesize;
759         unsigned long flush_size = ecache_flush_size;
760 
761         /* Run through the whole cache to guarantee the timed loop
762          * is really displacing cache lines.
763          */
764         __asm__ __volatile__("1: subcc  %0, %4, %0\n\t"
765                              "   bne,pt %%xcc, 1b\n\t"
766                              "    ldxa  [%2 + %0] %3, %%g0\n\t"
767                              : "=&r" (flush_size)
768                              : "" (flush_size), "r" (flush_base),
769                                "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
770 
771         /* The flush area is 2 X Ecache-size, so cut this in half for
772          * the timed loop.
773          */
774         flush_base = ecache_flush_physbase;
775         flush_linesize = ecache_flush_linesize;
776         flush_size = ecache_flush_size >> 1;
777 
778         tick1 = tick_ops->get_tick();
779 
780         __asm__ __volatile__("1: subcc  %0, %4, %0\n\t"
781                              "   bne,pt %%xcc, 1b\n\t"
782                              "    ldxa  [%2 + %0] %3, %%g0\n\t"
783                              : "=&r" (flush_size)
784                              : "" (flush_size), "r" (flush_base),
785                                "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
786 
787         tick2 = tick_ops->get_tick();
788 
789         raw = (tick2 - tick1);
790 
791         return (raw - (raw >> 2));
792 }
793 #endif
794 
795 /* Unfortunately, the diagnostic access to the I-cache tags we need to
796  * use to clear the thing interferes with I-cache coherency transactions.
797  *
798  * So we must only flush the I-cache when it is disabled.
799  */
800 static void __cheetah_flush_icache(void)
801 {
802         unsigned long i;
803 
804         /* Clear the valid bits in all the tags. */
805         for (i = 0; i < (1 << 15); i += (1 << 5)) {
806                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
807                                      "membar #Sync"
808                                      : /* no outputs */
809                                      : "r" (i | (2 << 3)), "i" (ASI_IC_TAG));
810         }
811 }
812 
813 static void cheetah_flush_icache(void)
814 {
815         unsigned long dcu_save;
816 
817         /* Save current DCU, disable I-cache. */
818         __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
819                              "or %0, %2, %%g1\n\t"
820                              "stxa %%g1, [%%g0] %1\n\t"
821                              "membar #Sync"
822                              : "=r" (dcu_save)
823                              : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
824                              : "g1");
825 
826         __cheetah_flush_icache();
827 
828         /* Restore DCU register */
829         __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
830                              "membar #Sync"
831                              : /* no outputs */
832                              : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
833 }
834 
835 static void cheetah_flush_dcache(void)
836 {
837         unsigned long i;
838 
839         for (i = 0; i < (1 << 16); i += (1 << 5)) {
840                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
841                                      "membar #Sync"
842                                      : /* no outputs */
843                                      : "r" (i), "i" (ASI_DCACHE_TAG));
844         }
845 }
846 
/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
        unsigned long i;

        /* Walk every D-cache line: 64K of index space, 32-byte lines. */
        for (i = 0; i < (1 << 16); i += (1 << 5)) {
                unsigned long tag = (i >> 14);
                unsigned long j;

                /* Write the utag for this line through the diagnostic
                 * D-cache utag ASI, bracketed by membar #Sync.
                 */
                __asm__ __volatile__("membar    #Sync\n\t"
                                     "stxa      %0, [%1] %2\n\t"
                                     "membar    #Sync"
                                     : /* no outputs */
                                     : "r" (tag), "r" (i),
                                       "i" (ASI_DCACHE_UTAG));
                /* Zero the line's 32 bytes of data, 8 bytes at a time. */
                for (j = i; j < i + (1 << 5); j += (1 << 3))
                        __asm__ __volatile__("membar    #Sync\n\t"
                                             "stxa      %%g0, [%0] %1\n\t"
                                             "membar    #Sync"
                                             : /* no outputs */
                                             : "r" (j), "i" (ASI_DCACHE_DATA));
        }
}
874 
/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
/* Marker codes (values >= 128) understood by chmc_getunumber();
 * entries below 128 appear to be raw bit numbers — confirm against
 * the chmc memory controller driver.
 */
#define MT0     137
#define MT1     138
#define MT2     139
#define NONE    254
#define MTC0    140
#define MTC1    141
#define MTC2    142
#define MTC3    143
#define C0      128
#define C1      129
#define C2      130
#define C3      131
#define C4      132
#define C5      133
#define C6      134
#define C7      135
#define C8      136
#define M2      144
#define M3      145
#define M4      146
#define M       147
/* Indexed by the 9-bit E-cache ECC syndrome from AFSR. */
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
/* Indexed by the 4-bit M-tag syndrome from AFSR. */
static unsigned char cheetah_mtag_syntab[] = {
       NONE, MTC0,
       MTC1, NONE,
       MTC2, NONE,
       NONE, MT0,
       MTC3, NONE,
       NONE, MT1,
       NONE, MT2,
       NONE, NONE
};
944 
945 /* Return the highest priority error conditon mentioned. */
946 static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
947 {
948         unsigned long tmp = 0;
949         int i;
950 
951         for (i = 0; cheetah_error_table[i].mask; i++) {
952                 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
953                         return tmp;
954         }
955         return tmp;
956 }
957 
958 static const char *cheetah_get_string(unsigned long bit)
959 {
960         int i;
961 
962         for (i = 0; cheetah_error_table[i].mask; i++) {
963                 if ((bit & cheetah_error_table[i].mask) != 0UL)
964                         return cheetah_error_table[i].name;
965         }
966         return "???";
967 }
968 
969 extern int chmc_getunumber(int, unsigned long, char *, int);
970 
/* Print a full report of a Cheetah error trap: trap state, syndrome
 * decode (via the memory controller's unum lookup when applicable),
 * the cache snapshots captured in *info, and every remaining error
 * bit in AFSR.  Severity is KERN_WARNING when recoverable is set,
 * KERN_CRIT otherwise.
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
                               unsigned long afsr, unsigned long afar, int recoverable)
{
        unsigned long hipri;
        char unum[256];

        printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               afsr, afar,
               (afsr & CHAFSR_TL1) ? 1 : 0);
        printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               regs->tpc, regs->tnpc, regs->tstate);
        printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
               (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
               (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
               (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
        hipri = cheetah_get_hipri(afsr);
        printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               hipri, cheetah_get_string(hipri));

        /* Try to get unumber if relevant. */
#define ESYND_ERRORS    (CHAFSR_IVC | CHAFSR_IVU | \
                         CHAFSR_CPC | CHAFSR_CPU | \
                         CHAFSR_UE  | CHAFSR_CE  | \
                         CHAFSR_EDC | CHAFSR_EDU  | \
                         CHAFSR_UCC | CHAFSR_UCU  | \
                         CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS    (CHAFSR_EMC | CHAFSR_EMU)
        if (afsr & ESYND_ERRORS) {
                int syndrome;
                int ret;

                /* E-cache ECC syndrome -> syntab marker -> unum string. */
                syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
                syndrome = cheetah_ecc_syntab[syndrome];
                ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
                if (ret != -1)
                        printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
                               (recoverable ? KERN_WARNING : KERN_CRIT),
                               smp_processor_id(), unum);
        } else if (afsr & MSYND_ERRORS) {
                int syndrome;
                int ret;

                /* M-tag syndrome -> mtag syntab marker -> unum string. */
                syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
                syndrome = cheetah_mtag_syntab[syndrome];
                ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
                if (ret != -1)
                        printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
                               (recoverable ? KERN_WARNING : KERN_CRIT),
                               smp_processor_id(), unum);
        }

        /* Now dump the cache snapshots. */
        printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               (int) info->dcache_index,
               info->dcache_tag,
               info->dcache_utag,
               info->dcache_stag);
        printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               info->dcache_data[0],
               info->dcache_data[1],
               info->dcache_data[2],
               info->dcache_data[3]);
        printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
               "u[%016lx] l[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               (int) info->icache_index,
               info->icache_tag,
               info->icache_utag,
               info->icache_stag,
               info->icache_upper,
               info->icache_lower);
        printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               info->icache_data[0],
               info->icache_data[1],
               info->icache_data[2],
               info->icache_data[3]);
        printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               info->icache_data[4],
               info->icache_data[5],
               info->icache_data[6],
               info->icache_data[7]);
        printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               (int) info->ecache_index, info->ecache_tag);
        printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               info->ecache_data[0],
               info->ecache_data[1],
               info->ecache_data[2],
               info->ecache_data[3]);

        /* Report any remaining error bits (beyond the highest
         * priority one already printed), highest priority first.
         */
        afsr = (afsr & ~hipri) & cheetah_afsr_errors;
        while (afsr != 0UL) {
                unsigned long bit = cheetah_get_hipri(afsr);

                printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
                       (recoverable ? KERN_WARNING : KERN_CRIT),
                       bit, cheetah_get_string(bit));

                afsr &= ~bit;
        }

        if (!recoverable)
                printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
1085 
/* Re-read the AFSR.  If any error bits relevant to this CPU type are
 * set, record AFSR/AFAR into *logp (when logp is non-NULL) and return
 * 1; otherwise return 0.  The observed AFSR value is written back in
 * either case, which acknowledges/clears the logged error bits.
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
        unsigned long afsr, afar;
        int ret = 0;

        __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
                             : "=r" (afsr)
                             : "i" (ASI_AFSR));
        if ((afsr & cheetah_afsr_errors) != 0) {
                if (logp != NULL) {
                        /* Only fetch AFAR when the caller wants a log. */
                        __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
                                             : "=r" (afar)
                                             : "i" (ASI_AFAR));
                        logp->afsr = afsr;
                        logp->afar = afar;
                }
                ret = 1;
        }
        /* Write the observed bits back to clear them. */
        __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
                             "membar #Sync\n\t"
                             : : "r" (afsr), "i" (ASI_AFSR));

        return ret;
}
1110 
/* Trap handler (C half) for Fast-ECC E-cache errors.  The low-level
 * trap vector has already captured cache state and disabled caches /
 * error reporting; we flush, re-enable, decide recoverability, log,
 * and panic when the error is fatal.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
        struct cheetah_err_info local_snapshot, *p;
        int recoverable;

        /* Flush E-cache */
        cheetah_flush_ecache();

        /* An error this early (before the log was allocated) is fatal. */
        p = cheetah_get_error_log(afsr);
        if (!p) {
                prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
                            afsr, afar);
                prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
                            smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
                prom_halt();
        }

        /* Grab snapshot of logged error. */
        memcpy(&local_snapshot, p, sizeof(local_snapshot));

        /* If the current trap snapshot does not match what the
         * trap handler passed along into our args, big trouble.
         * In such a case, mark the local copy as invalid.
         *
         * Else, it matches and we mark the afsr in the non-local
         * copy as invalid so we may log new error traps there.
         */
        if (p->afsr != afsr || p->afar != afar)
                local_snapshot.afsr = CHAFSR_INVALID;
        else
                p->afsr = CHAFSR_INVALID;

        cheetah_flush_icache();
        cheetah_flush_dcache();

        /* Re-enable I-cache/D-cache */
        __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                             "or %%g1, %1, %%g1\n\t"
                             "stxa %%g1, [%%g0] %0\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "i" (ASI_DCU_CONTROL_REG),
                               "i" (DCU_DC | DCU_IC)
                             : "g1");

        /* Re-enable error reporting */
        __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                             "or %%g1, %1, %%g1\n\t"
                             "stxa %%g1, [%%g0] %0\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "i" (ASI_ESTATE_ERROR_EN),
                               "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
                             : "g1");

        /* Decide if we can continue after handling this trap and
         * logging the error.
         */
        recoverable = 1;
        if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
                recoverable = 0;

        /* Re-check AFSR/AFAR.  What we are looking for here is whether a new
         * error was logged while we had error reporting traps disabled.
         */
        if (cheetah_recheck_errors(&local_snapshot)) {
                unsigned long new_afsr = local_snapshot.afsr;

                /* If we got a new asynchronous error, die... */
                if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
                                CHAFSR_WDU | CHAFSR_CPU |
                                CHAFSR_IVU | CHAFSR_UE |
                                CHAFSR_BERR | CHAFSR_TO))
                        recoverable = 0;
        }

        /* Log errors. */
        cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

        if (!recoverable)
                panic("Irrecoverable Fast-ECC error trap.\n");

        /* Flush E-cache to kick the error trap handlers out. */
        cheetah_flush_ecache();
}
1196 
/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 *
 * Returns 0 when no new error was triggered (intermittent problem),
 * 1 when a retry load came back clean, 2 when the error persists.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
        unsigned long orig_estate;
        unsigned long alias1, alias2;
        int ret;

        /* Make sure correctable error traps are disabled. */
        __asm__ __volatile__("ldxa      [%%g0] %2, %0\n\t"
                             "andn      %0, %1, %%g1\n\t"
                             "stxa      %%g1, [%%g0] %2\n\t"
                             "membar    #Sync"
                             : "=&r" (orig_estate)
                             : "i" (ESTATE_ERROR_CEEN),
                               "i" (ASI_ESTATE_ERROR_EN)
                             : "g1");

        /* We calculate alias addresses that will force the
         * cache line in question out of the E-cache.  Then
         * we bring it back in with an atomic instruction so
         * that we get it in some modified/exclusive state,
         * then we displace it again to try and get proper ECC
         * pushed back into the system.
         */
        physaddr &= ~(8UL - 1UL);
        alias1 = (ecache_flush_physbase +
                  (physaddr & ((ecache_flush_size >> 1) - 1)));
        alias2 = alias1 + (ecache_flush_size >> 1);
        __asm__ __volatile__("ldxa      [%0] %3, %%g0\n\t"
                             "ldxa      [%1] %3, %%g0\n\t"
                             "casxa     [%2] %3, %%g0, %%g0\n\t"
                             "membar    #StoreLoad | #StoreStore\n\t"
                             "ldxa      [%0] %3, %%g0\n\t"
                             "ldxa      [%1] %3, %%g0\n\t"
                             "membar    #Sync"
                             : /* no outputs */
                             : "r" (alias1), "r" (alias2),
                               "r" (physaddr), "i" (ASI_PHYS_USE_EC));

        /* Did that trigger another error? */
        if (cheetah_recheck_errors(NULL)) {
                /* Try one more time. */
                __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
                                     "membar #Sync"
                                     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
                if (cheetah_recheck_errors(NULL))
                        ret = 2;
                else
                        ret = 1;
        } else {
                /* No new error, intermittent problem. */
                ret = 0;
        }

        /* Restore error enables. */
        __asm__ __volatile__("stxa      %0, [%%g0] %1\n\t"
                             "membar    #Sync"
                             : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

        return ret;
}
1261 
1262 /* Return non-zero if PADDR is a valid physical memory address. */
1263 static int cheetah_check_main_memory(unsigned long paddr)
1264 {
1265         int i;
1266 
1267         for (i = 0; ; i++) {
1268                 if (sp_banks[i].num_bytes == 0)
1269                         break;
1270                 if (paddr >= sp_banks[i].base_addr &&
1271                     paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
1272                         return 1;
1273         }
1274         return 0;
1275 }
1276 
/* Trap handler (C half) for correctable ECC errors (CEE).  Attempts
 * an in-place scrub for memory CEs, flushes the affected caches,
 * re-enables the I-cache and CE reporting, then logs the event.
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
        struct cheetah_err_info local_snapshot, *p;
        int recoverable, is_memory;

        /* An error this early (before the log was allocated) is fatal. */
        p = cheetah_get_error_log(afsr);
        if (!p) {
                prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
                            afsr, afar);
                prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
                            smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
                prom_halt();
        }

        /* Grab snapshot of logged error. */
        memcpy(&local_snapshot, p, sizeof(local_snapshot));

        /* If the current trap snapshot does not match what the
         * trap handler passed along into our args, big trouble.
         * In such a case, mark the local copy as invalid.
         *
         * Else, it matches and we mark the afsr in the non-local
         * copy as invalid so we may log new error traps there.
         */
        if (p->afsr != afsr || p->afar != afar)
                local_snapshot.afsr = CHAFSR_INVALID;
        else
                p->afsr = CHAFSR_INVALID;

        is_memory = cheetah_check_main_memory(afar);

        if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
                /* XXX Might want to log the results of this operation
                 * XXX somewhere... -DaveM
                 */
                cheetah_fix_ce(afar);
        }

        {
                int flush_all, flush_line;

                /* Flush just the affected E-cache line when this is
                 * the only pending error, the whole E-cache otherwise.
                 */
                flush_all = flush_line = 0;
                if ((afsr & CHAFSR_EDC) != 0UL) {
                        if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
                                flush_line = 1;
                        else
                                flush_all = 1;
                } else if ((afsr & CHAFSR_CPC) != 0UL) {
                        if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
                                flush_line = 1;
                        else
                                flush_all = 1;
                }

                /* Trap handler only disabled I-cache, flush it. */
                cheetah_flush_icache();

                /* Re-enable I-cache */
                __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                                     "or %%g1, %1, %%g1\n\t"
                                     "stxa %%g1, [%%g0] %0\n\t"
                                     "membar #Sync"
                                     : /* no outputs */
                                     : "i" (ASI_DCU_CONTROL_REG),
                                     "i" (DCU_IC)
                                     : "g1");

                if (flush_all)
                        cheetah_flush_ecache();
                else if (flush_line)
                        cheetah_flush_ecache_line(afar);
        }

        /* Re-enable error reporting */
        __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                             "or %%g1, %1, %%g1\n\t"
                             "stxa %%g1, [%%g0] %0\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "i" (ASI_ESTATE_ERROR_EN),
                               "i" (ESTATE_ERROR_CEEN)
                             : "g1");

        /* Decide if we can continue after handling this trap and
         * logging the error.
         */
        recoverable = 1;
        if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
                recoverable = 0;

        /* Re-check AFSR/AFAR */
        (void) cheetah_recheck_errors(&local_snapshot);

        /* Log errors. */
        cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

        if (!recoverable)
                panic("Irrecoverable Correctable-ECC error trap.\n");
}
1376 
/* Handle a Cheetah deferred asynchronous error trap.
 *
 * @regs: trap-time register state
 * @afsr: Asynchronous Fault Status Register captured by the low-level
 *        trap handler
 * @afar: Asynchronous Fault Address Register captured likewise
 *
 * Grabs the logged error snapshot, flushes and re-enables the caches
 * and error reporting, logs the error, and then decides whether the
 * kernel can continue.  If the error is unrecoverable we panic; if it
 * hit main memory on a user access (or a kernel access covered by an
 * exception table entry) we pin the bad page and fix up the trapping
 * instruction instead.
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence.  A faulting PCI
	 * config/MMIO probe deliberately triggers a deferred bus error;
	 * clean up, flag the fault for the poke code, and skip the
	 * faulting instruction.
	 */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		/* Discard any residual error state before resuming. */
		(void) cheetah_recheck_errors(NULL);

		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	/* An error this early, before the per-cpu logs exist, is fatal. */
	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		/* If the only error bit set is EDU or BERR, one E-cache
		 * line flush suffices; otherwise flush the whole E-cache.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			     "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			unsigned long g2 = regs->u_regs[UREG_G2];
			unsigned long fixup = search_extables_range(regs->tpc, &g2);

			if (fixup != 0UL) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				/* Leak a reference on the bad page so it
				 * can never be freed and reused.
				 */
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = fixup;
					regs->tnpc = regs->tpc + 4;
					regs->u_regs[UREG_G2] = g2;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
1564 
/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0:        0=dcache,1=icache
 * Bit1:        0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 *
 * Recovery is simply to invalidate the affected cache (plus the
 * D-cache, which is always flushed) and turn the caches back on;
 * the data is re-fetched from memory/E-cache.  An unrecoverable
 * parity error (bit1 set) means the line was dirty and is lost,
 * so we panic.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
}
1604 
1605 void do_fpe_common(struct pt_regs *regs)
1606 {
1607         if (regs->tstate & TSTATE_PRIV) {
1608                 regs->tpc = regs->tnpc;
1609                 regs->tnpc += 4;
1610         } else {
1611                 unsigned long fsr = current_thread_info()->xfsr[0];
1612                 siginfo_t info;
1613 
1614                 if (test_thread_flag(TIF_32BIT)) {
1615                         regs->tpc &= 0xffffffff;
1616                         regs->tnpc &= 0xffffffff;
1617                 }
1618                 info.si_signo = SIGFPE;
1619                 info.si_errno = 0;
1620                 info.si_addr = (void *)regs->tpc;
1621                 info.si_trapno = 0;
1622                 info.si_code = __SI_FAULT;
1623                 if ((fsr & 0x1c000) == (1 << 14)) {
1624                         if (fsr & 0x10)
1625                                 info.si_code = FPE_FLTINV;
1626                         else if (fsr & 0x08)
1627                                 info.si_code = FPE_FLTOVF;
1628                         else if (fsr & 0x04)
1629                                 info.si_code = FPE_FLTUND;
1630                         else if (fsr & 0x02)
1631                                 info.si_code = FPE_FLTDIV;
1632                         else if (fsr & 0x01)
1633                                 info.si_code = FPE_FLTRES;
1634                 }
1635                 force_sig_info(SIGFPE, &info, current);
1636         }
1637 }
1638 
/* fp_exception_ieee_754 trap entry: IEEE exceptions need no emulation,
 * so go straight to the common FPU exception handling.
 */
void do_fpieee(struct pt_regs *regs)
{
	do_fpe_common(regs);
}
1643 
1644 extern int do_mathemu(struct pt_regs *, struct fpustate *);
1645 
1646 void do_fpother(struct pt_regs *regs)
1647 {
1648         struct fpustate *f = FPUSTATE;
1649         int ret = 0;
1650 
1651         switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
1652         case (2 << 14): /* unfinished_FPop */
1653         case (3 << 14): /* unimplemented_FPop */
1654                 ret = do_mathemu(regs, f);
1655                 break;
1656         }
1657         if (ret)
1658                 return;
1659         do_fpe_common(regs);
1660 }
1661 
1662 void do_tof(struct pt_regs *regs)
1663 {
1664         siginfo_t info;
1665 
1666         if (regs->tstate & TSTATE_PRIV)
1667                 die_if_kernel("Penguin overflow trap from kernel mode", regs);
1668         if (test_thread_flag(TIF_32BIT)) {
1669                 regs->tpc &= 0xffffffff;
1670                 regs->tnpc &= 0xffffffff;
1671         }
1672         info.si_signo = SIGEMT;
1673         info.si_errno = 0;
1674         info.si_code = EMT_TAGOVF;
1675         info.si_addr = (void *)regs->tpc;
1676         info.si_trapno = 0;
1677         force_sig_info(SIGEMT, &info, current);
1678 }
1679 
1680 void do_div0(struct pt_regs *regs)
1681 {
1682         siginfo_t info;
1683 
1684         if (regs->tstate & TSTATE_PRIV)
1685                 die_if_kernel("TL0: Kernel divide by zero.", regs);
1686         if (test_thread_flag(TIF_32BIT)) {
1687                 regs->tpc &= 0xffffffff;
1688                 regs->tnpc &= 0xffffffff;
1689         }
1690         info.si_signo = SIGFPE;
1691         info.si_errno = 0;
1692         info.si_code = FPE_INTDIV;
1693         info.si_addr = (void *)regs->tpc;
1694         info.si_trapno = 0;
1695         force_sig_info(SIGFPE, &info, current);
1696 }
1697 
/* Dump the instruction words around a kernel PC: three before, the
 * faulting one (bracketed with <...>), and five after.  Only called
 * with a kernel-mode PC, so the raw dereference of pc[i] is intended;
 * an unaligned PC is bogus and is silently ignored.
 */
void instruction_dump (unsigned int *pc)
{
	int i;

	if ((((unsigned long) pc) & 3))
		return;

	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++)
		printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
	printk("\n");
}
1710 
/* Dump the instruction words around a userspace PC: three before, the
 * faulting one, and five after, fetched safely with copy_from_user.
 *
 * Bug fix: the bracket highlighting was inverted relative to
 * instruction_dump() — it bracketed every word EXCEPT the faulting
 * instruction (buf[3]).  Now the faulting word alone gets <...>,
 * consistent with the kernel-mode dump.
 */
void user_instruction_dump (unsigned int *pc)
{
	int i;
	unsigned int buf[9];

	if ((((unsigned long) pc) & 3))
		return;

	if (copy_from_user(buf, pc - 3, sizeof(buf)))
		return;

	printk("Instruction DUMP:");
	for (i = 0; i < 9; i++)
		printk("%c%08x%c",i==3?'<':' ',buf[i],i==3?'>':' ');
	printk("\n");
}
1727 
1728 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
1729 {
1730         unsigned long pc, fp, thread_base, ksp;
1731         struct thread_info *tp = tsk->thread_info;
1732         struct reg_window *rw;
1733         int count = 0;
1734 
1735         ksp = (unsigned long) _ksp;
1736 
1737         if (tp == current_thread_info())
1738                 flushw_all();
1739 
1740         fp = ksp + STACK_BIAS;
1741         thread_base = (unsigned long) tp;
1742         do {
1743                 /* Bogus frame pointer? */
1744                 if (fp < (thread_base + sizeof(struct thread_info)) ||
1745                     fp >= (thread_base + THREAD_SIZE))
1746                         break;
1747                 rw = (struct reg_window *)fp;
1748                 pc = rw->ins[7];
1749                 printk("[%016lx] ", pc);
1750                 fp = rw->ins[6] + STACK_BIAS;
1751         } while (++count < 16);
1752         printk("\n");
1753 }
1754 
/* Backtrace a (non-NULL) task starting from its saved kernel stack
 * pointer.
 */
void show_trace_task(struct task_struct *tsk)
{
	if (tsk)
		show_stack(tsk,
			   (unsigned long *) tsk->thread_info->ksp);
}
1761 
/* Backtrace the current context, using the live frame pointer (%fp)
 * rather than the task's saved ksp.
 */
void dump_stack(void)
{
	unsigned long *ksp;

	/* Read the current frame pointer directly. */
	__asm__ __volatile__("mov	%%fp, %0"
			     : "=r" (ksp));
	show_stack(current, ksp);
}

EXPORT_SYMBOL(dump_stack);
1772 
/* Fatal trap report.  Despite the name this is called for fatal user
 * faults too: it prints the register state (and, for kernel traps, a
 * backtrace and instruction dump), then exits the task — SIGKILL for
 * kernel-mode traps, SIGSEGV otherwise.  It never returns.
 */
void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;		/* distinguishes repeated oopses in the log */
	extern void __show_regs(struct pt_regs * regs);
	extern void smp_report_regs(void);
	int count = 0;
	struct reg_window *lastrw;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
	/* Spill all register windows to the stack so the walk below
	 * sees current values.
	 */
	__asm__ __volatile__("flushw");
	__show_regs(regs);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		lastrw = (struct reg_window *)current;
		while (rw					&&
		       count++ < 30				&&
		       rw >= lastrw				&&
		       (char *) rw < ((char *) current)
		       + sizeof (union thread_union)		&&
		       !(((unsigned long) rw) & 0x7)) {
			printk("Caller[%016lx]", rw->ins[7]);
			print_symbol(": %s\n", rw->ins[7]);
			printk("\n");
			lastrw = rw;	/* frames must move upward through the stack */
			rw = (struct reg_window *)
				(rw->ins[6] + STACK_BIAS);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int *) regs->tpc);
	}
#ifdef CONFIG_SMP
	smp_report_regs();
#endif

	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
1828 
1829 extern int handle_popc(u32 insn, struct pt_regs *regs);
1830 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
1831 
1832 void do_illegal_instruction(struct pt_regs *regs)
1833 {
1834         unsigned long pc = regs->tpc;
1835         unsigned long tstate = regs->tstate;
1836         u32 insn;
1837         siginfo_t info;
1838 
1839         if (tstate & TSTATE_PRIV)
1840                 die_if_kernel("Kernel illegal instruction", regs);
1841         if (test_thread_flag(TIF_32BIT))
1842                 pc = (u32)pc;
1843         if (get_user(insn, (u32 *)pc) != -EFAULT) {
1844                 if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
1845                         if (handle_popc(insn, regs))
1846                                 return;
1847                 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
1848                         if (handle_ldf_stq(insn, regs))
1849                                 return;
1850                 }
1851         }
1852         info.si_signo = SIGILL;
1853         info.si_errno = 0;
1854         info.si_code = ILL_ILLOPC;
1855         info.si_addr = (void *)pc;
1856         info.si_trapno = 0;
1857         force_sig_info(SIGILL, &info, current);
1858 }
1859 
1860 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
1861 {
1862         siginfo_t info;
1863 
1864         if (regs->tstate & TSTATE_PRIV) {
1865                 extern void kernel_unaligned_trap(struct pt_regs *regs,
1866                                                   unsigned int insn, 
1867                                                   unsigned long sfar, unsigned long sfsr);
1868 
1869                 return kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc), sfar, sfsr);
1870         }
1871         info.si_signo = SIGBUS;
1872         info.si_errno = 0;
1873         info.si_code = BUS_ADRALN;
1874         info.si_addr = (void *)sfar;
1875         info.si_trapno = 0;
1876         force_sig_info(SIGBUS, &info, current);
1877 }
1878 
1879 void do_privop(struct pt_regs *regs)
1880 {
1881         siginfo_t info;
1882 
1883         if (test_thread_flag(TIF_32BIT)) {
1884                 regs->tpc &= 0xffffffff;
1885                 regs->tnpc &= 0xffffffff;
1886         }
1887         info.si_signo = SIGILL;
1888         info.si_errno = 0;
1889         info.si_code = ILL_PRVOPC;
1890         info.si_addr = (void *)regs->tpc;
1891         info.si_trapno = 0;
1892         force_sig_info(SIGILL, &info, current);
1893 }
1894 
/* Privileged action trap (e.g. privileged ASI access from userspace):
 * treated identically to a privileged opcode.
 */
void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
1899 
/* Trap level 1 stuff or other traps we should never see...
 *
 * These traps are fatal by definition: either they arrived at trap
 * level 1 (a trap taken while already handling a trap, where the
 * *_tl1 variants first dump the saved TL1 trap log) or they are
 * exception types the kernel never expects.  Each handler just
 * reports and dies via die_if_kernel(), which does not return for
 * kernel-mode traps.
 */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}

void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}

void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}

void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}

void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}

void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}

void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}

void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}

void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}

void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}

void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}

void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}

void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}

void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}

void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}

void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}

void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}
1999 
2000 void do_getpsr(struct pt_regs *regs)
2001 {
2002         regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2003         regs->tpc   = regs->tnpc;
2004         regs->tnpc += 4;
2005         if (test_thread_flag(TIF_32BIT)) {
2006                 regs->tpc &= 0xffffffff;
2007                 regs->tnpc &= 0xffffffff;
2008         }
2009 }
2010 
/* Deliberately undefined: referencing it turns a failed offset check
 * into a link-time error rather than a silent runtime problem.
 */
extern void thread_info_offsets_are_bolixed_dave(void);

/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check.  The TI_* constants are consumed
	 * by assembly trap entry code, so every one must match the C
	 * layout of struct thread_info exactly; TI_FPREGS must also be
	 * 64-byte aligned for the FP register block save/restore.
	 */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    (TI_FPREGS & (64 - 1)))
		thread_info_offsets_are_bolixed_dave();

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

#ifdef CONFIG_SMP
	current_thread_info()->cpu = hard_smp_processor_id();
#endif
}
2051 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp