~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/mips/mm/tlbex.c

Version: ~ [ linux-5.3-rc8 ] ~ [ linux-5.2.13 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.71 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.142 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.191 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.191 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.73 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  * This file is subject to the terms and conditions of the GNU General Public
  3  * License.  See the file "COPYING" in the main directory of this archive
  4  * for more details.
  5  *
  6  * Synthesize TLB refill handlers at runtime.
  7  *
  8  * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
  9  * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
 10  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 11  * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 12  * Copyright (C) 2011  MIPS Technologies, Inc.
 13  *
 14  * ... and the days got worse and worse and now you see
 15  * I've gone completly out of my mind.
 16  *
 17  * They're coming to take me a away haha
 18  * they're coming to take me a away hoho hihi haha
 19  * to the funny farm where code is beautiful all the time ...
 20  *
 21  * (Condolences to Napoleon XIV)
 22  */
 23 
 24 #include <linux/bug.h>
 25 #include <linux/kernel.h>
 26 #include <linux/types.h>
 27 #include <linux/smp.h>
 28 #include <linux/string.h>
 29 #include <linux/init.h>
 30 #include <linux/cache.h>
 31 
 32 #include <asm/cacheflush.h>
 33 #include <asm/pgtable.h>
 34 #include <asm/war.h>
 35 #include <asm/uasm.h>
 36 #include <asm/setup.h>
 37 
 38 /*
 39  * TLB load/store/modify handlers.
 40  *
 41  * Only the fastpath gets synthesized at runtime, the slowpath for
 42  * do_page_fault remains normal asm.
 43  */
 44 extern void tlb_do_page_fault_0(void);
 45 extern void tlb_do_page_fault_1(void);
 46 
 47 struct work_registers {
 48         int r1;
 49         int r2;
 50         int r3;
 51 };
 52 
 53 struct tlb_reg_save {
 54         unsigned long a;
 55         unsigned long b;
 56 } ____cacheline_aligned_in_smp;
 57 
 58 static struct tlb_reg_save handler_reg_save[NR_CPUS];
 59 
 60 static inline int r45k_bvahwbug(void)
 61 {
 62         /* XXX: We should probe for the presence of this bug, but we don't. */
 63         return 0;
 64 }
 65 
 66 static inline int r4k_250MHZhwbug(void)
 67 {
 68         /* XXX: We should probe for the presence of this bug, but we don't. */
 69         return 0;
 70 }
 71 
 72 static inline int __maybe_unused bcm1250_m3_war(void)
 73 {
 74         return BCM1250_M3_WAR;
 75 }
 76 
 77 static inline int __maybe_unused r10000_llsc_war(void)
 78 {
 79         return R10000_LLSC_WAR;
 80 }
 81 
 82 static int use_bbit_insns(void)
 83 {
 84         switch (current_cpu_type()) {
 85         case CPU_CAVIUM_OCTEON:
 86         case CPU_CAVIUM_OCTEON_PLUS:
 87         case CPU_CAVIUM_OCTEON2:
 88                 return 1;
 89         default:
 90                 return 0;
 91         }
 92 }
 93 
 94 static int use_lwx_insns(void)
 95 {
 96         switch (current_cpu_type()) {
 97         case CPU_CAVIUM_OCTEON2:
 98                 return 1;
 99         default:
100                 return 0;
101         }
102 }
103 #if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
104     CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
105 static bool scratchpad_available(void)
106 {
107         return true;
108 }
109 static int scratchpad_offset(int i)
110 {
111         /*
112          * CVMSEG starts at address -32768 and extends for
113          * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
114          */
115         i += 1; /* Kernel use starts at the top and works down. */
116         return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
117 }
118 #else
119 static bool scratchpad_available(void)
120 {
121         return false;
122 }
123 static int scratchpad_offset(int i)
124 {
125         BUG();
126         /* Really unreachable, but evidently some GCC want this. */
127         return 0;
128 }
129 #endif
130 /*
131  * Found by experiment: At least some revisions of the 4kc throw under
132  * some circumstances a machine check exception, triggered by invalid
133  * values in the index register.  Delaying the tlbp instruction until
134  * after the next branch,  plus adding an additional nop in front of
135  * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
136  * why; it's not an issue caused by the core RTL.
137  *
138  */
139 static int __cpuinit m4kc_tlbp_war(void)
140 {
141         return (current_cpu_data.processor_id & 0xffff00) ==
142                (PRID_COMP_MIPS | PRID_IMP_4KC);
143 }
144 
145 /* Handle labels (which must be positive integers). */
146 enum label_id {
147         label_second_part = 1,
148         label_leave,
149         label_vmalloc,
150         label_vmalloc_done,
151         label_tlbw_hazard_0,
152         label_split = label_tlbw_hazard_0 + 8,
153         label_tlbl_goaround1,
154         label_tlbl_goaround2,
155         label_nopage_tlbl,
156         label_nopage_tlbs,
157         label_nopage_tlbm,
158         label_smp_pgtable_change,
159         label_r3000_write_probe_fail,
160         label_large_segbits_fault,
161 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
162         label_tlb_huge_update,
163 #endif
164 };
165 
166 UASM_L_LA(_second_part)
167 UASM_L_LA(_leave)
168 UASM_L_LA(_vmalloc)
169 UASM_L_LA(_vmalloc_done)
170 /* _tlbw_hazard_x is handled differently.  */
171 UASM_L_LA(_split)
172 UASM_L_LA(_tlbl_goaround1)
173 UASM_L_LA(_tlbl_goaround2)
174 UASM_L_LA(_nopage_tlbl)
175 UASM_L_LA(_nopage_tlbs)
176 UASM_L_LA(_nopage_tlbm)
177 UASM_L_LA(_smp_pgtable_change)
178 UASM_L_LA(_r3000_write_probe_fail)
179 UASM_L_LA(_large_segbits_fault)
180 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
181 UASM_L_LA(_tlb_huge_update)
182 #endif
183 
184 static int __cpuinitdata hazard_instance;
185 
186 static void __cpuinit uasm_bgezl_hazard(u32 **p,
187                                         struct uasm_reloc **r,
188                                         int instance)
189 {
190         switch (instance) {
191         case 0 ... 7:
192                 uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
193                 return;
194         default:
195                 BUG();
196         }
197 }
198 
199 static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
200                                        u32 **p,
201                                        int instance)
202 {
203         switch (instance) {
204         case 0 ... 7:
205                 uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
206                 break;
207         default:
208                 BUG();
209         }
210 }
211 
212 /*
213  * pgtable bits are assigned dynamically depending on processor feature
214  * and statically based on kernel configuration.  This spits out the actual
215  * values the kernel is using.  Required to make sense from disassembled
216  * TLB exception handlers.
217  */
218 static void output_pgtable_bits_defines(void)
219 {
220 #define pr_define(fmt, ...)                                     \
221         pr_debug("#define " fmt, ##__VA_ARGS__)
222 
223         pr_debug("#include <asm/asm.h>\n");
224         pr_debug("#include <asm/regdef.h>\n");
225         pr_debug("\n");
226 
227         pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
228         pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT);
229         pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
230         pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
231         pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
232 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
233         pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
234         pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
235 #endif
236         if (cpu_has_rixi) {
237 #ifdef _PAGE_NO_EXEC_SHIFT
238                 pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
239 #endif
240 #ifdef _PAGE_NO_READ_SHIFT
241                 pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
242 #endif
243         }
244         pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
245         pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
246         pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
247         pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
248         pr_debug("\n");
249 }
250 
251 static inline void dump_handler(const char *symbol, const u32 *handler, int count)
252 {
253         int i;
254 
255         pr_debug("LEAF(%s)\n", symbol);
256 
257         pr_debug("\t.set push\n");
258         pr_debug("\t.set noreorder\n");
259 
260         for (i = 0; i < count; i++)
261                 pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);
262 
263         pr_debug("\t.set\tpop\n");
264 
265         pr_debug("\tEND(%s)\n", symbol);
266 }
267 
268 /* The only general purpose registers allowed in TLB handlers. */
269 #define K0              26
270 #define K1              27
271 
272 /* Some CP0 registers */
273 #define C0_INDEX        0, 0
274 #define C0_ENTRYLO0     2, 0
275 #define C0_TCBIND       2, 2
276 #define C0_ENTRYLO1     3, 0
277 #define C0_CONTEXT      4, 0
278 #define C0_PAGEMASK     5, 0
279 #define C0_BADVADDR     8, 0
280 #define C0_ENTRYHI      10, 0
281 #define C0_EPC          14, 0
282 #define C0_XCONTEXT     20, 0
283 
284 #ifdef CONFIG_64BIT
285 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
286 #else
287 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
288 #endif
289 
290 /* The worst case length of the handler is around 18 instructions for
291  * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
292  * Maximum space available is 32 instructions for R3000 and 64
293  * instructions for R4000.
294  *
295  * We deliberately chose a buffer size of 128, so we won't scribble
296  * over anything important on overflow before we panic.
297  */
298 static u32 tlb_handler[128] __cpuinitdata;
299 
300 /* simply assume worst case size for labels and relocs */
301 static struct uasm_label labels[128] __cpuinitdata;
302 static struct uasm_reloc relocs[128] __cpuinitdata;
303 
304 #ifdef CONFIG_64BIT
305 static int check_for_high_segbits __cpuinitdata;
306 #endif
307 
308 static int check_for_high_segbits __cpuinitdata;
309 
310 static unsigned int kscratch_used_mask __cpuinitdata;
311 
312 static int __cpuinit allocate_kscratch(void)
313 {
314         int r;
315         unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
316 
317         r = ffs(a);
318 
319         if (r == 0)
320                 return -1;
321 
322         r--; /* make it zero based */
323 
324         kscratch_used_mask |= (1 << r);
325 
326         return r;
327 }
328 
329 static int scratch_reg __cpuinitdata;
330 static int pgd_reg __cpuinitdata;
331 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
332 
333 static struct work_registers __cpuinit build_get_work_registers(u32 **p)
334 {
335         struct work_registers r;
336 
337         int smp_processor_id_reg;
338         int smp_processor_id_sel;
339         int smp_processor_id_shift;
340 
341         if (scratch_reg > 0) {
342                 /* Save in CPU local C0_KScratch? */
343                 UASM_i_MTC0(p, 1, 31, scratch_reg);
344                 r.r1 = K0;
345                 r.r2 = K1;
346                 r.r3 = 1;
347                 return r;
348         }
349 
350         if (num_possible_cpus() > 1) {
351 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
352                 smp_processor_id_shift = 51;
353                 smp_processor_id_reg = 20; /* XContext */
354                 smp_processor_id_sel = 0;
355 #else
356 # ifdef CONFIG_32BIT
357                 smp_processor_id_shift = 25;
358                 smp_processor_id_reg = 4; /* Context */
359                 smp_processor_id_sel = 0;
360 # endif
361 # ifdef CONFIG_64BIT
362                 smp_processor_id_shift = 26;
363                 smp_processor_id_reg = 4; /* Context */
364                 smp_processor_id_sel = 0;
365 # endif
366 #endif
367                 /* Get smp_processor_id */
368                 UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
369                 UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);
370 
371                 /* handler_reg_save index in K0 */
372                 UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
373 
374                 UASM_i_LA(p, K1, (long)&handler_reg_save);
375                 UASM_i_ADDU(p, K0, K0, K1);
376         } else {
377                 UASM_i_LA(p, K0, (long)&handler_reg_save);
378         }
379         /* K0 now points to save area, save $1 and $2  */
380         UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
381         UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
382 
383         r.r1 = K1;
384         r.r2 = 1;
385         r.r3 = 2;
386         return r;
387 }
388 
389 static void __cpuinit build_restore_work_registers(u32 **p)
390 {
391         if (scratch_reg > 0) {
392                 UASM_i_MFC0(p, 1, 31, scratch_reg);
393                 return;
394         }
395         /* K0 already points to save area, restore $1 and $2  */
396         UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
397         UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
398 }
399 
400 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
401 
402 /*
403  * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
404  * we cannot do r3000 under these circumstances.
405  *
406  * Declare pgd_current here instead of including mmu_context.h to avoid type
407  * conflicts for tlbmiss_handler_setup_pgd
408  */
409 extern unsigned long pgd_current[];
410 
411 /*
412  * The R3000 TLB handler is simple.
413  */
414 static void __cpuinit build_r3000_tlb_refill_handler(void)
415 {
416         long pgdc = (long)pgd_current;
417         u32 *p;
418 
419         memset(tlb_handler, 0, sizeof(tlb_handler));
420         p = tlb_handler;
421 
422         uasm_i_mfc0(&p, K0, C0_BADVADDR);
423         uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
424         uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
425         uasm_i_srl(&p, K0, K0, 22); /* load delay */
426         uasm_i_sll(&p, K0, K0, 2);
427         uasm_i_addu(&p, K1, K1, K0);
428         uasm_i_mfc0(&p, K0, C0_CONTEXT);
429         uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
430         uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
431         uasm_i_addu(&p, K1, K1, K0);
432         uasm_i_lw(&p, K0, 0, K1);
433         uasm_i_nop(&p); /* load delay */
434         uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
435         uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
436         uasm_i_tlbwr(&p); /* cp0 delay */
437         uasm_i_jr(&p, K1);
438         uasm_i_rfe(&p); /* branch delay */
439 
440         if (p > tlb_handler + 32)
441                 panic("TLB refill handler space exceeded");
442 
443         pr_debug("Wrote TLB refill handler (%u instructions).\n",
444                  (unsigned int)(p - tlb_handler));
445 
446         memcpy((void *)ebase, tlb_handler, 0x80);
447 
448         dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
449 }
450 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
451 
452 /*
453  * The R4000 TLB handler is much more complicated. We have two
454  * consecutive handler areas with 32 instructions space each.
455  * Since they aren't used at the same time, we can overflow in the
456  * other one.To keep things simple, we first assume linear space,
457  * then we relocate it to the final handler layout as needed.
458  */
459 static u32 final_handler[64] __cpuinitdata;
460 
461 /*
462  * Hazards
463  *
464  * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
465  * 2. A timing hazard exists for the TLBP instruction.
466  *
467  *      stalling_instruction
468  *      TLBP
469  *
470  * The JTLB is being read for the TLBP throughout the stall generated by the
471  * previous instruction. This is not really correct as the stalling instruction
472  * can modify the address used to access the JTLB.  The failure symptom is that
473  * the TLBP instruction will use an address created for the stalling instruction
474  * and not the address held in C0_ENHI and thus report the wrong results.
475  *
476  * The software work-around is to not allow the instruction preceding the TLBP
477  * to stall - make it an NOP or some other instruction guaranteed not to stall.
478  *
479  * Errata 2 will not be fixed.  This errata is also on the R5000.
480  *
481  * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
482  */
483 static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
484 {
485         switch (current_cpu_type()) {
486         /* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
487         case CPU_R4600:
488         case CPU_R4700:
489         case CPU_R5000:
490         case CPU_NEVADA:
491                 uasm_i_nop(p);
492                 uasm_i_tlbp(p);
493                 break;
494 
495         default:
496                 uasm_i_tlbp(p);
497                 break;
498         }
499 }
500 
501 /*
502  * Write random or indexed TLB entry, and care about the hazards from
503  * the preceding mtc0 and for the following eret.
504  */
505 enum tlb_write_entry { tlb_random, tlb_indexed };
506 
507 static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
508                                          struct uasm_reloc **r,
509                                          enum tlb_write_entry wmode)
510 {
511         void(*tlbw)(u32 **) = NULL;
512 
513         switch (wmode) {
514         case tlb_random: tlbw = uasm_i_tlbwr; break;
515         case tlb_indexed: tlbw = uasm_i_tlbwi; break;
516         }
517 
518         if (cpu_has_mips_r2) {
519                 /*
520                  * The architecture spec says an ehb is required here,
521                  * but a number of cores do not have the hazard and
522                  * using an ehb causes an expensive pipeline stall.
523                  */
524                 switch (current_cpu_type()) {
525                 case CPU_M14KC:
526                 case CPU_74K:
527                         break;
528 
529                 default:
530                         uasm_i_ehb(p);
531                         break;
532                 }
533                 tlbw(p);
534                 return;
535         }
536 
537         switch (current_cpu_type()) {
538         case CPU_R4000PC:
539         case CPU_R4000SC:
540         case CPU_R4000MC:
541         case CPU_R4400PC:
542         case CPU_R4400SC:
543         case CPU_R4400MC:
544                 /*
545                  * This branch uses up a mtc0 hazard nop slot and saves
546                  * two nops after the tlbw instruction.
547                  */
548                 uasm_bgezl_hazard(p, r, hazard_instance);
549                 tlbw(p);
550                 uasm_bgezl_label(l, p, hazard_instance);
551                 hazard_instance++;
552                 uasm_i_nop(p);
553                 break;
554 
555         case CPU_R4600:
556         case CPU_R4700:
557                 uasm_i_nop(p);
558                 tlbw(p);
559                 uasm_i_nop(p);
560                 break;
561 
562         case CPU_R5000:
563         case CPU_NEVADA:
564                 uasm_i_nop(p); /* QED specifies 2 nops hazard */
565                 uasm_i_nop(p); /* QED specifies 2 nops hazard */
566                 tlbw(p);
567                 break;
568 
569         case CPU_R4300:
570         case CPU_5KC:
571         case CPU_TX49XX:
572         case CPU_PR4450:
573         case CPU_XLR:
574                 uasm_i_nop(p);
575                 tlbw(p);
576                 break;
577 
578         case CPU_R10000:
579         case CPU_R12000:
580         case CPU_R14000:
581         case CPU_4KC:
582         case CPU_4KEC:
583         case CPU_M14KC:
584         case CPU_SB1:
585         case CPU_SB1A:
586         case CPU_4KSC:
587         case CPU_20KC:
588         case CPU_25KF:
589         case CPU_BMIPS32:
590         case CPU_BMIPS3300:
591         case CPU_BMIPS4350:
592         case CPU_BMIPS4380:
593         case CPU_BMIPS5000:
594         case CPU_LOONGSON2:
595         case CPU_R5500:
596                 if (m4kc_tlbp_war())
597                         uasm_i_nop(p);
598         case CPU_ALCHEMY:
599                 tlbw(p);
600                 break;
601 
602         case CPU_RM7000:
603                 uasm_i_nop(p);
604                 uasm_i_nop(p);
605                 uasm_i_nop(p);
606                 uasm_i_nop(p);
607                 tlbw(p);
608                 break;
609 
610         case CPU_VR4111:
611         case CPU_VR4121:
612         case CPU_VR4122:
613         case CPU_VR4181:
614         case CPU_VR4181A:
615                 uasm_i_nop(p);
616                 uasm_i_nop(p);
617                 tlbw(p);
618                 uasm_i_nop(p);
619                 uasm_i_nop(p);
620                 break;
621 
622         case CPU_VR4131:
623         case CPU_VR4133:
624         case CPU_R5432:
625                 uasm_i_nop(p);
626                 uasm_i_nop(p);
627                 tlbw(p);
628                 break;
629 
630         case CPU_JZRISC:
631                 tlbw(p);
632                 uasm_i_nop(p);
633                 break;
634 
635         default:
636                 panic("No TLB refill handler yet (CPU type: %d)",
637                       current_cpu_data.cputype);
638                 break;
639         }
640 }
641 
642 static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
643                                                                   unsigned int reg)
644 {
645         if (cpu_has_rixi) {
646                 UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
647         } else {
648 #ifdef CONFIG_64BIT_PHYS_ADDR
649                 uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
650 #else
651                 UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
652 #endif
653         }
654 }
655 
656 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
657 
658 static __cpuinit void build_restore_pagemask(u32 **p,
659                                              struct uasm_reloc **r,
660                                              unsigned int tmp,
661                                              enum label_id lid,
662                                              int restore_scratch)
663 {
664         if (restore_scratch) {
665                 /* Reset default page size */
666                 if (PM_DEFAULT_MASK >> 16) {
667                         uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
668                         uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
669                         uasm_i_mtc0(p, tmp, C0_PAGEMASK);
670                         uasm_il_b(p, r, lid);
671                 } else if (PM_DEFAULT_MASK) {
672                         uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
673                         uasm_i_mtc0(p, tmp, C0_PAGEMASK);
674                         uasm_il_b(p, r, lid);
675                 } else {
676                         uasm_i_mtc0(p, 0, C0_PAGEMASK);
677                         uasm_il_b(p, r, lid);
678                 }
679                 if (scratch_reg > 0)
680                         UASM_i_MFC0(p, 1, 31, scratch_reg);
681                 else
682                         UASM_i_LW(p, 1, scratchpad_offset(0), 0);
683         } else {
684                 /* Reset default page size */
685                 if (PM_DEFAULT_MASK >> 16) {
686                         uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
687                         uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
688                         uasm_il_b(p, r, lid);
689                         uasm_i_mtc0(p, tmp, C0_PAGEMASK);
690                 } else if (PM_DEFAULT_MASK) {
691                         uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
692                         uasm_il_b(p, r, lid);
693                         uasm_i_mtc0(p, tmp, C0_PAGEMASK);
694                 } else {
695                         uasm_il_b(p, r, lid);
696                         uasm_i_mtc0(p, 0, C0_PAGEMASK);
697                 }
698         }
699 }
700 
701 static __cpuinit void build_huge_tlb_write_entry(u32 **p,
702                                                  struct uasm_label **l,
703                                                  struct uasm_reloc **r,
704                                                  unsigned int tmp,
705                                                  enum tlb_write_entry wmode,
706                                                  int restore_scratch)
707 {
708         /* Set huge page tlb entry size */
709         uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
710         uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
711         uasm_i_mtc0(p, tmp, C0_PAGEMASK);
712 
713         build_tlb_write_entry(p, l, r, wmode);
714 
715         build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
716 }
717 
718 /*
719  * Check if Huge PTE is present, if so then jump to LABEL.
720  */
721 static void __cpuinit
722 build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
723                 unsigned int pmd, int lid)
724 {
725         UASM_i_LW(p, tmp, 0, pmd);
726         if (use_bbit_insns()) {
727                 uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
728         } else {
729                 uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
730                 uasm_il_bnez(p, r, tmp, lid);
731         }
732 }
733 
734 static __cpuinit void build_huge_update_entries(u32 **p,
735                                                 unsigned int pte,
736                                                 unsigned int tmp)
737 {
738         int small_sequence;
739 
740         /*
741          * A huge PTE describes an area the size of the
742          * configured huge page size. This is twice the
743          * of the large TLB entry size we intend to use.
744          * A TLB entry half the size of the configured
745          * huge page size is configured into entrylo0
746          * and entrylo1 to cover the contiguous huge PTE
747          * address space.
748          */
749         small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
750 
751         /* We can clobber tmp.  It isn't used after this.*/
752         if (!small_sequence)
753                 uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
754 
755         build_convert_pte_to_entrylo(p, pte);
756         UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
757         /* convert to entrylo1 */
758         if (small_sequence)
759                 UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
760         else
761                 UASM_i_ADDU(p, pte, pte, tmp);
762 
763         UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
764 }
765 
766 static __cpuinit void build_huge_handler_tail(u32 **p,
767                                               struct uasm_reloc **r,
768                                               struct uasm_label **l,
769                                               unsigned int pte,
770                                               unsigned int ptr)
771 {
772 #ifdef CONFIG_SMP
773         UASM_i_SC(p, pte, 0, ptr);
774         uasm_il_beqz(p, r, pte, label_tlb_huge_update);
775         UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
776 #else
777         UASM_i_SW(p, pte, 0, ptr);
778 #endif
779         build_huge_update_entries(p, pte, ptr);
780         build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
781 }
782 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
783 
784 #ifdef CONFIG_64BIT
785 /*
786  * TMP and PTR are scratch.
787  * TMP will be clobbered, PTR will hold the pmd entry.
788  */
789 static void __cpuinit
790 build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
791                  unsigned int tmp, unsigned int ptr)
792 {
793 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
794         long pgdc = (long)pgd_current;
795 #endif
796         /*
797          * The vmalloc handling is not in the hotpath.
798          */
799         uasm_i_dmfc0(p, tmp, C0_BADVADDR);
800 
801         if (check_for_high_segbits) {
802                 /*
803                  * The kernel currently implicitely assumes that the
804                  * MIPS SEGBITS parameter for the processor is
805                  * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
806                  * allocate virtual addresses outside the maximum
807                  * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
808                  * that doesn't prevent user code from accessing the
809                  * higher xuseg addresses.  Here, we make sure that
810                  * everything but the lower xuseg addresses goes down
811                  * the module_alloc/vmalloc path.
812                  */
813                 uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
814                 uasm_il_bnez(p, r, ptr, label_vmalloc);
815         } else {
816                 uasm_il_bltz(p, r, tmp, label_vmalloc);
817         }
818         /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
819 
820 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
821         if (pgd_reg != -1) {
822                 /* pgd is in pgd_reg */
823                 UASM_i_MFC0(p, ptr, 31, pgd_reg);
824         } else {
825                 /*
826                  * &pgd << 11 stored in CONTEXT [23..63].
827                  */
828                 UASM_i_MFC0(p, ptr, C0_CONTEXT);
829 
830                 /* Clear lower 23 bits of context. */
831                 uasm_i_dins(p, ptr, 0, 0, 23);
832 
833                 /* 1 0  1 0 1  << 6  xkphys cached */
834                 uasm_i_ori(p, ptr, ptr, 0x540);
835                 uasm_i_drotr(p, ptr, ptr, 11);
836         }
837 #elif defined(CONFIG_SMP)
838 # ifdef  CONFIG_MIPS_MT_SMTC
839         /*
840          * SMTC uses TCBind value as "CPU" index
841          */
842         uasm_i_mfc0(p, ptr, C0_TCBIND);
843         uasm_i_dsrl_safe(p, ptr, ptr, 19);
844 # else
845         /*
846          * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
847          * stored in CONTEXT.
848          */
849         uasm_i_dmfc0(p, ptr, C0_CONTEXT);
850         uasm_i_dsrl_safe(p, ptr, ptr, 23);
851 # endif
852         UASM_i_LA_mostly(p, tmp, pgdc);
853         uasm_i_daddu(p, ptr, ptr, tmp);
854         uasm_i_dmfc0(p, tmp, C0_BADVADDR);
855         uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
856 #else
857         UASM_i_LA_mostly(p, ptr, pgdc);
858         uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
859 #endif
860 
861         uasm_l_vmalloc_done(l, *p);
862 
863         /* get pgd offset in bytes */
864         uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
865 
866         uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
867         uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
868 #ifndef __PAGETABLE_PMD_FOLDED
869         uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
870         uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
871         uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
872         uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
873         uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
874 #endif
875 }
876 
877 /*
878  * BVADDR is the faulting address, PTR is scratch.
879  * PTR will hold the pgd for vmalloc.
880  */
881 static void __cpuinit
882 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
883                         unsigned int bvaddr, unsigned int ptr,
884                         enum vmalloc64_mode mode)
885 {
886         long swpd = (long)swapper_pg_dir;
887         int single_insn_swpd;
888         int did_vmalloc_branch = 0;
889 
890         single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
891 
892         uasm_l_vmalloc(l, *p);
893 
894         if (mode != not_refill && check_for_high_segbits) {
895                 if (single_insn_swpd) {
896                         uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
897                         uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
898                         did_vmalloc_branch = 1;
899                         /* fall through */
900                 } else {
901                         uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
902                 }
903         }
904         if (!did_vmalloc_branch) {
905                 if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
906                         uasm_il_b(p, r, label_vmalloc_done);
907                         uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
908                 } else {
909                         UASM_i_LA_mostly(p, ptr, swpd);
910                         uasm_il_b(p, r, label_vmalloc_done);
911                         if (uasm_in_compat_space_p(swpd))
912                                 uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
913                         else
914                                 uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
915                 }
916         }
917         if (mode != not_refill && check_for_high_segbits) {
918                 uasm_l_large_segbits_fault(l, *p);
919                 /*
920                  * We get here if we are an xsseg address, or if we are
921                  * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
922                  *
923                  * Ignoring xsseg (assume disabled so would generate
924                  * (address errors?), the only remaining possibility
925                  * is the upper xuseg addresses.  On processors with
926                  * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
927                  * addresses would have taken an address error. We try
928                  * to mimic that here by taking a load/istream page
929                  * fault.
930                  */
931                 UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
932                 uasm_i_jr(p, ptr);
933 
934                 if (mode == refill_scratch) {
935                         if (scratch_reg > 0)
936                                 UASM_i_MFC0(p, 1, 31, scratch_reg);
937                         else
938                                 UASM_i_LW(p, 1, scratchpad_offset(0), 0);
939                 } else {
940                         uasm_i_nop(p);
941                 }
942         }
943 }
944 
945 #else /* !CONFIG_64BIT */
946 
947 /*
948  * TMP and PTR are scratch.
949  * TMP will be clobbered, PTR will hold the pgd entry.
950  */
951 static void __cpuinit __maybe_unused
952 build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
953 {
954         long pgdc = (long)pgd_current;
955 
956         /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
957 #ifdef CONFIG_SMP
958 #ifdef  CONFIG_MIPS_MT_SMTC
959         /*
960          * SMTC uses TCBind value as "CPU" index
961          */
962         uasm_i_mfc0(p, ptr, C0_TCBIND);
963         UASM_i_LA_mostly(p, tmp, pgdc);
964         uasm_i_srl(p, ptr, ptr, 19);
965 #else
966         /*
967          * smp_processor_id() << 3 is stored in CONTEXT.
968          */
969         uasm_i_mfc0(p, ptr, C0_CONTEXT);
970         UASM_i_LA_mostly(p, tmp, pgdc);
971         uasm_i_srl(p, ptr, ptr, 23);
972 #endif
973         uasm_i_addu(p, ptr, tmp, ptr);
974 #else
975         UASM_i_LA_mostly(p, ptr, pgdc);
976 #endif
977         uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
978         uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
979         uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
980         uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
981         uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
982 }
983 
984 #endif /* !CONFIG_64BIT */
985 
986 static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
987 {
988         unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
989         unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
990 
991         switch (current_cpu_type()) {
992         case CPU_VR41XX:
993         case CPU_VR4111:
994         case CPU_VR4121:
995         case CPU_VR4122:
996         case CPU_VR4131:
997         case CPU_VR4181:
998         case CPU_VR4181A:
999         case CPU_VR4133:
1000                 shift += 2;
1001                 break;
1002 
1003         default:
1004                 break;
1005         }
1006 
1007         if (shift)
1008                 UASM_i_SRL(p, ctx, ctx, shift);
1009         uasm_i_andi(p, ctx, ctx, mask);
1010 }
1011 
1012 static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
1013 {
1014         /*
1015          * Bug workaround for the Nevada. It seems as if under certain
1016          * circumstances the move from cp0_context might produce a
1017          * bogus result when the mfc0 instruction and its consumer are
1018          * in a different cacheline or a load instruction, probably any
1019          * memory reference, is between them.
1020          */
1021         switch (current_cpu_type()) {
1022         case CPU_NEVADA:
1023                 UASM_i_LW(p, ptr, 0, ptr);
1024                 GET_CONTEXT(p, tmp); /* get context reg */
1025                 break;
1026 
1027         default:
1028                 GET_CONTEXT(p, tmp); /* get context reg */
1029                 UASM_i_LW(p, ptr, 0, ptr);
1030                 break;
1031         }
1032 
1033         build_adjust_context(p, tmp);
1034         UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
1035 }
1036 
1037 static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
1038                                         unsigned int ptep)
1039 {
1040         /*
1041          * 64bit address support (36bit on a 32bit CPU) in a 32bit
1042          * Kernel is a special case. Only a few CPUs use it.
1043          */
1044 #ifdef CONFIG_64BIT_PHYS_ADDR
1045         if (cpu_has_64bits) {
1046                 uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
1047                 uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
1048                 if (cpu_has_rixi) {
1049                         UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1050                         UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1051                         UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
1052                 } else {
1053                         uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
1054                         UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1055                         uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
1056                 }
1057                 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1058         } else {
1059                 int pte_off_even = sizeof(pte_t) / 2;
1060                 int pte_off_odd = pte_off_even + sizeof(pte_t);
1061 
1062                 /* The pte entries are pre-shifted */
1063                 uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
1064                 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1065                 uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
1066                 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1067         }
1068 #else
1069         UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
1070         UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
1071         if (r45k_bvahwbug())
1072                 build_tlb_probe_entry(p);
1073         if (cpu_has_rixi) {
1074                 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1075                 if (r4k_250MHZhwbug())
1076                         UASM_i_MTC0(p, 0, C0_ENTRYLO0);
1077                 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1078                 UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
1079         } else {
1080                 UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
1081                 if (r4k_250MHZhwbug())
1082                         UASM_i_MTC0(p, 0, C0_ENTRYLO0);
1083                 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1084                 UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
1085                 if (r45k_bvahwbug())
1086                         uasm_i_mfc0(p, tmp, C0_INDEX);
1087         }
1088         if (r4k_250MHZhwbug())
1089                 UASM_i_MTC0(p, 0, C0_ENTRYLO1);
1090         UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1091 #endif
1092 }
1093 
1094 struct mips_huge_tlb_info {
1095         int huge_pte;
1096         int restore_scratch;
1097 };
1098 
1099 static struct mips_huge_tlb_info __cpuinit
1100 build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
1101                                struct uasm_reloc **r, unsigned int tmp,
1102                                unsigned int ptr, int c0_scratch)
1103 {
1104         struct mips_huge_tlb_info rv;
1105         unsigned int even, odd;
1106         int vmalloc_branch_delay_filled = 0;
1107         const int scratch = 1; /* Our extra working register */
1108 
1109         rv.huge_pte = scratch;
1110         rv.restore_scratch = 0;
1111 
1112         if (check_for_high_segbits) {
1113                 UASM_i_MFC0(p, tmp, C0_BADVADDR);
1114 
1115                 if (pgd_reg != -1)
1116                         UASM_i_MFC0(p, ptr, 31, pgd_reg);
1117                 else
1118                         UASM_i_MFC0(p, ptr, C0_CONTEXT);
1119 
1120                 if (c0_scratch >= 0)
1121                         UASM_i_MTC0(p, scratch, 31, c0_scratch);
1122                 else
1123                         UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1124 
1125                 uasm_i_dsrl_safe(p, scratch, tmp,
1126                                  PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
1127                 uasm_il_bnez(p, r, scratch, label_vmalloc);
1128 
1129                 if (pgd_reg == -1) {
1130                         vmalloc_branch_delay_filled = 1;
1131                         /* Clear lower 23 bits of context. */
1132                         uasm_i_dins(p, ptr, 0, 0, 23);
1133                 }
1134         } else {
1135                 if (pgd_reg != -1)
1136                         UASM_i_MFC0(p, ptr, 31, pgd_reg);
1137                 else
1138                         UASM_i_MFC0(p, ptr, C0_CONTEXT);
1139 
1140                 UASM_i_MFC0(p, tmp, C0_BADVADDR);
1141 
1142                 if (c0_scratch >= 0)
1143                         UASM_i_MTC0(p, scratch, 31, c0_scratch);
1144                 else
1145                         UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1146 
1147                 if (pgd_reg == -1)
1148                         /* Clear lower 23 bits of context. */
1149                         uasm_i_dins(p, ptr, 0, 0, 23);
1150 
1151                 uasm_il_bltz(p, r, tmp, label_vmalloc);
1152         }
1153 
1154         if (pgd_reg == -1) {
1155                 vmalloc_branch_delay_filled = 1;
1156                 /* 1 0  1 0 1  << 6  xkphys cached */
1157                 uasm_i_ori(p, ptr, ptr, 0x540);
1158                 uasm_i_drotr(p, ptr, ptr, 11);
1159         }
1160 
1161 #ifdef __PAGETABLE_PMD_FOLDED
1162 #define LOC_PTEP scratch
1163 #else
1164 #define LOC_PTEP ptr
1165 #endif
1166 
1167         if (!vmalloc_branch_delay_filled)
1168                 /* get pgd offset in bytes */
1169                 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1170 
1171         uasm_l_vmalloc_done(l, *p);
1172 
1173         /*
1174          *                         tmp          ptr
1175          * fall-through case =   badvaddr  *pgd_current
1176          * vmalloc case      =   badvaddr  swapper_pg_dir
1177          */
1178 
1179         if (vmalloc_branch_delay_filled)
1180                 /* get pgd offset in bytes */
1181                 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1182 
1183 #ifdef __PAGETABLE_PMD_FOLDED
1184         GET_CONTEXT(p, tmp); /* get context reg */
1185 #endif
1186         uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
1187 
1188         if (use_lwx_insns()) {
1189                 UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
1190         } else {
1191                 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
1192                 uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
1193         }
1194 
1195 #ifndef __PAGETABLE_PMD_FOLDED
1196         /* get pmd offset in bytes */
1197         uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
1198         uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
1199         GET_CONTEXT(p, tmp); /* get context reg */
1200 
1201         if (use_lwx_insns()) {
1202                 UASM_i_LWX(p, scratch, scratch, ptr);
1203         } else {
1204                 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
1205                 UASM_i_LW(p, scratch, 0, ptr);
1206         }
1207 #endif
1208         /* Adjust the context during the load latency. */
1209         build_adjust_context(p, tmp);
1210 
1211 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1212         uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
1213         /*
1214          * The in the LWX case we don't want to do the load in the
1215          * delay slot.  It cannot issue in the same cycle and may be
1216          * speculative and unneeded.
1217          */
1218         if (use_lwx_insns())
1219                 uasm_i_nop(p);
1220 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
1221 
1222 
1223         /* build_update_entries */
1224         if (use_lwx_insns()) {
1225                 even = ptr;
1226                 odd = tmp;
1227                 UASM_i_LWX(p, even, scratch, tmp);
1228                 UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
1229                 UASM_i_LWX(p, odd, scratch, tmp);
1230         } else {
1231                 UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
1232                 even = tmp;
1233                 odd = ptr;
1234                 UASM_i_LW(p, even, 0, ptr); /* get even pte */
1235                 UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
1236         }
1237         if (cpu_has_rixi) {
1238                 uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
1239                 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1240                 uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
1241         } else {
1242                 uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
1243                 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1244                 uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
1245         }
1246         UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
1247 
1248         if (c0_scratch >= 0) {
1249                 UASM_i_MFC0(p, scratch, 31, c0_scratch);
1250                 build_tlb_write_entry(p, l, r, tlb_random);
1251                 uasm_l_leave(l, *p);
1252                 rv.restore_scratch = 1;
1253         } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13)  {
1254                 build_tlb_write_entry(p, l, r, tlb_random);
1255                 uasm_l_leave(l, *p);
1256                 UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1257         } else {
1258                 UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1259                 build_tlb_write_entry(p, l, r, tlb_random);
1260                 uasm_l_leave(l, *p);
1261                 rv.restore_scratch = 1;
1262         }
1263 
1264         uasm_i_eret(p); /* return from trap */
1265 
1266         return rv;
1267 }
1268 
1269 /*
1270  * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
1271  * because EXL == 0.  If we wrap, we can also use the 32 instruction
1272  * slots before the XTLB refill exception handler which belong to the
1273  * unused TLB refill exception.
1274  */
1275 #define MIPS64_REFILL_INSNS 32
1276 
1277 static void __cpuinit build_r4000_tlb_refill_handler(void)
1278 {
1279         u32 *p = tlb_handler;
1280         struct uasm_label *l = labels;
1281         struct uasm_reloc *r = relocs;
1282         u32 *f;
1283         unsigned int final_len;
1284         struct mips_huge_tlb_info htlb_info __maybe_unused;
1285         enum vmalloc64_mode vmalloc_mode __maybe_unused;
1286 
1287         memset(tlb_handler, 0, sizeof(tlb_handler));
1288         memset(labels, 0, sizeof(labels));
1289         memset(relocs, 0, sizeof(relocs));
1290         memset(final_handler, 0, sizeof(final_handler));
1291 
1292         if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
1293                 htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
1294                                                           scratch_reg);
1295                 vmalloc_mode = refill_scratch;
1296         } else {
1297                 htlb_info.huge_pte = K0;
1298                 htlb_info.restore_scratch = 0;
1299                 vmalloc_mode = refill_noscratch;
1300                 /*
1301                  * create the plain linear handler
1302                  */
1303                 if (bcm1250_m3_war()) {
1304                         unsigned int segbits = 44;
1305 
1306                         uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1307                         uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
1308                         uasm_i_xor(&p, K0, K0, K1);
1309                         uasm_i_dsrl_safe(&p, K1, K0, 62);
1310                         uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
1311                         uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
1312                         uasm_i_or(&p, K0, K0, K1);
1313                         uasm_il_bnez(&p, &r, K0, label_leave);
1314                         /* No need for uasm_i_nop */
1315                 }
1316 
1317 #ifdef CONFIG_64BIT
1318                 build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
1319 #else
1320                 build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
1321 #endif
1322 
1323 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1324                 build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
1325 #endif
1326 
1327                 build_get_ptep(&p, K0, K1);
1328                 build_update_entries(&p, K0, K1);
1329                 build_tlb_write_entry(&p, &l, &r, tlb_random);
1330                 uasm_l_leave(&l, p);
1331                 uasm_i_eret(&p); /* return from trap */
1332         }
1333 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1334         uasm_l_tlb_huge_update(&l, p);
1335         build_huge_update_entries(&p, htlb_info.huge_pte, K1);
1336         build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
1337                                    htlb_info.restore_scratch);
1338 #endif
1339 
1340 #ifdef CONFIG_64BIT
1341         build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
1342 #endif
1343 
1344         /*
1345          * Overflow check: For the 64bit handler, we need at least one
1346          * free instruction slot for the wrap-around branch. In worst
1347          * case, if the intended insertion point is a delay slot, we
1348          * need three, with the second nop'ed and the third being
1349          * unused.
1350          */
1351         /* Loongson2 ebase is different than r4k, we have more space */
1352 #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
1353         if ((p - tlb_handler) > 64)
1354                 panic("TLB refill handler space exceeded");
1355 #else
1356         if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
1357             || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
1358                 && uasm_insn_has_bdelay(relocs,
1359                                         tlb_handler + MIPS64_REFILL_INSNS - 3)))
1360                 panic("TLB refill handler space exceeded");
1361 #endif
1362 
1363         /*
1364          * Now fold the handler in the TLB refill handler space.
1365          */
1366 #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
1367         f = final_handler;
1368         /* Simplest case, just copy the handler. */
1369         uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1370         final_len = p - tlb_handler;
1371 #else /* CONFIG_64BIT */
1372         f = final_handler + MIPS64_REFILL_INSNS;
1373         if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
1374                 /* Just copy the handler. */
1375                 uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1376                 final_len = p - tlb_handler;
1377         } else {
1378 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1379                 const enum label_id ls = label_tlb_huge_update;
1380 #else
1381                 const enum label_id ls = label_vmalloc;
1382 #endif
1383                 u32 *split;
1384                 int ov = 0;
1385                 int i;
1386 
1387                 for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
1388                         ;
1389                 BUG_ON(i == ARRAY_SIZE(labels));
1390                 split = labels[i].addr;
1391 
1392                 /*
1393                  * See if we have overflown one way or the other.
1394                  */
1395                 if (split > tlb_handler + MIPS64_REFILL_INSNS ||
1396                     split < p - MIPS64_REFILL_INSNS)
1397                         ov = 1;
1398 
1399                 if (ov) {
1400                         /*
1401                          * Split two instructions before the end.  One
1402                          * for the branch and one for the instruction
1403                          * in the delay slot.
1404                          */
1405                         split = tlb_handler + MIPS64_REFILL_INSNS - 2;
1406 
1407                         /*
1408                          * If the branch would fall in a delay slot,
1409                          * we must back up an additional instruction
1410                          * so that it is no longer in a delay slot.
1411                          */
1412                         if (uasm_insn_has_bdelay(relocs, split - 1))
1413                                 split--;
1414                 }
1415                 /* Copy first part of the handler. */
1416                 uasm_copy_handler(relocs, labels, tlb_handler, split, f);
1417                 f += split - tlb_handler;
1418 
1419                 if (ov) {
1420                         /* Insert branch. */
1421                         uasm_l_split(&l, final_handler);
1422                         uasm_il_b(&f, &r, label_split);
1423                         if (uasm_insn_has_bdelay(relocs, split))
1424                                 uasm_i_nop(&f);
1425                         else {
1426                                 uasm_copy_handler(relocs, labels,
1427                                                   split, split + 1, f);
1428                                 uasm_move_labels(labels, f, f + 1, -1);
1429                                 f++;
1430                                 split++;
1431                         }
1432                 }
1433 
1434                 /* Copy the rest of the handler. */
1435                 uasm_copy_handler(relocs, labels, split, p, final_handler);
1436                 final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
1437                             (p - split);
1438         }
1439 #endif /* CONFIG_64BIT */
1440 
1441         uasm_resolve_relocs(relocs, labels);
1442         pr_debug("Wrote TLB refill handler (%u instructions).\n",
1443                  final_len);
1444 
1445         memcpy((void *)ebase, final_handler, 0x100);
1446 
1447         dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
1448 }
1449 
1450 /*
1451  * 128 instructions for the fastpath handler is generous and should
1452  * never be exceeded.
1453  */
1454 #define FASTPATH_SIZE 128
1455 
1456 u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
1457 u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
1458 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
1459 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
1460 u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
1461 
1462 static void __cpuinit build_r4000_setup_pgd(void)
1463 {
1464         const int a0 = 4;
1465         const int a1 = 5;
1466         u32 *p = tlbmiss_handler_setup_pgd;
1467         struct uasm_label *l = labels;
1468         struct uasm_reloc *r = relocs;
1469 
1470         memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
1471         memset(labels, 0, sizeof(labels));
1472         memset(relocs, 0, sizeof(relocs));
1473 
1474         pgd_reg = allocate_kscratch();
1475 
1476         if (pgd_reg == -1) {
1477                 /* PGD << 11 in c0_Context */
1478                 /*
1479                  * If it is a ckseg0 address, convert to a physical
1480                  * address.  Shifting right by 29 and adding 4 will
1481                  * result in zero for these addresses.
1482                  *
1483                  */
1484                 UASM_i_SRA(&p, a1, a0, 29);
1485                 UASM_i_ADDIU(&p, a1, a1, 4);
1486                 uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
1487                 uasm_i_nop(&p);
1488                 uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
1489                 uasm_l_tlbl_goaround1(&l, p);
1490                 UASM_i_SLL(&p, a0, a0, 11);
1491                 uasm_i_jr(&p, 31);
1492                 UASM_i_MTC0(&p, a0, C0_CONTEXT);
1493         } else {
1494                 /* PGD in c0_KScratch */
1495                 uasm_i_jr(&p, 31);
1496                 UASM_i_MTC0(&p, a0, 31, pgd_reg);
1497         }
1498         if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
1499                 panic("tlbmiss_handler_setup_pgd space exceeded");
1500         uasm_resolve_relocs(relocs, labels);
1501         pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
1502                  (unsigned int)(p - tlbmiss_handler_setup_pgd));
1503 
1504         dump_handler("tlbmiss_handler",
1505                      tlbmiss_handler_setup_pgd,
1506                      ARRAY_SIZE(tlbmiss_handler_setup_pgd));
1507 }
1508 #endif
1509 
1510 static void __cpuinit
1511 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
1512 {
1513 #ifdef CONFIG_SMP
1514 # ifdef CONFIG_64BIT_PHYS_ADDR
1515         if (cpu_has_64bits)
1516                 uasm_i_lld(p, pte, 0, ptr);
1517         else
1518 # endif
1519                 UASM_i_LL(p, pte, 0, ptr);
1520 #else
1521 # ifdef CONFIG_64BIT_PHYS_ADDR
1522         if (cpu_has_64bits)
1523                 uasm_i_ld(p, pte, 0, ptr);
1524         else
1525 # endif
1526                 UASM_i_LW(p, pte, 0, ptr);
1527 #endif
1528 }
1529 
1530 static void __cpuinit
1531 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1532         unsigned int mode)
1533 {
1534 #ifdef CONFIG_64BIT_PHYS_ADDR
1535         unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
1536 #endif
1537 
1538         uasm_i_ori(p, pte, pte, mode);
1539 #ifdef CONFIG_SMP
1540 # ifdef CONFIG_64BIT_PHYS_ADDR
1541         if (cpu_has_64bits)
1542                 uasm_i_scd(p, pte, 0, ptr);
1543         else
1544 # endif
1545                 UASM_i_SC(p, pte, 0, ptr);
1546 
1547         if (r10000_llsc_war())
1548                 uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
1549         else
1550                 uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1551 
1552 # ifdef CONFIG_64BIT_PHYS_ADDR
1553         if (!cpu_has_64bits) {
1554                 /* no uasm_i_nop needed */
1555                 uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
1556                 uasm_i_ori(p, pte, pte, hwmode);
1557                 uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
1558                 uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1559                 /* no uasm_i_nop needed */
1560                 uasm_i_lw(p, pte, 0, ptr);
1561         } else
1562                 uasm_i_nop(p);
1563 # else
1564         uasm_i_nop(p);
1565 # endif
1566 #else
1567 # ifdef CONFIG_64BIT_PHYS_ADDR
1568         if (cpu_has_64bits)
1569                 uasm_i_sd(p, pte, 0, ptr);
1570         else
1571 # endif
1572                 UASM_i_SW(p, pte, 0, ptr);
1573 
1574 # ifdef CONFIG_64BIT_PHYS_ADDR
1575         if (!cpu_has_64bits) {
1576                 uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
1577                 uasm_i_ori(p, pte, pte, hwmode);
1578                 uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
1579                 uasm_i_lw(p, pte, 0, ptr);
1580         }
1581 # endif
1582 #endif
1583 }
1584 
1585 /*
1586  * Check if PTE is present, if not then jump to LABEL. PTR points to
1587  * the page table where this PTE is located, PTE will be re-loaded
1588  * with it's original value.
1589  */
1590 static void __cpuinit
1591 build_pte_present(u32 **p, struct uasm_reloc **r,
1592                   int pte, int ptr, int scratch, enum label_id lid)
1593 {
1594         int t = scratch >= 0 ? scratch : pte;
1595 
1596         if (cpu_has_rixi) {
1597                 if (use_bbit_insns()) {
1598                         uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
1599                         uasm_i_nop(p);
1600                 } else {
1601                         uasm_i_andi(p, t, pte, _PAGE_PRESENT);
1602                         uasm_il_beqz(p, r, t, lid);
1603                         if (pte == t)
1604                                 /* You lose the SMP race :-(*/
1605                                 iPTE_LW(p, pte, ptr);
1606                 }
1607         } else {
1608                 uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
1609                 uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
1610                 uasm_il_bnez(p, r, t, lid);
1611                 if (pte == t)
1612                         /* You lose the SMP race :-(*/
1613                         iPTE_LW(p, pte, ptr);
1614         }
1615 }
1616 
1617 /* Make PTE valid, store result in PTR. */
1618 static void __cpuinit
1619 build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
1620                  unsigned int ptr)
1621 {
1622         unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
1623 
1624         iPTE_SW(p, r, pte, ptr, mode);
1625 }
1626 
1627 /*
1628  * Check if PTE can be written to, if not branch to LABEL. Regardless
1629  * restore PTE with value from PTR when done.
1630  */
1631 static void __cpuinit
1632 build_pte_writable(u32 **p, struct uasm_reloc **r,
1633                    unsigned int pte, unsigned int ptr, int scratch,
1634                    enum label_id lid)
1635 {
1636         int t = scratch >= 0 ? scratch : pte;
1637 
1638         uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
1639         uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
1640         uasm_il_bnez(p, r, t, lid);
1641         if (pte == t)
1642                 /* You lose the SMP race :-(*/
1643                 iPTE_LW(p, pte, ptr);
1644         else
1645                 uasm_i_nop(p);
1646 }
1647 
1648 /* Make PTE writable, update software status bits as well, then store
1649  * at PTR.
1650  */
1651 static void __cpuinit
1652 build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
1653                  unsigned int ptr)
1654 {
1655         unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
1656                              | _PAGE_DIRTY);
1657 
1658         iPTE_SW(p, r, pte, ptr, mode);
1659 }
1660 
1661 /*
1662  * Check if PTE can be modified, if not branch to LABEL. Regardless
1663  * restore PTE with value from PTR when done.
1664  */
1665 static void __cpuinit
1666 build_pte_modifiable(u32 **p, struct uasm_reloc **r,
1667                      unsigned int pte, unsigned int ptr, int scratch,
1668                      enum label_id lid)
1669 {
1670         if (use_bbit_insns()) {
1671                 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
1672                 uasm_i_nop(p);
1673         } else {
1674                 int t = scratch >= 0 ? scratch : pte;
1675                 uasm_i_andi(p, t, pte, _PAGE_WRITE);
1676                 uasm_il_beqz(p, r, t, lid);
1677                 if (pte == t)
1678                         /* You lose the SMP race :-(*/
1679                         iPTE_LW(p, pte, ptr);
1680         }
1681 }
1682 
1683 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1684 
1685 
1686 /*
1687  * R3000 style TLB load/store/modify handlers.
1688  */
1689 
1690 /*
1691  * This places the pte into ENTRYLO0 and writes it with tlbwi.
1692  * Then it returns.
1693  */
1694 static void __cpuinit
1695 build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
1696 {
1697         uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1698         uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
1699         uasm_i_tlbwi(p);
1700         uasm_i_jr(p, tmp);
1701         uasm_i_rfe(p); /* branch delay */
1702 }
1703 
1704 /*
1705  * This places the pte into ENTRYLO0 and writes it with tlbwi
1706  * or tlbwr as appropriate.  This is because the index register
1707  * may have the probe fail bit set as a result of a trap on a
1708  * kseg2 access, i.e. without refill.  Then it returns.
1709  */
1710 static void __cpuinit
1711 build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
1712                              struct uasm_reloc **r, unsigned int pte,
1713                              unsigned int tmp)
1714 {
1715         uasm_i_mfc0(p, tmp, C0_INDEX);
1716         uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1717         uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
1718         uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
1719         uasm_i_tlbwi(p); /* cp0 delay */
1720         uasm_i_jr(p, tmp);
1721         uasm_i_rfe(p); /* branch delay */
1722         uasm_l_r3000_write_probe_fail(l, *p);
1723         uasm_i_tlbwr(p); /* cp0 delay */
1724         uasm_i_jr(p, tmp);
1725         uasm_i_rfe(p); /* branch delay */
1726 }
1727 
1728 static void __cpuinit
1729 build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
1730                                    unsigned int ptr)
1731 {
1732         long pgdc = (long)pgd_current;
1733 
1734         uasm_i_mfc0(p, pte, C0_BADVADDR);
1735         uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
1736         uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
1737         uasm_i_srl(p, pte, pte, 22); /* load delay */
1738         uasm_i_sll(p, pte, pte, 2);
1739         uasm_i_addu(p, ptr, ptr, pte);
1740         uasm_i_mfc0(p, pte, C0_CONTEXT);
1741         uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
1742         uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
1743         uasm_i_addu(p, ptr, ptr, pte);
1744         uasm_i_lw(p, pte, 0, ptr);
1745         uasm_i_tlbp(p); /* load delay */
1746 }
1747 
1748 static void __cpuinit build_r3000_tlb_load_handler(void)
1749 {
1750         u32 *p = handle_tlbl;
1751         struct uasm_label *l = labels;
1752         struct uasm_reloc *r = relocs;
1753 
1754         memset(handle_tlbl, 0, sizeof(handle_tlbl));
1755         memset(labels, 0, sizeof(labels));
1756         memset(relocs, 0, sizeof(relocs));
1757 
1758         build_r3000_tlbchange_handler_head(&p, K0, K1);
1759         build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
1760         uasm_i_nop(&p); /* load delay */
1761         build_make_valid(&p, &r, K0, K1);
1762         build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
1763 
1764         uasm_l_nopage_tlbl(&l, p);
1765         uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
1766         uasm_i_nop(&p);
1767 
1768         if ((p - handle_tlbl) > FASTPATH_SIZE)
1769                 panic("TLB load handler fastpath space exceeded");
1770 
1771         uasm_resolve_relocs(relocs, labels);
1772         pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
1773                  (unsigned int)(p - handle_tlbl));
1774 
1775         dump_handler("r3000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl));
1776 }
1777 
1778 static void __cpuinit build_r3000_tlb_store_handler(void)
1779 {
1780         u32 *p = handle_tlbs;
1781         struct uasm_label *l = labels;
1782         struct uasm_reloc *r = relocs;
1783 
1784         memset(handle_tlbs, 0, sizeof(handle_tlbs));
1785         memset(labels, 0, sizeof(labels));
1786         memset(relocs, 0, sizeof(relocs));
1787 
1788         build_r3000_tlbchange_handler_head(&p, K0, K1);
1789         build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
1790         uasm_i_nop(&p); /* load delay */
1791         build_make_write(&p, &r, K0, K1);
1792         build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
1793 
1794         uasm_l_nopage_tlbs(&l, p);
1795         uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1796         uasm_i_nop(&p);
1797 
1798         if ((p - handle_tlbs) > FASTPATH_SIZE)
1799                 panic("TLB store handler fastpath space exceeded");
1800 
1801         uasm_resolve_relocs(relocs, labels);
1802         pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
1803                  (unsigned int)(p - handle_tlbs));
1804 
1805         dump_handler("r3000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs));
1806 }
1807 
1808 static void __cpuinit build_r3000_tlb_modify_handler(void)
1809 {
1810         u32 *p = handle_tlbm;
1811         struct uasm_label *l = labels;
1812         struct uasm_reloc *r = relocs;
1813 
1814         memset(handle_tlbm, 0, sizeof(handle_tlbm));
1815         memset(labels, 0, sizeof(labels));
1816         memset(relocs, 0, sizeof(relocs));
1817 
1818         build_r3000_tlbchange_handler_head(&p, K0, K1);
1819         build_pte_modifiable(&p, &r, K0, K1,  -1, label_nopage_tlbm);
1820         uasm_i_nop(&p); /* load delay */
1821         build_make_write(&p, &r, K0, K1);
1822         build_r3000_pte_reload_tlbwi(&p, K0, K1);
1823 
1824         uasm_l_nopage_tlbm(&l, p);
1825         uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1826         uasm_i_nop(&p);
1827 
1828         if ((p - handle_tlbm) > FASTPATH_SIZE)
1829                 panic("TLB modify handler fastpath space exceeded");
1830 
1831         uasm_resolve_relocs(relocs, labels);
1832         pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
1833                  (unsigned int)(p - handle_tlbm));
1834 
1835         dump_handler("r3000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm));
1836 }
1837 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
1838 
1839 /*
1840  * R4000 style TLB load/store/modify handlers.
1841  */
1842 static struct work_registers __cpuinit
1843 build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
1844                                    struct uasm_reloc **r)
1845 {
1846         struct work_registers wr = build_get_work_registers(p);
1847 
1848 #ifdef CONFIG_64BIT
1849         build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
1850 #else
1851         build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
1852 #endif
1853 
1854 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1855         /*
1856          * For huge tlb entries, pmd doesn't contain an address but
1857          * instead contains the tlb pte. Check the PAGE_HUGE bit and
1858          * see if we need to jump to huge tlb processing.
1859          */
1860         build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
1861 #endif
1862 
1863         UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
1864         UASM_i_LW(p, wr.r2, 0, wr.r2);
1865         UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
1866         uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
1867         UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
1868 
1869 #ifdef CONFIG_SMP
1870         uasm_l_smp_pgtable_change(l, *p);
1871 #endif
1872         iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
1873         if (!m4kc_tlbp_war())
1874                 build_tlb_probe_entry(p);
1875         return wr;
1876 }
1877 
1878 static void __cpuinit
1879 build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
1880                                    struct uasm_reloc **r, unsigned int tmp,
1881                                    unsigned int ptr)
1882 {
1883         uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
1884         uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
1885         build_update_entries(p, tmp, ptr);
1886         build_tlb_write_entry(p, l, r, tlb_indexed);
1887         uasm_l_leave(l, *p);
1888         build_restore_work_registers(p);
1889         uasm_i_eret(p); /* return from trap */
1890 
1891 #ifdef CONFIG_64BIT
1892         build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
1893 #endif
1894 }
1895 
1896 static void __cpuinit build_r4000_tlb_load_handler(void)
1897 {
1898         u32 *p = handle_tlbl;
1899         struct uasm_label *l = labels;
1900         struct uasm_reloc *r = relocs;
1901         struct work_registers wr;
1902 
1903         memset(handle_tlbl, 0, sizeof(handle_tlbl));
1904         memset(labels, 0, sizeof(labels));
1905         memset(relocs, 0, sizeof(relocs));
1906 
1907         if (bcm1250_m3_war()) {
1908                 unsigned int segbits = 44;
1909 
1910                 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1911                 uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
1912                 uasm_i_xor(&p, K0, K0, K1);
1913                 uasm_i_dsrl_safe(&p, K1, K0, 62);
1914                 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
1915                 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
1916                 uasm_i_or(&p, K0, K0, K1);
1917                 uasm_il_bnez(&p, &r, K0, label_leave);
1918                 /* No need for uasm_i_nop */
1919         }
1920 
1921         wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
1922         build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
1923         if (m4kc_tlbp_war())
1924                 build_tlb_probe_entry(&p);
1925 
1926         if (cpu_has_rixi) {
1927                 /*
1928                  * If the page is not _PAGE_VALID, RI or XI could not
1929                  * have triggered it.  Skip the expensive test..
1930                  */
1931                 if (use_bbit_insns()) {
1932                         uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
1933                                       label_tlbl_goaround1);
1934                 } else {
1935                         uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
1936                         uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
1937                 }
1938                 uasm_i_nop(&p);
1939 
1940                 uasm_i_tlbr(&p);
1941                 /* Examine  entrylo 0 or 1 based on ptr. */
1942                 if (use_bbit_insns()) {
1943                         uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
1944                 } else {
1945                         uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
1946                         uasm_i_beqz(&p, wr.r3, 8);
1947                 }
1948                 /* load it in the delay slot*/
1949                 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
1950                 /* load it if ptr is odd */
1951                 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
1952                 /*
1953                  * If the entryLo (now in wr.r3) is valid (bit 1), RI or
1954                  * XI must have triggered it.
1955                  */
1956                 if (use_bbit_insns()) {
1957                         uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
1958                         uasm_i_nop(&p);
1959                         uasm_l_tlbl_goaround1(&l, p);
1960                 } else {
1961                         uasm_i_andi(&p, wr.r3, wr.r3, 2);
1962                         uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
1963                         uasm_i_nop(&p);
1964                 }
1965                 uasm_l_tlbl_goaround1(&l, p);
1966         }
1967         build_make_valid(&p, &r, wr.r1, wr.r2);
1968         build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
1969 
1970 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1971         /*
1972          * This is the entry point when build_r4000_tlbchange_handler_head
1973          * spots a huge page.
1974          */
1975         uasm_l_tlb_huge_update(&l, p);
1976         iPTE_LW(&p, wr.r1, wr.r2);
1977         build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
1978         build_tlb_probe_entry(&p);
1979 
1980         if (cpu_has_rixi) {
1981                 /*
1982                  * If the page is not _PAGE_VALID, RI or XI could not
1983                  * have triggered it.  Skip the expensive test..
1984                  */
1985                 if (use_bbit_insns()) {
1986                         uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
1987                                       label_tlbl_goaround2);
1988                 } else {
1989                         uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
1990                         uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
1991                 }
1992                 uasm_i_nop(&p);
1993 
1994                 uasm_i_tlbr(&p);
1995                 /* Examine  entrylo 0 or 1 based on ptr. */
1996                 if (use_bbit_insns()) {
1997                         uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
1998                 } else {
1999                         uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
2000                         uasm_i_beqz(&p, wr.r3, 8);
2001                 }
2002                 /* load it in the delay slot*/
2003                 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
2004                 /* load it if ptr is odd */
2005                 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
2006                 /*
2007                  * If the entryLo (now in wr.r3) is valid (bit 1), RI or
2008                  * XI must have triggered it.
2009                  */
2010                 if (use_bbit_insns()) {
2011                         uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
2012                 } else {
2013                         uasm_i_andi(&p, wr.r3, wr.r3, 2);
2014                         uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
2015                 }
2016                 if (PM_DEFAULT_MASK == 0)
2017                         uasm_i_nop(&p);
2018                 /*
2019                  * We clobbered C0_PAGEMASK, restore it.  On the other branch
2020                  * it is restored in build_huge_tlb_write_entry.
2021                  */
2022                 build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
2023 
2024                 uasm_l_tlbl_goaround2(&l, p);
2025         }
2026         uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
2027         build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
2028 #endif
2029 
2030         uasm_l_nopage_tlbl(&l, p);
2031         build_restore_work_registers(&p);
2032         uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
2033         uasm_i_nop(&p);
2034 
2035         if ((p - handle_tlbl) > FASTPATH_SIZE)
2036                 panic("TLB load handler fastpath space exceeded");
2037 
2038         uasm_resolve_relocs(relocs, labels);
2039         pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
2040                  (unsigned int)(p - handle_tlbl));
2041 
2042         dump_handler("r4000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl));
2043 }
2044 
2045 static void __cpuinit build_r4000_tlb_store_handler(void)
2046 {
2047         u32 *p = handle_tlbs;
2048         struct uasm_label *l = labels;
2049         struct uasm_reloc *r = relocs;
2050         struct work_registers wr;
2051 
2052         memset(handle_tlbs, 0, sizeof(handle_tlbs));
2053         memset(labels, 0, sizeof(labels));
2054         memset(relocs, 0, sizeof(relocs));
2055 
2056         wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2057         build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2058         if (m4kc_tlbp_war())
2059                 build_tlb_probe_entry(&p);
2060         build_make_write(&p, &r, wr.r1, wr.r2);
2061         build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2062 
2063 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2064         /*
2065          * This is the entry point when
2066          * build_r4000_tlbchange_handler_head spots a huge page.
2067          */
2068         uasm_l_tlb_huge_update(&l, p);
2069         iPTE_LW(&p, wr.r1, wr.r2);
2070         build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2071         build_tlb_probe_entry(&p);
2072         uasm_i_ori(&p, wr.r1, wr.r1,
2073                    _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2074         build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
2075 #endif
2076 
2077         uasm_l_nopage_tlbs(&l, p);
2078         build_restore_work_registers(&p);
2079         uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2080         uasm_i_nop(&p);
2081 
2082         if ((p - handle_tlbs) > FASTPATH_SIZE)
2083                 panic("TLB store handler fastpath space exceeded");
2084 
2085         uasm_resolve_relocs(relocs, labels);
2086         pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
2087                  (unsigned int)(p - handle_tlbs));
2088 
2089         dump_handler("r4000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs));
2090 }
2091 
2092 static void __cpuinit build_r4000_tlb_modify_handler(void)
2093 {
2094         u32 *p = handle_tlbm;
2095         struct uasm_label *l = labels;
2096         struct uasm_reloc *r = relocs;
2097         struct work_registers wr;
2098 
2099         memset(handle_tlbm, 0, sizeof(handle_tlbm));
2100         memset(labels, 0, sizeof(labels));
2101         memset(relocs, 0, sizeof(relocs));
2102 
2103         wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2104         build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
2105         if (m4kc_tlbp_war())
2106                 build_tlb_probe_entry(&p);
2107         /* Present and writable bits set, set accessed and dirty bits. */
2108         build_make_write(&p, &r, wr.r1, wr.r2);
2109         build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2110 
2111 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2112         /*
2113          * This is the entry point when
2114          * build_r4000_tlbchange_handler_head spots a huge page.
2115          */
2116         uasm_l_tlb_huge_update(&l, p);
2117         iPTE_LW(&p, wr.r1, wr.r2);
2118         build_pte_modifiable(&p, &r, wr.r1, wr.r2,  wr.r3, label_nopage_tlbm);
2119         build_tlb_probe_entry(&p);
2120         uasm_i_ori(&p, wr.r1, wr.r1,
2121                    _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2122         build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
2123 #endif
2124 
2125         uasm_l_nopage_tlbm(&l, p);
2126         build_restore_work_registers(&p);
2127         uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2128         uasm_i_nop(&p);
2129 
2130         if ((p - handle_tlbm) > FASTPATH_SIZE)
2131                 panic("TLB modify handler fastpath space exceeded");
2132 
2133         uasm_resolve_relocs(relocs, labels);
2134         pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
2135                  (unsigned int)(p - handle_tlbm));
2136 
2137         dump_handler("r4000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm));
2138 }
2139 
2140 void __cpuinit build_tlb_refill_handler(void)
2141 {
2142         /*
2143          * The refill handler is generated per-CPU, multi-node systems
2144          * may have local storage for it. The other handlers are only
2145          * needed once.
2146          */
2147         static int run_once = 0;
2148 
2149         output_pgtable_bits_defines();
2150 
2151 #ifdef CONFIG_64BIT
2152         check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
2153 #endif
2154 
2155         switch (current_cpu_type()) {
2156         case CPU_R2000:
2157         case CPU_R3000:
2158         case CPU_R3000A:
2159         case CPU_R3081E:
2160         case CPU_TX3912:
2161         case CPU_TX3922:
2162         case CPU_TX3927:
2163 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
2164                 build_r3000_tlb_refill_handler();
2165                 if (!run_once) {
2166                         build_r3000_tlb_load_handler();
2167                         build_r3000_tlb_store_handler();
2168                         build_r3000_tlb_modify_handler();
2169                         run_once++;
2170                 }
2171 #else
2172                 panic("No R3000 TLB refill handler");
2173 #endif
2174                 break;
2175 
2176         case CPU_R6000:
2177         case CPU_R6000A:
2178                 panic("No R6000 TLB refill handler yet");
2179                 break;
2180 
2181         case CPU_R8000:
2182                 panic("No R8000 TLB refill handler yet");
2183                 break;
2184 
2185         default:
2186                 if (!run_once) {
2187                         scratch_reg = allocate_kscratch();
2188 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
2189                         build_r4000_setup_pgd();
2190 #endif
2191                         build_r4000_tlb_load_handler();
2192                         build_r4000_tlb_store_handler();
2193                         build_r4000_tlb_modify_handler();
2194                         run_once++;
2195                 }
2196                 build_r4000_tlb_refill_handler();
2197         }
2198 }
2199 
2200 void __cpuinit flush_tlb_handlers(void)
2201 {
2202         local_flush_icache_range((unsigned long)handle_tlbl,
2203                            (unsigned long)handle_tlbl + sizeof(handle_tlbl));
2204         local_flush_icache_range((unsigned long)handle_tlbs,
2205                            (unsigned long)handle_tlbs + sizeof(handle_tlbs));
2206         local_flush_icache_range((unsigned long)handle_tlbm,
2207                            (unsigned long)handle_tlbm + sizeof(handle_tlbm));
2208 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
2209         local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
2210                            (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm));
2211 #endif
2212 }
2213 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp