  1 /*
  2  * This file is subject to the terms and conditions of the GNU General Public
  3  * License.  See the file "COPYING" in the main directory of this archive
  4  * for more details.
  5  *
  6  * Synthesize TLB refill handlers at runtime.
  7  *
  8  * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
  9  * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
 10  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 11  * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 12  * Copyright (C) 2011  MIPS Technologies, Inc.
 13  *
 14  * ... and the days got worse and worse and now you see
 15  * I've gone completely out of my mind.
 16  *
 17  * They're coming to take me away haha
 18  * they're coming to take me away hoho hihi haha
 19  * to the funny farm where code is beautiful all the time ...
 20  *
 21  * (Condolences to Napoleon XIV)
 22  */
 23 
 24 #include <linux/bug.h>
 25 #include <linux/export.h>
 26 #include <linux/kernel.h>
 27 #include <linux/types.h>
 28 #include <linux/smp.h>
 29 #include <linux/string.h>
 30 #include <linux/cache.h>
 31 
 32 #include <asm/cacheflush.h>
 33 #include <asm/cpu-type.h>
 34 #include <asm/pgtable.h>
 35 #include <asm/war.h>
 36 #include <asm/uasm.h>
 37 #include <asm/setup.h>
 38 #include <asm/tlbex.h>
 39 
 40 static int mips_xpa_disabled;
 41 
 42 static int __init xpa_disable(char *s)
 43 {
 44         mips_xpa_disabled = 1;
 45 
 46         return 1;
 47 }
 48 
 49 __setup("noxpa", xpa_disable);
 50 
 51 /*
 52  * TLB load/store/modify handlers.
 53  *
 54  * Only the fastpath gets synthesized at runtime, the slowpath for
 55  * do_page_fault remains normal asm.
 56  */
 57 extern void tlb_do_page_fault_0(void);
 58 extern void tlb_do_page_fault_1(void);
 59 
 60 struct work_registers {
 61         int r1;
 62         int r2;
 63         int r3;
 64 };
 65 
 66 struct tlb_reg_save {
 67         unsigned long a;
 68         unsigned long b;
 69 } ____cacheline_aligned_in_smp;
 70 
 71 static struct tlb_reg_save handler_reg_save[NR_CPUS];
 72 
 73 static inline int r45k_bvahwbug(void)
 74 {
 75         /* XXX: We should probe for the presence of this bug, but we don't. */
 76         return 0;
 77 }
 78 
 79 static inline int r4k_250MHZhwbug(void)
 80 {
 81         /* XXX: We should probe for the presence of this bug, but we don't. */
 82         return 0;
 83 }
 84 
 85 static inline int __maybe_unused bcm1250_m3_war(void)
 86 {
 87         return BCM1250_M3_WAR;
 88 }
 89 
 90 static inline int __maybe_unused r10000_llsc_war(void)
 91 {
 92         return R10000_LLSC_WAR;
 93 }
 94 
 95 static int use_bbit_insns(void)
 96 {
 97         switch (current_cpu_type()) {
 98         case CPU_CAVIUM_OCTEON:
 99         case CPU_CAVIUM_OCTEON_PLUS:
100         case CPU_CAVIUM_OCTEON2:
101         case CPU_CAVIUM_OCTEON3:
102                 return 1;
103         default:
104                 return 0;
105         }
106 }
107 
108 static int use_lwx_insns(void)
109 {
110         switch (current_cpu_type()) {
111         case CPU_CAVIUM_OCTEON2:
112         case CPU_CAVIUM_OCTEON3:
113                 return 1;
114         default:
115                 return 0;
116         }
117 }
118 #if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
119     CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
120 static bool scratchpad_available(void)
121 {
122         return true;
123 }
124 static int scratchpad_offset(int i)
125 {
126         /*
127          * CVMSEG starts at address -32768 and extends for
128          * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
129          */
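             /*
              * Worked example (illustrative values, not from the source):
              * with CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE = 2, CVMSEG spans
              * [-32768, -32512).  A call with i = 0 then returns
              * 2 * 128 - 8 - 32768 = -32520, the topmost 8-byte slot;
              * each further index moves down by another 8 bytes.
              */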
130         i += 1; /* Kernel use starts at the top and works down. */
131         return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
132 }
133 #else
134 static bool scratchpad_available(void)
135 {
136         return false;
137 }
138 static int scratchpad_offset(int i)
139 {
140         BUG();
 141         /* Really unreachable, but evidently some GCC versions want this. */
142         return 0;
143 }
144 #endif
145 /*
 146  * Found by experiment: at least some revisions of the 4kc throw a
 147  * machine check exception under some circumstances, triggered by
 148  * invalid values in the index register.  Delaying the tlbp instruction
 149  * until after the next branch, plus adding an additional nop in front
 150  * of tlbwi/tlbwr, avoids the invalid index register values. Nobody knows
151  * why; it's not an issue caused by the core RTL.
152  *
153  */
154 static int m4kc_tlbp_war(void)
155 {
156         return current_cpu_type() == CPU_4KC;
157 }
158 
159 /* Handle labels (which must be positive integers). */
160 enum label_id {
161         label_second_part = 1,
162         label_leave,
163         label_vmalloc,
164         label_vmalloc_done,
165         label_tlbw_hazard_0,
166         label_split = label_tlbw_hazard_0 + 8,
167         label_tlbl_goaround1,
168         label_tlbl_goaround2,
169         label_nopage_tlbl,
170         label_nopage_tlbs,
171         label_nopage_tlbm,
172         label_smp_pgtable_change,
173         label_r3000_write_probe_fail,
174         label_large_segbits_fault,
175 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
176         label_tlb_huge_update,
177 #endif
178 };
179 
180 UASM_L_LA(_second_part)
181 UASM_L_LA(_leave)
182 UASM_L_LA(_vmalloc)
183 UASM_L_LA(_vmalloc_done)
184 /* _tlbw_hazard_x is handled differently.  */
185 UASM_L_LA(_split)
186 UASM_L_LA(_tlbl_goaround1)
187 UASM_L_LA(_tlbl_goaround2)
188 UASM_L_LA(_nopage_tlbl)
189 UASM_L_LA(_nopage_tlbs)
190 UASM_L_LA(_nopage_tlbm)
191 UASM_L_LA(_smp_pgtable_change)
192 UASM_L_LA(_r3000_write_probe_fail)
193 UASM_L_LA(_large_segbits_fault)
194 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
195 UASM_L_LA(_tlb_huge_update)
196 #endif
197 
198 static int hazard_instance;
199 
200 static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
201 {
202         switch (instance) {
203         case 0 ... 7:
204                 uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
205                 return;
206         default:
207                 BUG();
208         }
209 }
210 
211 static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
212 {
213         switch (instance) {
214         case 0 ... 7:
215                 uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
216                 break;
217         default:
218                 BUG();
219         }
220 }
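
     /*
      * A sketch of how the two helpers above pair up (see
      * build_tlb_write_entry() below): uasm_bgezl_hazard() emits a
      * branch-likely on $zero, which is always taken, targeting
      * label_tlbw_hazard_0 + instance; the tlbw is then emitted into its
      * delay slot, and uasm_bgezl_label() defines the label immediately
      * after it, so the branch soaks up the mtc0 hazard without extra
      * nops.
      */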
221 
222 /*
223  * pgtable bits are assigned dynamically depending on processor feature
224  * and statically based on kernel configuration.  This spits out the actual
225  * values the kernel is using.  Required to make sense from disassembled
226  * TLB exception handlers.
227  */
228 static void output_pgtable_bits_defines(void)
229 {
230 #define pr_define(fmt, ...)                                     \
231         pr_debug("#define " fmt, ##__VA_ARGS__)
232 
233         pr_debug("#include <asm/asm.h>\n");
234         pr_debug("#include <asm/regdef.h>\n");
235         pr_debug("\n");
236 
237         pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
238         pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
239         pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
240         pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
241         pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
242 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
243         pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
244 #endif
245 #ifdef _PAGE_NO_EXEC_SHIFT
246         if (cpu_has_rixi)
247                 pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
248 #endif
249         pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
250         pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
251         pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
252         pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
253         pr_debug("\n");
254 }
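
     /*
      * With debug output enabled, the function above prints something
      * like the following (shift values are illustrative only; the real
      * ones depend on CPU features and kernel configuration):
      *
      *      #include <asm/asm.h>
      *      #include <asm/regdef.h>
      *
      *      #define _PAGE_PRESENT_SHIFT 0
      *      #define _PAGE_NO_READ_SHIFT 1
      *      ...
      */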
255 
256 static inline void dump_handler(const char *symbol, const u32 *handler, int count)
257 {
258         int i;
259 
260         pr_debug("LEAF(%s)\n", symbol);
261 
262         pr_debug("\t.set push\n");
263         pr_debug("\t.set noreorder\n");
264 
265         for (i = 0; i < count; i++)
266                 pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);
267 
268         pr_debug("\t.set\tpop\n");
269 
270         pr_debug("\tEND(%s)\n", symbol);
271 }
272 
273 /* The only general purpose registers allowed in TLB handlers. */
274 #define K0              26
275 #define K1              27
276 
 277 /* Some CP0 registers, given as (register, select) pairs */
278 #define C0_INDEX        0, 0
279 #define C0_ENTRYLO0     2, 0
280 #define C0_TCBIND       2, 2
281 #define C0_ENTRYLO1     3, 0
282 #define C0_CONTEXT      4, 0
283 #define C0_PAGEMASK     5, 0
284 #define C0_PWBASE       5, 5
285 #define C0_PWFIELD      5, 6
286 #define C0_PWSIZE       5, 7
287 #define C0_PWCTL        6, 6
288 #define C0_BADVADDR     8, 0
289 #define C0_PGD          9, 7
290 #define C0_ENTRYHI      10, 0
291 #define C0_EPC          14, 0
292 #define C0_XCONTEXT     20, 0
293 
294 #ifdef CONFIG_64BIT
295 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
296 #else
297 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
298 #endif
299 
300 /* The worst case length of the handler is around 18 instructions for
301  * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
302  * Maximum space available is 32 instructions for R3000 and 64
303  * instructions for R4000.
304  *
305  * We deliberately chose a buffer size of 128, so we won't scribble
306  * over anything important on overflow before we panic.
307  */
308 static u32 tlb_handler[128];
309 
310 /* simply assume worst case size for labels and relocs */
311 static struct uasm_label labels[128];
312 static struct uasm_reloc relocs[128];
313 
314 static int check_for_high_segbits;
315 static bool fill_includes_sw_bits;
316 
317 static unsigned int kscratch_used_mask;
318 
319 static inline int __maybe_unused c0_kscratch(void)
320 {
321         switch (current_cpu_type()) {
322         case CPU_XLP:
323         case CPU_XLR:
324                 return 22;
325         default:
326                 return 31;
327         }
328 }
329 
330 static int allocate_kscratch(void)
331 {
332         int r;
333         unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
334 
335         r = ffs(a);
336 
337         if (r == 0)
338                 return -1;
339 
340         r--; /* make it zero based */
341 
342         kscratch_used_mask |= (1 << r);
343 
344         return r;
345 }
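
     /*
      * Worked example (hypothetical mask): if cpu_data[0].kscratch_mask
      * is 0x3c (KScratch selects 2..5 usable) and nothing has been
      * allocated yet, ffs(0x3c) returns 3, so the function hands out
      * select 2 and sets bit 2 of kscratch_used_mask; the next call
      * would return 3.
      */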
346 
347 static int scratch_reg;
348 int pgd_reg;
349 EXPORT_SYMBOL_GPL(pgd_reg);
350 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
351 
352 static struct work_registers build_get_work_registers(u32 **p)
353 {
354         struct work_registers r;
355 
356         if (scratch_reg >= 0) {
357                 /* Save in CPU local C0_KScratch? */
358                 UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
359                 r.r1 = K0;
360                 r.r2 = K1;
361                 r.r3 = 1;
362                 return r;
363         }
364 
365         if (num_possible_cpus() > 1) {
366                 /* Get smp_processor_id */
367                 UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
368                 UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
369 
370                 /* handler_reg_save index in K0 */
371                 UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
372 
373                 UASM_i_LA(p, K1, (long)&handler_reg_save);
374                 UASM_i_ADDU(p, K0, K0, K1);
375         } else {
376                 UASM_i_LA(p, K0, (long)&handler_reg_save);
377         }
378         /* K0 now points to save area, save $1 and $2  */
379         UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
380         UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
381 
382         r.r1 = K1;
383         r.r2 = 1;
384         r.r3 = 2;
385         return r;
386 }
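
     /*
      * To recap the two cases above: with a KScratch register available
      * the old $1 is parked there and the handler works in k0/k1/$1;
      * otherwise $1 and $2 are spilled to this CPU's handler_reg_save
      * slot, k0 is left pointing at that slot, and the handler works in
      * k1/$1/$2.  build_restore_work_registers() undoes whichever case
      * applied.
      */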
387 
388 static void build_restore_work_registers(u32 **p)
389 {
390         if (scratch_reg >= 0) {
391                 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
392                 return;
393         }
394         /* K0 already points to save area, restore $1 and $2  */
395         UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
396         UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
397 }
398 
399 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
400 
401 /*
402  * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
403  * we cannot do r3000 under these circumstances.
404  *
405  * Declare pgd_current here instead of including mmu_context.h to avoid type
406  * conflicts for tlbmiss_handler_setup_pgd
407  */
408 extern unsigned long pgd_current[];
409 
410 /*
411  * The R3000 TLB handler is simple.
412  */
413 static void build_r3000_tlb_refill_handler(void)
414 {
415         long pgdc = (long)pgd_current;
416         u32 *p;
417 
418         memset(tlb_handler, 0, sizeof(tlb_handler));
419         p = tlb_handler;
420 
421         uasm_i_mfc0(&p, K0, C0_BADVADDR);
422         uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
423         uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
424         uasm_i_srl(&p, K0, K0, 22); /* load delay */
425         uasm_i_sll(&p, K0, K0, 2);
426         uasm_i_addu(&p, K1, K1, K0);
427         uasm_i_mfc0(&p, K0, C0_CONTEXT);
428         uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
429         uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
430         uasm_i_addu(&p, K1, K1, K0);
431         uasm_i_lw(&p, K0, 0, K1);
432         uasm_i_nop(&p); /* load delay */
433         uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
434         uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
435         uasm_i_tlbwr(&p); /* cp0 delay */
436         uasm_i_jr(&p, K1);
437         uasm_i_rfe(&p); /* branch delay */
438 
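             /*
              * For reference, the calls above assemble to roughly this
              * sequence (a sketch; register names as in asm/regdef.h):
              *
              *      mfc0    k0, c0_badvaddr
              *      lui     k1, %hi(pgd_current)
              *      lw      k1, %lo(pgd_current)(k1)
              *      srl     k0, k0, 22
              *      sll     k0, k0, 2
              *      addu    k1, k1, k0
              *      mfc0    k0, c0_context
              *      lw      k1, 0(k1)
              *      andi    k0, k0, 0xffc
              *      addu    k1, k1, k0
              *      lw      k0, 0(k1)
              *      nop
              *      mtc0    k0, c0_entrylo0
              *      mfc0    k1, c0_epc
              *      tlbwr
              *      jr      k1
              *      rfe
              */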
439         if (p > tlb_handler + 32)
440                 panic("TLB refill handler space exceeded");
441 
442         pr_debug("Wrote TLB refill handler (%u instructions).\n",
443                  (unsigned int)(p - tlb_handler));
444 
445         memcpy((void *)ebase, tlb_handler, 0x80);
446         local_flush_icache_range(ebase, ebase + 0x80);
447 
448         dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
449 }
450 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
451 
452 /*
453  * The R4000 TLB handler is much more complicated. We have two
454  * consecutive handler areas with 32 instructions space each.
455  * Since they aren't used at the same time, we can overflow in the
 456  * other one. To keep things simple, we first assume linear space,
457  * then we relocate it to the final handler layout as needed.
458  */
459 static u32 final_handler[64];
460 
461 /*
462  * Hazards
463  *
464  * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
465  * 2. A timing hazard exists for the TLBP instruction.
466  *
467  *      stalling_instruction
468  *      TLBP
469  *
470  * The JTLB is being read for the TLBP throughout the stall generated by the
471  * previous instruction. This is not really correct as the stalling instruction
472  * can modify the address used to access the JTLB.  The failure symptom is that
473  * the TLBP instruction will use an address created for the stalling instruction
 474  * and not the address held in C0_ENTRYHI and thus report the wrong results.
475  *
476  * The software work-around is to not allow the instruction preceding the TLBP
477  * to stall - make it an NOP or some other instruction guaranteed not to stall.
478  *
479  * Errata 2 will not be fixed.  This errata is also on the R5000.
480  *
481  * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
482  */
483 static void __maybe_unused build_tlb_probe_entry(u32 **p)
484 {
485         switch (current_cpu_type()) {
486         /* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
487         case CPU_R4600:
488         case CPU_R4700:
489         case CPU_R5000:
490         case CPU_NEVADA:
491                 uasm_i_nop(p);
492                 uasm_i_tlbp(p);
493                 break;
494 
495         default:
496                 uasm_i_tlbp(p);
497                 break;
498         }
499 }
500 
501 void build_tlb_write_entry(u32 **p, struct uasm_label **l,
502                            struct uasm_reloc **r,
503                            enum tlb_write_entry wmode)
504 {
505         void(*tlbw)(u32 **) = NULL;
506 
507         switch (wmode) {
508         case tlb_random: tlbw = uasm_i_tlbwr; break;
509         case tlb_indexed: tlbw = uasm_i_tlbwi; break;
510         }
511 
512         if (cpu_has_mips_r2_r6) {
513                 if (cpu_has_mips_r2_exec_hazard)
514                         uasm_i_ehb(p);
515                 tlbw(p);
516                 return;
517         }
518 
519         switch (current_cpu_type()) {
520         case CPU_R4000PC:
521         case CPU_R4000SC:
522         case CPU_R4000MC:
523         case CPU_R4400PC:
524         case CPU_R4400SC:
525         case CPU_R4400MC:
526                 /*
527                  * This branch uses up a mtc0 hazard nop slot and saves
528                  * two nops after the tlbw instruction.
529                  */
530                 uasm_bgezl_hazard(p, r, hazard_instance);
531                 tlbw(p);
532                 uasm_bgezl_label(l, p, hazard_instance);
533                 hazard_instance++;
534                 uasm_i_nop(p);
535                 break;
536 
537         case CPU_R4600:
538         case CPU_R4700:
539                 uasm_i_nop(p);
540                 tlbw(p);
541                 uasm_i_nop(p);
542                 break;
543 
544         case CPU_R5000:
545         case CPU_NEVADA:
546                 uasm_i_nop(p); /* QED specifies 2 nops hazard */
547                 uasm_i_nop(p); /* QED specifies 2 nops hazard */
548                 tlbw(p);
549                 break;
550 
551         case CPU_R4300:
552         case CPU_5KC:
553         case CPU_TX49XX:
554         case CPU_PR4450:
555         case CPU_XLR:
556                 uasm_i_nop(p);
557                 tlbw(p);
558                 break;
559 
560         case CPU_R10000:
561         case CPU_R12000:
562         case CPU_R14000:
563         case CPU_R16000:
564         case CPU_4KC:
565         case CPU_4KEC:
566         case CPU_M14KC:
567         case CPU_M14KEC:
568         case CPU_SB1:
569         case CPU_SB1A:
570         case CPU_4KSC:
571         case CPU_20KC:
572         case CPU_25KF:
573         case CPU_BMIPS32:
574         case CPU_BMIPS3300:
575         case CPU_BMIPS4350:
576         case CPU_BMIPS4380:
577         case CPU_BMIPS5000:
578         case CPU_LOONGSON2:
579         case CPU_LOONGSON3:
580         case CPU_R5500:
581                 if (m4kc_tlbp_war())
582                         uasm_i_nop(p);
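                     /* fall through */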
583         case CPU_ALCHEMY:
584                 tlbw(p);
585                 break;
586 
587         case CPU_RM7000:
588                 uasm_i_nop(p);
589                 uasm_i_nop(p);
590                 uasm_i_nop(p);
591                 uasm_i_nop(p);
592                 tlbw(p);
593                 break;
594 
595         case CPU_VR4111:
596         case CPU_VR4121:
597         case CPU_VR4122:
598         case CPU_VR4181:
599         case CPU_VR4181A:
600                 uasm_i_nop(p);
601                 uasm_i_nop(p);
602                 tlbw(p);
603                 uasm_i_nop(p);
604                 uasm_i_nop(p);
605                 break;
606 
607         case CPU_VR4131:
608         case CPU_VR4133:
609         case CPU_R5432:
610                 uasm_i_nop(p);
611                 uasm_i_nop(p);
612                 tlbw(p);
613                 break;
614 
615         case CPU_JZRISC:
616                 tlbw(p);
617                 uasm_i_nop(p);
618                 break;
619 
620         default:
621                 panic("No TLB refill handler yet (CPU type: %d)",
622                       current_cpu_type());
623                 break;
624         }
625 }
626 EXPORT_SYMBOL_GPL(build_tlb_write_entry);
627 
628 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
629                                                         unsigned int reg)
630 {
631         if (_PAGE_GLOBAL_SHIFT == 0) {
632                 /* pte_t is already in EntryLo format */
633                 return;
634         }
635 
636         if (cpu_has_rixi && _PAGE_NO_EXEC) {
637                 if (fill_includes_sw_bits) {
638                         UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
639                 } else {
640                         UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
641                         UASM_i_ROTR(p, reg, reg,
642                                     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
643                 }
644         } else {
645 #ifdef CONFIG_PHYS_ADDR_T_64BIT
646                 uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
647 #else
648                 UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
649 #endif
650         }
651 }
652 
653 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
654 
655 static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
656                                    unsigned int tmp, enum label_id lid,
657                                    int restore_scratch)
658 {
659         if (restore_scratch) {
660                 /* Reset default page size */
661                 if (PM_DEFAULT_MASK >> 16) {
662                         uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
663                         uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
664                         uasm_i_mtc0(p, tmp, C0_PAGEMASK);
665                         uasm_il_b(p, r, lid);
666                 } else if (PM_DEFAULT_MASK) {
667                         uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
668                         uasm_i_mtc0(p, tmp, C0_PAGEMASK);
669                         uasm_il_b(p, r, lid);
670                 } else {
671                         uasm_i_mtc0(p, 0, C0_PAGEMASK);
672                         uasm_il_b(p, r, lid);
673                 }
674                 if (scratch_reg >= 0)
675                         UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
676                 else
677                         UASM_i_LW(p, 1, scratchpad_offset(0), 0);
678         } else {
679                 /* Reset default page size */
680                 if (PM_DEFAULT_MASK >> 16) {
681                         uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
682                         uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
683                         uasm_il_b(p, r, lid);
684                         uasm_i_mtc0(p, tmp, C0_PAGEMASK);
685                 } else if (PM_DEFAULT_MASK) {
686                         uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
687                         uasm_il_b(p, r, lid);
688                         uasm_i_mtc0(p, tmp, C0_PAGEMASK);
689                 } else {
690                         uasm_il_b(p, r, lid);
691                         uasm_i_mtc0(p, 0, C0_PAGEMASK);
692                 }
693         }
694 }
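
     /*
      * Note the instruction ordering above: in the restore_scratch arm
      * the branch to LID is emitted last, so the MFC0/LW that reloads $1
      * lands in its delay slot, while in the other arm the PageMask
      * write itself is the delay-slot instruction.
      */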
695 
696 static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
697                                        struct uasm_reloc **r,
698                                        unsigned int tmp,
699                                        enum tlb_write_entry wmode,
700                                        int restore_scratch)
701 {
702         /* Set huge page tlb entry size */
703         uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
704         uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
705         uasm_i_mtc0(p, tmp, C0_PAGEMASK);
706 
707         build_tlb_write_entry(p, l, r, wmode);
708 
709         build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
710 }
711 
712 /*
713  * Check if Huge PTE is present, if so then jump to LABEL.
714  */
715 static void
716 build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
717                   unsigned int pmd, int lid)
718 {
719         UASM_i_LW(p, tmp, 0, pmd);
720         if (use_bbit_insns()) {
721                 uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
722         } else {
723                 uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
724                 uasm_il_bnez(p, r, tmp, lid);
725         }
726 }
727 
728 static void build_huge_update_entries(u32 **p, unsigned int pte,
729                                       unsigned int tmp)
730 {
731         int small_sequence;
732 
733         /*
734          * A huge PTE describes an area the size of the
 735          * configured huge page size. This is twice that
736          * of the large TLB entry size we intend to use.
737          * A TLB entry half the size of the configured
738          * huge page size is configured into entrylo0
739          * and entrylo1 to cover the contiguous huge PTE
740          * address space.
741          */
742         small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
743 
 744         /* We can clobber tmp.  It isn't used after this. */
745         if (!small_sequence)
746                 uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
747 
748         build_convert_pte_to_entrylo(p, pte);
749         UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
750         /* convert to entrylo1 */
751         if (small_sequence)
752                 UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
753         else
754                 UASM_i_ADDU(p, pte, pte, tmp);
755 
756         UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
757 }
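
     /*
      * Worked example (illustrative): with 2 MB huge pages,
      * HPAGE_SIZE >> 7 is 0x4000, which fits in ADDIU's 16-bit
      * immediate, so small_sequence is true and a single ADDIU turns
      * the entrylo0 value into the entrylo1 value for the upper half
      * of the huge page.
      */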
758 
759 static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
760                                     struct uasm_label **l,
761                                     unsigned int pte,
762                                     unsigned int ptr,
763                                     unsigned int flush)
764 {
765 #ifdef CONFIG_SMP
766         UASM_i_SC(p, pte, 0, ptr);
767         uasm_il_beqz(p, r, pte, label_tlb_huge_update);
768         UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
769 #else
770         UASM_i_SW(p, pte, 0, ptr);
771 #endif
772         if (cpu_has_ftlb && flush) {
773                 BUG_ON(!cpu_has_tlbinv);
774 
775                 UASM_i_MFC0(p, ptr, C0_ENTRYHI);
776                 uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
777                 UASM_i_MTC0(p, ptr, C0_ENTRYHI);
778                 build_tlb_write_entry(p, l, r, tlb_indexed);
779 
780                 uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
781                 UASM_i_MTC0(p, ptr, C0_ENTRYHI);
782                 build_huge_update_entries(p, pte, ptr);
783                 build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
784 
785                 return;
786         }
787 
788         build_huge_update_entries(p, pte, ptr);
789         build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
790 }
791 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
792 
793 #ifdef CONFIG_64BIT
794 /*
795  * TMP and PTR are scratch.
796  * TMP will be clobbered, PTR will hold the pmd entry.
797  */
798 void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
799                       unsigned int tmp, unsigned int ptr)
800 {
801 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
802         long pgdc = (long)pgd_current;
803 #endif
804         /*
805          * The vmalloc handling is not in the hotpath.
806          */
807         uasm_i_dmfc0(p, tmp, C0_BADVADDR);
808 
809         if (check_for_high_segbits) {
810                 /*
 811          * The kernel currently implicitly assumes that the
812                  * MIPS SEGBITS parameter for the processor is
813                  * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
814                  * allocate virtual addresses outside the maximum
815                  * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
816                  * that doesn't prevent user code from accessing the
817                  * higher xuseg addresses.  Here, we make sure that
818                  * everything but the lower xuseg addresses goes down
819                  * the module_alloc/vmalloc path.
820                  */
821                 uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
822                 uasm_il_bnez(p, r, ptr, label_vmalloc);
823         } else {
824                 uasm_il_bltz(p, r, tmp, label_vmalloc);
825         }
826         /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
827 
828         if (pgd_reg != -1) {
829                 /* pgd is in pgd_reg */
830                 if (cpu_has_ldpte)
831                         UASM_i_MFC0(p, ptr, C0_PWBASE);
832                 else
833                         UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
834         } else {
835 #if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
836                 /*
837                  * &pgd << 11 stored in CONTEXT [23..63].
838                  */
839                 UASM_i_MFC0(p, ptr, C0_CONTEXT);
840 
841                 /* Clear lower 23 bits of context. */
842                 uasm_i_dins(p, ptr, 0, 0, 23);
843 
844                 /* 1 0  1 0 1  << 6  xkphys cached */
845                 uasm_i_ori(p, ptr, ptr, 0x540);
846                 uasm_i_drotr(p, ptr, ptr, 11);
847 #elif defined(CONFIG_SMP)
848                 UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
849                 uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
850                 UASM_i_LA_mostly(p, tmp, pgdc);
851                 uasm_i_daddu(p, ptr, ptr, tmp);
852                 uasm_i_dmfc0(p, tmp, C0_BADVADDR);
853                 uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
854 #else
855                 UASM_i_LA_mostly(p, ptr, pgdc);
856                 uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
857 #endif
858         }
859 
860         uasm_l_vmalloc_done(l, *p);
861 
862         /* get pgd offset in bytes */
863         uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
864 
865         uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
866         uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
867 #ifndef __PAGETABLE_PUD_FOLDED
868         uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
869         uasm_i_ld(p, ptr, 0, ptr); /* get pud pointer */
870         uasm_i_dsrl_safe(p, tmp, tmp, PUD_SHIFT - 3); /* get pud offset in bytes */
871         uasm_i_andi(p, tmp, tmp, (PTRS_PER_PUD - 1) << 3);
872         uasm_i_daddu(p, ptr, ptr, tmp); /* add in pud offset */
873 #endif
874 #ifndef __PAGETABLE_PMD_FOLDED
875         uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
876         uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
877         uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
878         uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
879         uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
880 #endif
881 }
882 EXPORT_SYMBOL_GPL(build_get_pmde64);
883 
884 /*
885  * BVADDR is the faulting address, PTR is scratch.
886  * PTR will hold the pgd for vmalloc.
887  */
888 static void
889 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
890                         unsigned int bvaddr, unsigned int ptr,
891                         enum vmalloc64_mode mode)
892 {
893         long swpd = (long)swapper_pg_dir;
894         int single_insn_swpd;
895         int did_vmalloc_branch = 0;
896 
897         single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
898 
899         uasm_l_vmalloc(l, *p);
900 
901         if (mode != not_refill && check_for_high_segbits) {
902                 if (single_insn_swpd) {
903                         uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
904                         uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
905                         did_vmalloc_branch = 1;
906                         /* fall through */
907                 } else {
908                         uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
909                 }
910         }
911         if (!did_vmalloc_branch) {
912                 if (single_insn_swpd) {
913                         uasm_il_b(p, r, label_vmalloc_done);
914                         uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
915                 } else {
916                         UASM_i_LA_mostly(p, ptr, swpd);
917                         uasm_il_b(p, r, label_vmalloc_done);
918                         if (uasm_in_compat_space_p(swpd))
919                                 uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
920                         else
921                                 uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
922                 }
923         }
924         if (mode != not_refill && check_for_high_segbits) {
925                 uasm_l_large_segbits_fault(l, *p);
926                 /*
 927          * We get here if the address is an xsseg address, or an
 928          * xuseg address above the (PGDIR_SHIFT+PGDIR_BITS) boundary.
 929          *
 930          * Ignoring xsseg (assumed disabled, so accesses would
 931          * presumably raise address errors), the only remaining possibility
932                  * is the upper xuseg addresses.  On processors with
933                  * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
934                  * addresses would have taken an address error. We try
935                  * to mimic that here by taking a load/istream page
936                  * fault.
937                  */
938                 UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
939                 uasm_i_jr(p, ptr);
940 
941                 if (mode == refill_scratch) {
942                         if (scratch_reg >= 0)
943                                 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
944                         else
945                                 UASM_i_LW(p, 1, scratchpad_offset(0), 0);
946                 } else {
947                         uasm_i_nop(p);
948                 }
949         }
950 }
951 
952 #else /* !CONFIG_64BIT */
953 
954 /*
955  * TMP and PTR are scratch.
956  * TMP will be clobbered, PTR will hold the pgd entry.
957  */
958 void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
959 {
960         if (pgd_reg != -1) {
961                 /* pgd is in pgd_reg */
962                 uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg);
963                 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
964         } else {
965                 long pgdc = (long)pgd_current;
966 
967                 /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
968 #ifdef CONFIG_SMP
969                 uasm_i_mfc0(p, ptr, SMP_CPUID_REG);
970                 UASM_i_LA_mostly(p, tmp, pgdc);
971                 uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
972                 uasm_i_addu(p, ptr, tmp, ptr);
973 #else
974                 UASM_i_LA_mostly(p, ptr, pgdc);
975 #endif
976                 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
977                 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
978         }
979         uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
980         uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
981         uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
982 }
983 EXPORT_SYMBOL_GPL(build_get_pgde32);
984 
985 #endif /* !CONFIG_64BIT */
986 
987 static void build_adjust_context(u32 **p, unsigned int ctx)
988 {
989         unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
990         unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
991 
992         switch (current_cpu_type()) {
993         case CPU_VR41XX:
994         case CPU_VR4111:
995         case CPU_VR4121:
996         case CPU_VR4122:
997         case CPU_VR4131:
998         case CPU_VR4181:
999         case CPU_VR4181A:
1000         case CPU_VR4133:
1001                 shift += 2;
1002                 break;
1003 
1004         default:
1005                 break;
1006         }
1007 
1008         if (shift)
1009                 UASM_i_SRL(p, ctx, ctx, shift);
1010         uasm_i_andi(p, ctx, ctx, mask);
1011 }
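
     /*
      * Worked example (illustrative: 4K pages, 32-bit PTEs, so
      * PTE_T_LOG2 == 2): CP0 Context supplies BadVPN2 starting at bit 4,
      * and we want BadVPN2 << 3 as the byte offset of the even/odd pte
      * pair, hence shift == 4 - 3 + 0 == 1.  With PTRS_PER_PTE == 1024
      * the mask works out to 511 << 3 == 0xff8, keeping the offset
      * within one pte table.
      */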
1012 
1013 void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
1014 {
1015         /*
1016          * Bug workaround for the Nevada. It seems as if under certain
1017          * circumstances the move from cp0_context might produce a
1018          * bogus result when the mfc0 instruction and its consumer are
1019          * in different cachelines, or when a load instruction (probably
1020          * any memory reference) sits between them.
1021          */
1022         switch (current_cpu_type()) {
1023         case CPU_NEVADA:
1024                 UASM_i_LW(p, ptr, 0, ptr);
1025                 GET_CONTEXT(p, tmp); /* get context reg */
1026                 break;
1027 
1028         default:
1029                 GET_CONTEXT(p, tmp); /* get context reg */
1030                 UASM_i_LW(p, ptr, 0, ptr);
1031                 break;
1032         }
1033 
1034         build_adjust_context(p, tmp);
1035         UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
1036 }
1037 EXPORT_SYMBOL_GPL(build_get_ptep);
1038 
1039 void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
1040 {
1041         int pte_off_even = 0;
1042         int pte_off_odd = sizeof(pte_t);
1043 
1044 #if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT)
1045         /* The low 32 bits of EntryLo is stored in pte_high */
1046         pte_off_even += offsetof(pte_t, pte_high);
1047         pte_off_odd += offsetof(pte_t, pte_high);
1048 #endif
1049 
1050         if (IS_ENABLED(CONFIG_XPA)) {
1051                 uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
1052                 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1053                 UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
1054 
1055                 if (cpu_has_xpa && !mips_xpa_disabled) {
1056                         uasm_i_lw(p, tmp, 0, ptep);
1057                         uasm_i_ext(p, tmp, tmp, 0, 24);
1058                         uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
1059                 }
1060 
1061                 uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
1062                 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1063                 UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
1064 
1065                 if (cpu_has_xpa && !mips_xpa_disabled) {
1066                         uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
1067                         uasm_i_ext(p, tmp, tmp, 0, 24);
1068                         uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
1069                 }
1070                 return;
1071         }
1072 
1073         UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */
1074         UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */
1075         if (r45k_bvahwbug())
1076                 build_tlb_probe_entry(p);
1077         build_convert_pte_to_entrylo(p, tmp);
1078         if (r4k_250MHZhwbug())
1079                 UASM_i_MTC0(p, 0, C0_ENTRYLO0);
1080         UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1081         build_convert_pte_to_entrylo(p, ptep);
1082         if (r45k_bvahwbug())
1083                 uasm_i_mfc0(p, tmp, C0_INDEX);
1084         if (r4k_250MHZhwbug())
1085                 UASM_i_MTC0(p, 0, C0_ENTRYLO1);
1086         UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1087 }
1088 EXPORT_SYMBOL_GPL(build_update_entries);
1089 
1090 struct mips_huge_tlb_info {
1091         int huge_pte;
1092         int restore_scratch;
1093         bool need_reload_pte;
1094 };
1095 
1096 static struct mips_huge_tlb_info
1097 build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
1098                                struct uasm_reloc **r, unsigned int tmp,
1099                                unsigned int ptr, int c0_scratch_reg)
1100 {
1101         struct mips_huge_tlb_info rv;
1102         unsigned int even, odd;
1103         int vmalloc_branch_delay_filled = 0;
1104         const int scratch = 1; /* Our extra working register */
1105 
1106         rv.huge_pte = scratch;
1107         rv.restore_scratch = 0;
1108         rv.need_reload_pte = false;
1109 
1110         if (check_for_high_segbits) {
1111                 UASM_i_MFC0(p, tmp, C0_BADVADDR);
1112 
1113                 if (pgd_reg != -1)
1114                         UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
1115                 else
1116                         UASM_i_MFC0(p, ptr, C0_CONTEXT);
1117 
1118                 if (c0_scratch_reg >= 0)
1119                         UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1120                 else
1121                         UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1122 
1123                 uasm_i_dsrl_safe(p, scratch, tmp,
1124                                  PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
1125                 uasm_il_bnez(p, r, scratch, label_vmalloc);
1126 
1127                 if (pgd_reg == -1) {
1128                         vmalloc_branch_delay_filled = 1;
1129                         /* Clear lower 23 bits of context. */
1130                         uasm_i_dins(p, ptr, 0, 0, 23);
1131                 }
1132         } else {
1133                 if (pgd_reg != -1)
1134                         UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
1135                 else
1136                         UASM_i_MFC0(p, ptr, C0_CONTEXT);
1137 
1138                 UASM_i_MFC0(p, tmp, C0_BADVADDR);
1139 
1140                 if (c0_scratch_reg >= 0)
1141                         UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1142                 else
1143                         UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1144 
1145                 if (pgd_reg == -1)
1146                         /* Clear lower 23 bits of context. */
1147                         uasm_i_dins(p, ptr, 0, 0, 23);
1148 
1149                 uasm_il_bltz(p, r, tmp, label_vmalloc);
1150         }
1151 
1152         if (pgd_reg == -1) {
1153                 vmalloc_branch_delay_filled = 1;
1154                 /* 1 0  1 0 1  << 6  xkphys cached */
1155                 uasm_i_ori(p, ptr, ptr, 0x540);
1156                 uasm_i_drotr(p, ptr, ptr, 11);
1157         }
1158 
1159 #ifdef __PAGETABLE_PMD_FOLDED
1160 #define LOC_PTEP scratch
1161 #else
1162 #define LOC_PTEP ptr
1163 #endif
1164 
1165         if (!vmalloc_branch_delay_filled)
1166                 /* get pgd offset in bytes */
1167                 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1168 
1169         uasm_l_vmalloc_done(l, *p);
1170 
1171         /*
1172          *                         tmp          ptr
1173          * fall-through case =   badvaddr  *pgd_current
1174          * vmalloc case      =   badvaddr  swapper_pg_dir
1175          */
1176 
1177         if (vmalloc_branch_delay_filled)
1178                 /* get pgd offset in bytes */
1179                 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1180 
1181 #ifdef __PAGETABLE_PMD_FOLDED
1182         GET_CONTEXT(p, tmp); /* get context reg */
1183 #endif
1184         uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
1185 
1186         if (use_lwx_insns()) {
1187                 UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
1188         } else {
1189                 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
1190                 uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
1191         }
1192 
1193 #ifndef __PAGETABLE_PUD_FOLDED
1194         /* get pud offset in bytes */
1195         uasm_i_dsrl_safe(p, scratch, tmp, PUD_SHIFT - 3);
1196         uasm_i_andi(p, scratch, scratch, (PTRS_PER_PUD - 1) << 3);
1197 
1198         if (use_lwx_insns()) {
1199                 UASM_i_LWX(p, ptr, scratch, ptr);
1200         } else {
1201                 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
1202                 UASM_i_LW(p, ptr, 0, ptr);
1203         }
1204         /* ptr contains a pointer to PMD entry */
1205         /* tmp contains the address */
1206 #endif
1207 
1208 #ifndef __PAGETABLE_PMD_FOLDED
1209         /* get pmd offset in bytes */
1210         uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
1211         uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
1212         GET_CONTEXT(p, tmp); /* get context reg */
1213 
1214         if (use_lwx_insns()) {
1215                 UASM_i_LWX(p, scratch, scratch, ptr);
1216         } else {
1217                 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
1218                 UASM_i_LW(p, scratch, 0, ptr);
1219         }
1220 #endif
1221         /* Adjust the context during the load latency. */
1222         build_adjust_context(p, tmp);
1223 
1224 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1225         uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
1226         /*
1227          * In the LWX case we don't want to do the load in the
1228          * delay slot.  It cannot issue in the same cycle and may be
1229          * speculative and unneeded.
1230          */
1231         if (use_lwx_insns())
1232                 uasm_i_nop(p);
1233 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
1234 
1235 
1236         /* build_update_entries */
1237         if (use_lwx_insns()) {
1238                 even = ptr;
1239                 odd = tmp;
1240                 UASM_i_LWX(p, even, scratch, tmp);
1241                 UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
1242                 UASM_i_LWX(p, odd, scratch, tmp);
1243         } else {
1244                 UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
1245                 even = tmp;
1246                 odd = ptr;
1247                 UASM_i_LW(p, even, 0, ptr); /* get even pte */
1248                 UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
1249         }
1250         if (cpu_has_rixi) {
1251                 uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
1252                 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1253                 uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
1254         } else {
1255                 uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
1256                 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1257                 uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
1258         }
1259         UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
1260 
1261         if (c0_scratch_reg >= 0) {
1262                 UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1263                 build_tlb_write_entry(p, l, r, tlb_random);
1264                 uasm_l_leave(l, *p);
1265                 rv.restore_scratch = 1;
1266         } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13)  {
1267                 build_tlb_write_entry(p, l, r, tlb_random);
1268                 uasm_l_leave(l, *p);
1269                 UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1270         } else {
1271                 UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1272                 build_tlb_write_entry(p, l, r, tlb_random);
1273                 uasm_l_leave(l, *p);
1274                 rv.restore_scratch = 1;
1275         }
1276 
1277         uasm_i_eret(p); /* return from trap */
1278 
1279         return rv;
1280 }
1281 
1282 /*
1283  * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
1284  * because EXL == 0.  If we wrap, we can also use the 32 instruction
1285  * slots before the XTLB refill exception handler which belong to the
1286  * unused TLB refill exception.
1287  */
1288 #define MIPS64_REFILL_INSNS 32
1289 
1290 static void build_r4000_tlb_refill_handler(void)
1291 {
1292         u32 *p = tlb_handler;
1293         struct uasm_label *l = labels;
1294         struct uasm_reloc *r = relocs;
1295         u32 *f;
1296         unsigned int final_len;
1297         struct mips_huge_tlb_info htlb_info __maybe_unused;
1298         enum vmalloc64_mode vmalloc_mode __maybe_unused;
1299 
1300         memset(tlb_handler, 0, sizeof(tlb_handler));
1301         memset(labels, 0, sizeof(labels));
1302         memset(relocs, 0, sizeof(relocs));
1303         memset(final_handler, 0, sizeof(final_handler));
1304 
1305         if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
1306                 htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
1307                                                           scratch_reg);
1308                 vmalloc_mode = refill_scratch;
1309         } else {
1310                 htlb_info.huge_pte = K0;
1311                 htlb_info.restore_scratch = 0;
1312                 htlb_info.need_reload_pte = true;
1313                 vmalloc_mode = refill_noscratch;
1314                 /*
1315                  * create the plain linear handler
1316                  */
1317                 if (bcm1250_m3_war()) {
1318                         unsigned int segbits = 44;
1319 
1320                         uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1321                         uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
1322                         uasm_i_xor(&p, K0, K0, K1);
1323                         uasm_i_dsrl_safe(&p, K1, K0, 62);
1324                         uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
1325                         uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
1326                         uasm_i_or(&p, K0, K0, K1);
1327                         uasm_il_bnez(&p, &r, K0, label_leave);
1328                         /* No need for uasm_i_nop */
1329                 }
1330 
1331 #ifdef CONFIG_64BIT
1332                 build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
1333 #else
1334                 build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
1335 #endif
1336 
1337 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1338                 build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
1339 #endif
1340 
1341                 build_get_ptep(&p, K0, K1);
1342                 build_update_entries(&p, K0, K1);
1343                 build_tlb_write_entry(&p, &l, &r, tlb_random);
1344                 uasm_l_leave(&l, p);
1345                 uasm_i_eret(&p); /* return from trap */
1346         }
1347 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1348         uasm_l_tlb_huge_update(&l, p);
1349         if (htlb_info.need_reload_pte)
1350                 UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
1351         build_huge_update_entries(&p, htlb_info.huge_pte, K1);
1352         build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
1353                                    htlb_info.restore_scratch);
1354 #endif
1355 
1356 #ifdef CONFIG_64BIT
1357         build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
1358 #endif
1359 
1360         /*
1361          * Overflow check: For the 64bit handler, we need at least one
1362          * free instruction slot for the wrap-around branch. In worst
1363          * case, if the intended insertion point is a delay slot, we
1364          * need three, with the second nop'ed and the third being
1365          * unused.
1366          */
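             /*
              * Note: the switch below deliberately nests its case labels
              * inside the if/else so that 32-bit kernels share the simple
              * copy path with Loongson2, while 64-bit kernels take the
              * fold/split path.
              */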
1367         switch (boot_cpu_type()) {
1368         default:
1369                 if (sizeof(long) == 4) {
1370         case CPU_LOONGSON2:
1371                 /* Loongson2 ebase differs from r4k, so we have more space */
1372                         if ((p - tlb_handler) > 64)
1373                                 panic("TLB refill handler space exceeded");
1374                         /*
1375                          * Now fold the handler in the TLB refill handler space.
1376                          */
1377                         f = final_handler;
1378                         /* Simplest case, just copy the handler. */
1379                         uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1380                         final_len = p - tlb_handler;
1381                         break;
1382                 } else {
1383                         if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
1384                             || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
1385                                 && uasm_insn_has_bdelay(relocs,
1386                                                         tlb_handler + MIPS64_REFILL_INSNS - 3)))
1387                                 panic("TLB refill handler space exceeded");
1388                         /*
1389                          * Now fold the handler in the TLB refill handler space.
1390                          */
1391                         f = final_handler + MIPS64_REFILL_INSNS;
1392                         if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
1393                                 /* Just copy the handler. */
1394                                 uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1395                                 final_len = p - tlb_handler;
1396                         } else {
1397 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1398                                 const enum label_id ls = label_tlb_huge_update;
1399 #else
1400                                 const enum label_id ls = label_vmalloc;
1401 #endif
1402                                 u32 *split;
1403                                 int ov = 0;
1404                                 int i;
1405 
1406                                 for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
1407                                         ;
1408                                 BUG_ON(i == ARRAY_SIZE(labels));
1409                                 split = labels[i].addr;
1410 
1411                                 /*
1412                                  * See if we have overflown one way or the other.
1413                                  */
1414                                 if (split > tlb_handler + MIPS64_REFILL_INSNS ||
1415                                     split < p - MIPS64_REFILL_INSNS)
1416                                         ov = 1;
1417 
1418                                 if (ov) {
1419                                         /*
1420                                          * Split two instructions before the end.  One
1421                                          * for the branch and one for the instruction
1422                                          * in the delay slot.
1423                                          */
1424                                         split = tlb_handler + MIPS64_REFILL_INSNS - 2;
1425 
1426                                         /*
1427                                          * If the branch would fall in a delay slot,
1428                                          * we must back up an additional instruction
1429                                          * so that it is no longer in a delay slot.
1430                                          */
1431                                         if (uasm_insn_has_bdelay(relocs, split - 1))
1432                                                 split--;
1433                                 }
1434                                 /* Copy first part of the handler. */
1435                                 uasm_copy_handler(relocs, labels, tlb_handler, split, f);
1436                                 f += split - tlb_handler;
1437 
1438                                 if (ov) {
1439                                         /* Insert branch. */
1440                                         uasm_l_split(&l, final_handler);
1441                                         uasm_il_b(&f, &r, label_split);
1442                                         if (uasm_insn_has_bdelay(relocs, split))
1443                                                 uasm_i_nop(&f);
1444                                         else {
1445                                                 uasm_copy_handler(relocs, labels,
1446                                                                   split, split + 1, f);
1447                                                 uasm_move_labels(labels, f, f + 1, -1);
1448                                                 f++;
1449                                                 split++;
1450                                         }
1451                                 }
1452 
1453                                 /* Copy the rest of the handler. */
1454                                 uasm_copy_handler(relocs, labels, split, p, final_handler);
1455                                 final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
1456                                             (p - split);
1457                         }
1458                 }
1459                 break;
1460         }
1461 
1462         uasm_resolve_relocs(relocs, labels);
1463         pr_debug("Wrote TLB refill handler (%u instructions).\n",
1464                  final_len);
1465 
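        /*
         * Resulting layout of the 0x100 bytes copied below (a sketch;
         * on 64-bit CPUs the hardware enters via the XTLB refill
         * vector at ebase + 0x80, so the handler proper starts there):
         *
         *   ebase + 0x000: overflow tail of the handler, if it had to
         *                  be split above
         *   ebase + 0x080: first part, ending in a branch back to
         *                  ebase + 0x000 when a split was inserted
         */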
1466         memcpy((void *)ebase, final_handler, 0x100);
1467         local_flush_icache_range(ebase, ebase + 0x100);
1468 
1469         dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
1470 }
1471 
1472 static void setup_pw(void)
1473 {
1474         unsigned long pgd_i, pgd_w;
1475 #ifndef __PAGETABLE_PMD_FOLDED
1476         unsigned long pmd_i, pmd_w;
1477 #endif
1478         unsigned long pt_i, pt_w;
1479         unsigned long pte_i, pte_w;
1480 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1481         unsigned long psn;
1482 
1483         psn = ilog2(_PAGE_HUGE);     /* bit used to indicate huge page */
1484 #endif
1485         pgd_i = PGDIR_SHIFT;  /* 1st level PGD */
1486 #ifndef __PAGETABLE_PMD_FOLDED
1487         pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER;
1488 
1489         pmd_i = PMD_SHIFT;    /* 2nd level PMD */
1490         pmd_w = PMD_SHIFT - PAGE_SHIFT;
1491 #else
1492         pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER;
1493 #endif
1494 
1495         pt_i  = PAGE_SHIFT;    /* 3rd level PTE */
1496         pt_w  = PAGE_SHIFT - 3;
1497 
1498         pte_i = ilog2(_PAGE_GLOBAL);
1499         pte_w = 0;
1500 
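        /*
         * Illustrative packing example with hypothetical values (4 KB
         * pages, folded PMD, PGDIR_SHIFT = 22, PGD_ORDER = 0):
         *
         *   pgd_i = 22, pgd_w = 22 - 12 + 0 = 10
         *   pt_i  = 12, pt_w  = 12 - 3 = 9
         *
         * so PWField packs the index shifts at bits 24 (GDI), 6 (PTI)
         * and 0 (PTEI), PWSize packs the widths at the same positions,
         * and bit 30 of PWSize (PS) selects 64-bit directory pointers.
         */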
1501 #ifndef __PAGETABLE_PMD_FOLDED
1502         write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
1503         write_c0_pwsize(1 << 30 | pgd_w << 24 | pmd_w << 12 | pt_w << 6 | pte_w);
1504 #else
1505         write_c0_pwfield(pgd_i << 24 | pt_i << 6 | pte_i);
1506         write_c0_pwsize(1 << 30 | pgd_w << 24 | pt_w << 6 | pte_w);
1507 #endif
1508 
1509 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1510         write_c0_pwctl(1 << 6 | psn);
1511 #endif
1512         write_c0_kpgd(swapper_pg_dir);
1513         kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
1514 }
1515 
1516 static void build_loongson3_tlb_refill_handler(void)
1517 {
1518         u32 *p = tlb_handler;
1519         struct uasm_label *l = labels;
1520         struct uasm_reloc *r = relocs;
1521 
1522         memset(labels, 0, sizeof(labels));
1523         memset(relocs, 0, sizeof(relocs));
1524         memset(tlb_handler, 0, sizeof(tlb_handler));
1525 
1526         if (check_for_high_segbits) {
1527                 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1528                 uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
1529                 uasm_il_beqz(&p, &r, K1, label_vmalloc);
1530                 uasm_i_nop(&p);
1531 
1532                 uasm_il_bgez(&p, &r, K0, label_large_segbits_fault);
1533                 uasm_i_nop(&p);
1534                 uasm_l_vmalloc(&l, p);
1535         }
1536 
1537         uasm_i_dmfc0(&p, K1, C0_PGD);
1538 
1539         uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
1540 #ifndef __PAGETABLE_PMD_FOLDED
1541         uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
1542 #endif
1543         uasm_i_ldpte(&p, K1, 0);      /* even */
1544         uasm_i_ldpte(&p, K1, 1);      /* odd */
1545         uasm_i_tlbwr(&p);
1546 
1547         /* restore page mask */
1548         if (PM_DEFAULT_MASK >> 16) {
1549                 uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16);
1550                 uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff);
1551                 uasm_i_mtc0(&p, K0, C0_PAGEMASK);
1552         } else if (PM_DEFAULT_MASK) {
1553                 uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK);
1554                 uasm_i_mtc0(&p, K0, C0_PAGEMASK);
1555         } else {
1556                 uasm_i_mtc0(&p, 0, C0_PAGEMASK);
1557         }
1558 
1559         uasm_i_eret(&p);
1560 
1561         if (check_for_high_segbits) {
1562                 uasm_l_large_segbits_fault(&l, p);
1563                 UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0);
1564                 uasm_i_jr(&p, K1);
1565                 uasm_i_nop(&p);
1566         }
1567 
1568         uasm_resolve_relocs(relocs, labels);
1569         memcpy((void *)(ebase + 0x80), tlb_handler, 0x80);
1570         local_flush_icache_range(ebase + 0x80, ebase + 0x100);
1571         dump_handler("loongson3_tlb_refill", (u32 *)(ebase + 0x80), 32);
1572 }
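/*
 * A sketch of the fastpath built above: C0_PGD seeds the walk, each
 * lddir loads the next directory level, and the two ldpte instructions
 * fetch the even/odd PTE pair into the EntryLo registers before tlbwr
 * writes a random TLB entry.
 */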
1573 
1574 extern u32 handle_tlbl[], handle_tlbl_end[];
1575 extern u32 handle_tlbs[], handle_tlbs_end[];
1576 extern u32 handle_tlbm[], handle_tlbm_end[];
1577 extern u32 tlbmiss_handler_setup_pgd_start[];
1578 extern u32 tlbmiss_handler_setup_pgd[];
1579 EXPORT_SYMBOL_GPL(tlbmiss_handler_setup_pgd);
1580 extern u32 tlbmiss_handler_setup_pgd_end[];
1581 
1582 static void build_setup_pgd(void)
1583 {
1584         const int a0 = 4;
1585         const int __maybe_unused a1 = 5;
1586         const int __maybe_unused a2 = 6;
1587         u32 *p = tlbmiss_handler_setup_pgd_start;
1588         const int tlbmiss_handler_setup_pgd_size =
1589                 tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start;
1590 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1591         long pgdc = (long)pgd_current;
1592 #endif
1593 
1594         memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size *
1595                                         sizeof(tlbmiss_handler_setup_pgd[0]));
1596         memset(labels, 0, sizeof(labels));
1597         memset(relocs, 0, sizeof(relocs));
1598         pgd_reg = allocate_kscratch();
1599 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
1600         if (pgd_reg == -1) {
1601                 struct uasm_label *l = labels;
1602                 struct uasm_reloc *r = relocs;
1603 
1604                 /* PGD << 11 in c0_Context */
1605                 /*
1606                  * If it is a ckseg0 address, convert to a physical
1607                  * address.  Shifting right by 29 and adding 4 will
1608                  * result in zero for these addresses.
1609                  *
1610                  */
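                /*
                 * Worked example: a0 = 0xffffffff80123000 (CKSEG0)
                 * gives (s64)a0 >> 29 = -4 and -4 + 4 = 0, so the bnez
                 * falls through and dinsm clears bits 63..29, leaving
                 * the physical address 0x00123000. Any other address
                 * yields a nonzero sum and skips the conversion.
                 */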
1611                 UASM_i_SRA(&p, a1, a0, 29);
1612                 UASM_i_ADDIU(&p, a1, a1, 4);
1613                 uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
1614                 uasm_i_nop(&p);
1615                 uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
1616                 uasm_l_tlbl_goaround1(&l, p);
1617                 UASM_i_SLL(&p, a0, a0, 11);
1618                 uasm_i_jr(&p, 31);
1619                 UASM_i_MTC0(&p, a0, C0_CONTEXT);
1620         } else {
1621                 /* PGD in c0_KScratch */
1622                 uasm_i_jr(&p, 31);
1623                 if (cpu_has_ldpte)
1624                         UASM_i_MTC0(&p, a0, C0_PWBASE);
1625                 else
1626                         UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
1627         }
1628 #else
1629 #ifdef CONFIG_SMP
1630         /* Save PGD to pgd_current[smp_processor_id()] */
1631         UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG);
1632         UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT);
1633         UASM_i_LA_mostly(&p, a2, pgdc);
1634         UASM_i_ADDU(&p, a2, a2, a1);
1635         UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
1636 #else
1637         UASM_i_LA_mostly(&p, a2, pgdc);
1638         UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
1639 #endif /* SMP */
1640         uasm_i_jr(&p, 31);
1641 
1642         /* If pgd_reg is allocated, also save PGD to the scratch register (fills the jr delay slot) */
1643         if (pgd_reg != -1)
1644                 UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
1645         else
1646                 uasm_i_nop(&p);
1647 #endif
1648         if (p >= tlbmiss_handler_setup_pgd_end)
1649                 panic("tlbmiss_handler_setup_pgd space exceeded");
1650 
1651         uasm_resolve_relocs(relocs, labels);
1652         pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
1653                  (unsigned int)(p - tlbmiss_handler_setup_pgd));
1654 
1655         dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
1656                                         tlbmiss_handler_setup_pgd_size);
1657 }
1658 
1659 static void
1660 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
1661 {
1662 #ifdef CONFIG_SMP
1663 # ifdef CONFIG_PHYS_ADDR_T_64BIT
1664         if (cpu_has_64bits)
1665                 uasm_i_lld(p, pte, 0, ptr);
1666         else
1667 # endif
1668                 UASM_i_LL(p, pte, 0, ptr);
1669 #else
1670 # ifdef CONFIG_PHYS_ADDR_T_64BIT
1671         if (cpu_has_64bits)
1672                 uasm_i_ld(p, pte, 0, ptr);
1673         else
1674 # endif
1675                 UASM_i_LW(p, pte, 0, ptr);
1676 #endif
1677 }
1678 
1679 static void
1680 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1681         unsigned int mode, unsigned int scratch)
1682 {
1683         unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
1684         unsigned int swmode = mode & ~hwmode;
1685 
1686         if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) {
1687                 uasm_i_lui(p, scratch, swmode >> 16);
1688                 uasm_i_or(p, pte, pte, scratch);
1689                 BUG_ON(swmode & 0xffff);
1690         } else {
1691                 uasm_i_ori(p, pte, pte, mode);
1692         }
1693 
1694 #ifdef CONFIG_SMP
1695 # ifdef CONFIG_PHYS_ADDR_T_64BIT
1696         if (cpu_has_64bits)
1697                 uasm_i_scd(p, pte, 0, ptr);
1698         else
1699 # endif
1700                 UASM_i_SC(p, pte, 0, ptr);
1701 
1702         if (r10000_llsc_war())
1703                 uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
1704         else
1705                 uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1706 
1707 # ifdef CONFIG_PHYS_ADDR_T_64BIT
1708         if (!cpu_has_64bits) {
1709                 /* no uasm_i_nop needed */
1710                 uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
1711                 uasm_i_ori(p, pte, pte, hwmode);
1712                 BUG_ON(hwmode & ~0xffff);
1713                 uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
1714                 uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1715                 /* no uasm_i_nop needed */
1716                 uasm_i_lw(p, pte, 0, ptr);
1717         } else
1718                 uasm_i_nop(p);
1719 # else
1720         uasm_i_nop(p);
1721 # endif
1722 #else
1723 # ifdef CONFIG_PHYS_ADDR_T_64BIT
1724         if (cpu_has_64bits)
1725                 uasm_i_sd(p, pte, 0, ptr);
1726         else
1727 # endif
1728                 UASM_i_SW(p, pte, 0, ptr);
1729 
1730 # ifdef CONFIG_PHYS_ADDR_T_64BIT
1731         if (!cpu_has_64bits) {
1732                 uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
1733                 uasm_i_ori(p, pte, pte, hwmode);
1734                 BUG_ON(hwmode & ~0xffff);
1735                 uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
1736                 uasm_i_lw(p, pte, 0, ptr);
1737         }
1738 # endif
1739 #endif
1740 }
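/*
 * Taken together, iPTE_LW and iPTE_SW form the usual ll/sc retry loop
 * on SMP. A sketch of the emitted code for the 32-bit PTE case:
 *
 *   smp_pgtable_change:
 *     ll    pte, 0(ptr)              # iPTE_LW
 *     ...                            # PTE tests by the callers below
 *     ori   pte, pte, mode           # iPTE_SW
 *     sc    pte, 0(ptr)
 *     beqz  pte, smp_pgtable_change  # retry if the store failed
 */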
1741 
1742 /*
1743  * Check if the PTE is present; if not, jump to LABEL. PTR points to
1744  * the page table where this PTE is located; PTE will be reloaded
1745  * with its original value.
1746  */
1747 static void
1748 build_pte_present(u32 **p, struct uasm_reloc **r,
1749                   int pte, int ptr, int scratch, enum label_id lid)
1750 {
1751         int t = scratch >= 0 ? scratch : pte;
1752         int cur = pte;
1753 
1754         if (cpu_has_rixi) {
1755                 if (use_bbit_insns()) {
1756                         uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
1757                         uasm_i_nop(p);
1758                 } else {
1759                         if (_PAGE_PRESENT_SHIFT) {
1760                                 uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
1761                                 cur = t;
1762                         }
1763                         uasm_i_andi(p, t, cur, 1);
1764                         uasm_il_beqz(p, r, t, lid);
1765                         if (pte == t)
1766                                 /* You lose the SMP race :-( */
1767                                 iPTE_LW(p, pte, ptr);
1768                 }
1769         } else {
1770                 if (_PAGE_PRESENT_SHIFT) {
1771                         uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
1772                         cur = t;
1773                 }
1774                 uasm_i_andi(p, t, cur,
1775                         (_PAGE_PRESENT | _PAGE_NO_READ) >> _PAGE_PRESENT_SHIFT);
1776                 uasm_i_xori(p, t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT);
1777                 uasm_il_bnez(p, r, t, lid);
1778                 if (pte == t)
1779                         /* You lose the SMP race :-( */
1780                         iPTE_LW(p, pte, ptr);
1781         }
1782 }
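/*
 * For reference, the non-RIXI fallback above expands to this sketch
 * (the srl is dropped when _PAGE_PRESENT_SHIFT is 0):
 *
 *   srl   t, pte, _PAGE_PRESENT_SHIFT
 *   andi  t, t, (_PAGE_PRESENT | _PAGE_NO_READ) >> _PAGE_PRESENT_SHIFT
 *   xori  t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT
 *   bnez  t, lid          # taken if !_PAGE_PRESENT or _PAGE_NO_READ
 */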
1783 
1784 /* Make PTE valid, store the result at PTR. */
1785 static void
1786 build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
1787                  unsigned int ptr, unsigned int scratch)
1788 {
1789         unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
1790 
1791         iPTE_SW(p, r, pte, ptr, mode, scratch);
1792 }
1793 
1794 /*
1795  * Check if the PTE can be written to; if not, branch to LABEL.
1796  * Either way, restore the PTE with the value from PTR when done.
1797  */
1798 static void
1799 build_pte_writable(u32 **p, struct uasm_reloc **r,
1800                    unsigned int pte, unsigned int ptr, int scratch,
1801                    enum label_id lid)
1802 {
1803         int t = scratch >= 0 ? scratch : pte;
1804         int cur = pte;
1805 
1806         if (_PAGE_PRESENT_SHIFT) {
1807                 uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
1808                 cur = t;
1809         }
1810         uasm_i_andi(p, t, cur,
1811                     (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
1812         uasm_i_xori(p, t, t,
1813                     (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
1814         uasm_il_bnez(p, r, t, lid);
1815         if (pte == t)
1816                 /* You lose the SMP race :-(*/
1817                 iPTE_LW(p, pte, ptr);
1818         else
1819                 uasm_i_nop(p);
1820 }
1821 
1822 /* Make PTE writable and update the software status bits as well,
1823  * then store it at PTR.
1824  */
1825 static void
1826 build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
1827                  unsigned int ptr, unsigned int scratch)
1828 {
1829         unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
1830                              | _PAGE_DIRTY);
1831 
1832         iPTE_SW(p, r, pte, ptr, mode, scratch);
1833 }
1834 
1835 /*
1836  * Check if the PTE can be modified; if not, branch to LABEL.
1837  * Either way, restore the PTE with the value from PTR when done.
1838  */
1839 static void
1840 build_pte_modifiable(u32 **p, struct uasm_reloc **r,
1841                      unsigned int pte, unsigned int ptr, int scratch,
1842                      enum label_id lid)
1843 {
1844         if (use_bbit_insns()) {
1845                 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
1846                 uasm_i_nop(p);
1847         } else {
1848                 int t = scratch >= 0 ? scratch : pte;
1849                 uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
1850                 uasm_i_andi(p, t, t, 1);
1851                 uasm_il_beqz(p, r, t, lid);
1852                 if (pte == t)
1853                         /* You lose the SMP race :-( */
1854                         iPTE_LW(p, pte, ptr);
1855         }
1856 }
1857 
1858 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1859 
1860 
1861 /*
1862  * R3000 style TLB load/store/modify handlers.
1863  */
1864 
1865 /*
1866  * This places the pte into ENTRYLO0 and writes it with tlbwi.
1867  * Then it returns.
1868  */
1869 static void
1870 build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
1871 {
1872         uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1873         uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
1874         uasm_i_tlbwi(p);
1875         uasm_i_jr(p, tmp);
1876         uasm_i_rfe(p); /* branch delay */
1877 }
1878 
1879 /*
1880  * This places the pte into ENTRYLO0 and writes it with tlbwi
1881  * or tlbwr as appropriate.  This is because the index register
1882  * may have the probe fail bit set as a result of a trap on a
1883  * kseg2 access, i.e. without refill.  Then it returns.
1884  */
1885 static void
1886 build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
1887                              struct uasm_reloc **r, unsigned int pte,
1888                              unsigned int tmp)
1889 {
1890         uasm_i_mfc0(p, tmp, C0_INDEX);
1891         uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1892         uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
1893         uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
1894         uasm_i_tlbwi(p); /* cp0 delay */
1895         uasm_i_jr(p, tmp);
1896         uasm_i_rfe(p); /* branch delay */
1897         uasm_l_r3000_write_probe_fail(l, *p);
1898         uasm_i_tlbwr(p); /* cp0 delay */
1899         uasm_i_jr(p, tmp);
1900         uasm_i_rfe(p); /* branch delay */
1901 }
1902 
1903 static void
1904 build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
1905                                    unsigned int ptr)
1906 {
1907         long pgdc = (long)pgd_current;
1908 
1909         uasm_i_mfc0(p, pte, C0_BADVADDR);
1910         uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
1911         uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
1912         uasm_i_srl(p, pte, pte, 22); /* load delay */
1913         uasm_i_sll(p, pte, pte, 2);
1914         uasm_i_addu(p, ptr, ptr, pte);
1915         uasm_i_mfc0(p, pte, C0_CONTEXT);
1916         uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
1917         uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
1918         uasm_i_addu(p, ptr, ptr, pte);
1919         uasm_i_lw(p, pte, 0, ptr);
1920         uasm_i_tlbp(p); /* load delay */
1921 }
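/*
 * A sketch of the walk above, assuming the classic R3000 setup with
 * 4 KB pages: badvaddr >> 22 indexes the 1024-entry PGD (each entry
 * maps 4 MB), while c0_Context & 0xffc is the hardware-computed byte
 * offset of the 4-byte PTE within that page table.
 */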
1922 
1923 static void build_r3000_tlb_load_handler(void)
1924 {
1925         u32 *p = handle_tlbl;
1926         const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
1927         struct uasm_label *l = labels;
1928         struct uasm_reloc *r = relocs;
1929 
1930         memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
1931         memset(labels, 0, sizeof(labels));
1932         memset(relocs, 0, sizeof(relocs));
1933 
1934         build_r3000_tlbchange_handler_head(&p, K0, K1);
1935         build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
1936         uasm_i_nop(&p); /* load delay */
1937         build_make_valid(&p, &r, K0, K1, -1);
1938         build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
1939 
1940         uasm_l_nopage_tlbl(&l, p);
1941         uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
1942         uasm_i_nop(&p);
1943 
1944         if (p >= handle_tlbl_end)
1945                 panic("TLB load handler fastpath space exceeded");
1946 
1947         uasm_resolve_relocs(relocs, labels);
1948         pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
1949                  (unsigned int)(p - handle_tlbl));
1950 
1951         dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
1952 }
1953 
1954 static void build_r3000_tlb_store_handler(void)
1955 {
1956         u32 *p = handle_tlbs;
1957         const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
1958         struct uasm_label *l = labels;
1959         struct uasm_reloc *r = relocs;
1960 
1961         memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
1962         memset(labels, 0, sizeof(labels));
1963         memset(relocs, 0, sizeof(relocs));
1964 
1965         build_r3000_tlbchange_handler_head(&p, K0, K1);
1966         build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
1967         uasm_i_nop(&p); /* load delay */
1968         build_make_write(&p, &r, K0, K1, -1);
1969         build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
1970 
1971         uasm_l_nopage_tlbs(&l, p);
1972         uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1973         uasm_i_nop(&p);
1974 
1975         if (p >= handle_tlbs_end)
1976                 panic("TLB store handler fastpath space exceeded");
1977 
1978         uasm_resolve_relocs(relocs, labels);
1979         pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
1980                  (unsigned int)(p - handle_tlbs));
1981 
1982         dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
1983 }
1984 
1985 static void build_r3000_tlb_modify_handler(void)
1986 {
1987         u32 *p = handle_tlbm;
1988         const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
1989         struct uasm_label *l = labels;
1990         struct uasm_reloc *r = relocs;
1991 
1992         memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
1993         memset(labels, 0, sizeof(labels));
1994         memset(relocs, 0, sizeof(relocs));
1995 
1996         build_r3000_tlbchange_handler_head(&p, K0, K1);
1997         build_pte_modifiable(&p, &r, K0, K1,  -1, label_nopage_tlbm);
1998         uasm_i_nop(&p); /* load delay */
1999         build_make_write(&p, &r, K0, K1, -1);
2000         build_r3000_pte_reload_tlbwi(&p, K0, K1);
2001 
2002         uasm_l_nopage_tlbm(&l, p);
2003         uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2004         uasm_i_nop(&p);
2005 
2006         if (p >= handle_tlbm_end)
2007                 panic("TLB modify handler fastpath space exceeded");
2008 
2009         uasm_resolve_relocs(relocs, labels);
2010         pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
2011                  (unsigned int)(p - handle_tlbm));
2012 
2013         dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_size);
2014 }
2015 #endif /* !CONFIG_MIPS_PGD_C0_CONTEXT */
2016 
2017 static bool cpu_has_tlbex_tlbp_race(void)
2018 {
2019         /*
2020          * When a Hardware Table Walker is running it can replace TLB entries
2021          * at any time, leading to a race between it & the CPU.
2022          */
2023         if (cpu_has_htw)
2024                 return true;
2025 
2026         /*
2027          * If the CPU shares FTLB RAM with its siblings then our entry may be
2028          * replaced at any time by a sibling performing a write to the FTLB.
2029          */
2030         if (cpu_has_shared_ftlb_ram)
2031                 return true;
2032 
2033         /* In all other cases there ought to be no race condition to handle */
2034         return false;
2035 }
2036 
2037 /*
2038  * R4000 style TLB load/store/modify handlers.
2039  */
2040 static struct work_registers
2041 build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
2042                                    struct uasm_reloc **r)
2043 {
2044         struct work_registers wr = build_get_work_registers(p);
2045 
2046 #ifdef CONFIG_64BIT
2047         build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
2048 #else
2049         build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
2050 #endif
2051 
2052 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2053         /*
2054          * For huge TLB entries the pmd doesn't contain a page table
2055          * address but the TLB PTE itself. Check the _PAGE_HUGE bit to
2056          * see if we need to jump to huge TLB processing.
2057          */
2058         build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
2059 #endif
2060 
2061         UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
2062         UASM_i_LW(p, wr.r2, 0, wr.r2);
2063         UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
2064         uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
2065         UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
2066 
2067 #ifdef CONFIG_SMP
2068         uasm_l_smp_pgtable_change(l, *p);
2069 #endif
2070         iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
2071         if (!m4kc_tlbp_war()) {
2072                 build_tlb_probe_entry(p);
2073                 if (cpu_has_tlbex_tlbp_race()) {
2074                         /* If the tlbp missed, the entry raced away; leave and retry */
2075                         uasm_i_ehb(p);
2076                         uasm_i_mfc0(p, wr.r3, C0_INDEX);
2077                         uasm_il_bltz(p, r, wr.r3, label_leave);
2078                         uasm_i_nop(p);
2079                 }
2080         }
2081         return wr;
2082 }
2083 
2084 static void
2085 build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
2086                                    struct uasm_reloc **r, unsigned int tmp,
2087                                    unsigned int ptr)
2088 {
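        /*
         * The ori/xori pair below sets and then clears the
         * sizeof(pte_t) bit, rounding ptr down to the even PTE of the
         * even/odd pair.
         */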
2089         uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
2090         uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
2091         build_update_entries(p, tmp, ptr);
2092         build_tlb_write_entry(p, l, r, tlb_indexed);
2093         uasm_l_leave(l, *p);
2094         build_restore_work_registers(p);
2095         uasm_i_eret(p); /* return from trap */
2096 
2097 #ifdef CONFIG_64BIT
2098         build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
2099 #endif
2100 }
2101 
2102 static void build_r4000_tlb_load_handler(void)
2103 {
2104         u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbl);
2105         const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
2106         struct uasm_label *l = labels;
2107         struct uasm_reloc *r = relocs;
2108         struct work_registers wr;
2109 
2110         memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
2111         memset(labels, 0, sizeof(labels));
2112         memset(relocs, 0, sizeof(relocs));
2113 
2114         if (bcm1250_m3_war()) {
2115                 unsigned int segbits = 44;
2116 
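                /*
                 * BCM1250 M3 erratum workaround: XOR BadVAddr with
                 * EntryHi and test the region bits plus every VPN bit
                 * above the even/odd page bit (truncated to segbits).
                 * If any differ, the exception appears spurious, so
                 * just leave and let the access retry.
                 */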
2117                 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
2118                 uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
2119                 uasm_i_xor(&p, K0, K0, K1);
2120                 uasm_i_dsrl_safe(&p, K1, K0, 62);
2121                 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
2122                 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
2123                 uasm_i_or(&p, K0, K0, K1);
2124                 uasm_il_bnez(&p, &r, K0, label_leave);
2125                 /* No need for uasm_i_nop */
2126         }
2127 
2128         wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2129         build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
2130         if (m4kc_tlbp_war())
2131                 build_tlb_probe_entry(&p);
2132 
2133         if (cpu_has_rixi && !cpu_has_rixiex) {
2134                 /*
2135                  * If the page is not _PAGE_VALID, RI or XI could not
2136                  * have triggered it.  Skip the expensive test.
2137                  */
2138                 if (use_bbit_insns()) {
2139                         uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
2140                                       label_tlbl_goaround1);
2141                 } else {
2142                         uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
2143                         uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
2144                 }
2145                 uasm_i_nop(&p);
2146 
2147                 /*
2148                  * Warn if something may race with us & replace the TLB entry
2149                  * before we read it here. Everything with such races should
2150                  * also have dedicated RiXi exception handlers, so this
2151                  * shouldn't be hit.
2152                  */
2153                 WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
2154 
2155                 uasm_i_tlbr(&p);
2156 
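                /*
                 * Note the unusual switch below: the Octeon case
                 * labels sit inside the if block, so Octeon CPUs jump
                 * straight to the break and never emit the ehb, while
                 * every other CPU type takes the default path and
                 * emits the execution-hazard barrier when needed.
                 */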
2157                 switch (current_cpu_type()) {
2158                 default:
2159                         if (cpu_has_mips_r2_exec_hazard) {
2160                                 uasm_i_ehb(&p);
2161 
2162                 case CPU_CAVIUM_OCTEON:
2163                 case CPU_CAVIUM_OCTEON_PLUS:
2164                 case CPU_CAVIUM_OCTEON2:
2165                                 break;
2166                         }
2167                 }
2168 
2169                 /* Examine entrylo 0 or 1 based on ptr. */
2170                 if (use_bbit_insns()) {
2171                         uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
2172                 } else {
2173                         uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
2174                         uasm_i_beqz(&p, wr.r3, 8);
2175                 }
2176                 /* load it in the delay slot */
2177                 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
2178                 /* load it if ptr is odd */
2179                 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
2180                 /*
2181                  * If the entryLo (now in wr.r3) is valid (bit 1), RI or
2182                  * XI must have triggered it.
2183                  */
2184                 if (use_bbit_insns()) {
2185                         uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
2186                         uasm_i_nop(&p);
2187                         uasm_l_tlbl_goaround1(&l, p);
2188                 } else {
2189                         uasm_i_andi(&p, wr.r3, wr.r3, 2);
2190                         uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
2191                         uasm_i_nop(&p);
2192                 }
2193                 uasm_l_tlbl_goaround1(&l, p);
2194         }
2195         build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);
2196         build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2197 
2198 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2199         /*
2200          * This is the entry point when build_r4000_tlbchange_handler_head
2201          * spots a huge page.
2202          */
2203         uasm_l_tlb_huge_update(&l, p);
2204         iPTE_LW(&p, wr.r1, wr.r2);
2205         build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
2206         build_tlb_probe_entry(&p);
2207 
2208         if (cpu_has_rixi && !cpu_has_rixiex) {
2209                 /*
2210                  * If the page is not _PAGE_VALID, RI or XI could not
2211                  * have triggered it.  Skip the expensive test.
2212                  */
2213                 if (use_bbit_insns()) {
2214                         uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
2215                                       label_tlbl_goaround2);
2216                 } else {
2217                         uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
2218                         uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
2219                 }
2220                 uasm_i_nop(&p);
2221 
2222                 /*
2223                  * Warn if something may race with us & replace the TLB entry
2224                  * before we read it here. Everything with such races should
2225                  * also have dedicated RiXi exception handlers, so this
2226                  * shouldn't be hit.
2227                  */
2228                 WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
2229 
2230                 uasm_i_tlbr(&p);
2231 
2232                 switch (current_cpu_type()) {
2233                 default:
2234                         if (cpu_has_mips_r2_exec_hazard) {
2235                                 uasm_i_ehb(&p);
2236 
2237                 case CPU_CAVIUM_OCTEON:
2238                 case CPU_CAVIUM_OCTEON_PLUS:
2239                 case CPU_CAVIUM_OCTEON2:
2240                                 break;
2241                         }
2242                 }
2243 
2244                 /* Examine entrylo 0 or 1 based on ptr. */
2245                 if (use_bbit_insns()) {
2246                         uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
2247                 } else {
2248                         uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
2249                         uasm_i_beqz(&p, wr.r3, 8);
2250                 }
2251                 /* load it in the delay slot */
2252                 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
2253                 /* load it if ptr is odd */
2254                 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
2255                 /*
2256                  * If the entryLo (now in wr.r3) is valid (bit 1), RI or
2257                  * XI must have triggered it.
2258                  */
2259                 if (use_bbit_insns()) {
2260                         uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
2261                 } else {
2262                         uasm_i_andi(&p, wr.r3, wr.r3, 2);
2263                         uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
2264                 }
2265                 if (PM_DEFAULT_MASK == 0)
2266                         uasm_i_nop(&p);
2267                 /*
2268                  * We clobbered C0_PAGEMASK; restore it.  On the other branch
2269                  * it is restored in build_huge_tlb_write_entry.
2270                  */
2271                 build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
2272 
2273                 uasm_l_tlbl_goaround2(&l, p);
2274         }
2275         uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
2276         build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2277 #endif
2278 
2279         uasm_l_nopage_tlbl(&l, p);
2280         build_restore_work_registers(&p);
2281 #ifdef CONFIG_CPU_MICROMIPS
2282         if ((unsigned long)tlb_do_page_fault_0 & 1) {
2283                 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
2284                 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
2285                 uasm_i_jr(&p, K0);
2286         } else
2287 #endif
2288         uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
2289         uasm_i_nop(&p);
2290 
2291         if (p >= handle_tlbl_end)
2292                 panic("TLB load handler fastpath space exceeded");
2293 
2294         uasm_resolve_relocs(relocs, labels);
2295         pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
2296                  (unsigned int)(p - handle_tlbl));
2297 
2298         dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
2299 }
2300 
2301 static void build_r4000_tlb_store_handler(void)
2302 {
2303         u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbs);
2304         const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
2305         struct uasm_label *l = labels;
2306         struct uasm_reloc *r = relocs;
2307         struct work_registers wr;
2308 
2309         memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
2310         memset(labels, 0, sizeof(labels));
2311         memset(relocs, 0, sizeof(relocs));
2312 
2313         wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2314         build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2315         if (m4kc_tlbp_war())
2316                 build_tlb_probe_entry(&p);
2317         build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
2318         build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2319 
2320 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2321         /*
2322          * This is the entry point when
2323          * build_r4000_tlbchange_handler_head spots a huge page.
2324          */
2325         uasm_l_tlb_huge_update(&l, p);
2326         iPTE_LW(&p, wr.r1, wr.r2);
2327         build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2328         build_tlb_probe_entry(&p);
2329         uasm_i_ori(&p, wr.r1, wr.r1,
2330                    _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2331         build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2332 #endif
2333 
2334         uasm_l_nopage_tlbs(&l, p);
2335         build_restore_work_registers(&p);
2336 #ifdef CONFIG_CPU_MICROMIPS
2337         if ((unsigned long)tlb_do_page_fault_1 & 1) {
2338                 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2339                 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2340                 uasm_i_jr(&p, K0);
2341         } else
2342 #endif
2343         uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2344         uasm_i_nop(&p);
2345 
2346         if (p >= handle_tlbs_end)
2347                 panic("TLB store handler fastpath space exceeded");
2348 
2349         uasm_resolve_relocs(relocs, labels);
2350         pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
2351                  (unsigned int)(p - handle_tlbs));
2352 
2353         dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
2354 }
2355 
2356 static void build_r4000_tlb_modify_handler(void)
2357 {
2358         u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbm);
2359         const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
2360         struct uasm_label *l = labels;
2361         struct uasm_reloc *r = relocs;
2362         struct work_registers wr;
2363 
2364         memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
2365         memset(labels, 0, sizeof(labels));
2366         memset(relocs, 0, sizeof(relocs));
2367 
2368         wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2369         build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
2370         if (m4kc_tlbp_war())
2371                 build_tlb_probe_entry(&p);
2372         /* Present and writable bits are set; now set accessed and dirty. */
2373         build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
2374         build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2375 
2376 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2377         /*
2378          * This is the entry point when
2379          * build_r4000_tlbchange_handler_head spots a huge page.
2380          */
2381         uasm_l_tlb_huge_update(&l, p);
2382         iPTE_LW(&p, wr.r1, wr.r2);
2383         build_pte_modifiable(&p, &r, wr.r1, wr.r2,  wr.r3, label_nopage_tlbm);
2384         build_tlb_probe_entry(&p);
2385         uasm_i_ori(&p, wr.r1, wr.r1,
2386                    _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2387         build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
2388 #endif
2389 
2390         uasm_l_nopage_tlbm(&l, p);
2391         build_restore_work_registers(&p);
2392 #ifdef CONFIG_CPU_MICROMIPS
2393         if ((unsigned long)tlb_do_page_fault_1 & 1) {
2394                 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2395                 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2396                 uasm_i_jr(&p, K0);
2397         } else
2398 #endif
2399         uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2400         uasm_i_nop(&p);
2401 
2402         if (p >= handle_tlbm_end)
2403                 panic("TLB modify handler fastpath space exceeded");
2404 
2405         uasm_resolve_relocs(relocs, labels);
2406         pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
2407                  (unsigned int)(p - handle_tlbm));
2408 
2409         dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
2410 }
2411 
2412 static void flush_tlb_handlers(void)
2413 {
2414         local_flush_icache_range((unsigned long)handle_tlbl,
2415                            (unsigned long)handle_tlbl_end);
2416         local_flush_icache_range((unsigned long)handle_tlbs,
2417                            (unsigned long)handle_tlbs_end);
2418         local_flush_icache_range((unsigned long)handle_tlbm,
2419                            (unsigned long)handle_tlbm_end);
2420         local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
2421                            (unsigned long)tlbmiss_handler_setup_pgd_end);
2422 }
2423 
2424 static void print_htw_config(void)
2425 {
2426         unsigned long config;
2427         unsigned int pwctl;
2428         const int field = 2 * sizeof(unsigned long);
2429 
2430         config = read_c0_pwfield();
2431         pr_debug("PWField (0x%0*lx): GDI: 0x%02lx  UDI: 0x%02lx  MDI: 0x%02lx  PTI: 0x%02lx  PTEI: 0x%02lx\n",
2432                 field, config,
2433                 (config & MIPS_PWFIELD_GDI_MASK) >> MIPS_PWFIELD_GDI_SHIFT,
2434                 (config & MIPS_PWFIELD_UDI_MASK) >> MIPS_PWFIELD_UDI_SHIFT,
2435                 (config & MIPS_PWFIELD_MDI_MASK) >> MIPS_PWFIELD_MDI_SHIFT,
2436                 (config & MIPS_PWFIELD_PTI_MASK) >> MIPS_PWFIELD_PTI_SHIFT,
2437                 (config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);
2438 
2439         config = read_c0_pwsize();
2440         pr_debug("PWSize  (0x%0*lx): PS: 0x%lx  GDW: 0x%02lx  UDW: 0x%02lx  MDW: 0x%02lx  PTW: 0x%02lx  PTEW: 0x%02lx\n",
2441                 field, config,
2442                 (config & MIPS_PWSIZE_PS_MASK) >> MIPS_PWSIZE_PS_SHIFT,
2443                 (config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
2444                 (config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
2445                 (config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
2446                 (config & MIPS_PWSIZE_PTW_MASK) >> MIPS_PWSIZE_PTW_SHIFT,
2447                 (config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);
2448 
2449         pwctl = read_c0_pwctl();
2450         pr_debug("PWCtl   (0x%x): PWEn: 0x%x  XK: 0x%x  XS: 0x%x  XU: 0x%x  DPH: 0x%x  HugePg: 0x%x  Psn: 0x%x\n",
2451                 pwctl,
2452                 (pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
2453                 (pwctl & MIPS_PWCTL_XK_MASK) >> MIPS_PWCTL_XK_SHIFT,
2454                 (pwctl & MIPS_PWCTL_XS_MASK) >> MIPS_PWCTL_XS_SHIFT,
2455                 (pwctl & MIPS_PWCTL_XU_MASK) >> MIPS_PWCTL_XU_SHIFT,
2456                 (pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
2457                 (pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
2458                 (pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
2459 }
2460 
2461 static void config_htw_params(void)
2462 {
2463         unsigned long pwfield, pwsize, ptei;
2464         unsigned int config;
2465 
2466         /*
2467          * With 2-level page tables we only need to set up GDW and
2468          * PTW; with 3 levels MDW is programmed as well. UDW remains 0.
2469          * The default value of GDI/UDI/MDI/PTI is 0xc. It is illegal to
2470          * write values less than 0xc in these fields because the entire
2471          * write will be dropped. As a result, we must preserve the
2472          * original reset values and overwrite only what we really want.
2473          */
2474 
2475         pwfield = read_c0_pwfield();
2476         /* re-initialize the GDI field */
2477         pwfield &= ~MIPS_PWFIELD_GDI_MASK;
2478         pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT;
2479         /* re-initialize the PTI field including the even/odd bit */
2480         pwfield &= ~MIPS_PWFIELD_PTI_MASK;
2481         pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;
2482         if (CONFIG_PGTABLE_LEVELS >= 3) {
2483                 pwfield &= ~MIPS_PWFIELD_MDI_MASK;
2484                 pwfield |= PMD_SHIFT << MIPS_PWFIELD_MDI_SHIFT;
2485         }
2486         /* Set the PTEI right shift */
2487         ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT;
2488         pwfield |= ptei;
2489         write_c0_pwfield(pwfield);
2490         /* Check whether the PTEI value is supported */
2491         back_to_back_c0_hazard();
2492         pwfield = read_c0_pwfield();
2493         if (((pwfield & MIPS_PWFIELD_PTEI_MASK) << MIPS_PWFIELD_PTEI_SHIFT)
2494                 != ptei) {
2495                 pr_warn("Unsupported PTEI field value: 0x%lx. HTW will not be enabled\n",
2496                         ptei);
2497                 /*
2498                  * Drop option to avoid HTW being enabled via another path
2499                  * (e.g. htw_reset())
2500                  */
2501                 current_cpu_data.options &= ~MIPS_CPU_HTW;
2502                 return;
2503         }
2504 
2505         pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
2506         pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
2507         if (CONFIG_PGTABLE_LEVELS >= 3)
2508                 pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
2509 
2510         /* Set pointer size to size of directory pointers */
2511         if (IS_ENABLED(CONFIG_64BIT))
2512                 pwsize |= MIPS_PWSIZE_PS_MASK;
2513         /* PTEs may be multiple pointers long (e.g. with XPA) */
2514         pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
2515                         & MIPS_PWSIZE_PTEW_MASK;
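        /*
         * E.g. with XPA a 32-bit kernel has 64-bit PTEs but 32-bit
         * pointers, so PTE_T_LOG2 - PGD_T_LOG2 = 3 - 2 = 1: each PTE
         * occupies two pointers.
         */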
2516 
2517         write_c0_pwsize(pwsize);
2518 
2519         /* Make sure everything is set before we enable the HTW */
2520         back_to_back_c0_hazard();
2521 
2522         /*
2523          * Enable HTW (and only for XUSeg on 64-bit), and disable the rest of
2524          * the pwctl fields.
2525          */
2526         config = 1 << MIPS_PWCTL_PWEN_SHIFT;
2527         if (IS_ENABLED(CONFIG_64BIT))
2528                 config |= MIPS_PWCTL_XU_MASK;
2529         write_c0_pwctl(config);
2530         pr_info("Hardware Page Table Walker enabled\n");
2531 
2532         print_htw_config();
2533 }
2534 
2535 static void config_xpa_params(void)
2536 {
2537 #ifdef CONFIG_XPA
2538         unsigned int pagegrain;
2539 
2540         if (mips_xpa_disabled) {
2541                 pr_info("Extended Physical Addressing (XPA) disabled\n");
2542                 return;
2543         }
2544 
2545         pagegrain = read_c0_pagegrain();
2546         write_c0_pagegrain(pagegrain | PG_ELPA);
2547         back_to_back_c0_hazard();
2548         pagegrain = read_c0_pagegrain();
2549 
2550         if (pagegrain & PG_ELPA)
2551                 pr_info("Extended Physical Addressing (XPA) enabled\n");
2552         else
2553                 panic("Extended Physical Addressing (XPA) disabled");
2554 #endif
2555 }
2556 
2557 static void check_pabits(void)
2558 {
2559         unsigned long entry;
2560         unsigned pabits, fillbits;
2561 
2562         if (!cpu_has_rixi || !_PAGE_NO_EXEC) {
2563                 /*
2564                  * We'll only be making use of the fact that we can rotate bits
2565                  * into the fill if the CPU supports RIXI, so don't bother
2566                  * probing this for CPUs which don't.
2567                  */
2568                 return;
2569         }
2570 
2571         write_c0_entrylo0(~0ul);
2572         back_to_back_c0_hazard();
2573         entry = read_c0_entrylo0();
2574 
2575         /* clear all non-PFN bits */
2576         entry &= ~((1 << MIPS_ENTRYLO_PFN_SHIFT) - 1);
2577         entry &= ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
2578 
2579         /* find a lower bound on PABITS, and upper bound on fill bits */
2580         pabits = fls_long(entry) + 6;
2581         fillbits = max_t(int, (int)BITS_PER_LONG - pabits, 0);
2582 
2583         /* minus the RI & XI bits */
2584         fillbits -= min_t(unsigned, fillbits, 2);
2585 
2586         if (fillbits >= ilog2(_PAGE_NO_EXEC))
2587                 fill_includes_sw_bits = true;
2588 
2589         pr_debug("Entry* registers contain %u fill bits\n", fillbits);
2590 }
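/*
 * Worked example: if writing ~0 leaves bits 6..25 set in EntryLo0,
 * fls_long() returns 26 and pabits = 26 + 6 = 32; EntryLo bit 6 is PFN
 * bit 0 and maps physical address bit 12, hence the +6. On a 32-bit
 * kernel that gives fillbits = 0, so no software bits fit in the fill.
 */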
2591 
2592 void build_tlb_refill_handler(void)
2593 {
2594         /*
2595          * The refill handler is generated per-CPU; multi-node systems
2596          * may have local storage for it. The other handlers are only
2597          * needed once.
2598          */
2599         static int run_once = 0;
2600 
2601         if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi)
2602                 panic("Kernels supporting XPA currently require CPUs with RIXI");
2603 
2604         output_pgtable_bits_defines();
2605         check_pabits();
2606 
2607 #ifdef CONFIG_64BIT
2608         check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
2609 #endif
2610 
2611         switch (current_cpu_type()) {
2612         case CPU_R2000:
2613         case CPU_R3000:
2614         case CPU_R3000A:
2615         case CPU_R3081E:
2616         case CPU_TX3912:
2617         case CPU_TX3922:
2618         case CPU_TX3927:
2619 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
2620                 if (cpu_has_local_ebase)
2621                         build_r3000_tlb_refill_handler();
2622                 if (!run_once) {
2623                         if (!cpu_has_local_ebase)
2624                                 build_r3000_tlb_refill_handler();
2625                         build_setup_pgd();
2626                         build_r3000_tlb_load_handler();
2627                         build_r3000_tlb_store_handler();
2628                         build_r3000_tlb_modify_handler();
2629                         flush_tlb_handlers();
2630                         run_once++;
2631                 }
2632 #else
2633                 panic("No R3000 TLB refill handler");
2634 #endif
2635                 break;
2636 
2637         case CPU_R8000:
2638                 panic("No R8000 TLB refill handler yet");
2639                 break;
2640 
2641         default:
2642                 if (cpu_has_ldpte)
2643                         setup_pw();
2644 
2645                 if (!run_once) {
2646                         scratch_reg = allocate_kscratch();
2647                         build_setup_pgd();
2648                         build_r4000_tlb_load_handler();
2649                         build_r4000_tlb_store_handler();
2650                         build_r4000_tlb_modify_handler();
2651                         if (cpu_has_ldpte)
2652                                 build_loongson3_tlb_refill_handler();
2653                         else if (!cpu_has_local_ebase)
2654                                 build_r4000_tlb_refill_handler();
2655                         flush_tlb_handlers();
2656                         run_once++;
2657                 }
2658                 if (cpu_has_local_ebase)
2659                         build_r4000_tlb_refill_handler();
2660                 if (cpu_has_xpa)
2661                         config_xpa_params();
2662                 if (cpu_has_htw)
2663                         config_htw_params();
2664         }
2665 }
2666 
