TOMOYO Linux Cross Reference
Linux/arch/powerpc/mm/tlb_nohash.c

  1 /*
  2  * This file contains the routines for TLB flushing.
  3  * On machines where the MMU does not use a hash table to store virtual to
  4  * physical translations (i.e., SW loaded TLBs or Book3E compliant processors;
  5  * this does -not- include the 603, however, which shares the implementation
  6  * with hash-based processors)
  7  *
  8  *  -- BenH
  9  *
 10  * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 11  *                     IBM Corp.
 12  *
 13  *  Derived from arch/ppc/mm/init.c:
 14  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 15  *
 16  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 17  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 18  *    Copyright (C) 1996 Paul Mackerras
 19  *
 20  *  Derived from "arch/i386/mm/init.c"
 21  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 22  *
 23  *  This program is free software; you can redistribute it and/or
 24  *  modify it under the terms of the GNU General Public License
 25  *  as published by the Free Software Foundation; either version
 26  *  2 of the License, or (at your option) any later version.
 27  *
 28  */
 29 
 30 #include <linux/kernel.h>
 31 #include <linux/export.h>
 32 #include <linux/mm.h>
 33 #include <linux/init.h>
 34 #include <linux/highmem.h>
 35 #include <linux/pagemap.h>
 36 #include <linux/preempt.h>
 37 #include <linux/spinlock.h>
 38 #include <linux/memblock.h>
 39 #include <linux/of_fdt.h>
 40 #include <linux/hugetlb.h>
 41 
 42 #include <asm/tlbflush.h>
 43 #include <asm/tlb.h>
 44 #include <asm/code-patching.h>
 45 #include <asm/cputhreads.h>
 46 #include <asm/hugetlb.h>
 47 #include <asm/paca.h>
 48 
 49 #include "mmu_decl.h"
 50 
 51 /*
 52  * This struct lists the sw-supported page sizes.  The hardware MMU may support
 53  * other sizes not listed here.  The .ind field is only used on MMUs that have
 54  * indirect page table entries.
 55  */
 56 #if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
 57 #ifdef CONFIG_PPC_FSL_BOOK3E
 58 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 59         [MMU_PAGE_4K] = {
 60                 .shift  = 12,
 61                 .enc    = BOOK3E_PAGESZ_4K,
 62         },
 63         [MMU_PAGE_2M] = {
 64                 .shift  = 21,
 65                 .enc    = BOOK3E_PAGESZ_2M,
 66         },
 67         [MMU_PAGE_4M] = {
 68                 .shift  = 22,
 69                 .enc    = BOOK3E_PAGESZ_4M,
 70         },
 71         [MMU_PAGE_16M] = {
 72                 .shift  = 24,
 73                 .enc    = BOOK3E_PAGESZ_16M,
 74         },
 75         [MMU_PAGE_64M] = {
 76                 .shift  = 26,
 77                 .enc    = BOOK3E_PAGESZ_64M,
 78         },
 79         [MMU_PAGE_256M] = {
 80                 .shift  = 28,
 81                 .enc    = BOOK3E_PAGESZ_256M,
 82         },
 83         [MMU_PAGE_1G] = {
 84                 .shift  = 30,
 85                 .enc    = BOOK3E_PAGESZ_1GB,
 86         },
 87 };
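    /*
     * Observation on the .enc values above (assuming the standard Book3E
     * TSIZE definitions): page sizes are encoded as powers of two of 1KB,
     * so .enc is simply .shift - 10 (BOOK3E_PAGESZ_4K == 2, ...,
     * BOOK3E_PAGESZ_1GB == 20).  MAV1 hardware only implements the even,
     * power-of-4 encodings, which is why setup_page_sizes() below skips
     * entries with an odd shift on such parts.
     */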
 88 #elif defined(CONFIG_PPC_8xx)
 89 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 90         /* we only manage 4k and 16k pages as normal pages */
 91 #ifdef CONFIG_PPC_4K_PAGES
 92         [MMU_PAGE_4K] = {
 93                 .shift  = 12,
 94         },
 95 #else
 96         [MMU_PAGE_16K] = {
 97                 .shift  = 14,
 98         },
 99 #endif
100         [MMU_PAGE_512K] = {
101                 .shift  = 19,
102         },
103         [MMU_PAGE_8M] = {
104                 .shift  = 23,
105         },
106 };
107 #else
108 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
109         [MMU_PAGE_4K] = {
110                 .shift  = 12,
111                 .ind    = 20,
112                 .enc    = BOOK3E_PAGESZ_4K,
113         },
114         [MMU_PAGE_16K] = {
115                 .shift  = 14,
116                 .enc    = BOOK3E_PAGESZ_16K,
117         },
118         [MMU_PAGE_64K] = {
119                 .shift  = 16,
120                 .ind    = 28,
121                 .enc    = BOOK3E_PAGESZ_64K,
122         },
123         [MMU_PAGE_1M] = {
124                 .shift  = 20,
125                 .enc    = BOOK3E_PAGESZ_1M,
126         },
127         [MMU_PAGE_16M] = {
128                 .shift  = 24,
129                 .ind    = 36,
130                 .enc    = BOOK3E_PAGESZ_16M,
131         },
132         [MMU_PAGE_256M] = {
133                 .shift  = 28,
134                 .enc    = BOOK3E_PAGESZ_256M,
135         },
136         [MMU_PAGE_1G] = {
137                 .shift  = 30,
138                 .enc    = BOOK3E_PAGESZ_1GB,
139         },
140 };
141 #endif /* CONFIG_PPC_FSL_BOOK3E */
142 
143 static inline int mmu_get_tsize(int psize)
144 {
145         return mmu_psize_defs[psize].enc;
146 }
147 #else
148 static inline int mmu_get_tsize(int psize)
149 {
150         /* This isn't used on !Book3E for now */
151         return 0;
152 }
153 #endif /* CONFIG_PPC_BOOK3E_MMU */
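    /*
     * Example: mmu_get_tsize(MMU_PAGE_4K) yields BOOK3E_PAGESZ_4K, the
     * TSIZE value the _tlbil_va()/_tlbivax_bcast() helpers below pass to
     * hardware when invalidating a 4K translation.
     */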
154 
155 /* The variables below are currently only used on 64-bit Book3E,
156  * though this will probably be made common with other nohash
157  * implementations at some point.
158  */
159 #ifdef CONFIG_PPC64
160 
161 int mmu_linear_psize;           /* Page size used for the linear mapping */
162 int mmu_pte_psize;              /* Page size used for PTE pages */
163 int mmu_vmemmap_psize;          /* Page size used for the virtual mem map */
164 int book3e_htw_mode;            /* HW tablewalk?  Value is PPC_HTW_* */
165 unsigned long linear_map_top;   /* Top of linear mapping */
166 
168 /*
169  * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
170  * exceptions.  This is used for bolted and e6500 TLB miss handlers which
171  * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
172  * this is set to zero.
173  */
174 int extlb_level_exc;
175 
176 #endif /* CONFIG_PPC64 */
177 
178 #ifdef CONFIG_PPC_FSL_BOOK3E
179 /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
180 DEFINE_PER_CPU(int, next_tlbcam_idx);
181 EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
182 #endif
183 
184 /*
185  * Base TLB flushing operations:
186  *
187  *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
188  *  - flush_tlb_page(vma, vmaddr) flushes one page
189  *  - flush_tlb_range(vma, start, end) flushes a range of pages
190  *  - flush_tlb_kernel_range(start, end) flushes kernel pages
191  *
192  *  - local_* variants of page and mm only apply to the current
193  *    processor
194  */
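    /*
     * Illustrative call pattern (a sketch of how generic mm code reaches
     * these entry points, not code from this file): clearing a single PTE
     * ends in flush_tlb_page(vma, addr); tearing down a larger region ends
     * in flush_tlb_range(vma, start, end), which this implementation
     * currently degrades to a full flush_tlb_mm(mm).  The local_* variants
     * are only safe when the mm cannot be live on another CPU.
     */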
195 
196 /*
197  * These are the base non-SMP variants of page and mm flushing
198  */
199 void local_flush_tlb_mm(struct mm_struct *mm)
200 {
201         unsigned int pid;
202 
203         preempt_disable();
204         pid = mm->context.id;
205         if (pid != MMU_NO_CONTEXT)
206                 _tlbil_pid(pid);
207         preempt_enable();
208 }
209 EXPORT_SYMBOL(local_flush_tlb_mm);
210 
211 void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
212                             int tsize, int ind)
213 {
214         unsigned int pid;
215 
216         preempt_disable();
217         pid = mm ? mm->context.id : 0;
218         if (pid != MMU_NO_CONTEXT)
219                 _tlbil_va(vmaddr, pid, tsize, ind);
220         preempt_enable();
221 }
222 
223 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
224 {
225         __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
226                                mmu_get_tsize(mmu_virtual_psize), 0);
227 }
228 EXPORT_SYMBOL(local_flush_tlb_page);
229 
230 /*
231  * And here are the SMP non-local implementations
232  */
233 #ifdef CONFIG_SMP
234 
235 static DEFINE_RAW_SPINLOCK(tlbivax_lock);
236 
237 struct tlb_flush_param {
238         unsigned long addr;
239         unsigned int pid;
240         unsigned int tsize;
241         unsigned int ind;
242 };
243 
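    /*
     * Note: a NULL param means PID 0, i.e. a kernel flush; this is how
     * flush_tlb_kernel_range() below reuses this handler.
     */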
244 static void do_flush_tlb_mm_ipi(void *param)
245 {
246         struct tlb_flush_param *p = param;
247 
248         _tlbil_pid(p ? p->pid : 0);
249 }
250 
251 static void do_flush_tlb_page_ipi(void *param)
252 {
253         struct tlb_flush_param *p = param;
254 
255         _tlbil_va(p->addr, p->pid, p->tsize, p->ind);
256 }
257 
259 /* Note on invalidations and PID:
260  *
261  * We snapshot the PID with preempt disabled. At this point, it can still
262  * change either because:
263  * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
264  * - we are invalidating some target that isn't currently running here
265  *   and is concurrently acquiring a new PID on another CPU
266  * - some other CPU is re-acquiring a lost PID for this mm
267  * etc...
268  *
269  * However, this shouldn't be a problem as we only guarantee
270  * invalidation of TLB entries present prior to this call, so we
271  * don't care about the PID changing, and invalidating a stale PID
272  * is generally harmless.
273  */
274 
275 void flush_tlb_mm(struct mm_struct *mm)
276 {
277         unsigned int pid;
278 
279         preempt_disable();
280         pid = mm->context.id;
281         if (unlikely(pid == MMU_NO_CONTEXT))
282                 goto no_context;
283         if (!mm_is_core_local(mm)) {
284                 struct tlb_flush_param p = { .pid = pid };
285                 /* smp_call_function_many() skips the local CPU even if set */
286                 smp_call_function_many(mm_cpumask(mm),
287                                        do_flush_tlb_mm_ipi, &p, 1);
288         }
289         _tlbil_pid(pid);
290  no_context:
291         preempt_enable();
292 }
293 EXPORT_SYMBOL(flush_tlb_mm);
294 
295 void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
296                       int tsize, int ind)
297 {
298         struct cpumask *cpu_mask;
299         unsigned int pid;
300 
301         /*
302          * This function as well as __local_flush_tlb_page() must only be called
303          * for user contexts.
304          */
305         if (unlikely(WARN_ON(!mm)))
306                 return;
307 
308         preempt_disable();
309         pid = mm->context.id;
310         if (unlikely(pid == MMU_NO_CONTEXT))
311                 goto bail;
312         cpu_mask = mm_cpumask(mm);
313         if (!mm_is_core_local(mm)) {
314                 /* If broadcast tlbivax is supported, use it */
315                 if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
316                         int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
317                         if (lock)
318                                 raw_spin_lock(&tlbivax_lock);
319                         _tlbivax_bcast(vmaddr, pid, tsize, ind);
320                         if (lock)
321                                 raw_spin_unlock(&tlbivax_lock);
322                         goto bail;
323                 } else {
324                         struct tlb_flush_param p = {
325                                 .pid = pid,
326                                 .addr = vmaddr,
327                                 .tsize = tsize,
328                                 .ind = ind,
329                         };
330                         /* smp_call_function_many() skips the local CPU even if set in cpu_mask */
331                         smp_call_function_many(cpu_mask,
332                                                do_flush_tlb_page_ipi, &p, 1);
333                 }
334         }
335         _tlbil_va(vmaddr, pid, tsize, ind);
336  bail:
337         preempt_enable();
338 }
339 
340 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
341 {
342 #ifdef CONFIG_HUGETLB_PAGE
343         if (vma && is_vm_hugetlb_page(vma))
344                 flush_hugetlb_page(vma, vmaddr);
345 #endif
346 
347         __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
348                          mmu_get_tsize(mmu_virtual_psize), 0);
349 }
350 EXPORT_SYMBOL(flush_tlb_page);
351 
352 #endif /* CONFIG_SMP */
353 
354 #ifdef CONFIG_PPC_47x
355 void __init early_init_mmu_47x(void)
356 {
357 #ifdef CONFIG_SMP
358         unsigned long root = of_get_flat_dt_root();
359         if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
360                 mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
361 #endif /* CONFIG_SMP */
362 }
363 #endif /* CONFIG_PPC_47x */
364 
365 /*
366  * Flush kernel TLB entries in the given range
367  */
368 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
369 {
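            /* There is no selective kernel-range invalidation here: kernel
             * (global) entries are tagged with PID 0, so that whole PID is
             * flushed on every CPU and start/end are effectively unused.
             */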
370 #ifdef CONFIG_SMP
371         preempt_disable();
372         smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
373         _tlbil_pid(0);
374         preempt_enable();
375 #else
376         _tlbil_pid(0);
377 #endif
378 }
379 EXPORT_SYMBOL(flush_tlb_kernel_range);
380 
381 /*
382  * Currently, for range flushing, we just do a full mm flush. This should
383  * be optimized based on a threshold on the size of the range, since
384  * some implementations can stack multiple tlbivax instructions before a
385  * tlbsync, but for now we keep it that way.
386  */
387 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
388                      unsigned long end)
390 {
391         flush_tlb_mm(vma->vm_mm);
392 }
393 EXPORT_SYMBOL(flush_tlb_range);
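    /*
     * A minimal sketch of the threshold heuristic the comment above hints
     * at, kept under #if 0 because it is illustrative only; the cutoff of
     * 32 pages and the uniform PAGE_SIZE stride are assumptions, not
     * something this file defines.
     */
    #if 0
    static void flush_tlb_range_sketch(struct vm_area_struct *vma,
                                       unsigned long start, unsigned long end)
    {
            unsigned long nr = (end - start) >> PAGE_SHIFT;

            if (nr <= 32) {                 /* hypothetical cutoff */
                    unsigned long addr;

                    for (addr = start; addr < end; addr += PAGE_SIZE)
                            flush_tlb_page(vma, addr);
            } else {
                    flush_tlb_mm(vma->vm_mm);
            }
    }
    #endif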
394 
395 void tlb_flush(struct mmu_gather *tlb)
396 {
397         flush_tlb_mm(tlb->mm);
398 }
399 
400 /*
401  * Below are functions specific to the 64-bit variant of Book3E though that
402  * may change in the future
403  */
404 
405 #ifdef CONFIG_PPC64
406 
407 /*
408  * Handling of virtual linear page tables or indirect TLB entries
409  * flushing when PTE pages are freed
410  */
411 void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
412 {
413         int tsize = mmu_psize_defs[mmu_pte_psize].enc;
414 
415         if (book3e_htw_mode != PPC_HTW_NONE) {
416                 unsigned long start = address & PMD_MASK;
417                 unsigned long end = address + PMD_SIZE;
418                 unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
419 
420                 /* This isn't the most optimal; ideally we would factor out the
421                  * whole preempt & CPU mask mucking around, or even the IPI, but
422                  * it will do for now.
423                  */
424                 while (start < end) {
425                         __flush_tlb_page(tlb->mm, start, tsize, 1);
426                         start += size;
427                 }
428         } else {
429                 unsigned long rmask = 0xf000000000000000ul;
430                 unsigned long rid = (address & rmask) | 0x1000000000000000ul;
431                 unsigned long vpte = address & ~rmask;
432 
433 #ifdef CONFIG_PPC_64K_PAGES
434                 vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
435 #else
436                 vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
437 #endif
438                 vpte |= rid;
439                 __flush_tlb_page(tlb->mm, vpte, tsize, 0);
440         }
441 }
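    /*
     * Worked example for the no-tablewalk branch above, assuming 4K pages
     * (PAGE_SHIFT == 12; each PTE is 8 bytes, hence the shift by
     * 12 - 3 == 9) and a hypothetical EA of 0x123456789000:
     *
     *   rid  = 0x1000000000000000          (quadrant-0 EA; VPTE region 1)
     *   vpte = (0x123456789000 >> 9) & ~0xffful    = 0x91a2b3000
     *   vpte |= rid                                = 0x100000091a2b3000
     *
     * so one virtual-PTE-table page invalidation covers all the PTEs of
     * the PTE page being freed, instead of one flush per mapping.
     */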
442 
443 static void setup_page_sizes(void)
444 {
445         unsigned int tlb0cfg;
446         unsigned int tlb0ps;
447         unsigned int eptcfg;
448         int i, psize;
449 
450 #ifdef CONFIG_PPC_FSL_BOOK3E
451         unsigned int mmucfg = mfspr(SPRN_MMUCFG);
452         int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
453 
454         if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
455                 unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
456                 unsigned int min_pg, max_pg;
457 
458                 min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
459                 max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
460 
461                 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
462                         struct mmu_psize_def *def;
463                         unsigned int shift;
464 
465                         def = &mmu_psize_defs[psize];
466                         shift = def->shift;
467 
468                         if (shift == 0 || shift & 1)
469                                 continue;
470 
471                         /* adjust to be in terms of 4^shift Kb */
472                         shift = (shift - 10) >> 1;
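                                /*
                                 * Worked example: a 16M page has shift == 24,
                                 * so (24 - 10) >> 1 == 7 and 4^7 KB == 16M;
                                 * MIN/MAXSIZE count in powers of 4 KB.
                                 */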
473 
474                         if ((shift >= min_pg) && (shift <= max_pg))
475                                 def->flags |= MMU_PAGE_SIZE_DIRECT;
476                 }
477 
478                 goto out;
479         }
480 
481         if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
482                 u32 tlb1cfg, tlb1ps;
483 
484                 tlb0cfg = mfspr(SPRN_TLB0CFG);
485                 tlb1cfg = mfspr(SPRN_TLB1CFG);
486                 tlb1ps = mfspr(SPRN_TLB1PS);
487                 eptcfg = mfspr(SPRN_EPTCFG);
488 
489                 if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
490                         book3e_htw_mode = PPC_HTW_E6500;
491 
492                 /*
493                  * We expect 4K subpage size and unrestricted indirect size.
494                  * The lack of a restriction on indirect size is a Freescale
495                  * extension, indicated by PSn = 0 but SPSn != 0.
496                  */
497                 if (eptcfg != 2)
498                         book3e_htw_mode = PPC_HTW_NONE;
499 
500                 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
501                         struct mmu_psize_def *def = &mmu_psize_defs[psize];
502 
503                         if (tlb1ps & (1U << (def->shift - 10))) {
504                                 def->flags |= MMU_PAGE_SIZE_DIRECT;
505 
506                                 if (book3e_htw_mode && psize == MMU_PAGE_2M)
507                                         def->flags |= MMU_PAGE_SIZE_INDIRECT;
508                         }
509                 }
510 
511                 goto out;
512         }
513 #endif
514 
515         tlb0cfg = mfspr(SPRN_TLB0CFG);
516         tlb0ps = mfspr(SPRN_TLB0PS);
517         eptcfg = mfspr(SPRN_EPTCFG);
518 
519         /* Look for supported direct sizes */
520         for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
521                 struct mmu_psize_def *def = &mmu_psize_defs[psize];
522 
523                 if (tlb0ps & (1U << (def->shift - 10)))
524                         def->flags |= MMU_PAGE_SIZE_DIRECT;
525         }
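            /*
             * (TLB0PS encoding, as used above: bit n set means 2^(n+10)-byte
             * pages are supported, so bit 2 advertises 4K and bit 20
             * advertises 1G.)
             */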
526 
527         /* Indirect page sizes supported? */
528         if ((tlb0cfg & TLBnCFG_IND) == 0 ||
529             (tlb0cfg & TLBnCFG_PT) == 0)
530                 goto out;
531 
532         book3e_htw_mode = PPC_HTW_IBM;
533 
534         /* Now, we only deal with one IND page size for each
535          * direct size. Hopefully all implementations today are
536          * unambiguous, but we might want to be careful in the
537          * future.
538          */
539         for (i = 0; i < 3; i++) {
540                 unsigned int ps, sps;
541 
542                 sps = eptcfg & 0x1f;
543                 eptcfg >>= 5;
544                 ps = eptcfg & 0x1f;
545                 eptcfg >>= 5;
546                 if (!ps || !sps)
547                         continue;
548                 for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
549                         struct mmu_psize_def *def = &mmu_psize_defs[psize];
550 
551                         if (ps == (def->shift - 10))
552                                 def->flags |= MMU_PAGE_SIZE_INDIRECT;
553                         if (sps == (def->shift - 10))
554                                 def->ind = ps + 10;
555                 }
556         }
557 
558 out:
559         /* Cleanup array and print summary */
560         pr_info("MMU: Supported page sizes\n");
561         for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
562                 struct mmu_psize_def *def = &mmu_psize_defs[psize];
563                 const char *__page_type_names[] = {
564                         "unsupported",
565                         "direct",
566                         "indirect",
567                         "direct & indirect"
568                 };
569                 if (def->flags == 0) {
570                         def->shift = 0;
571                         continue;
572                 }
573                 pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
574                         __page_type_names[def->flags & 0x3]);
575         }
576 }
577 
578 static void setup_mmu_htw(void)
579 {
580         /*
581          * If we want to use HW tablewalk, enable it by patching the TLB miss
582          * handlers to branch to the one dedicated to it.
583          */
584 
585         switch (book3e_htw_mode) {
586         case PPC_HTW_IBM:
587                 patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
588                 patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
589                 break;
590 #ifdef CONFIG_PPC_FSL_BOOK3E
591         case PPC_HTW_E6500:
592                 extlb_level_exc = EX_TLB_SIZE;
593                 patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
594                 patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
595                 break;
596 #endif
597         }
598         pr_info("MMU: Book3E HW tablewalk %s\n",
599                 book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
600 }
601 
602 /*
603  * Early initialization of the MMU TLB code
604  */
605 static void early_init_this_mmu(void)
606 {
607         unsigned int mas4;
608 
609         /* Set MAS4 based on page table setting */
610 
611         mas4 = 0x4 << MAS4_WIMGED_SHIFT;
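            /* 0x4 in the WIMGE default field is the M bit, so entries loaded
             * on a miss default to requiring memory coherence.
             */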
612         switch (book3e_htw_mode) {
613         case PPC_HTW_E6500:
614                 mas4 |= MAS4_INDD;
615                 mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
616                 mas4 |= MAS4_TLBSELD(1);
617                 mmu_pte_psize = MMU_PAGE_2M;
618                 break;
619 
620         case PPC_HTW_IBM:
621                 mas4 |= MAS4_INDD;
622 #ifdef CONFIG_PPC_64K_PAGES
623                 mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
624                 mmu_pte_psize = MMU_PAGE_256M;
625 #else
626                 mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
627                 mmu_pte_psize = MMU_PAGE_1M;
628 #endif
629                 break;
630 
631         case PPC_HTW_NONE:
632 #ifdef CONFIG_PPC_64K_PAGES
633                 mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
634 #else
635                 mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
636 #endif
637                 mmu_pte_psize = mmu_virtual_psize;
638                 break;
639         }
640         mtspr(SPRN_MAS4, mas4);
641 
642 #ifdef CONFIG_PPC_FSL_BOOK3E
643         if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
644                 unsigned int num_cams;
645                 int __maybe_unused cpu = smp_processor_id();
646                 bool map = true;
647 
648                 /* use a quarter of the TLBCAM for bolted linear map */
649                 num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
650 
651                 /*
652                  * Only do the mapping once per core, or else the
653                  * transient mapping would cause problems.
654                  */
655 #ifdef CONFIG_SMP
656                 if (hweight32(get_tensr()) > 1)
657                         map = false;
658 #endif
659 
660                 if (map)
661                         linear_map_top = map_mem_in_cams(linear_map_top,
662                                                          num_cams, false);
663         }
664 #endif
665 
666         /* A sync won't hurt us after mucking around with
667          * the MMU configuration
668          */
669         mb();
670 }
671 
672 static void __init early_init_mmu_global(void)
673 {
674         /* XXX This will have to be decided at runtime, but right
675          * now our boot and TLB miss code hard wires it. Ideally
676          * we should find out a suitable page size and patch the
677          * TLB miss code (either that or use the PACA to store
678          * the value we want)
679          */
680         mmu_linear_psize = MMU_PAGE_1G;
681 
682         /* XXX This should be decided at runtime based on supported
683          * page sizes in the TLB, but for now let's assume 16M is
684          * always there and a good fit (which it probably is)
685          *
686          * Freescale booke only supports 4K pages in TLB0, so use that.
687          */
688         if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
689                 mmu_vmemmap_psize = MMU_PAGE_4K;
690         else
691                 mmu_vmemmap_psize = MMU_PAGE_16M;
692 
693         /* XXX This code only checks for TLB 0 capabilities and doesn't
694          *     check what page size combos are supported by the HW. It
695          *     also doesn't handle the case where a separate array holds
696          *     the IND entries from the array loaded by the PT.
697          */
698         /* Look for supported page sizes */
699         setup_page_sizes();
700 
701         /* Look for HW tablewalk support */
702         setup_mmu_htw();
703 
704 #ifdef CONFIG_PPC_FSL_BOOK3E
705         if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
706                 if (book3e_htw_mode == PPC_HTW_NONE) {
707                         extlb_level_exc = EX_TLB_SIZE;
708                         patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
709                         patch_exception(0x1e0,
710                                 exc_instruction_tlb_miss_bolted_book3e);
711                 }
712         }
713 #endif
714 
715         /* Set the global containing the top of the linear mapping
716          * for use by the TLB miss code
717          */
718         linear_map_top = memblock_end_of_DRAM();
719 }
720 
721 static void __init early_mmu_set_memory_limit(void)
722 {
723 #ifdef CONFIG_PPC_FSL_BOOK3E
724         if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
725                 /*
726                  * Limit memory so we don't have linear faults.
727                  * Unlike memblock_set_current_limit, which limits
728                  * memory available during early boot, this permanently
729                  * reduces the memory available to Linux.  We need to
730                  * do this because highmem is not supported on 64-bit.
731                  */
732                 memblock_enforce_memory_limit(linear_map_top);
733         }
734 #endif
735 
736         memblock_set_current_limit(linear_map_top);
737 }
738 
739 /* boot cpu only */
740 void __init early_init_mmu(void)
741 {
742         early_init_mmu_global();
743         early_init_this_mmu();
744         early_mmu_set_memory_limit();
745 }
746 
747 void early_init_mmu_secondary(void)
748 {
749         early_init_this_mmu();
750 }
751 
752 void setup_initial_memory_limit(phys_addr_t first_memblock_base,
753                                 phys_addr_t first_memblock_size)
754 {
755         /* On non-FSL Embedded 64-bit, we adjust the RMA size to match
756          * the bolted TLB entry. We know for now that only 1G
757          * entries are supported though that may eventually
758          * change.
759          *
760          * On FSL Embedded 64-bit, usually all RAM is bolted, but with
761          * unusual memory sizes it's possible for some RAM to not be mapped
762          * (such RAM is not used at all by Linux, since we don't support
763          * highmem on 64-bit).  We limit ppc64_rma_size to what would be
764          * mappable if this memblock is the only one.  Additional memblocks
765          * can only increase, not decrease, the amount that ends up getting
766          * mapped.  We still limit max to 1G even if we'll eventually map
767          * more.  This is due to what the early init code is set up to do.
768          *
769          * We crop it to the size of the first MEMBLOCK to
770          * avoid going over total available memory just in case...
771          */
772 #ifdef CONFIG_PPC_FSL_BOOK3E
773         if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
774                 unsigned long linear_sz;
775                 unsigned int num_cams;
776 
777                 /* use a quarter of the TLBCAM for bolted linear map */
778                 num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
779 
780                 linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
781                                             true);
782 
783                 ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
784         } else
785 #endif
786                 ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
787 
788         /* Finally limit subsequent allocations */
789         memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
790 }
791 #else /* ! CONFIG_PPC64 */
792 void __init early_init_mmu(void)
793 {
794 #ifdef CONFIG_PPC_47x
795         early_init_mmu_47x();
796 #endif
797 }
798 #endif /* CONFIG_PPC64 */
799 
