
TOMOYO Linux Cross Reference
Linux/arch/arm64/mm/mmu.c

/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
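
/*
 * Illustrative note (values taken from the usual arm64 definitions, not
 * spelled out in this file): TCR_T0SZ(x) expands to
 * (64 - (x)) << TCR_T0SZ_OFFSET, so with VA_BITS == 48 the initial
 * idmap_t0sz encodes T0SZ == 16, i.e. a 48-bit (256TB) TTBR0 input
 * range. head.S may lower T0SZ further if the idmap'd kernel image sits
 * above the VA_BITS physical limit.
 */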

/*
 * The empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (!pfn_valid(pfn))
                return pgprot_noncached(vma_prot);
        else if (file->f_flags & O_SYNC)
                return pgprot_writecombine(vma_prot);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
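
/*
 * Hedged usage sketch (the caller lives outside this file): the
 * /dev/mem mmap() path in drivers/char/mem.c consults this hook, so a
 * pfn outside RAM (!pfn_valid) ends up with Device attributes via
 * pgprot_noncached(), a file opened with O_SYNC gets write-combining
 * memory, and ordinary RAM keeps the vma_prot the caller passed in.
 */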

static void __init *early_alloc(unsigned long sz)
{
        void *ptr = __va(memblock_alloc(sz, sz));
        BUG_ON(!ptr);
        memset(ptr, 0, sz);
        return ptr;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
        unsigned long pfn = pmd_pfn(*pmd);
        int i = 0;

        do {
                /*
                 * Need to have the least restrictive permissions available;
                 * permissions will be fixed up later.
                 */
                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                pfn++;
        } while (pte++, i++, i < PTRS_PER_PTE);
}
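
/*
 * Illustrative example (addresses hypothetical): with a 4K granule,
 * split_pmd() rewrites one 2MB section as PTRS_PER_PTE (512) contiguous
 * ptes, so a section mapping phys 0x40200000 becomes ptes for pfns
 * 0x40200..0x403ff. The caller then repoints the pmd at the new table
 * and flushes the TLB.
 */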

static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  pgprot_t prot,
                                  void *(*alloc)(unsigned long size))
{
        pte_t *pte;

        if (pmd_none(*pmd) || pmd_sect(*pmd)) {
                pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
                if (pmd_sect(*pmd))
                        split_pmd(pmd, pte);
                __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
                flush_tlb_all();
        }
        BUG_ON(pmd_bad(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        do {
                set_pte(pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

void split_pud(pud_t *old_pud, pmd_t *pmd)
{
        unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
        pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
        int i = 0;

        do {
                set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
                addr += PMD_SIZE;
        } while (pmd++, i++, i < PTRS_PER_PMD);
}
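
/*
 * A note on the XOR in split_pud() (reasoning inferred, not stated in
 * the original): pud_val(*old_pud) is the block's output address OR'ed
 * with its attribute bits, and addr reconstructs exactly that output
 * address, so the XOR strips the address and leaves only the
 * attributes, which are then reapplied to each of the PTRS_PER_PMD
 * replacement 2MB entries.
 */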

static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                                  unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
                                  void *(*alloc)(unsigned long size))
{
        pmd_t *pmd;
        unsigned long next;

        /*
         * Check for initial section mappings in the pgd/pud and remove them.
         */
        if (pud_none(*pud) || pud_sect(*pud)) {
                pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
                if (pud_sect(*pud)) {
                        /*
                         * The existing 1G mapping needs to remain
                         * present while we split it.
                         */
                        split_pud(pud, pmd);
                }
                pud_populate(mm, pud, pmd);
                flush_tlb_all();
        }
        BUG_ON(pud_bad(*pud));

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
                if (((addr | next | phys) & ~SECTION_MASK) == 0) {
                        pmd_t old_pmd = *pmd;
                        set_pmd(pmd, __pmd(phys |
                                           pgprot_val(mk_sect_prot(prot))));
                        /*
                         * Check for previous table entries created during
                         * boot (__create_page_tables) and flush them.
                         */
                        if (!pmd_none(old_pmd)) {
                                flush_tlb_all();
                                if (pmd_table(old_pmd)) {
                                        phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
                                       prot, alloc);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);
}

static inline bool use_1G_block(unsigned long addr, unsigned long next,
                        unsigned long phys)
{
        if (PAGE_SHIFT != 12)
                return false;

        if (((addr | next | phys) & ~PUD_MASK) != 0)
                return false;

        return true;
}
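
/*
 * Rationale (inferred): a PUD-level block is 1GB only with the 4K
 * granule (PAGE_SHIFT == 12); other granules fold or resize this level,
 * so use_1G_block() refuses and the caller falls back to
 * alloc_init_pmd(). The second test additionally requires the virtual
 * start, virtual end and physical address to all be 1GB-aligned.
 */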

static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
                                  unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
                                  void *(*alloc)(unsigned long size))
{
        pud_t *pud;
        unsigned long next;

        if (pgd_none(*pgd)) {
                pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
                pgd_populate(mm, pgd, pud);
        }
        BUG_ON(pgd_bad(*pgd));

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);

                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
                if (use_1G_block(addr, next, phys)) {
                        pud_t old_pud = *pud;
                        set_pud(pud, __pud(phys |
                                           pgprot_val(mk_sect_prot(prot))));

                        /*
                         * If we have an old value for a pud, it will
                         * be pointing to a pmd table that we no longer
                         * need (from swapper_pg_dir).
                         *
                         * Look up the old pmd table and free it.
                         */
                        if (!pud_none(old_pud)) {
                                flush_tlb_all();
                                if (pud_table(old_pud)) {
                                        phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for
 * the requested virtual-to-physical mapping.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
                                    phys_addr_t phys, unsigned long virt,
                                    phys_addr_t size, pgprot_t prot,
                                    void *(*alloc)(unsigned long size))
{
        unsigned long addr, length, end, next;

        addr = virt & PAGE_MASK;
        length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
                alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
}
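
/*
 * Worked example (addresses illustrative): asking __create_mapping()
 * for 16MB at a section-aligned virt/phys pair stays within one pgd
 * entry; alloc_init_pud()/alloc_init_pmd() then lay down eight 2MB
 * section entries, calling 'alloc' only for intermediate tables that
 * do not exist yet.
 */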

static void *late_alloc(unsigned long size)
{
        void *ptr;

        BUG_ON(size > PAGE_SIZE);
        ptr = (void *)__get_free_page(PGALLOC_GFP);
        BUG_ON(!ptr);
        return ptr;
}
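
/*
 * Note (inferred from the surrounding code): early_alloc() and
 * late_alloc() are the two faces of the 'alloc' callback. Before the
 * page allocator is up, table pages must come from memblock; afterwards
 * __get_free_page() is used, relying on PGALLOC_GFP to include
 * __GFP_ZERO so the new table starts out clear.
 */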

static void __init create_mapping(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }
        __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
                         size, prot, early_alloc);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot)
{
        __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
                                late_alloc);
}

static void create_mapping_late(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }

        __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
                         phys, virt, size, prot, late_alloc);
}

#ifdef CONFIG_DEBUG_RODATA
#define SWAPPER_BLOCK_SIZE      (PAGE_SHIFT == 12 ? SECTION_SIZE : PAGE_SIZE)
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
        /*
         * Set up the executable regions using the existing section mappings
         * for now. This will get more fine-grained later, once all memory
         * is mapped.
         */
        unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
        unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);

        if (end < kernel_x_start) {
                create_mapping(start, __phys_to_virt(start),
                        end - start, PAGE_KERNEL);
        } else if (start >= kernel_x_end) {
                create_mapping(start, __phys_to_virt(start),
                        end - start, PAGE_KERNEL);
        } else {
                if (start < kernel_x_start)
                        create_mapping(start, __phys_to_virt(start),
                                kernel_x_start - start,
                                PAGE_KERNEL);
                create_mapping(kernel_x_start,
                                __phys_to_virt(kernel_x_start),
                                kernel_x_end - kernel_x_start,
                                PAGE_KERNEL_EXEC);
                if (kernel_x_end < end)
                        create_mapping(kernel_x_end,
                                __phys_to_virt(kernel_x_end),
                                end - kernel_x_end,
                                PAGE_KERNEL);
        }
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
        create_mapping(start, __phys_to_virt(start), end - start,
                        PAGE_KERNEL_EXEC);
}
#endif
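
/*
 * Summary of the split above (inferred): RAM below kernel_x_start and
 * above kernel_x_end is mapped non-executable (PAGE_KERNEL), while the
 * window holding the kernel image keeps PAGE_KERNEL_EXEC; the window is
 * rounded out to SWAPPER_BLOCK_SIZE so it can still use section
 * mappings, and fixup_executable() later trims the rounded edges.
 */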

static void __init map_mem(void)
{
        struct memblock_region *reg;
        phys_addr_t limit;

        /*
         * Temporarily limit the memblock range. We need to do this as
         * create_mapping requires puds, pmds and ptes to be allocated from
         * memory addressable from the initial direct kernel mapping.
         *
         * The initial direct kernel mapping, located at swapper_pg_dir, gives
         * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
         * PHYS_OFFSET (which must be aligned to 2MB as per
         * Documentation/arm64/booting.txt).
         */
        if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
                limit = PHYS_OFFSET + PMD_SIZE;
        else
                limit = PHYS_OFFSET + PUD_SIZE;
        memblock_set_current_limit(limit);

        /* map all the memory banks */
        for_each_memblock(memory, reg) {
                phys_addr_t start = reg->base;
                phys_addr_t end = start + reg->size;

                if (start >= end)
                        break;

#ifndef CONFIG_ARM64_64K_PAGES
                /*
                 * For the first memory bank align the start address and
                 * current memblock limit to prevent create_mapping() from
                 * allocating pte page tables from unmapped memory.
                 * When 64K pages are enabled, the pte page table for the
                 * first PGDIR_SIZE is already present in swapper_pg_dir.
                 */
                if (start < limit)
                        start = ALIGN(start, PMD_SIZE);
                if (end < limit) {
                        limit = end & PMD_MASK;
                        memblock_set_current_limit(limit);
                }
#endif
                __map_memblock(start, end);
        }

        /* Limit no longer required. */
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
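
/*
 * Why the temporary limit matters (inferred): early_alloc() hands table
 * pages back through __va(), so they must already be covered by the
 * initial swapper mapping; capping memblock to the first PUD_SIZE (or
 * PMD_SIZE) of RAM guarantees that until the full linear map exists.
 */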

void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
        /* now that we are actually fully mapped, make the start/end more fine-grained */
        if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_start = round_down(__pa(_stext),
                                                         SWAPPER_BLOCK_SIZE);

                create_mapping(aligned_start, __phys_to_virt(aligned_start),
                                __pa(_stext) - aligned_start,
                                PAGE_KERNEL);
        }

        if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_end = round_up(__pa(__init_end),
                                                          SWAPPER_BLOCK_SIZE);
                create_mapping(__pa(__init_end), (unsigned long)__init_end,
                                aligned_end - __pa(__init_end),
                                PAGE_KERNEL);
        }
#endif
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
        create_mapping_late(__pa(_stext), (unsigned long)_stext,
                                (unsigned long)_etext - (unsigned long)_stext,
                                PAGE_KERNEL_EXEC | PTE_RDONLY);
}
#endif

void fixup_init(void)
{
        create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
                        (unsigned long)__init_end - (unsigned long)__init_begin,
                        PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
        void *zero_page;

        map_mem();
        fixup_executable();

        /* allocate the zero page. */
        zero_page = early_alloc(PAGE_SIZE);

        bootmem_init();

        empty_zero_page = virt_to_page(zero_page);

        /* Ensure the zero page is visible to the page table walker */
        dsb(ishst);

        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
        flush_tlb_all();
        cpu_set_default_tcr_t0sz();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if ((((long)addr) >> VA_BITS) != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        if (pud_sect(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_sect(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}
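
/*
 * Example of the range check above (illustrative): kernel VAs are
 * sign-extended from bit VA_BITS - 1, so with VA_BITS == 39 an address
 * such as 0xffffffc000000000 gives ((long)addr) >> 39 == -1 and passes,
 * while any user address shifts to a non-negative value and is rejected
 * before the table walk.
 */
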
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        return vmemmap_populate_basepages(start, end, node);
}
#else   /* !CONFIG_ARM64_64K_PAGES */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long addr = start;
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        do {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        void *p = NULL;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
                } else {
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
                }
        } while (addr = next, addr != end);

        return 0;
}
#endif  /* CONFIG_ARM64_64K_PAGES */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif  /* CONFIG_SPARSEMEM_VMEMMAP */

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t *fixmap_pud(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);

        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

        return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
        pud_t *pud = fixmap_pud(addr);

        BUG_ON(pud_none(*pud) || pud_bad(*pud));

        return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
        pmd_t *pmd = fixmap_pmd(addr);

        BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

        return pte_offset_kernel(pmd, addr);
}
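
/*
 * Note (inferred): bm_pte/bm_pmd/bm_pud live in .bss precisely so the
 * fixmap can be wired up before any memory allocator exists; the
 * fixmap_{pud,pmd,pte}() walkers above resolve fixmap addresses through
 * these statically allocated tables.
 */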

void __init early_fixmap_init(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr = FIXADDR_START;

        pgd = pgd_offset_k(addr);
        pgd_populate(&init_mm, pgd, bm_pud);
        pud = pud_offset(pgd, addr);
        pud_populate(&init_mm, pud, bm_pmd);
        pmd = pmd_offset(pud, addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

        if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
             || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                pr_warn("pmd %p != %p, %p\n",
                        pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
        }
}

void __set_fixmap(enum fixed_addresses idx,
                               phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        pte = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, pte);
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
        }
}
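
/*
 * Hedged usage sketch (callers live outside this file): an early
 * console, for instance, maps its MMIO page with something like
 *
 *      __set_fixmap(FIX_EARLYCON_MEM_BASE, paddr & PAGE_MASK, FIXMAP_PAGE_IO);
 *
 * and tears it down again by passing an empty pgprot, which takes the
 * pte_clear() + TLB flush path above.
 */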

void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
        pgprot_t prot = PAGE_KERNEL | PTE_RDONLY;
        int granularity, size, offset;
        void *dt_virt;

        /*
         * Check whether the physical FDT address is set and meets the minimum
         * alignment requirement. We rely on MIN_FDT_ALIGN being at least
         * 8 bytes so that we can always access the size field of the FDT
         * header after mapping the first chunk; double-check that here.
         */
        BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
        if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
                return NULL;

        /*
         * Make sure that the FDT region can be mapped without the need to
         * allocate additional translation table pages, so that it is safe
         * to call create_mapping() this early.
         *
         * On 64k pages, the FDT will be mapped using PTEs, so we need to
         * be in the same PMD as the rest of the fixmap.
         * On 4k pages, we'll use section mappings for the FDT so we only
         * have to be in the same PUD.
         */
        BUILD_BUG_ON(dt_virt_base % SZ_2M);

        if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) {
                BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PMD_SHIFT !=
                             __fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT);

                granularity = PAGE_SIZE;
        } else {
                BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PUD_SHIFT !=
                             __fix_to_virt(FIX_BTMAP_BEGIN) >> PUD_SHIFT);

                granularity = PMD_SIZE;
        }

        offset = dt_phys % granularity;
        dt_virt = (void *)dt_virt_base + offset;

        /* map the first chunk so we can read the size from the header */
        create_mapping(round_down(dt_phys, granularity), dt_virt_base,
                       granularity, prot);

        if (fdt_check_header(dt_virt) != 0)
                return NULL;

        size = fdt_totalsize(dt_virt);
        if (size > MAX_FDT_SIZE)
                return NULL;

        if (offset + size > granularity)
                create_mapping(round_down(dt_phys, granularity), dt_virt_base,
                               round_up(offset + size, granularity), prot);

        memblock_reserve(dt_phys, size);

        return dt_virt;
}
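
/*
 * Worked example of the two-step mapping above (numbers illustrative):
 * with 4K pages, granularity == PMD_SIZE == 2MB. For
 * dt_phys == 0x4321f000, the first create_mapping() covers the 2MB
 * block at 0x43200000 and offset == 0x1f000, enough to read the FDT
 * header at dt_virt; if offset + fdt_totalsize() crosses the 2MB
 * boundary, the second call extends the mapping to the next
 * granularity multiple.
 */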