
TOMOYO Linux Cross Reference
Linux/arch/powerpc/mm/kasan/kasan_init_32.c

// SPDX-License-Identifier: GPL-2.0

#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/moduleloader.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>
#include <asm/pgalloc.h>
#include <asm/code-patching.h>
#include <mm/mmu_decl.h>

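/*
 * KASAN shadow initialisation for 32-bit powerpc.
 *
 * Generic KASAN tracks the state of each 8-byte granule of kernel memory
 * in one shadow byte.  kasan_mem_to_shadow() (see <linux/kasan.h>) maps an
 * address to its shadow as:
 *
 *	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * with KASAN_SHADOW_SCALE_SHIFT == 3.  Until real shadow memory has been
 * allocated, every shadow access is redirected to the single zero-filled
 * kasan_early_shadow_page, so early instrumentation reads benign values.
 */

/*
 * Protection used when the early shadow page must no longer be written:
 * hash-MMU (book3s/32) uses PAGE_READONLY, other 32-bit MMUs PAGE_KERNEL_RO.
 */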
static pgprot_t kasan_prot_ro(void)
{
        if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return PAGE_READONLY;

        return PAGE_KERNEL_RO;
}

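/*
 * Fill a page-table page so that every one of its PTEs maps
 * kasan_early_shadow_page with the given protection.  Used both to seed
 * the early shadow and, later, to remap it read-only.
 */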
static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
{
        unsigned long va = (unsigned long)kasan_early_shadow_page;
        phys_addr_t pa = __pa(kasan_early_shadow_page);
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
                __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
}

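/*
 * Allocate real PTE tables for the shadow range [k_start, k_end) wherever
 * the PMD still points at the shared early shadow PTE table.  Each new
 * table is populated outside the lock, then installed under
 * init_mm.page_table_lock; if someone else won the race, the freshly
 * allocated table is freed again.  Tables come from pte_alloc_one_kernel()
 * once slab_is_available(), and from memblock before that (hence the
 * __ref annotation).
 */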
static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
{
        pmd_t *pmd;
        unsigned long k_cur, k_next;
        pgprot_t prot = slab_is_available() ? kasan_prot_ro() : PAGE_KERNEL;

        pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);

        for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
                pte_t *new;

                k_next = pgd_addr_end(k_cur, k_end);
                if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
                        continue;

                if (slab_is_available())
                        new = pte_alloc_one_kernel(&init_mm);
                else
                        new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);

                if (!new)
                        return -ENOMEM;
                kasan_populate_pte(new, prot);

                smp_wmb(); /* See comment in __pte_alloc */

                spin_lock(&init_mm.page_table_lock);
                /* Has someone else populated it in the meantime? */
                if (likely((void *)pmd_page_vaddr(*pmd) == kasan_early_shadow_pte)) {
                        pmd_populate_kernel(&init_mm, pmd, new);
                        new = NULL;
                }
                spin_unlock(&init_mm.page_table_lock);

                if (new && slab_is_available())
                        pte_free_kernel(&init_mm, new);
        }
        return 0;
}

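/*
 * Allocate one zeroed page, from the page allocator once
 * slab_is_available() and from memblock before that.
 */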
static void __ref *kasan_get_one_page(void)
{
        if (slab_is_available())
                return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

        return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
}

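/*
 * Back the shadow of [start, start + size) with real memory.  Before slab
 * is up the whole shadow block is grabbed from memblock in one go;
 * afterwards it is built page by page.  With KASAN_SHADOW_SCALE_SHIFT == 3
 * the shadow costs one page per eight pages of the region it covers.
 */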
static int __ref kasan_init_region(void *start, size_t size)
{
        unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
        unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
        unsigned long k_cur;
        int ret;
        void *block = NULL;

        ret = kasan_init_shadow_page_tables(k_start, k_end);
        if (ret)
                return ret;

        if (!slab_is_available())
                block = memblock_alloc(k_end - k_start, PAGE_SIZE);

        for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
                pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
                void *va = block ? block + k_cur - k_start : kasan_get_one_page();
                pte_t pte;

                /* Don't build a PTE from a failed allocation. */
                if (!va)
                        return -ENOMEM;

                pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
                __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
        }
        flush_tlb_kernel_range(k_start, k_end);
        return 0;
}

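/*
 * Once real shadow memory is in place, nothing should write to the early
 * shadow page any more: remap every PTE still pointing at it read-only,
 * so a stray write faults instead of corrupting the zero shadow.
 */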
static void __init kasan_remap_early_shadow_ro(void)
{
        pgprot_t prot = kasan_prot_ro();
        unsigned long k_start = KASAN_SHADOW_START;
        unsigned long k_end = KASAN_SHADOW_END;
        unsigned long k_cur;
        phys_addr_t pa = __pa(kasan_early_shadow_page);

        kasan_populate_pte(kasan_early_shadow_pte, prot);

        for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
                pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
                pte_t *ptep = pte_offset_kernel(pmd, k_cur);

                if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
                        continue;

                __set_pte_at(&init_mm, k_cur, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
        }
        flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
}

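/*
 * Called once memblock knows the final memory layout: map real shadow
 * memory for every lowmem region.  On hash-MMU (book3s/32) parts, the
 * shadow page tables for the complete shadow space are (re)created first.
 */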
void __init kasan_mmu_init(void)
{
        int ret;
        struct memblock_region *reg;

        if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
                ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);

                if (ret)
                        panic("kasan: kasan_init_shadow_page_tables() failed");
        }

        for_each_memblock(memory, reg) {
                phys_addr_t base = reg->base;
                phys_addr_t top = min(base + reg->size, total_lowmem);

                if (base >= top)
                        continue;

                ret = kasan_init_region(__va(base), top - base);
                if (ret)
                        panic("kasan: kasan_init_region() failed");
        }
}

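/*
 * Final switch-over: make the early shadow read-only, clear any stale
 * shadow bytes written while it was still writable, and set
 * init_task.kasan_depth to 0 so KASAN starts reporting errors.
 */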
void __init kasan_init(void)
{
        kasan_remap_early_shadow_ro();

        clear_page(kasan_early_shadow_page);

        /* At this point kasan is fully initialized. Enable error messages */
        init_task.kasan_depth = 0;
        pr_info("KASAN init done\n");
}

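/*
 * Modules live in vmalloc space, which is covered only by the zero shadow
 * by default.  Override module_alloc() so that every module mapping gets
 * real, writable shadow memory; if that fails, undo the allocation.
 */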
#ifdef CONFIG_MODULES
void *module_alloc(unsigned long size)
{
        void *base;

        base = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, VMALLOC_END,
                                    GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
                                    NUMA_NO_NODE, __builtin_return_address(0));

        if (!base)
                return NULL;

        if (!kasan_init_region(base, size))
                return base;

        vfree(base);

        return NULL;
}
#endif

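/*
 * On book3s/32 the hash MMU needs a hash table before any of the early
 * KASAN mappings can be resolved, long before memblock can hand one out.
 * Patch the instructions that load the hash table address so that the
 * hash handlers use a static, suitably aligned 256 KiB buffer instead.
 */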
#ifdef CONFIG_PPC_BOOK3S_32
u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};

static void __init kasan_early_hash_table(void)
{
        modify_instruction_site(&patch__hash_page_A0, 0xffff, __pa(early_hash) >> 16);
        modify_instruction_site(&patch__flush_hash_A0, 0xffff, __pa(early_hash) >> 16);

        Hash = (struct hash_pte *)early_hash;
}
#else
static void __init kasan_early_hash_table(void) {}
#endif

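/*
 * Runs very early in boot: point every PMD of the shadow region at the
 * shared early shadow PTE table, so that all shadow accesses resolve to
 * the zero page until real shadow memory is allocated.  The BUILD_BUG_ON
 * guards the assumption that the shadow region starts on a PGDIR boundary.
 */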
void __init kasan_early_init(void)
{
        unsigned long addr = KASAN_SHADOW_START;
        unsigned long end = KASAN_SHADOW_END;
        unsigned long next;
        pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);

        BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);

        kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);

        do {
                next = pgd_addr_end(addr, end);
                pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte);
        } while (pmd++, addr = next, addr != end);

        if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
                kasan_early_hash_table();
}
