
TOMOYO Linux Cross Reference
Linux/arch/powerpc/mm/subpage-prot.c


/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>

/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
        struct subpage_prot_table *spt = &mm->context.spt;
        unsigned long i, j, addr;
        u32 **p;

        for (i = 0; i < 4; ++i) {
                if (spt->low_prot[i]) {
                        free_page((unsigned long)spt->low_prot[i]);
                        spt->low_prot[i] = NULL;
                }
        }
        addr = 0;
        for (i = 0; i < 2; ++i) {
                p = spt->protptrs[i];
                if (!p)
                        continue;
                spt->protptrs[i] = NULL;
                for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
                     ++j, addr += PAGE_SIZE)
                        if (p[j])
                                free_page((unsigned long)p[j]);
                free_page((unsigned long)p);
        }
        spt->maxaddr = 0;
}
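
/*
 * Illustrative sketch, not part of the original file: how the
 * three-level protection map is indexed.  The SBP_* constants come
 * from asm/book3s/64/mmu-hash.h; with 64k pages each leaf page of
 * u32s covers 1GB, low_prot[] covers the first 4GB, and each
 * protptrs[] entry covers an 8TB slice.  subpage_prot_lookup() is a
 * hypothetical helper name; it mirrors the lookups coded inline in
 * subpage_prot_clear() and sys_subpage_prot() below.
 */
static inline u32 *subpage_prot_lookup(struct subpage_prot_table *spt,
                                       unsigned long addr)
{
        u32 **spm;

        if (addr < 0x100000000UL)       /* first 4GB uses low_prot[] */
                spm = spt->low_prot;
        else                            /* one pointer page per 8TB */
                spm = spt->protptrs[addr >> SBP_L3_SHIFT];
        if (!spm)
                return NULL;
        /* select the leaf page covering this 1GB slice */
        spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
        if (!*spm)
                return NULL;
        /* one u32 of 2-bit fields per 64k page */
        return *spm + ((addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1));
}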

void subpage_prot_init_new_context(struct mm_struct *mm)
{
        struct subpage_prot_table *spt = &mm->context.spt;

        memset(spt, 0, sizeof(*spt));
}

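/*
 * Walk down to the PTEs covering [addr, addr + npages * PAGE_SIZE) and
 * do a no-op pte_update() on each one.  The update clears and sets no
 * bits, so the Linux PTEs are unchanged, but on the hash MMU it causes
 * any existing hash-table entry (HPTE) for the page to be flushed.
 */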
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
                             int npages)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                return;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return;
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
        for (; npages > 0; --npages) {
                pte_update(mm, addr, pte, 0, 0, 0);
                addr += PAGE_SIZE;
                ++pte;
        }
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
        struct mm_struct *mm = current->mm;
        struct subpage_prot_table *spt = &mm->context.spt;
        u32 **spm, *spp;
        unsigned long i;
        size_t nw;
        unsigned long next, limit;

        down_write(&mm->mmap_sem);
        limit = addr + len;
        if (limit > spt->maxaddr)
                limit = spt->maxaddr;
        for (; addr < limit; addr = next) {
                next = pmd_addr_end(addr, limit);
                if (addr < 0x100000000UL) {
                        spm = spt->low_prot;
                } else {
                        spm = spt->protptrs[addr >> SBP_L3_SHIFT];
                        if (!spm)
                                continue;
                }
                spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
                if (!spp)
                        continue;
                spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

                i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
                nw = PTRS_PER_PTE - i;
                if (addr + (nw << PAGE_SHIFT) > next)
                        nw = (next - addr) >> PAGE_SHIFT;

                memset(spp, 0, nw * sizeof(u32));

                /* now flush any existing HPTEs for the range */
                hpte_flush_range(mm, addr, nw);
        }
        up_write(&mm->mmap_sem);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        split_huge_pmd(vma, pmd, addr);
        return 0;
}

static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
                                    unsigned long len)
{
        struct vm_area_struct *vma;
        struct mm_walk subpage_proto_walk = {
                .mm = mm,
                .pmd_entry = subpage_walk_pmd_entry,
        };

        /*
         * We don't try too hard; we just mark all the VMAs in that range
         * VM_NOHUGEPAGE and split them.
         */
        vma = find_vma(mm, addr);
        /*
         * If the whole range lies in an unmapped gap, just return.
         */
        if (vma && ((addr + len) <= vma->vm_start))
                return;

        while (vma) {
                if (vma->vm_start >= (addr + len))
                        break;
                vma->vm_flags |= VM_NOHUGEPAGE;
                walk_page_vma(vma, &subpage_proto_walk);
                vma = vma->vm_next;
        }
}
#else
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
                                    unsigned long len)
{
        return;
}
#endif

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
{
        struct mm_struct *mm = current->mm;
        struct subpage_prot_table *spt = &mm->context.spt;
        u32 **spm, *spp;
        unsigned long i;
        size_t nw;
        unsigned long next, limit;
        int err;

        /* Check parameters */
        if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
            addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE)
                return -EINVAL;

        if (is_hugepage_only_range(mm, addr, len))
                return -EINVAL;

        if (!map) {
                /* Clear out the protection map for the address range */
                subpage_prot_clear(addr, len);
                return 0;
        }

        if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
                return -EFAULT;

        down_write(&mm->mmap_sem);
        subpage_mark_vma_nohuge(mm, addr, len);
        for (limit = addr + len; addr < limit; addr = next) {
                next = pmd_addr_end(addr, limit);
                err = -ENOMEM;
                if (addr < 0x100000000UL) {
                        spm = spt->low_prot;
                } else {
                        spm = spt->protptrs[addr >> SBP_L3_SHIFT];
                        if (!spm) {
                                spm = (u32 **)get_zeroed_page(GFP_KERNEL);
                                if (!spm)
                                        goto out;
                                spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
                        }
                }
                spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
                spp = *spm;
                if (!spp) {
                        spp = (u32 *)get_zeroed_page(GFP_KERNEL);
                        if (!spp)
                                goto out;
                        *spm = spp;
                }
                spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

                local_irq_disable();
                demote_segment_4k(mm, addr);
                local_irq_enable();

                i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
                nw = PTRS_PER_PTE - i;
                if (addr + (nw << PAGE_SHIFT) > next)
                        nw = (next - addr) >> PAGE_SHIFT;

                up_write(&mm->mmap_sem);
                err = -EFAULT;
                if (__copy_from_user(spp, map, nw * sizeof(u32)))
                        goto out2;
                map += nw;
                down_write(&mm->mmap_sem);

                /* now flush any existing HPTEs for the range */
                hpte_flush_range(mm, addr, nw);
        }
        if (limit > spt->maxaddr)
                spt->maxaddr = limit;
        err = 0;
 out:
        up_write(&mm->mmap_sem);
 out2:
        return err;
}
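
/*
 * Illustrative user-space sketch, not part of this file: deny writes to
 * the first 4k subpage of one 64k page.  Assumes a powerpc64 system with
 * 64k pages; deny_write_first_4k() is a hypothetical helper name.  The
 * hash fault path (subpage_protection() in hash_utils_64.c) takes the
 * 2-bit field for subpage 0 from the most-significant bits of each word.
 * Passing map == NULL instead clears the protection map for the range,
 * as sys_subpage_prot() above shows.
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static long deny_write_first_4k(unsigned long addr)
 *	{
 *		uint32_t map = 1U << 30;   // field value 1 = no writes
 *
 *		// addr and len must be multiples of the 64k page size
 *		return syscall(__NR_subpage_prot, addr, 0x10000UL, &map);
 *	}
 */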
