Linux/arch/arm64/include/asm/pgtable.h

/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *      and fixed mappings
 */
#define VMALLOC_START           (MODULES_END)
#define VMALLOC_END             (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap                 ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#define FIRST_USER_ADDRESS      0UL

#ifndef __ASSEMBLY__

#include <asm/fixmap.h>
#include <linux/mmdebug.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)        pfn_to_page(PHYS_PFN(__pa(empty_zero_page)))
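
/*
 * Illustrative sketch, not part of this header: a read fault on an
 * untouched anonymous mapping can be satisfied by pointing the pte at
 * the shared zero page instead of allocating memory, roughly:
 *
 *      pte_t pte = pte_mkspecial(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
 *      set_pte_at(vma->vm_mm, addr, ptep, pte);
 *
 * The real logic lives in the generic fault path (do_anonymous_page());
 * the snippet only shows how ZERO_PAGE() composes with the helpers
 * defined later in this file.
 */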

#define pte_ERROR(pte)          __pte_error(__FILE__, __LINE__, pte_val(pte))

#define pte_pfn(pte)            ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)       (__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)           (!pte_val(pte))
#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
#define pte_page(pte)           (pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)        (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)          (!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)        (!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)          (!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)      (!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)           (!!(pte_val(pte) & PTE_CONT))

#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte)       (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#else
#define pte_hw_dirty(pte)       (0)
#endif
#define pte_sw_dirty(pte)       (!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)          (pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)          (!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
        ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
#define pte_valid_young(pte) \
        ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
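
/*
 * Illustrative restatement of the comment above (no new definitions):
 * the bits that distinguish the three kinds of valid mapping are
 *
 *      mapping                 PTE_VALID  PTE_USER  PTE_UXN
 *      kernel                      1          0        1
 *      user (normal)               1          1        0 or 1
 *      user (execute-only)         1          0        0
 *
 * so pte_valid_not_user() matches only kernel mappings: it requires
 * PTE_VALID and PTE_UXN set with PTE_USER clear, a combination an
 * execute-only user mapping (PTE_UXN clear) can never produce.
 */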

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte) \
        (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
        pte_val(pte) &= ~pgprot_val(prot);
        return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
        pte_val(pte) |= pgprot_val(prot);
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
        pte = set_pte_bit(pte, __pgprot(PTE_CONT));
        return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_clear_rdonly(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(PTE_RDONLY));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
        return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        *ptep = pte;

        /*
         * Only if the new pte is valid and kernel; otherwise TLB maintenance
         * or update_mmu_cache() provide the necessary barriers.
         */
        if (pte_valid_not_user(pte)) {
                dsb(ishst);
                isb();
        }
}

struct mm_struct;
struct vm_area_struct;

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |   1           0          0
 *   0      1      |   1           1          0
 *   1      0      |   1           0          1
 *   1      1      |   0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (pte_present(pte)) {
                if (pte_sw_dirty(pte) && pte_write(pte))
                        pte_val(pte) &= ~PTE_RDONLY;
                else
                        pte_val(pte) |= PTE_RDONLY;
                if (pte_user_exec(pte) && !pte_special(pte))
                        __sync_icache_dcache(pte, addr);
        }

        /*
         * If the existing pte is valid, check for potential race with
         * hardware updates of the pte (ptep_set_access_flags safely changes
         * valid ptes without going through an invalid entry).
         */
        if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
            pte_valid(*ptep) && pte_valid(pte)) {
                VM_WARN_ONCE(!pte_young(pte),
                             "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
                             __func__, pte_val(*ptep), pte_val(pte));
                VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
                             "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
                             __func__, pte_val(*ptep), pte_val(pte));
        }

        set_pte(ptep, pte);
}
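
/*
 * Worked example, derived from the DBM table above (illustrative only):
 * a clean, writable pte starts as PTE_RDONLY=1, PTE_WRITE=1, sw
 * PTE_DIRTY=0. With hardware DBM, the first store clears PTE_RDONLY,
 * after which
 *
 *      pte_hw_dirty(pte) == pte_write(pte) && !PTE_RDONLY == 1
 *      pte_sw_dirty(pte) == 0
 *      pte_dirty(pte)    == 1
 *
 * set_pte_at() keeps the two views consistent when reinstalling a pte:
 * a software-dirty, writable pte has PTE_RDONLY cleared, anything else
 * has it set, matching the table row by row.
 */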

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
        pteval_t lhs, rhs;

        lhs = pte_val(pte_a);
        rhs = pte_val(pte_b);

        if (pte_present(pte_a))
                lhs &= ~PTE_RDONLY;

        if (pte_present(pte_b))
                rhs &= ~PTE_RDONLY;

        return (lhs == rhs);
}
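
/*
 * Illustrative consequence (hypothetical pfn; assumes the ptes are
 * present): two ptes that differ only in PTE_RDONLY compare equal, e.g.
 *
 *      pte_t a = pfn_pte(pfn, PAGE_READONLY);            // PTE_RDONLY set
 *      pte_t b = clear_pte_bit(a, __pgprot(PTE_RDONLY));
 *      pte_same(a, b);                                   // returns 1
 *
 * Masking PTE_RDONLY means a concurrent hardware DBM update (which
 * clears PTE_RDONLY on write) is not misread as a changed pte by
 * callers that compare entries, such as the access-flag fault paths.
 */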

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)           (!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)         (__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE         4
#define HPAGE_SHIFT             PMD_SHIFT
#define HPAGE_SIZE              (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK              (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER      (HPAGE_SHIFT - PAGE_SHIFT)
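
/*
 * Worked numbers (illustrative; assumes the common 4K-granule layout
 * where PMD_SHIFT == 21 and PAGE_SHIFT == 12):
 *
 *      HPAGE_SHIFT        = 21
 *      HPAGE_SIZE         = 1UL << 21 = 2 MiB
 *      HPAGE_MASK         = ~(2 MiB - 1)
 *      HUGETLB_PAGE_ORDER = 21 - 12   = 9      (512 base pages)
 *
 * With 64K pages (PAGE_SHIFT == 16, PMD_SHIFT == 29) the same arithmetic
 * gives 512 MiB huge pages of order 13.
 */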

#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pud_pte(pud_t pud)
{
        return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
        return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
        return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
        return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
        return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
        return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
        return pte_protnone(pmd_pte(pmd));
}
#endif
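
/*
 * Illustrative bit pattern (restating the check above): a pte remapped
 * PROT_NONE by NUMA balancing has PTE_PROT_NONE set and PTE_VALID clear,
 * so
 *
 *      pte_present(pte)  == 1    // PTE_PROT_NONE counts as present
 *      pte_valid(pte)    == 0    // hardware faults on any access
 *      pte_protnone(pte) == 1
 *
 * whereas any valid pte has PTE_VALID set and pte_protnone() is 0.
 */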

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)     (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd)        pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)          pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)          pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)      pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)          pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)        pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)        pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)        pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)        pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)   (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd)    (pmd_huge(pmd) || pmd_trans_huge(pmd))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)          pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)         (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)            (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)       (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)       pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)          pte_write(pud_pte(pud))
#define pud_pfn(pud)            (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)

#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __pgprot_modify(prot,mask,bits) \
        __pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
        __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
        __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
        __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
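
/*
 * Usage sketch (hedged: a generic driver-style example, not code from
 * this file; "mydrv_mmap" and "pfn" are hypothetical). A typical mmap()
 * handler for a device BAR applies one of the modifiers above before
 * remapping:
 *
 *      static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *              return remap_pfn_range(vma, vma->vm_start, pfn,
 *                                     vma->vm_end - vma->vm_start,
 *                                     vma->vm_page_prot);
 *      }
 *
 * pgprot_writecombine() is the usual choice for frame buffers, where
 * gathering of writes is acceptable.
 */
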
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)           (!pmd_val(pmd))

#define pmd_bad(pmd)            (!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)          ((pmd_val(pmd) & PMD_TYPE_MASK) == \
                                 PMD_TYPE_TABLE)
#define pmd_sect(pmd)           ((pmd_val(pmd) & PMD_TYPE_MASK) == \
                                 PMD_TYPE_SECT)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
#define pud_sect(pud)           (0)
#define pud_table(pud)          (1)
#else
#define pud_sect(pud)           ((pud_val(pud) & PUD_TYPE_MASK) == \
                                 PUD_TYPE_SECT)
#define pud_table(pud)          ((pud_val(pud) & PUD_TYPE_MASK) == \
                                 PUD_TYPE_TABLE)
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        *pmdp = pmd;
        dsb(ishst);
        isb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
        return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the third-level page table. */
#define pte_index(addr)         (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)       (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)     ((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr)        pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)                  do { } while (0)
#define pte_unmap_nested(pte)           do { } while (0)

#define pte_set_fixmap(addr)            ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)        pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()              clear_fixmap(FIX_PTE)

#define pmd_page(pmd)           pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)       ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)       pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)          __pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)           (!pud_val(pud))
#define pud_bad(pud)            (!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)        (pud_val(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        *pudp = pud;
        dsb(ishst);
        isb();
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
        return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)         (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)      (pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)           ((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)            ((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)        pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()              clear_fixmap(FIX_PMD)

#define pud_page(pud)           pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)       ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)     ({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)            NULL
#define pmd_set_fixmap_offset(pudp, addr)       ((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)       ((pmd_t *)dir)

#endif  /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)          __pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)           (!pgd_val(pgd))
#define pgd_bad(pgd)            (!(pgd_val(pgd) & 2))
#define pgd_present(pgd)        (pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        *pgdp = pgd;
        dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
        return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)         (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)      (pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)           ((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr)            ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr)        pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap()              clear_fixmap(FIX_PUD)

#define pgd_page(pgd)           pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)       ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define pgd_page_paddr(pgd)     ({ BUILD_BUG(); 0; })

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)            NULL
#define pud_set_fixmap_offset(pgdp, addr)       ((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)       ((pud_t *)dir)

#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)          __pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)         (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)       ((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)    (pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)      pgd_offset(&init_mm, addr)

#define pgd_set_fixmap(addr)    ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()      clear_fixmap(FIX_PGD)
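
/*
 * Putting the walk together (illustrative sketch; assumes a mapped
 * kernel address and CONFIG_PGTABLE_LEVELS == 4; with fewer levels the
 * pud/pmd steps are folded away by the generic headers):
 *
 *      pgd_t *pgd = pgd_offset_k(addr);
 *      pud_t *pud = pud_offset(pgd, addr);
 *      pmd_t *pmd = pmd_offset(pud, addr);
 *      pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Each step masks the current entry with PHYS_MASK & PAGE_MASK to get
 * the next table's physical address, then indexes it with the matching
 * *_index(addr) bits of the virtual address.
 */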

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
                              PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
        /* preserve the hardware dirty information */
        if (pte_hw_dirty(pte))
                pte = pte_mkdirty(pte);
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
}
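
/*
 * Illustrative use (hedged): protection changes such as mprotect() come
 * through pte_modify(); only the bits in `mask' are taken from the new
 * protection, so the pfn and the memory-type attributes survive:
 *
 *      pte = pte_modify(pte, PAGE_READONLY);   // drops PTE_WRITE, keeps pfn
 *
 * The pte_hw_dirty() check above makes a hardware-dirtied pte
 * software-dirty before the new protection reintroduces PTE_RDONLY, so
 * no dirty information is lost.
 */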

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#ifdef CONFIG_ARM64_HW_AFDBM
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long address, pmd_t *pmdp,
                                        pmd_t entry, int dirty)
{
        return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
        pteval_t pteval;
        unsigned int tmp, res;

        asm volatile("//        __ptep_test_and_clear_young\n"
        "       prfm    pstl1strm, %2\n"
        "1:     ldxr    %0, %2\n"
        "       ubfx    %w3, %w0, %5, #1        // extract PTE_AF (young)\n"
        "       and     %0, %0, %4              // clear PTE_AF\n"
        "       stxr    %w1, %0, %2\n"
        "       cbnz    %w1, 1b\n"
        : "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
        : "L" (~PTE_AF), "I" (ilog2(PTE_AF)));

        return res;
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address,
                                            pte_t *ptep)
{
        return __ptep_test_and_clear_young(ptep);
}
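
/*
 * Usage note (illustrative): page aging in reclaim uses this helper to
 * sample and clear the access flag in one shot, e.g.
 *
 *      int young = ptep_test_and_clear_young(vma, addr, ptep);
 *      // young != 0: the mapping was referenced since the last clear
 *
 * The LL/SC loop is required because, with hardware AF/DBM, the MMU may
 * be updating the same pte concurrently; a plain read-modify-write
 * could lose one of the updates.
 */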

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address,
                                            pmd_t *pmdp)
{
        return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long address, pte_t *ptep)
{
        pteval_t old_pteval;
        unsigned int tmp;

        asm volatile("//        ptep_get_and_clear\n"
        "       prfm    pstl1strm, %2\n"
        "1:     ldxr    %0, %2\n"
        "       stxr    %w1, xzr, %2\n"
        "       cbnz    %w1, 1b\n"
        : "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));

        return __pte(old_pteval);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long address, pmd_t *pmdp)
{
        return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
        pteval_t pteval;
        unsigned long tmp;

        asm volatile("//        ptep_set_wrprotect\n"
        "       prfm    pstl1strm, %2\n"
        "1:     ldxr    %0, %2\n"
        "       tst     %0, %4                  // check for hw dirty (!PTE_RDONLY)\n"
        "       csel    %1, %3, xzr, eq         // set PTE_DIRTY|PTE_RDONLY if dirty\n"
        "       orr     %0, %0, %1              // if !dirty, PTE_RDONLY is already set\n"
        "       and     %0, %0, %5              // clear PTE_WRITE/PTE_DBM\n"
        "       stxr    %w1, %0, %2\n"
        "       cbnz    %w1, 1b\n"
        : "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
        : "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
        : "cc");
}
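
/*
 * Worked example (illustrative, following the comment above): take a
 * hardware-dirty pte, i.e. PTE_WRITE (DBM) set and PTE_RDONLY clear.
 * The sequence above then does:
 *
 *      tst   finds PTE_RDONLY clear            -> pte is dirty
 *      csel  selects PTE_DIRTY | PTE_RDONLY    -> dirty moves to the sw bit
 *      orr   sets those bits
 *      and   clears PTE_WRITE                  -> further writes fault
 *
 * The result is read-only and software-dirty, so e.g. fork()'s
 * write-protection of parent ptes cannot lose the dirty state.
 */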

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long address, pmd_t *pmdp)
{
        ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#endif
#endif  /* CONFIG_ARM64_HW_AFDBM */

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:
 *      bits 0-1:       present (must be zero)
 *      bits 2-7:       swap type
 *      bits 8-57:      swap offset
 *      bit  58:        PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT        2
#define __SWP_TYPE_BITS         6
#define __SWP_OFFSET_BITS       50
#define __SWP_TYPE_MASK         ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT      (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK       ((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)           (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)         (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
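
/*
 * Worked encode/decode (illustrative, using type 3 and offset 0x1234):
 *
 *      __swp_entry(3, 0x1234).val = (3 << 2) | (0x1234 << 8)
 *                                 = 0xc | 0x123400 = 0x12340c
 *
 *      __swp_type(entry)   = (0x12340c >> 2) & 0x3f = 3
 *      __swp_offset(entry) = (0x12340c >> 8) & __SWP_OFFSET_MASK = 0x1234
 *
 * Bits 0-1 and bit 58 stay zero, so the resulting pte is neither valid
 * nor PTE_PROT_NONE and can never be mistaken for a present mapping.
 */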

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

void pgd_cache_init(void);
#define pgtable_cache_init      pgd_cache_init

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
                                    unsigned long addr, pte_t *ptep)
{
        /*
         * We don't do anything here, so there's a very small chance of
         * us retaking a user fault which we just fixed up. The alternative
         * is doing a dsb(ishst), but that penalises the fastpath.
         */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)   ((v) & ~VA_START)
#define kc_offset_to_vaddr(o)   ((o) | VA_START)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */