TOMOYO Linux Cross Reference
Linux/arch/um/include/asm/pgtable.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT   0x001
#define _PAGE_NEWPAGE   0x002
#define _PAGE_NEWPROT   0x004
#define _PAGE_RW        0x020
#define _PAGE_USER      0x040
#define _PAGE_ACCESSED  0x080
#define _PAGE_DIRTY     0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE  0x010   /* if the user mapped it with PROT_NONE;
                                   pte_present gives true */
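
/*
 * _PAGE_NEWPAGE and _PAGE_NEWPROT are UML-specific software bits: they
 * mark ptes whose host-side mapping (_PAGE_NEWPAGE) or protection
 * (_PAGE_NEWPROT) is out of date and still has to be applied to the
 * host address space (see the comment in set_pte() below).
 */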

#ifdef CONFIG_3_LEVEL_PGTABLES
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

/* Just an arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after
 * physical memory until the kernel virtual memory starts.  That means
 * that any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET  (__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#define VMALLOC_END     (FIXADDR_START-2*PAGE_SIZE)
#define MODULES_VADDR   VMALLOC_START
#define MODULES_END     VMALLOC_END
#define MODULES_LEN     (MODULES_END - MODULES_VADDR)
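/*
 * VMALLOC_START is the next VMALLOC_OFFSET-aligned address above
 * end_iomem; the & ~(VMALLOC_OFFSET-1) trick assumes __va_space is a
 * power of two.  Modules are simply loaded into the vmalloc area.
 */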

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
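/*
 * _PAGE_TABLE above is used for table entries pointing at user page
 * tables; _KERNPG_TABLE is the same minus _PAGE_USER, for tables that
 * map kernel-only memory.
 */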
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC                                              \
         (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC        __pgprot(__PAGE_KERNEL_EXEC)

/*
 * The i386 can't do page protection for execute, and considers
 * execute permission the same as read permission.  Also, write
 * permissions imply read permissions.  This is the closest we can
 * get..
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
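/*
 * pte_clear() leaves _PAGE_NEWPAGE set in the cleared pte so that the
 * tlb-flush code knows the old host mapping still has to be unmapped.
 */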

#define pmd_none(x)     (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)  (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)   do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)  (pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)  (pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define p4d_newpage(x)  (p4d_val(x) & _PAGE_NEWPAGE)
#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x)  pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
        return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_USER) &&
               !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_exec(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_USER) &&
               !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_write(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_RW) &&
               !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_dirty(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
        return pte_present(pte) && pte_get_bits(pte, _PAGE_NEWPROT);
}

/*
 * =================================
 * Flags setting section.
 * =================================
 */
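/*
 * Note the pattern below: helpers that change the protection the host
 * mapping should have (pte_wrprotect, pte_mkread, pte_mkwrite) also
 * set _PAGE_NEWPROT, so the change is later propagated to the host;
 * pure accessed/dirty bookkeeping helpers do not.
 */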

static inline pte_t pte_mknewprot(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPROT);
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_DIRTY);
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_ACCESSED);
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        if (likely(pte_get_bits(pte, _PAGE_RW)))
                pte_clear_bits(pte, _PAGE_RW);
        else
                return pte;
        return pte_mknewprot(pte);
}

static inline pte_t pte_mkread(pte_t pte)
{
        if (unlikely(pte_get_bits(pte, _PAGE_USER)))
                return pte;
        pte_set_bits(pte, _PAGE_USER);
        return pte_mknewprot(pte);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_set_bits(pte, _PAGE_DIRTY);
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_set_bits(pte, _PAGE_ACCESSED);
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        if (unlikely(pte_get_bits(pte, _PAGE_RW)))
                return pte;
        pte_set_bits(pte, _PAGE_RW);
        return pte_mknewprot(pte);
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_NEWPAGE);
        if (pte_present(pte))
                pte_clear_bits(pte, _PAGE_NEWPROT);
        return pte;
}

static inline pte_t pte_mknewpage(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPAGE);
        return pte;
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
        pte_copy(*pteptr, pteval);

        /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
         * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
         * mapped pages.
         */

        *pteptr = pte_mknewpage(*pteptr);
        if (pte_present(*pteptr))
                *pteptr = pte_mknewprot(*pteptr);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *pteptr, pte_t pteval)
{
        set_pte(pteptr, pteval);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
        return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
}
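/*
 * _PAGE_NEWPAGE is ignored by pte_same(): it is a software-only flag,
 * so two ptes that differ only in it still refer to the same page with
 * the same protection.
 */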

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
        ({ pte_t pte;                                   \
                                                        \
        pte_set_val(pte, page_to_phys(page), (pgprot)); \
        if (pte_present(pte))                           \
                /* keep the returned value: these helpers
                 * take and return the pte by value */  \
                pte = pte_mknewprot(pte_mknewpage(pte));\
        pte;})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
        return pte;
}

/*
 * The pmd page can be thought of as an array like this:
 * pmd_t[PTRS_PER_PMD].  pmd_page_vaddr() returns the kernel virtual
 * address of the page-table page that the given pmd entry points to.
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
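/*
 * Usage sketch (assuming virt_to_pte() returns NULL for an unmapped
 * address):
 *
 *      pte_t *pte = virt_to_pte(&init_mm, addr);
 *      if (pte && pte_present(*pte))
 *              ... the backing frame is pte_page(*pte) ...
 */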

#define update_mmu_cache(vma,address,ptep) do {} while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)                   (((x).val >> 5) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 11)

#define __swp_entry(type, offset) \
        ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
        ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
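/*
 * Layout of the encoded value: bits 0-4 are left clear (so neither
 * _PAGE_PRESENT nor _PAGE_PROTNONE can appear in a swap pte), bits 5-9
 * hold the swap type (up to 32 types), and the swap offset starts at
 * bit 11.  __pte_to_swp_entry() strips _PAGE_NEWPAGE via
 * pte_mkuptodate() before handing the value back.
 */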

#define kern_addr_valid(addr) (1)

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)           \
do {                                            \
        pte_clear(&init_mm, (vaddr), (ptep));   \
        __flush_tlb_one((vaddr));               \
} while (0)

#endif
