TOMOYO Linux Cross Reference
Linux/arch/um/include/asm/pgtable.h


/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT   0x001
#define _PAGE_NEWPAGE   0x002
#define _PAGE_NEWPROT   0x004
#define _PAGE_RW        0x020
#define _PAGE_USER      0x040
#define _PAGE_ACCESSED  0x080
#define _PAGE_DIRTY     0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE  0x010   /* if the user mapped it with PROT_NONE;
                                   pte_present gives true */

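/*
 * Illustrative note: _PAGE_NEWPAGE and _PAGE_NEWPROT are software-only
 * bits.  As the set_pte() comment below explains, they tell the flush
 * code which entries still need the host mapping created, removed, or
 * reprotected.  The remaining bits combine as on i386, e.g.:
 *
 *   _PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED
 *       = 0x001 | 0x020 | 0x040 | 0x080 = 0x0e1
 */
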
#ifdef CONFIG_3_LEVEL_PGTABLES
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

#define pgtable_cache_init() do ; while (0)

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET  (__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#define VMALLOC_END     (FIXADDR_START-2*PAGE_SIZE)
#define MODULES_VADDR   VMALLOC_START
#define MODULES_END     VMALLOC_END
#define MODULES_LEN     (MODULES_END - MODULES_VADDR)
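
/*
 * Worked example (illustrative): with an 8MB VMALLOC_OFFSET (0x800000)
 * and a made-up end_iomem of 0x12345678:
 *
 *   VMALLOC_START = (0x12345678 + 0x800000) & ~0x7fffff = 0x12800000
 *
 * i.e. the next VMALLOC_OFFSET-aligned address past end_iomem, which
 * leaves the "hole" described above.
 */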

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC                                              \
         (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC        __pgprot(__PAGE_KERNEL_EXEC)

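/*
 * Worked values (illustrative), from the flag definitions above:
 *
 *   PAGE_SHARED   = 0x001 | 0x020 | 0x040 | 0x080 = 0x0e1
 *   PAGE_READONLY = 0x001 | 0x040 | 0x080         = 0x0c1
 *
 * Note that PAGE_COPY and PAGE_READONLY carry identical bits: a private
 * writable mapping starts out read-only and only gains _PAGE_RW when the
 * first write fault copies the page.
 */
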
/*
 * The i386 can't do page protection for execute, and considers that the
 * same as a read.  Also, write permissions imply read permissions.  This
 * is the closest we can get..
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED

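/*
 * Illustrative example: these tables are indexed by the mmap() protection
 * bits, with the name digits in <exec><write><read> order.  A MAP_PRIVATE
 * mapping created with PROT_READ | PROT_WRITE selects __P011 == PAGE_COPY,
 * so it starts out read-only and is copied on the first write; the
 * MAP_SHARED equivalent selects __S011 == PAGE_SHARED, writable outright.
 */
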
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)     (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)  (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)   do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)  (pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)  (pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x)  pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

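/*
 * Illustrative note: pte_present() also accepts _PAGE_PROTNONE, so a page
 * remapped with mprotect(addr, len, PROT_NONE) still reads as present to
 * the generic mm code even though _PAGE_PRESENT is clear and every user
 * access faults (see the _PAGE_PROTNONE comment above).
 */
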
/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
        return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_USER) &&
               !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_exec(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_USER) &&
               !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_write(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_RW) &&
               !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_dirty(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
        return pte_present(pte) && pte_get_bits(pte, _PAGE_NEWPROT);
}

static inline int pte_special(pte_t pte)
{
        return 0;
}

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPROT);
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_DIRTY);
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_ACCESSED);
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        if (likely(pte_get_bits(pte, _PAGE_RW)))
                pte_clear_bits(pte, _PAGE_RW);
        else
                return pte;
        return pte_mknewprot(pte);
}

static inline pte_t pte_mkread(pte_t pte)
{
        if (unlikely(pte_get_bits(pte, _PAGE_USER)))
                return pte;
        pte_set_bits(pte, _PAGE_USER);
        return pte_mknewprot(pte);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_set_bits(pte, _PAGE_DIRTY);
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_set_bits(pte, _PAGE_ACCESSED);
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        if (unlikely(pte_get_bits(pte, _PAGE_RW)))
                return pte;
        pte_set_bits(pte, _PAGE_RW);
        return pte_mknewprot(pte);
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_NEWPAGE);
        if (pte_present(pte))
                pte_clear_bits(pte, _PAGE_NEWPROT);
        return pte;
}

static inline pte_t pte_mknewpage(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPAGE);
        return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return pte;
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
        pte_copy(*pteptr, pteval);

        /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
         * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
         * mapped pages.
         */

        *pteptr = pte_mknewpage(*pteptr);
        if (pte_present(*pteptr))
                *pteptr = pte_mknewprot(*pteptr);
}
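
/*
 * Illustrative sketch of the bookkeeping above: after
 *
 *   set_pte(ptep, pteval);
 *
 * the entry always carries _PAGE_NEWPAGE, so the flush code knows the
 * host page must be (un)mapped; if the entry is also present, it carries
 * _PAGE_NEWPROT as well, since only mapped pages can need a protection
 * change on the host.
 */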

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *pteptr, pte_t pteval)
{
        set_pte(pteptr, pteval);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
        return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
}
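
/*
 * Illustrative note: pte_same() masks out _PAGE_NEWPAGE, so two entries
 * that differ only in this pending-flush bookkeeping bit still compare
 * equal; the software bit alone should not make generic code conclude
 * that the PTE has changed.
 */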

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
        ({ pte_t pte;                                   \
                                                        \
        pte_set_val(pte, page_to_phys(page), (pgprot)); \
        if (pte_present(pte))                           \
                pte = pte_mknewprot(pte_mknewpage(pte)); \
        pte;})

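/*
 * Usage sketch (illustrative):
 *
 *   pte_t pte = mk_pte(page, PAGE_KERNEL);
 *   set_pte_at(mm, addr, ptep, pte);
 *
 * builds an entry pointing at 'page' with kernel protections and installs
 * it, leaving _PAGE_NEWPAGE/_PAGE_NEWPROT set for the host-side update.
 */
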
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
        return pte;
}

/*
 * The pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * This macro returns the index of the entry in the pgd page which would
 * control the given virtual address.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's.
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * A shortcut which implies the use of the kernel's pgd, instead
 * of a process's.
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

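/*
 * Usage sketch (illustrative): the kernel PGD entry covering an address
 * is simply
 *
 *   pgd_t *pgd = pgd_offset_k(addr);   == init_mm.pgd + pgd_index(addr)
 */
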
/*
 * The pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This macro returns the index of the entry in the pmd page which would
 * control the given virtual address.
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * This macro returns the index of the entry in the pte page which would
 * control the given virtual address.
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
        ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
        ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte) do { } while (0)

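/*
 * Usage sketch (illustrative): a software walk down to the PTE, assuming
 * all intermediate levels are present (level-folding details vary by
 * kernel version):
 *
 *   pgd_t *pgd = pgd_offset(mm, addr);
 *   pud_t *pud = pud_offset(pgd, addr);
 *   pmd_t *pmd = pmd_offset(pud, addr);
 *   pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * virt_to_pte() below performs a walk of this kind.
 */
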
struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma,address,ptep) do ; while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)                   (((x).val >> 5) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 11)

#define __swp_entry(type, offset) \
        ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
        ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })

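/*
 * Worked example (illustrative): __swp_entry(3, 0x1234) packs the swap
 * type into bits 5..9 and the offset from bit 11 upwards:
 *
 *   (3 << 5) | (0x1234 << 11) = 0x60 | 0x91a000 = 0x91a060
 *
 * Bit 0 (_PAGE_PRESENT) is never set by this encoding, so a swap entry
 * cannot be mistaken for a present PTE.
 */
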
#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)           \
do {                                            \
        pte_clear(&init_mm, (vaddr), (ptep));   \
        __flush_tlb_one((vaddr));               \
} while (0)

#endif /* __UM_PGTABLE_H */
