/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *     They are semantically the same although used in different contexts:
 *     VALID marks that a TLB entry exists, which can only happen if PRESENT
 *  -Utilise some unused free bits to confine PTE flags to 12 bits
 *     This is a must for 4k pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *     to avoid a few shifts in TLB Miss handlers.
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise it has
 *   the Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set() become simpler
 *
 * vineetg: April 2010
 *  -Switched from 8:11:13 split for page table lookup to 11:8:13
 *  -This speeds up page table allocation itself as we now have to memset 1K
 *    instead of 8K per page table.
 *  -TODO: Right now page table alloc is 8K and the remaining 7K is unused;
 *    need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <asm/page.h>
#include <asm/mmu.h>
#include <asm-generic/pgtable-nopmd.h>

/**************************************************************************
 * Page Table Flags
 *
 * ARC700 MMU only deals with software-managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have a different value in the TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
 *      separate PD0 and PD1, which combined form a translation entry)
 *      while from the PTE perspective, they are 8 and 9 respectively
 * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
 *      (saves some bit shift ops in TLB Miss hdlrs)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)      /* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)      /* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<3)      /* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<4)      /* Page has user write perm (H) */
#define _PAGE_READ          (1<<5)      /* Page has user read perm (H) */
#define _PAGE_MODIFIED      (1<<6)      /* Page modified (dirty) (S) */
#define _PAGE_FILE          (1<<7)      /* page cache/ swap (S) */
#define _PAGE_GLOBAL        (1<<8)      /* Page is global (H) */
#define _PAGE_PRESENT       (1<<10)     /* TLB entry is valid (H) */

#else   /* MMU v3 onwards */

#define _PAGE_CACHEABLE     (1<<0)      /* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<1)      /* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<2)      /* Page has user write perm (H) */
#define _PAGE_READ          (1<<3)      /* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)      /* Page is accessed (S) */
#define _PAGE_MODIFIED      (1<<5)      /* Page modified (dirty) (S) */
#define _PAGE_FILE          (1<<6)      /* page cache/ swap (S) */
#define _PAGE_GLOBAL        (1<<8)      /* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)      /* TLB entry is valid (H) */
#define _PAGE_SHARED_CODE   (1<<11)     /* Shared Code page with cmn vaddr
                                           usable for shared TLB entries (H) */
#endif
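
/*
 * Rough illustration of how these bits compose (assuming the MMU v3 layout
 * above): a present, cached, user read/write page would carry
 *
 *      _PAGE_PRESENT | _PAGE_CACHEABLE | _PAGE_READ | _PAGE_WRITE
 *        = (1<<9) | (1<<0) | (1<<3) | (1<<2) = 0x20d
 *
 * The same logical permissions under MMU v2 yield a different numeric value
 * (e.g. _PAGE_PRESENT is bit 10 there), which is why the two blocks above
 * exist.
 */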

/* vmalloc permissions */
#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
                        _PAGE_GLOBAL | _PAGE_PRESENT)

#ifdef CONFIG_ARC_CACHE_PAGES
#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
#else
#define _PAGE_DEF_CACHEABLE (0)
#endif

/* Helper for every "user" page
 * -kernel can R/W/X
 * -cached by default, unless configured otherwise
 * -present in memory
 */
#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)

/* More abbreviated helpers */
#define PAGE_U_NONE     __pgprot(___DEF)
#define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R      __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R      __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R    __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
                                                       _PAGE_EXECUTE)

#define PAGE_SHARED     PAGE_U_W_R

/* While kernel runs out of untranslated space, vmalloc/modules use a chunk of
 * user vaddr space - visible in all addr spaces, but kernel mode only
 * Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)

/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0         (_PAGE_GLOBAL | _PAGE_PRESENT)
#define PTE_BITS_RWX            (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)

/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have a 1:1 mapping
 *  e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 *       which directly corresponds to PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping
 *
 *  1. Although ARC700 can do exclusive execute/write protection (meaning R
 *     can be tracked independently of X/W, unlike some other CPUs), still to
 *     keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *     This is to enable the COW mechanism
 */
        /* xwr */
#define __P000  PAGE_U_NONE
#define __P001  PAGE_U_R
#define __P010  PAGE_U_R        /* Pvt-W => !W */
#define __P011  PAGE_U_R        /* Pvt-W => !W */
#define __P100  PAGE_U_X_R      /* X => R */
#define __P101  PAGE_U_X_R
#define __P110  PAGE_U_X_R      /* Pvt-W => !W and X => R */
#define __P111  PAGE_U_X_R      /* Pvt-W => !W */

#define __S000  PAGE_U_NONE
#define __S001  PAGE_U_R
#define __S010  PAGE_U_W_R      /* W => R */
#define __S011  PAGE_U_W_R
#define __S100  PAGE_U_X_R      /* X => R */
#define __S101  PAGE_U_X_R
#define __S110  PAGE_U_X_W_R    /* X => R */
#define __S111  PAGE_U_X_W_R

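/*
 * Worked example of the above (the generic VM picks an entry roughly as
 * protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]):
 * a MAP_PRIVATE mapping asking for PROT_READ|PROT_WRITE selects __P011,
 * i.e. PAGE_U_R - write permission is deliberately withheld so the first
 * store faults and COW can kick in. The equivalent MAP_SHARED mapping
 * selects __S011 = PAGE_U_W_R and is writable right away.
 */
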
/****************************************************************
 * Page Table Lookup split
 *
 * We implement 2 tier paging and since this is all software, we are free
 * to customize the span of a PGD / PTE entry to suit us
 *
 *                      32 bit virtual address
 * -------------------------------------------------------
 * | BITS_FOR_PGD    |  BITS_FOR_PTE    |  BITS_IN_PAGE  |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  |
 *       |                  ---> index into Page Table
 *       |
 *       ----> index into Page Directory
 */

#define BITS_IN_PAGE    PAGE_SHIFT

/* Optimal Sizing of Pg Tbl - based on MMU page size */
#if defined(CONFIG_ARC_PAGE_SIZE_8K)
#define BITS_FOR_PTE    8
#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
#define BITS_FOR_PTE    8
#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
#define BITS_FOR_PTE    9
#endif

#define BITS_FOR_PGD    (32 - BITS_FOR_PTE - BITS_IN_PAGE)

#define PGDIR_SHIFT     (BITS_FOR_PTE + BITS_IN_PAGE)
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)    /* vaddr span, not PGD sz */
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

#ifdef __ASSEMBLY__
#define PTRS_PER_PTE    (1 << BITS_FOR_PTE)
#define PTRS_PER_PGD    (1 << BITS_FOR_PGD)
#else
#define PTRS_PER_PTE    (1UL << BITS_FOR_PTE)
#define PTRS_PER_PGD    (1UL << BITS_FOR_PGD)
#endif
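
/*
 * Worked example for CONFIG_ARC_PAGE_SIZE_8K: BITS_IN_PAGE = 13 and
 * BITS_FOR_PTE = 8, so BITS_FOR_PGD = 32 - 8 - 13 = 11, PGDIR_SHIFT = 21
 * and PGDIR_SIZE = 2MB. A PTE table then has PTRS_PER_PTE = 256 entries
 * (1K of 4-byte PTEs - the "memset 1K" mentioned in the changelog at the
 * top of this file) and the PGD has PTRS_PER_PGD = 2048 entries.
 */
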
/*
 * Number of PGD entries a userland program uses.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)

/*
 * No special requirements for the lowest virtual address we permit any user
 * space mapping to be mapped at.
 */
#define FIRST_USER_ADDRESS      0


/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
        pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
        pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)        (virt_to_page(empty_zero_page))

#define pte_unmap(pte)          do { } while (0)
#define pte_unmap_nested(pte)   do { } while (0)

#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl referenced by the PMD entry */
#define pmd_page(pmd)           virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl referenced by the PMD entry */
#define pmd_page_vaddr(pmd)     (pmd_val(pmd) & PAGE_MASK)

/* In a 2-level system, set up the PGD entry with the PTE table's address */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
        pmd_val(*pmdp) = (unsigned long)ptep;
}

#define pte_none(x)                     (!pte_val(x))
#define pte_present(x)                  (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep)       set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)                     (!pmd_val(x))
#define pmd_bad(x)                      ((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)                  (pmd_val(x))
#define pmd_clear(xp)                   do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(x) (mem_map + \
                (unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
                                PAGE_SHIFT)))

#define mk_pte(page, pgprot)                                            \
({                                                                      \
        pte_t pte;                                                      \
        pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot);   \
        pte;                                                            \
})

/* TBD: Non linear mapping stuff */
static inline int pte_file(pte_t pte)
{
        return pte_val(pte) & _PAGE_FILE;
}

#define PTE_FILE_MAX_BITS       30
#define pgoff_to_pte(x)         __pte(x)
#define pte_to_pgoff(x)         (pte_val(x) >> 2)
#define pte_pfn(pte)            (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)      (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define __pte_index(addr)       (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
 * and returns ptr to PTE entry corresponding to @addr
 */
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
                                         __pte_index(addr))

/* No mapping of Page Tables in high mem etc, so following same as above */
#define pte_offset_kernel(dir, addr)            pte_offset(dir, addr)
#define pte_offset_map(dir, addr)               pte_offset(dir, addr)

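/*
 * Minimal sketch of a full 2-level software walk built from the helpers
 * above (illustrative only; pmd is folded onto pgd via
 * asm-generic/pgtable-nopmd.h, so the PGD entry doubles as the PMD entry,
 * and the resulting pfn is simply printed here):
 *
 *      pgd_t *pgd = pgd_offset(mm, addr);
 *      pmd_t *pmd = (pmd_t *)pgd;
 *      pte_t *pte;
 *
 *      if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *              pte = pte_offset(pmd, addr);
 *              if (pte_present(*pte))
 *                      pr_info("pfn %lx\n", pte_pfn(*pte));
 *      }
 */
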
/* Zoo of pte_xxx functions */
#define pte_read(pte)           (pte_val(pte) & _PAGE_READ)
#define pte_write(pte)          (pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)          (pte_val(pte) & _PAGE_MODIFIED)
#define pte_young(pte)          (pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)        (0)

#define PTE_BIT_FUNC(fn, op) \
        static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,   |= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,   &= ~(_PAGE_MODIFIED));
PTE_BIT_FUNC(mkdirty,   |= (_PAGE_MODIFIED));
PTE_BIT_FUNC(mkold,     &= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,   |= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,    |= (_PAGE_EXECUTE));

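/*
 * For reference, PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE)) above expands
 * to roughly:
 *
 *      static inline pte_t pte_wrprotect(pte_t pte)
 *      {
 *              pte_val(pte) &= ~(_PAGE_WRITE);
 *              return pte;
 *      }
 *
 * i.e. each helper edits a copy of the pte and returns it; callers
 * typically write the result back (e.g. via set_pte_at) for it to stick.
 */
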
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)  (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        set_pte(ptep, pteval);
}

/*
 * All kernel related VM pages are in init's mm.
 */
#define pgd_offset_k(address)   pgd_offset(&init_mm, address)
#define pgd_index(addr)         ((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)    (((mm)->pgd)+pgd_index(addr))

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to the Page Directory of the "current" task
 * in an MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
 * becomes reading a register
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with the mm_struct of a task other than
 * "current". Thus use this macro only when you are certain that "current"
 * is current, e.g. when dealing with signal frame setup code etc
 */
#ifndef CONFIG_SMP
#define pgd_offset_fast(mm, addr)       \
({                                      \
        pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
        pgd_base + pgd_index(addr);     \
})
#else
#define pgd_offset_fast(mm, addr)       pgd_offset(mm, addr)
#endif

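/*
 * Example of the caveat above: code running in the context of "current"
 * (e.g. signal frame setup) may use pgd_offset_fast(current->mm, addr),
 * whereas anything operating on another task's mm (ptrace-style access,
 * for instance) must stick to the generic pgd_offset(mm, addr).
 */
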
extern void paging_init(void);
extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *ptep);

/* Encode swap {type,off} tuple into PTE
 * We reserve 13 bits for the 5-bit @type, keeping bits 12-5 zero, ensuring that
 * both PAGE_FILE and PAGE_PRESENT are zero in a PTE holding the swap "identifier"
 */
#define __swp_entry(type, off)  ((swp_entry_t) { \
                                        ((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing the swap "identifier" into constituents */
#define __swp_type(pte_lookalike)       (((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)     ((pte_lookalike).val >> 13)

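/*
 * Worked example of the encoding above: __swp_entry(2, 0x100) yields
 * (2 & 0x1f) | (0x100 << 13) = 0x200002; decoding gives
 * __swp_type() = 0x200002 & 0x1f = 2 and
 * __swp_offset() = 0x200002 >> 13 = 0x100. Bits 5..12 stay zero, so
 * _PAGE_PRESENT (and _PAGE_FILE) are clear in such a PTE.
 */
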
/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })

#define kern_addr_valid(addr)   (1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#include <asm-generic/pgtable.h>

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()   do { } while (0)

#endif /* __ASSEMBLY__ */

#endif