/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#include <asm/book3s/32/hash.h>

/* And here we include common definitions */

#define _PAGE_KERNEL_RO         0
#define _PAGE_KERNEL_ROX        (_PAGE_EXEC)
#define _PAGE_KERNEL_RW         (_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX        (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

#define _PAGE_HPTEFLAGS _PAGE_HASHPTE

#ifndef __ASSEMBLY__

static inline bool pte_user(pte_t pte)
{
        return pte_val(pte) & _PAGE_USER;
}
#endif /* __ASSEMBLY__ */

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * value as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't simply pre-define the value, so we don't
 * override it here.
 */
#define PTE_RPN_SHIFT   (PAGE_SHIFT)

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK    (~((1ULL << PTE_RPN_SHIFT) - 1))
#else
#define PTE_RPN_MASK    (~((1UL << PTE_RPN_SHIFT) - 1))
#endif
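
/*
 * Worked example (illustrative, not part of the original source):
 * with the usual 4kB pages, PAGE_SHIFT is 12, so on a 32-bit PTE
 * configuration
 *
 *	PTE_RPN_MASK = ~((1UL << 12) - 1) = 0xfffff000
 *
 * i.e. the upper 20 bits of the PTE hold the physical page number and
 * the low 12 bits are left for the protection/status flags.
 */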

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK  (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
                         _PAGE_ACCESSED | _PAGE_SPECIAL)
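
/*
 * Illustrative sketch (not from the original source) of what this
 * preserves: pte_modify(), defined later in this file, keeps exactly
 * the _PAGE_CHG_MASK bits (PFN, HASHPTE, DIRTY, ACCESSED, SPECIAL)
 * while replacing the protection bits:
 *
 *	pte_t pte = pte_mkdirty(pfn_pte(pfn, PAGE_SHARED));
 *	pte = pte_modify(pte, PAGE_READONLY);
 *	// the PFN and _PAGE_DIRTY survive, _PAGE_RW is dropped
 */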

/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non-cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC   (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE      (_PAGE_BASE_NC | _PAGE_COHERENT)

/*
 * Permission masks used to generate the __P and __S table.
 *
 * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
 *
 * Write permissions imply read permissions for now.
 */
#define PAGE_NONE       __pgprot(_PAGE_BASE)
#define PAGE_SHARED     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X   __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY       __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL     __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC  __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
                                 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X   __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO  __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write-protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
        defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT        PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT        PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC        PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP                (PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#define PTE_INDEX_SIZE  PTE_SHIFT
#define PMD_INDEX_SIZE  0
#define PUD_INDEX_SIZE  0
#define PGD_INDEX_SIZE  (32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#define PUD_CACHE_INDEX PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE  0
#define PUD_TABLE_SIZE  0
#define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif  /* __ASSEMBLY__ */

#define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)
/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
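
/*
 * Worked example (illustrative): for the common 4kB-page, 32-bit PTE
 * configuration, PTE_INDEX_SIZE (PTE_SHIFT) is 10, so
 *
 *	PGDIR_SHIFT       = 12 + 10 = 22
 *	PGDIR_SIZE        = 1 << 22 = 4MB mapped per PGD entry
 *	PTRS_PER_PTE      = 1 << 10 = 1024 PTEs per page table page
 *	PTRS_PER_PGD      = 1 << (32 - 22) = 1024 PGD entries
 *	USER_PTRS_PER_PGD = TASK_SIZE / 4MB, e.g. 768 with a 3GB TASK_SIZE
 */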

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);

#endif /* !__ASSEMBLY__ */

/*
 * This is the bottom of the PKMAP area with HIGHMEM, or an arbitrary
 * value (for now) on other configurations: the point from which we can
 * start laying out the kernel virtual space that goes below PKMAP and
 * FIXMAP.
 */
#include <asm/fixmap.h>

#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP       PKMAP_BASE
#else
#define KVIRT_TOP       FIXADDR_START
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP     ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP     KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */

/*
 * With CONFIG_STRICT_KERNEL_RWX, kernel segments are set NX. But when modules
 * are used, NX cannot be set on VMALLOC space. So vmalloc VM space and linear
 * memory shall not share segments.
 */
#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_MODULES)
#define VMALLOC_START ((_ALIGN((long)high_memory, 256L << 20) + VMALLOC_OFFSET) & \
                       ~(VMALLOC_OFFSET - 1))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END     ioremap_bot

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>

extern unsigned long ioremap_bot;

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS         0

#define pte_ERROR(e) \
        pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
                (unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
        do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_bad(pmd)            (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)        (pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
        *pmdp = __pmd(0);
}

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
                            unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
                          unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
                             unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
                                       unsigned long clr,
                                       unsigned long set)
{
        unsigned long old, tmp;

        __asm__ __volatile__("\
1:      lwarx   %0,0,%3\n\
        andc    %1,%0,%4\n\
        or      %1,%1,%5\n"
"       stwcx.  %1,0,%3\n\
        bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
        : "r" (p), "r" (clr), "r" (set), "m" (*p)
        : "cc" );

        return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
                                            unsigned long clr,
                                            unsigned long set)
{
        unsigned long long old;
        unsigned long tmp;

        __asm__ __volatile__("\
1:      lwarx   %L0,0,%4\n\
        lwzx    %0,0,%3\n\
        andc    %1,%L0,%5\n\
        or      %1,%1,%6\n"
"       stwcx.  %1,0,%4\n\
        bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
        : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
        : "cc" );

        return old;
}
#endif /* CONFIG_PTE_64BIT */
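
/*
 * Roughly, the 32-bit assembly above implements the following C logic
 * (an illustrative sketch only: the real code relies on the
 * lwarx/stwcx. reservation pair so that a concurrent invalidation
 * clearing _PAGE_HASHPTE cannot be lost between the load and the
 * store):
 *
 *	do {
 *		old = *p;			// lwarx: load and reserve
 *		new = (old & ~clr) | set;	// andc + or
 *	} while (!store_conditional(p, new));	// stwcx.: retry if lost
 *	return old;
 */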

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
        unsigned long old;
        old = pte_update(ptep, _PAGE_ACCESSED, 0);
        if (old & _PAGE_HASHPTE) {
                unsigned long ptephys = __pa(ptep) & PAGE_MASK;
                flush_hash_pages(context, addr, ptephys, 1);
        }
        return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
        __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
{
        pte_update(ptep, _PAGE_RW, 0);
}

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
                                           pte_t *ptep, pte_t entry,
                                           unsigned long address,
                                           int psize)
{
        unsigned long set = pte_val(entry) &
                (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

        pte_update(ptep, 0, set);

        flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

#define pmd_page_vaddr(pmd)     \
        ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd)           \
        pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)       ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)  ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)              \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)    \
        ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)               \
        ((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
                   (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte)          kunmap_atomic(pte)

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)               ((entry).val & 0x1f)
#define __swp_offset(entry)             ((entry).val >> 5)
#define __swp_entry(type, offset)       ((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val << 3 })
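
/*
 * Worked example (illustrative, not from the original source): the
 * swap PTE layout keeps the three low-order flag bits clear (which is
 * where _PAGE_PRESENT and _PAGE_HASHPTE live on this platform), the
 * 5-bit swap type lands in PTE bits 3-7 and the offset in the bits
 * above. For instance:
 *
 *	__swp_entry(1, 0x20).val      = 1 | (0x20 << 5) = 0x401
 *	__swp_entry_to_pte(...) value = 0x401 << 3      = 0x2008
 */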

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)          { return !!(pte_val(pte) & _PAGE_RW); }
static inline int pte_read(pte_t pte)           { return 1; }
static inline int pte_dirty(pte_t pte)          { return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)          { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)        { return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)           { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_exec(pte_t pte)          { return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hashpte(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_HASHPTE);
}

static inline bool pte_ci(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_NO_CACHE);
}

/*
 * We only find the page table entry in the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
        /*
         * A read-only access is controlled by the _PAGE_USER bit.
         * We have _PAGE_READ set for WRITE and EXECUTE
         */
        if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
                return false;

        if (write && !pte_write(pte))
                return false;

        return true;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
        return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
                     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return pte_val(pte) >> PTE_RPN_SHIFT;
}

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_exprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_EXEC);
}

static inline pte_t pte_mkpte(pte_t pte)
{
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte;
}

static inline pte_t pte_mkprivileged(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_USER);
}

static inline pte_t pte_mkuser(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_USER);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
        /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
         * helper pte_update() which does an atomic update. We need to do that
         * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
         * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
         * the hash bits instead (ie, same as the non-SMP case)
         */
        if (percpu)
                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                              | (pte_val(pte) & ~_PAGE_HASHPTE));
        else
                pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PTE_64BIT)
        /* Second case is 32-bit with 64-bit PTE.  In this case, we
         * can just store as long as we do the two halves in the right order
         * with a barrier in between. This is possible because we take care,
         * in the hash code, to pre-invalidate if the PTE was already hashed,
         * which synchronizes us with any concurrent invalidation.
         * In the percpu case, we also fall back to the simple update preserving
         * the hash bits
         */
        if (percpu) {
                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                              | (pte_val(pte) & ~_PAGE_HASHPTE));
                return;
        }
        if (pte_val(*ptep) & _PAGE_HASHPTE)
                flush_hash_entry(mm, ptep, addr);
        __asm__ __volatile__("\
                stw%U0%X0 %2,%0\n\
                eieio\n\
                stw%U0%X0 %L2,%1"
        : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
        : "r" (pte) : "memory");

#else
        /* Third case is 32-bit hash table in UP mode, we need to preserve
         * the _PAGE_HASHPTE bit since we may not have invalidated the previous
         * translation in the hash yet (done in a subsequent flush_tlb_xxx())
         * and we need to keep track that this PTE needs invalidating
         */
        *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                      | (pte_val(pte) & ~_PAGE_HASHPTE));
#endif
}
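
/*
 * Illustrative sketch (an assumption about the CONFIG_PTE_64BIT path
 * above, on big-endian 32-bit): the two word-sized stores with the
 * eieio barrier in between amount to roughly
 *
 *	((u32 *)ptep)[0] = pte_val(pte) >> 32;	// high word first
 *	eieio();
 *	((u32 *)ptep)[1] = (u32)pte_val(pte);	// low word, carrying the
 *						// flag/valid bits, last
 *
 * so a concurrent observer never sees a PTE whose valid bits are set
 * while the other half is still stale.
 */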

/*
 * Helpers to adjust the caching attributes of a page protection value
 * (e.g. to mark it "uncacheable").
 */

#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
                         _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
        return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
        return pgprot_noncached_wc(prot);
}
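
/*
 * Usage sketch (illustrative, not from the original source): a driver
 * that wants an uncached, guarded mapping of device registers could
 * combine one of the helpers above with map_kernel_page(), declared
 * earlier in this file. The names vaddr and reg_pa are hypothetical:
 *
 *	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
 *	int rc = map_kernel_page(vaddr, reg_pa, prot);
 *
 * pgprot_noncached() clears all cache-control bits and sets
 * _PAGE_NO_CACHE | _PAGE_GUARDED, so accesses bypass the cache and
 * are not speculated.
 */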

#endif /* !__ASSEMBLY__ */

#endif /*  _ASM_POWERPC_BOOK3S_32_PGTABLE_H */