TOMOYO Linux Cross Reference
Linux/arch/powerpc/include/asm/pgtable.h

#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <asm/processor.h>              /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
#  include <asm/pgtable-ppc64.h>
#else
#  include <asm/pgtable-ppc32.h>
#endif

/*
 * We save the slot number & secondary bit in the second half of the
 * PTE page. We use 8 bytes per PTE entry.
 */
#define PTE_PAGE_HIDX_OFFSET (PTRS_PER_PTE * 8)

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)          { return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)           { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)        { return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_present(pte_t pte)        { return pte_val(pte) & _PAGE_PRESENT; }
static inline int pte_none(pte_t pte)           { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)    { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
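
/*
 * Illustrative sketch, not part of the original header: generic code can
 * combine these accessors to test PTE state. pte_needs_cow() below is a
 * hypothetical helper, compiled out.
 */
#if 0
static inline int pte_needs_cow(pte_t pte)
{
	/* a present but read-only PTE is a copy-on-write candidate */
	return pte_present(pte) && !pte_write(pte);
}
#endif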

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
        return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
                     pgprot_val(pgprot)); }
static inline unsigned long pte_pfn(pte_t pte)  {
        return pte_val(pte) >> PTE_RPN_SHIFT; }

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)             pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))
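
/*
 * Illustrative sketch, not part of the original header: the PFN survives
 * a mk_pte()/pte_pfn() round trip because the protection bits live below
 * PTE_RPN_SHIFT. Compiled out; PAGE_KERNEL is only used as an example.
 */
#if 0
static inline void pte_pfn_roundtrip_check(struct page *page)
{
	pte_t pte = mk_pte(page, PAGE_KERNEL);

	/* holds by construction of pfn_pte() above */
	BUG_ON(pte_pfn(pte) != page_to_pfn(page));
}
#endif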

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte) {
        pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
        pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
        pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
        pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
        pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
        pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
        pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
        return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
        return pte;
}
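
/*
 * Illustrative sketch, not part of the original header: the modifiers
 * chain naturally, e.g. when preparing a PTE for reclaim-style scanning.
 * make_old_clean_example() is a hypothetical helper, compiled out.
 */
#if 0
static inline pte_t make_old_clean_example(pte_t pte)
{
	/* drop write permission and clear the dirty/accessed tracking bits */
	return pte_mkold(pte_mkclean(pte_wrprotect(pte)));
}
#endif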


/* Insert a PTE: the top-level function is out of line. It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                       pte_t pte);

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
        /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
         * helper pte_update() which does an atomic update. We need to do that
         * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
         * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
         * the hash bits instead (ie, same as the non-SMP case)
         */
        if (percpu)
                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                              | (pte_val(pte) & ~_PAGE_HASHPTE));
        else
                pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
        /* Second case is 32-bit with 64-bit PTE.  In this case, we
         * can just store as long as we do the two halves in the right order
         * with a barrier in between. This is possible because we take care,
         * in the hash code, to pre-invalidate if the PTE was already hashed,
         * which synchronizes us with any concurrent invalidation.
         * In the percpu case, we also fall back to the simple update preserving
         * the hash bits
         */
        if (percpu) {
                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                              | (pte_val(pte) & ~_PAGE_HASHPTE));
                return;
        }
#if _PAGE_HASHPTE != 0
        if (pte_val(*ptep) & _PAGE_HASHPTE)
                flush_hash_entry(mm, ptep, addr);
#endif
        __asm__ __volatile__("\
                stw%U0%X0 %2,%0\n\
                eieio\n\
                stw%U0%X0 %L2,%1"
        : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
        : "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
        /* Third case is 32-bit hash table in UP mode, we need to preserve
         * the _PAGE_HASHPTE bit since we may not have invalidated the previous
         * translation in the hash yet (done in a subsequent flush_tlb_xxx())
         * so we need to keep track that this PTE needs invalidating
         */
        *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
        /* Anything else just stores the PTE normally. That covers all 64-bit
         * cases, and 32-bit non-hash with 32-bit PTEs.
         */
        *ptep = pte;
#endif
}
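
/*
 * Illustrative sketch, not part of the original header: the out-of-line
 * set_pte_at() in arch/powerpc/mm is expected to funnel into
 * __set_pte_at() with percpu == 0, roughly as below. Compiled out; the
 * real body may add sanity checks and differs between kernel versions.
 */
#if 0
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte)
{
	/* a non-percpu insertion goes through the full update path */
	__set_pte_at(mm, addr, ptep, pte, 0);
}
#endif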


#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                                 pte_t *ptep, pte_t entry, int dirty);
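
/*
 * Illustrative sketch, not part of the original header: generic mm code
 * uses ptep_set_access_flags() when a fault only needs to set the
 * dirty/accessed bits on an already-present PTE. The helper below is
 * hypothetical and compiled out.
 */
#if 0
static void mark_dirty_young_example(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pte_t entry = pte_mkdirty(pte_mkyoung(*ptep));

	/* a non-zero return means the PTE changed and needs propagating */
	if (ptep_set_access_flags(vma, address, ptep, entry, 1))
		update_mmu_cache(vma, address, ptep);
}
#endif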

/*
 * Macros to mark a page protection value as uncacheable, cacheable,
 * write-through, etc.
 */

#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
                         _PAGE_WRITETHRU)

#define pgprot_noncached(prot)    (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_NO_CACHE))

#define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_COHERENT | _PAGE_WRITETHRU))

#define pgprot_cached_noncoherent(prot) \
                (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
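
/*
 * Illustrative sketch, not part of the original header: a driver mmap()
 * method would typically use pgprot_noncached() before remapping device
 * registers. my_dev_mmap() and dev_pfn are hypothetical; compiled out.
 */
#if 0
static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long dev_pfn = 0;	/* PFN of the device MMIO, hypothetical */

	/* replace any cache-control bits with NO_CACHE | GUARDED */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, dev_pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
#endif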

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 */
#define kern_addr_valid(addr)   (1)

#include <asm-generic/pgtable.h>


/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
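
/*
 * Illustrative sketch, not part of the original header: the generic
 * fault path is expected to call update_mmu_cache() right after the new
 * PTE is installed, roughly as below (hypothetical helper, compiled out).
 */
#if 0
static void finish_fault_example(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry)
{
	set_pte_at(vma->vm_mm, address, ptep, entry);
	/* on hash MMUs this can pre-fill the HPTE and avoid a hash miss */
	update_mmu_cache(vma, address, ptep);
}
#endif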

extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
                      unsigned long end, int write, struct page **pages, int *nr);

extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                       unsigned long end, int write, struct page **pages, int *nr);
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)          0
#define has_transparent_hugepage() 0
#endif
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
                                 unsigned *shift);
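
/*
 * Illustrative sketch, not part of the original header: walking the
 * Linux page tables for an effective address. lookup_example() is
 * hypothetical and compiled out; real callers must ensure the page
 * tables cannot be freed underneath them.
 */
#if 0
static pte_t lookup_example(struct mm_struct *mm, unsigned long ea)
{
	unsigned shift;
	pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, ea, &shift);

	/* a shift larger than PAGE_SHIFT means ea is under a huge page */
	return ptep ? *ptep : __pte(0);
}
#endif
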
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
