TOMOYO Linux Cross Reference
Linux/arch/mips/include/asm/pgtable.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
#include <asm/cpu-features.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_SHARED     vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED)

#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
                                 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC  __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
                                 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
                        __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, so we consider it to be the same as read. Also, write
 * permission implies read permission. This is the closest we can get
 * by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

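/*
 * Editorial note (hedged): these entries are dummies because the MIPS
 * cacheability bits are only known at runtime; in this kernel era the
 * real protection_map values are filled in at boot, based on
 * _page_cachable_default, by setup_protection_map() in
 * arch/mips/mm/cache.c.
 */
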
extern unsigned long _page_cachable_default;
extern void __update_cache(unsigned long address, pte_t pte);

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
        (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

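/*
 * Editorial note (hedged): with aliasing data caches there is not one
 * zero page but a small set of them, one per cache color;
 * zero_page_mask makes ZERO_PAGE(vaddr) pick the copy whose color
 * matches vaddr, which is what __HAVE_COLOR_ZERO_PAGE advertises to
 * generic code.
 */
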
extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)           virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)         (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)           __pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)     pmd_val(pmd)

#define htw_stop()                                                      \
do {                                                                    \
        unsigned long __flags;                                          \
                                                                        \
        if (cpu_has_htw) {                                              \
                local_irq_save(__flags);                                \
                if (!raw_current_cpu_data.htw_seq++) {                  \
                        write_c0_pwctl(read_c0_pwctl() &                \
                                       ~(1 << MIPS_PWCTL_PWEN_SHIFT));  \
                        back_to_back_c0_hazard();                       \
                }                                                       \
                local_irq_restore(__flags);                             \
        }                                                               \
} while (0)

#define htw_start()                                                     \
do {                                                                    \
        unsigned long __flags;                                          \
                                                                        \
        if (cpu_has_htw) {                                              \
                local_irq_save(__flags);                                \
                if (!--raw_current_cpu_data.htw_seq) {                  \
                        write_c0_pwctl(read_c0_pwctl() |                \
                                       (1 << MIPS_PWCTL_PWEN_SHIFT));   \
                        back_to_back_c0_hazard();                       \
                }                                                       \
                local_irq_restore(__flags);                             \
        }                                                               \
} while (0)

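/*
 * Editorial note (hedged): htw_stop()/htw_start() nest. The per-CPU
 * htw_seq counter means only the outermost htw_stop() disables the
 * hardware page table walker and only the matching outermost
 * htw_start() re-enables it, e.g.:
 *
 *   htw_stop();
 *   ... modify page table entries ...
 *   htw_start();
 */
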
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval);

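/*
 * Editorial note (hedged): a MIPS TLB entry maps an even/odd pair of
 * virtual pages (EntryLo0/EntryLo1) but has a single global (G) bit,
 * effectively the AND of the two EntryLo G bits, so both PTEs of a
 * pair ("buddies") must agree on _PAGE_GLOBAL. That is why the
 * set_pte()/pte_clear() implementations below propagate _PAGE_GLOBAL
 * to ptep_buddy(ptep).
 */
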
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)          (!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)          (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)        ((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)        ((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        ptep->pte_high = pte.pte_high;
        smp_wmb();
        ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
        if (pte.pte_high & _PAGE_GLOBAL) {
#else
        if (pte.pte_low & _PAGE_GLOBAL) {
#endif
                pte_t *buddy = ptep_buddy(ptep);
                /*
                 * Make sure the buddy is global too (if it's !none,
                 * it better already be global)
                 */
                if (pte_none(*buddy)) {
                        if (!IS_ENABLED(CONFIG_XPA))
                                buddy->pte_low |= _PAGE_GLOBAL;
                        buddy->pte_high |= _PAGE_GLOBAL;
                }
        }
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t null = __pte(0);

        htw_stop();
        /* Preserve global status for the pair */
        if (IS_ENABLED(CONFIG_XPA)) {
                if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
                        null.pte_high = _PAGE_GLOBAL;
        } else {
                if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
                        null.pte_low = null.pte_high = _PAGE_GLOBAL;
        }

        set_pte_at(mm, addr, ptep, null);
        htw_start();
}
#else

#define pte_none(pte)           (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)        (pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)        (pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        *ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
        if (pte_val(pteval) & _PAGE_GLOBAL) {
                pte_t *buddy = ptep_buddy(ptep);
                /*
                 * Make sure the buddy is global too (if it's !none,
                 * it better already be global)
                 */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
                cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
                cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
        }
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        htw_stop();
#if !defined(CONFIG_CPU_R3K_TLB)
        /* Preserve global status for the pair */
        if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
                set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
        else
#endif
                set_pte_at(mm, addr, ptep, __pte(0));
        htw_start();
}
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        if (!pte_present(pteval))
                goto cache_sync_done;

        if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
                goto cache_sync_done;

        __update_cache(addr, pteval);
cache_sync_done:
        set_pte(ptep, pteval);
}

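/*
 * Editorial note (hedged): set_pte_at() is the hook generic mm code
 * uses to install a PTE. It synchronises the caches via
 * __update_cache() only when the new PTE is present and is not merely
 * re-installing the same pfn; tear-downs and pfn-preserving updates
 * skip the cache sync and go straight to set_pte().
 */
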
/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2      (__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2      (__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2      (__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with its size, but gcc 3.3 and older
 * could not determine that the size expression is a constant, so the
 * size was dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * Platform specific pte_special() and pte_mkspecial() definitions
 * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
 */
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_special(pte_t pte)
{
        return pte.pte_low & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        pte.pte_low |= _PAGE_SPECIAL;
        return pte;
}
#else
static inline int pte_special(pte_t pte)
{
        return pte_val(pte) & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        pte_val(pte) |= _PAGE_SPECIAL;
        return pte;
}
#endif
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
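/*
 * Editorial note (hedged): MIPS has no hardware-managed accessed or
 * dirty bits; _PAGE_ACCESSED/_PAGE_MODIFIED are pure software state,
 * while _PAGE_SILENT_READ/_PAGE_SILENT_WRITE shadow the hardware
 * valid (V) and dirty (D) TLB bits. The helpers below therefore only
 * set a hardware bit once the matching software state allows it,
 * e.g. pte_mkdirty() sets _PAGE_SILENT_WRITE only if the page is
 * writable, so a later write through a clean mapping still faults.
 */
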
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)  { return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)  { return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)  { return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte.pte_low  &= ~_PAGE_WRITE;
        if (!IS_ENABLED(CONFIG_XPA))
                pte.pte_low &= ~_PAGE_SILENT_WRITE;
        pte.pte_high &= ~_PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte.pte_low  &= ~_PAGE_MODIFIED;
        if (!IS_ENABLED(CONFIG_XPA))
                pte.pte_low &= ~_PAGE_SILENT_WRITE;
        pte.pte_high &= ~_PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte.pte_low  &= ~_PAGE_ACCESSED;
        if (!IS_ENABLED(CONFIG_XPA))
                pte.pte_low &= ~_PAGE_SILENT_READ;
        pte.pte_high &= ~_PAGE_SILENT_READ;
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte.pte_low |= _PAGE_WRITE;
        if (pte.pte_low & _PAGE_MODIFIED) {
                if (!IS_ENABLED(CONFIG_XPA))
                        pte.pte_low |= _PAGE_SILENT_WRITE;
                pte.pte_high |= _PAGE_SILENT_WRITE;
        }
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte.pte_low |= _PAGE_MODIFIED;
        if (pte.pte_low & _PAGE_WRITE) {
                if (!IS_ENABLED(CONFIG_XPA))
                        pte.pte_low |= _PAGE_SILENT_WRITE;
                pte.pte_high |= _PAGE_SILENT_WRITE;
        }
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte.pte_low |= _PAGE_ACCESSED;
        if (!(pte.pte_low & _PAGE_NO_READ)) {
                if (!IS_ENABLED(CONFIG_XPA))
                        pte.pte_low |= _PAGE_SILENT_READ;
                pte.pte_high |= _PAGE_SILENT_READ;
        }
        return pte;
}
#else
static inline int pte_write(pte_t pte)  { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)  { return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)  { return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        if (pte_val(pte) & _PAGE_MODIFIED)
                pte_val(pte) |= _PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
        if (pte_val(pte) & _PAGE_WRITE)
                pte_val(pte) |= _PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_ACCESSED;
        if (!(pte_val(pte) & _PAGE_NO_READ))
                pte_val(pte) |= _PAGE_SILENT_READ;
        return pte;
}

#define pte_sw_mkyoung  pte_mkyoung

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)   { return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
        pte_val(pte) |= _PAGE_HUGE;
        return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
        return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_SOFT_DIRTY;
        return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
        return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#endif

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

        return __pgprot(prot);
}

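/*
 * Usage sketch (editorial, hedged): a driver mmap() handler would
 * typically apply one of these helpers to the vma's protection before
 * establishing the mapping, e.g.:
 *
 *   vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *   return remap_pfn_range(vma, vma->vm_start, pfn,
 *                          vma->vm_end - vma->vm_start,
 *                          vma->vm_page_prot);
 */
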
#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        /* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
        prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

        return __pgprot(prot);
}

static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
                                                unsigned long address)
{
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
        return pte_val(pte_a) == pte_val(pte_b);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long address, pte_t *ptep,
                                        pte_t entry, int dirty)
{
        if (!pte_same(*ptep, entry))
                set_pte_at(vma->vm_mm, address, ptep, entry);
        /*
         * update_mmu_cache will unconditionally execute, handling both
         * the case that the PTE changed and the spurious fault case.
         */
        return true;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
        pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
        pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
        pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
        return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte.pte_low  &= _PAGE_CHG_MASK;
        pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
        pte.pte_low  |= pgprot_val(newprot);
        pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
        return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) &= _PAGE_CHG_MASK;
        pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
        if ((pte_val(pte) & _PAGE_ACCESSED) && !(pte_val(pte) & _PAGE_NO_READ))
                pte_val(pte) |= _PAGE_SILENT_READ;
        return pte;
}
#endif

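/*
 * Editorial note (hedged): pte_modify() is what generic code (e.g. the
 * mprotect() path) uses to change the protection of an existing
 * mapping: _PAGE_CHG_MASK selects the bits that must survive the
 * change (notably the pfn), while the remaining bits are replaced from
 * newprot, e.g.:
 *
 *   pte = pte_modify(pte, vma->vm_page_prot);
 */
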
extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
        pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
        unsigned long address, pte_t *ptep)
{
        pte_t pte = *ptep;
        __update_tlb(vma, address, pte);
}

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb  update_mmu_cache

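/*
 * Editorial note (hedged): because MIPS handles TLB refills in
 * software, update_mmu_cache() is called after a fault has been
 * resolved to push the new translation into the TLB via __update_tlb(),
 * so the faulting instruction does not immediately fault again when it
 * is retried.
 */
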
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
        unsigned long address, pmd_t *pmdp)
{
        pte_t pte = *(pte_t *)pmdp;

        __update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)   (1)

/*
 * Allow physical addresses to be fixed up to help 36-bit peripherals.
 */
#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
                unsigned long pfn, unsigned long size, pgprot_t prot);
#define io_remap_pfn_range io_remap_pfn_range
#else
#define fixup_bigphys_addr(addr, size)  (addr)
#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

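/*
 * Editorial note (hedged): has_transparent_hugepage() is a function
 * rather than a constant because huge-page support depends on the
 * running CPU; in this kernel era the MIPS implementation probes
 * whether the TLB accepts the required page mask (see its definition
 * in arch/mips/mm/tlb-r4k.c).
 */
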
static inline int pmd_trans_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_HUGE;

        return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                       pmd_t *pmdp, pmd_t pmd);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
        return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_WRITE;
        if (pmd_val(pmd) & _PAGE_MODIFIED)
                pmd_val(pmd) |= _PAGE_SILENT_WRITE;

        return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
        return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
        if (pmd_val(pmd) & _PAGE_WRITE)
                pmd_val(pmd) |= _PAGE_SILENT_WRITE;

        return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);

        return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_ACCESSED;

        if (!(pmd_val(pmd) & _PAGE_NO_READ))
                pmd_val(pmd) |= _PAGE_SILENT_READ;

        return pmd;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pmd_soft_dirty(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
        return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
        return pmd;
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
        if (pmd_trans_huge(pmd))
                return pfn_to_page(pmd_pfn(pmd));

        return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
                       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
        return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

        return pmd;
}

/*
 * The generic pmdp_huge_get_and_clear() uses a version of pmd_clear()
 * with a different prototype, so we provide our own implementation.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long address, pmd_t *pmdp)
{
        pmd_t old = *pmdp;

        pmd_clear(pmdp);

        return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef _PAGE_HUGE
#define pmd_leaf(pmd)   ((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)   ((pud_val(pud) & _PAGE_HUGE) != 0)
#endif

#define gup_fast_permitted(start, end)  (!cpu_has_dc_aliases)

/*
 * We provide our own get_unmapped_area() to cope with the virtual
 * aliasing constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */
