TOMOYO Linux Cross Reference
Linux/arch/powerpc/include/asm/page_64.h

#ifndef _ASM_POWERPC_PAGE_64_H
#define _ASM_POWERPC_PAGE_64_H

/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
 * specific: every notion of page number shared with the firmware, TCEs,
 * iommu, etc. still uses a page size of 4K.
 */
#define HW_PAGE_SHIFT           12
#define HW_PAGE_SIZE            (ASM_CONST(1) << HW_PAGE_SHIFT)
#define HW_PAGE_MASK            (~(HW_PAGE_SIZE-1))

/*
 * PAGE_FACTOR is the number of bits of difference between PAGE_SHIFT and
 * HW_PAGE_SHIFT, i.e. the shift needed to convert between Linux page
 * numbers and 4K hardware page numbers.
 */
#define PAGE_FACTOR             (PAGE_SHIFT - HW_PAGE_SHIFT)
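
The two page sizes above are easiest to see with concrete numbers. Below is a stand-alone sketch (illustration only, not part of the header; the EX_* names and the 64K Linux page size are assumptions) of how PAGE_FACTOR relates a Linux page frame number to the 4K hardware page numbers that the firmware, TCEs and the iommu expect.

#include <stdio.h>

#define EX_HW_PAGE_SHIFT 12                      /* hardware/firmware page: 4K */
#define EX_PAGE_SHIFT    16                      /* assumed Linux page: 64K */
#define EX_PAGE_FACTOR   (EX_PAGE_SHIFT - EX_HW_PAGE_SHIFT)

int main(void)
{
        unsigned long linux_pfn = 0x1234;        /* a made-up Linux (64K) page frame number */

        /* One Linux page spans 1 << EX_PAGE_FACTOR hardware pages. */
        unsigned long first_hw_pfn = linux_pfn << EX_PAGE_FACTOR;
        unsigned long hw_pages_per_page = 1UL << EX_PAGE_FACTOR;

        printf("Linux pfn 0x%lx -> HW pfns 0x%lx..0x%lx (%lu HW pages per Linux page)\n",
               linux_pfn, first_hw_pfn, first_hw_pfn + hw_pages_per_page - 1,
               hw_pages_per_page);
        return 0;
}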

/* Segment size; normal 256M segments */
#define SID_SHIFT               28
#define SID_MASK                ASM_CONST(0xfffffffff)
#define ESID_MASK               0xfffffffff0000000UL
#define GET_ESID(x)             (((x) >> SID_SHIFT) & SID_MASK)

/* 1T segments */
#define SID_SHIFT_1T            40
#define SID_MASK_1T             0xffffffUL
#define ESID_MASK_1T            0xffffff0000000000UL
#define GET_ESID_1T(x)          (((x) >> SID_SHIFT_1T) & SID_MASK_1T)
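
GET_ESID() and GET_ESID_1T() simply discard the offset within a segment (the low 28 or 40 bits of the effective address) and mask the result to the ESID width. A stand-alone sketch of that arithmetic (illustration only; the EX_* mirrors and the sample address are made up):

#include <stdio.h>

/* User-space mirrors of the segment macros above. */
#define EX_SID_SHIFT      28
#define EX_SID_MASK       0xfffffffffUL
#define EX_SID_SHIFT_1T   40
#define EX_SID_MASK_1T    0xffffffUL

int main(void)
{
        unsigned long ea = 0x0000123456789abcUL;   /* an arbitrary effective address */

        /* 256M segments: everything above bit 27 forms the ESID. */
        unsigned long esid = (ea >> EX_SID_SHIFT) & EX_SID_MASK;

        /* 1T segments: everything above bit 39 forms the ESID. */
        unsigned long esid_1t = (ea >> EX_SID_SHIFT_1T) & EX_SID_MASK_1T;

        printf("EA 0x%016lx: ESID (256M) = 0x%lx, ESID (1T) = 0x%lx\n",
               ea, esid, esid_1t);
        return 0;
}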

#ifndef __ASSEMBLY__
#include <asm/cache.h>

typedef unsigned long pte_basic_t;

static inline void clear_page(void *addr)
{
        unsigned long iterations;
        unsigned long onex, twox, fourx, eightx;

        iterations = ppc64_caches.dlines_per_page / 8;

        /*
         * Some versions of gcc use multiply instructions to
         * calculate the offsets, so let's give it a hand to
         * do better.
         */
        onex = ppc64_caches.dline_size;
        twox = onex << 1;
        fourx = onex << 2;
        eightx = onex << 3;

        /*
         * Zero the page with dcbz, eight cache lines per loop iteration;
         * mtctr loads the iteration count and bdnz closes the loop.
         */
        asm volatile(
        "mtctr  %1      # clear_page\n\
        .balign 16\n\
1:      dcbz    0,%0\n\
        dcbz    %3,%0\n\
        dcbz    %4,%0\n\
        dcbz    %5,%0\n\
        dcbz    %6,%0\n\
        dcbz    %7,%0\n\
        dcbz    %8,%0\n\
        dcbz    %9,%0\n\
        add     %0,%0,%10\n\
        bdnz+   1b"
        : "=&r" (addr)
        : "r" (iterations), "0" (addr), "b" (onex), "b" (twox),
                "b" (twox+onex), "b" (fourx), "b" (fourx+onex),
                "b" (twox+fourx), "b" (eightx-onex), "r" (eightx)
        : "ctr", "memory");
}
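
To get a feel for the loop above: assuming 128-byte cache lines and a 64K page (example values only; the real sizes come from ppc64_caches), there are 512 lines per page, so the loop runs 512 / 8 = 64 times and each iteration zeroes eight lines with dcbz before advancing the pointer by eight line sizes. A plain C sketch of the same structure, with memset standing in for dcbz:

#include <stdio.h>
#include <string.h>

/* Illustration only: plain C equivalent of the clear_page() loop shape,
 * assuming a 128-byte cache line and a 64K page. */
#define EX_LINE_SIZE      128UL
#define EX_PAGE_SIZE      65536UL
#define EX_LINES_PER_PAGE (EX_PAGE_SIZE / EX_LINE_SIZE)

static void ex_clear_page(void *addr)
{
        unsigned long iterations = EX_LINES_PER_PAGE / 8;   /* 64 with the values above */
        char *p = addr;

        while (iterations--) {
                /* Stands in for the eight dcbz instructions per iteration. */
                memset(p, 0, 8 * EX_LINE_SIZE);
                p += 8 * EX_LINE_SIZE;
        }
}

int main(void)
{
        static char page[EX_PAGE_SIZE];

        ex_clear_page(page);
        printf("lines per page = %lu, loop iterations = %lu\n",
               EX_LINES_PER_PAGE, EX_LINES_PER_PAGE / 8);
        return 0;
}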

extern void copy_page(void *to, void *from);

/* Log 2 of page table size */
extern u64 ppc64_pft_size;

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_PPC_MM_SLICES

#define SLICE_LOW_SHIFT         28
#define SLICE_HIGH_SHIFT        40

#define SLICE_LOW_TOP           (0x100000000ul)
#define SLICE_NUM_LOW           (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
#define SLICE_NUM_HIGH          (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)

#define GET_LOW_SLICE_INDEX(addr)       ((addr) >> SLICE_LOW_SHIFT)
#define GET_HIGH_SLICE_INDEX(addr)      ((addr) >> SLICE_HIGH_SHIFT)

/*
 * One bit per slice, and we have one slice per 1TB.
 * Right now we support only 64TB.
 * If we change this we will have to change the type
 * of high_slices.
 */
#define SLICE_MASK_SIZE 8
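
Putting the slice constants together: the low slices carve the first 4GB into 16 pieces of 256MB (hence the u16 low_slices below), while high slices carve the rest of the address space into 1TB pieces, 64 of them at the current 64TB limit (hence the u64 high_slices and a SLICE_MASK_SIZE of 8 bytes). A stand-alone sketch of the index arithmetic (illustration only; the EX_* mirrors and addresses are made up):

#include <stdio.h>

/* User-space mirrors of the slice macros above. */
#define EX_SLICE_LOW_SHIFT   28                 /* 256MB low slices */
#define EX_SLICE_HIGH_SHIFT  40                 /* 1TB high slices */
#define EX_SLICE_LOW_TOP     0x100000000UL      /* low slices cover the first 4GB */

int main(void)
{
        unsigned long low_addr  = 0x30000000UL;         /* below 4GB */
        unsigned long high_addr = 0x123400000000UL;     /* above 4GB */

        if (low_addr < EX_SLICE_LOW_TOP)
                printf("0x%lx -> low slice %lu\n",
                       low_addr, low_addr >> EX_SLICE_LOW_SHIFT);

        printf("0x%lx -> high slice %lu\n",
               high_addr, high_addr >> EX_SLICE_HIGH_SHIFT);
        return 0;
}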

#ifndef __ASSEMBLY__

struct slice_mask {
        u16 low_slices;
        u64 high_slices;
};

struct mm_struct;

extern unsigned long slice_get_unmapped_area(unsigned long addr,
                                             unsigned long len,
                                             unsigned long flags,
                                             unsigned int psize,
                                             int topdown);

extern unsigned int get_slice_psize(struct mm_struct *mm,
                                    unsigned long addr);

extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
                                  unsigned long len, unsigned int psize);

#endif /* __ASSEMBLY__ */
#else
#define slice_init()
#ifdef CONFIG_PPC_STD_MMU_64
#define get_slice_psize(mm, addr)       ((mm)->context.user_psize)
#define slice_set_user_psize(mm, psize)         \
do {                                            \
        (mm)->context.user_psize = (psize);     \
        (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
} while (0)
#else /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC_64K_PAGES
#define get_slice_psize(mm, addr)       MMU_PAGE_64K
#else /* CONFIG_PPC_64K_PAGES */
#define get_slice_psize(mm, addr)       MMU_PAGE_4K
#endif /* !CONFIG_PPC_64K_PAGES */
#define slice_set_user_psize(mm, psize) do { BUG(); } while (0)
#endif /* !CONFIG_PPC_STD_MMU_64 */

#define slice_set_range_psize(mm, start, len, psize)    \
        slice_set_user_psize((mm), (psize))
#endif /* CONFIG_PPC_MM_SLICES */

#ifdef CONFIG_HUGETLB_PAGE

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

#endif /* CONFIG_HUGETLB_PAGE */

#define VM_DATA_DEFAULT_FLAGS \
        (is_32bit_task() ? \
         VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)

/*
 * This is the default if a program doesn't have a PT_GNU_STACK
 * program header entry. The PPC64 ELF ABI has a non-executable
 * stack by default, so in the absence of a PT_GNU_STACK program
 * header we turn execute permission off.
 */
#define VM_STACK_DEFAULT_FLAGS32        (VM_READ | VM_WRITE | VM_EXEC | \
                                         VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS64        (VM_READ | VM_WRITE | \
                                         VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS \
        (is_32bit_task() ? \
         VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
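
The defaults above only matter when the loader finds no PT_GNU_STACK program header at all; for 64-bit tasks, VM_EXEC is then simply left out of the stack flags. To see which case applies to a particular binary from user space, here is a small sketch (illustration only; it uses the standard <elf.h> definitions and assumes a native-endian 64-bit ELF file):

#include <elf.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
        if (argc != 2) {
                fprintf(stderr, "usage: %s <elf64-binary>\n", argv[0]);
                return 1;
        }

        FILE *f = fopen(argv[1], "rb");
        if (!f) {
                perror("fopen");
                return 1;
        }

        Elf64_Ehdr eh;
        if (fread(&eh, sizeof(eh), 1, f) != 1 ||
            memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0 ||
            eh.e_ident[EI_CLASS] != ELFCLASS64) {
                fprintf(stderr, "not a 64-bit ELF file\n");
                fclose(f);
                return 1;
        }

        int found = 0;
        for (unsigned i = 0; i < eh.e_phnum; i++) {
                Elf64_Phdr ph;

                fseek(f, (long)(eh.e_phoff + (Elf64_Off)i * eh.e_phentsize), SEEK_SET);
                if (fread(&ph, sizeof(ph), 1, f) != 1)
                        break;
                if (ph.p_type == PT_GNU_STACK) {
                        found = 1;
                        printf("PT_GNU_STACK present, stack is %sexecutable\n",
                               (ph.p_flags & PF_X) ? "" : "not ");
                }
        }
        if (!found)
                printf("no PT_GNU_STACK: the kernel falls back to VM_STACK_DEFAULT_FLAGS\n");

        fclose(f);
        return 0;
}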

#include <asm-generic/getorder.h>

#endif /* _ASM_POWERPC_PAGE_64_H */
