TOMOYO Linux Cross Reference
Linux/include/linux/huge_mm.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>

#include <linux/fs.h> /* only for vma_is_dax() */

extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                         struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
                         struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                          unsigned long addr,
                                          pmd_t *pmd,
                                          unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, unsigned long end,
                        unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                         unsigned long new_addr, unsigned long old_end,
                         pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, pgprot_t newprot,
                        int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
                        pmd_t *pmd, pfn_t pfn, bool write);
int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
                        pud_t *pud, pfn_t pfn, bool write);
enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
        TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
        TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t count,
                                 enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf,
                                enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE  ((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK  (~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE  ((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK  (~(HPAGE_PUD_SIZE - 1))
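
/*
 * Worked example (illustrative, assuming x86-64 with 4K base pages):
 * PAGE_SHIFT = 12 and PMD_SHIFT = 21, so HPAGE_PMD_ORDER = 9,
 * HPAGE_PMD_NR = 512 and HPAGE_PMD_SIZE = 2 MiB; PUD_SHIFT = 30 gives
 * HPAGE_PUD_SIZE = 1 GiB. The values differ on other architectures
 * and base page sizes.
 */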

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_NOHUGEPAGE)
                return false;

        if (is_vma_temporary_stack(vma))
                return false;

        if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
                return false;

        if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
                return true;

        if (vma_is_dax(vma))
                return true;

        if (transparent_hugepage_flags &
                                (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
                return !!(vma->vm_flags & VM_HUGEPAGE);

        return false;
}
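
/*
 * Usage sketch (illustrative, not part of this header): the generic
 * fault path gates huge-page allocation on this predicate before
 * attempting a pmd-level anonymous fault, roughly:
 *
 *        if (pmd_none(*vmf->pmd) && transparent_hugepage_enabled(vma))
 *                ret = do_huge_pmd_anonymous_page(vmf);
 *
 * The real call site in mm/memory.c carries additional checks and
 * varies across kernel versions.
 */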

#define transparent_hugepage_use_zero_page()                            \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()                                \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
                unsigned long addr, unsigned long len, unsigned long pgoff,
                unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
        return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);
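
/*
 * Calling-convention sketch (an assumption drawn from the declarations
 * above, not a verbatim kernel excerpt): the caller holds a reference
 * to a locked THP, may probe can_split_huge_page() first, then tries:
 *
 *        if (!split_huge_page(page))
 *                ... the compound page is now order-0 pages; with a
 *                    NULL list the tail pages return to the LRU ...
 *
 * split_huge_page_to_list() returns 0 on success and -EBUSY when the
 * page cannot be split (for example because of extra pins).
 */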

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)                         \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
                if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)   \
                                        || pmd_devmap(*____pmd))        \
                        __split_huge_pmd(__vma, __pmd, __address,       \
                                                false, NULL);           \
        }  while (0)
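
/*
 * Illustrative use (a sketch; real callers vary): code about to work
 * on a range at pte granularity demotes any huge pmd first:
 *
 *        split_huge_pmd(vma, pmd, addr);
 *
 * The guard in the macro makes this a no-op unless *pmd is a
 * transparent huge, devmap, or non-present (swap/migration) entry.
 */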

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
                unsigned long address);

#define split_huge_pud(__vma, __pud, __address)                         \
        do {                                                            \
                pud_t *____pud = (__pud);                               \
                if (pud_trans_huge(*____pud)                            \
                                        || pud_devmap(*____pud))        \
                        __split_huge_pud(__vma, __pud, __address);      \
        }  while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
                            unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                    unsigned long start,
                                    unsigned long end,
                                    long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma);

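/*
 * A pmd that is neither none nor present holds a non-present entry;
 * with CONFIG_ARCH_ENABLE_THP_MIGRATION this is how a THP migration
 * entry is encoded.
 */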
static inline int is_swap_pmd(pmd_t pmd)
{
        return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
        if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
                return NULL;
}
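
/*
 * Locking-pattern sketch (illustrative):
 *
 *        ptl = pmd_trans_huge_lock(pmd, vma);
 *        if (ptl) {
 *                ... *pmd is a stable huge entry; operate on it ...
 *                spin_unlock(ptl);
 *        } else {
 *                ... not huge (or split under us); fall back to the
 *                    pte-level path ...
 *        }
 *
 * A NULL return means no lock is held. pud_trans_huge_lock() below
 * follows the same convention at the pud level.
 */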
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
        if (pud_trans_huge(*pud) || pud_devmap(*pud))
                return __pud_trans_huge_lock(pud, vma);
        else
                return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
        if (unlikely(PageTransHuge(page)))
                return HPAGE_PMD_NR;
        return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, int flags);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
                pud_t *pud, int flags);

extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
        return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
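
/*
 * The huge zero page is a single shared, zero-filled PMD-sized page
 * used to satisfy read faults on anonymous mappings without
 * allocating memory. mm_get_huge_zero_page() takes a per-mm
 * reference that mm_put_huge_zero_page() drops when the mm goes away.
 */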

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
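
/*
 * Example (a hedged sketch, not copied from the kernel): the anonymous
 * THP fault path builds an entry for a freshly allocated compound page
 * along these lines:
 *
 *        entry = mk_huge_pmd(page, vma->vm_page_prot);
 *        entry = pmd_mkwrite(pmd_mkdirty(entry));
 *
 * and installs it under the pmd lock; see
 * __do_huge_pmd_anonymous_page() in mm/huge_memory.c for the real
 * logic (which uses maybe_pmd_mkwrite() to honor VM_WRITE).
 */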

static inline bool thp_migration_supported(void)
{
        return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area   NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
        BUILD_BUG();
        return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
        return 0;
}
static inline int split_huge_page(struct page *page)
{
        return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
        do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
                unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address) \
        do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
{
        BUG();
        return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                         unsigned long start,
                                         unsigned long end,
                                         long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
        return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        return NULL;
}

static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
        return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
        return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
        return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmd, int flags)
{
        return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
                unsigned long addr, pud_t *pud, int flags)
{
        return NULL;
}

static inline bool thp_migration_supported(void)
{
        return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */