TOMOYO Linux Cross Reference
Linux/include/linux/hugetlb.h


#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages, used_hpages;
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
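
/*
 * Example (illustrative): walking every registered huge page size.
 * Fields such as name and free_huge_pages belong to struct hstate,
 * defined further down in this header.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */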

struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool);
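
/*
 * Usage sketch (illustrative; assumes hugepage_new_subpool() returns
 * NULL on allocation failure): create a pool limited to nr_blocks huge
 * pages, then drop the reference when the mount goes away.
 *
 *	struct hugepage_subpool *spool = hugepage_new_subpool(nr_blocks);
 *
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */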

int PageHuge(struct page *page);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
bool is_hugepage_active(struct page *page);
void free_huge_page(struct page *page);

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#endif

extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

static inline int PageHuge(struct page *page)
{
	return 0;
}

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
						pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define is_hugepage_active(x)	false

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE	= 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE	= 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline int is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return 1;
	if (is_file_shm_hugepages(file))
		return 1;

	return 0;
}
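
/*
 * Note (illustrative): this covers both files on a hugetlbfs mount and
 * SysV shared memory segments created with SHM_HUGETLB, which are backed
 * by hugetlbfs internally.
 */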

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			0
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};
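
/*
 * Note (illustrative): the page counters above are kept both globally
 * (nr_huge_pages, free_huge_pages, ...) and per NUMA node in the
 * *_node[MAX_NUMNODES] arrays, so the kernel can report either the
 * system-wide view (/proc/meminfo) or the per-node view (sysfs).
 */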

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	struct hugetlbfs_sb_info *hsb;
	hsb = HUGETLBFS_SB(i->i_sb);
	return hsb->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}
static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;
	/* shift as unsigned long so sizes of 2 GB and up don't overflow int */
	return size_to_hstate(1UL << page_size_log);
}
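
/*
 * Example (illustrative): page_size_log is the log2 page size passed in
 * from userspace, e.g. via the MAP_HUGE_SHIFT bits of mmap() flags.
 * A value of 21 selects the 2 MB hstate on x86; 0 means "use the
 * default huge page size".
 */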

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}
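
/*
 * Worked example (illustrative, x86-64 with 4 KB base pages): a 2 MB
 * hstate has order 9, so huge_page_size() = 4 KB << 9 = 2 MB,
 * huge_page_shift() = 9 + 12 = 21 and pages_per_huge_page() = 512.
 * A 1 GB hstate has order 18, which is >= MAX_ORDER (11 by default),
 * so hstate_is_gigantic() is true for it.
 */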

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
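
/*
 * Note (illustrative): the "blocks" here are 512-byte sectors, so a
 * 2 MB huge page spans 4096 of them.
 */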

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}
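
/*
 * Example (illustrative): hugetlbfs keeps page->index in units of the
 * huge page size, so for the third 2 MB page of a file (index 2, with
 * 4 KB base pages) basepage_index() yields 2 * 512 = 1024.
 */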

extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);
static inline int hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	return huge_page_shift(h) == PMD_SHIFT;
#else
	return 0;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}
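
/*
 * Note (illustrative): PMD-sized huge pages use the split page-table
 * lock returned by pmd_lockptr(); larger (e.g. PUD-sized) huge pages
 * fall back to the single per-mm page_table_lock.
 */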

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e)	0
#define hugepage_migration_supported(h) 0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
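
/*
 * Usage sketch (illustrative): the usual pattern at fault and unmap
 * sites is lock, operate on the huge PTE, unlock.
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *	... read or update the huge PTE ...
 *	spin_unlock(ptl);
 */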

#endif /* _LINUX_HUGETLB_H */
