
TOMOYO Linux Cross Reference
Linux/arch/x86/mm/hugetlbpage.c


/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0   /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        struct page *page;
        struct vm_area_struct *vma;
        pte_t *pte;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        /* Index the compound page by the normal-page offset within it. */
        page = &pte_page(*pte)[(address >> PAGE_SHIFT) %
                               (HPAGE_SIZE / PAGE_SIZE)];

        WARN_ON(!PageHead(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned) hugetlb
 * entry. Otherwise it returns 0.
 */
int pmd_huge(pmd_t pmd)
{
        return !pmd_none(pmd) &&
                (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

/*
 * pud_huge() returns 1 if @pud is a present 1GB hugetlb mapping; unlike
 * pmd_huge(), it does not treat non-present entries as huge.
 */
int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_PSE);
}
#endif
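
/*
 * Illustrative truth table for the pmd_huge() check above (editorial
 * sketch, not from the original file). "P" is _PAGE_PRESENT, "PSE" is
 * _PAGE_PSE; pmd_none() entries are already filtered out and return 0:
 *
 *      P   PSE   (pmd_val & (P|PSE)) != P   pmd_huge()
 *      1    0            false                  0   regular pmd -> pte page
 *      1    1            true                   1   present 2MB mapping
 *      0    x            true                   1   migration/hwpoison entry
 */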

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = current->mm->mmap_legacy_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}
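
/*
 * Worked example for the align_mask computation above and in the top-down
 * variant below (editorial note, assuming x86-64 with 4KB base pages and a
 * 2MB hstate):
 *
 *      huge_page_mask(h) = ~(2MB - 1)           = ~0x1fffff
 *      PAGE_MASK         = ~(4KB - 1)           = ~0xfff
 *      align_mask        = PAGE_MASK & 0x1fffff = 0x1ff000
 *
 * vm_unmapped_area() returns an address whose align_mask bits equal
 * align_offset (zero here), i.e. a 2MB-aligned address as required for a
 * huge page mapping.
 */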

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;
        unsigned long addr;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        /* MAP_FIXED requests only need the range validated, not searched. */
        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        /* Honour a caller-supplied hint if the aligned range is free. */
        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        /*
         * Search in the same direction the normal mmap() path would:
         * bottom-up for the legacy layout, top-down otherwise.
         */
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */
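
/*
 * Minimal userspace sketch (editorial, not part of this file) of a request
 * that reaches hugetlb_get_unmapped_area() above: MAP_HUGETLB asks for an
 * anonymous mapping backed by huge pages of the default size:
 *
 *      #include <sys/mman.h>
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              size_t len = 2 * 1024 * 1024;     // one 2MB huge page
 *              void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
 *                             -1, 0);
 *              if (p == MAP_FAILED) {            // e.g. no huge pages reserved
 *                      perror("mmap");
 *                      return 1;
 *              }
 *              printf("huge mapping at %p\n", p);
 *              return munmap(p, len);
 *      }
 */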

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
        unsigned long ps = memparse(opt, &opt);
        if (ps == PMD_SIZE) {
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
                hugetlb_bad_size();
                printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
                        ps >> 20);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
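
/*
 * Editorial usage note: setup_hugepagesz() parses the "hugepagesz=" boot
 * parameter, so pools for both supported sizes can be reserved from the
 * kernel command line, e.g.:
 *
 *      hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4
 *
 * The 1G pool is only accepted on CPUs with the "pdpe1gb" (GBPAGES) flag.
 */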

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
static __init int gigantic_pages_init(void)
{
        /* With compaction or CMA we can allocate gigantic pages at runtime */
        if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        return 0;
}
arch_initcall(gigantic_pages_init);
#endif /* (CONFIG_MEMORY_ISOLATION && CONFIG_COMPACTION) || CONFIG_CMA */
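
/*
 * Editorial usage note: once gigantic_pages_init() has registered the 1GB
 * hstate, gigantic pages can also be allocated at runtime through sysfs
 * (this needs enough physically contiguous memory to succeed):
 *
 *      echo 4 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
 */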
#endif /* CONFIG_X86_64 */

