TOMOYO Linux Cross Reference
Linux/mm/hugetlb.c

  1 /*
  2  * Generic hugetlb support.
  3  * (C) Nadia Yvette Chambers, April 2004
  4  */
  5 #include <linux/list.h>
  6 #include <linux/init.h>
  7 #include <linux/module.h>
  8 #include <linux/mm.h>
  9 #include <linux/seq_file.h>
 10 #include <linux/sysctl.h>
 11 #include <linux/highmem.h>
 12 #include <linux/mmu_notifier.h>
 13 #include <linux/nodemask.h>
 14 #include <linux/pagemap.h>
 15 #include <linux/mempolicy.h>
 16 #include <linux/cpuset.h>
 17 #include <linux/mutex.h>
 18 #include <linux/bootmem.h>
 19 #include <linux/sysfs.h>
 20 #include <linux/slab.h>
 21 #include <linux/rmap.h>
 22 #include <linux/swap.h>
 23 #include <linux/swapops.h>
 24 #include <linux/page-isolation.h>
 25 
 26 #include <asm/page.h>
 27 #include <asm/pgtable.h>
 28 #include <asm/tlb.h>
 29 
 30 #include <linux/io.h>
 31 #include <linux/hugetlb.h>
 32 #include <linux/hugetlb_cgroup.h>
 33 #include <linux/node.h>
 34 #include "internal.h"
 35 
 36 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
 37 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 38 unsigned long hugepages_treat_as_movable;
 39 
 40 int hugetlb_max_hstate __read_mostly;
 41 unsigned int default_hstate_idx;
 42 struct hstate hstates[HUGE_MAX_HSTATE];
 43 
 44 __initdata LIST_HEAD(huge_boot_pages);
 45 
 46 /* for command line parsing */
 47 static struct hstate * __initdata parsed_hstate;
 48 static unsigned long __initdata default_hstate_max_huge_pages;
 49 static unsigned long __initdata default_hstate_size;
 50 
 51 /*
 52  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 53  */
 54 DEFINE_SPINLOCK(hugetlb_lock);
 55 
 56 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
 57 {
 58         bool free = (spool->count == 0) && (spool->used_hpages == 0);
 59 
 60         spin_unlock(&spool->lock);
 61 
 62         /* If no pages are used, and no other handles to the subpool
  63          * remain, free the subpool. */
 64         if (free)
 65                 kfree(spool);
 66 }
 67 
 68 struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
 69 {
 70         struct hugepage_subpool *spool;
 71 
 72         spool = kmalloc(sizeof(*spool), GFP_KERNEL);
 73         if (!spool)
 74                 return NULL;
 75 
 76         spin_lock_init(&spool->lock);
 77         spool->count = 1;
 78         spool->max_hpages = nr_blocks;
 79         spool->used_hpages = 0;
 80 
 81         return spool;
 82 }
 83 
 84 void hugepage_put_subpool(struct hugepage_subpool *spool)
 85 {
 86         spin_lock(&spool->lock);
 87         BUG_ON(!spool->count);
 88         spool->count--;
 89         unlock_or_release_subpool(spool);
 90 }
 91 
 92 static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
 93                                       long delta)
 94 {
 95         int ret = 0;
 96 
 97         if (!spool)
 98                 return 0;
 99 
100         spin_lock(&spool->lock);
101         if ((spool->used_hpages + delta) <= spool->max_hpages) {
102                 spool->used_hpages += delta;
103         } else {
104                 ret = -ENOMEM;
105         }
106         spin_unlock(&spool->lock);
107 
108         return ret;
109 }
110 
111 static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
112                                        long delta)
113 {
114         if (!spool)
115                 return;
116 
117         spin_lock(&spool->lock);
118         spool->used_hpages -= delta;
119         /* If hugetlbfs_put_super couldn't free spool due to
 120          * an outstanding quota reference, free it now. */
121         unlock_or_release_subpool(spool);
122 }
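
/*
 * Editor's note: the sketch below is an illustration added to this annotated
 * listing; it is not part of mm/hugetlb.c.  It shows how the subpool helpers
 * above fit together: a subpool is created with a huge page limit, pages are
 * charged against it before an allocation and uncharged afterwards, and the
 * final hugepage_put_subpool() drops the creator's reference so that
 * unlock_or_release_subpool() can free the subpool once it is idle.
 */
static int __maybe_unused subpool_accounting_sketch(void)
{
        struct hugepage_subpool *spool;

        spool = hugepage_new_subpool(16);       /* limit: 16 huge pages */
        if (!spool)
                return -ENOMEM;

        if (hugepage_subpool_get_pages(spool, 2)) {     /* charge 2 pages */
                hugepage_put_subpool(spool);
                return -ENOMEM;
        }

        /* ... the two huge pages are used here ... */

        hugepage_subpool_put_pages(spool, 2);   /* uncharge them again */
        hugepage_put_subpool(spool);            /* drop the initial reference */
        return 0;
}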
123 
124 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
125 {
126         return HUGETLBFS_SB(inode->i_sb)->spool;
127 }
128 
129 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
130 {
131         return subpool_inode(file_inode(vma->vm_file));
132 }
133 
134 /*
135  * Region tracking -- allows tracking of reservations and instantiated pages
136  *                    across the pages in a mapping.
137  *
138  * The region data structures are protected by a combination of the mmap_sem
 139  * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
140  * must either hold the mmap_sem for write, or the mmap_sem for read and
 141  * the hugetlb_instantiation_mutex:
142  *
143  *      down_write(&mm->mmap_sem);
144  * or
145  *      down_read(&mm->mmap_sem);
146  *      mutex_lock(&hugetlb_instantiation_mutex);
147  */
148 struct file_region {
149         struct list_head link;
150         long from;
151         long to;
152 };
153 
154 static long region_add(struct list_head *head, long f, long t)
155 {
156         struct file_region *rg, *nrg, *trg;
157 
158         /* Locate the region we are either in or before. */
159         list_for_each_entry(rg, head, link)
160                 if (f <= rg->to)
161                         break;
162 
163         /* Round our left edge to the current segment if it encloses us. */
164         if (f > rg->from)
165                 f = rg->from;
166 
167         /* Check for and consume any regions we now overlap with. */
168         nrg = rg;
169         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
170                 if (&rg->link == head)
171                         break;
172                 if (rg->from > t)
173                         break;
174 
 175                 /* If this area reaches higher, then extend our area to
176                  * include it completely.  If this is not the first area
177                  * which we intend to reuse, free it. */
178                 if (rg->to > t)
179                         t = rg->to;
180                 if (rg != nrg) {
181                         list_del(&rg->link);
182                         kfree(rg);
183                 }
184         }
185         nrg->from = f;
186         nrg->to = t;
187         return 0;
188 }
189 
190 static long region_chg(struct list_head *head, long f, long t)
191 {
192         struct file_region *rg, *nrg;
193         long chg = 0;
194 
195         /* Locate the region we are before or in. */
196         list_for_each_entry(rg, head, link)
197                 if (f <= rg->to)
198                         break;
199 
200         /* If we are below the current region then a new region is required.
 201          * Subtle: allocate a new region at the position but make it zero
202          * size such that we can guarantee to record the reservation. */
203         if (&rg->link == head || t < rg->from) {
204                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
205                 if (!nrg)
206                         return -ENOMEM;
207                 nrg->from = f;
208                 nrg->to   = f;
209                 INIT_LIST_HEAD(&nrg->link);
210                 list_add(&nrg->link, rg->link.prev);
211 
212                 return t - f;
213         }
214 
215         /* Round our left edge to the current segment if it encloses us. */
216         if (f > rg->from)
217                 f = rg->from;
218         chg = t - f;
219 
220         /* Check for and consume any regions we now overlap with. */
221         list_for_each_entry(rg, rg->link.prev, link) {
222                 if (&rg->link == head)
223                         break;
224                 if (rg->from > t)
225                         return chg;
226 
227                 /* We overlap with this area, if it extends further than
228                  * us then we must extend ourselves.  Account for its
229                  * existing reservation. */
230                 if (rg->to > t) {
231                         chg += rg->to - t;
232                         t = rg->to;
233                 }
234                 chg -= rg->to - rg->from;
235         }
236         return chg;
237 }
238 
239 static long region_truncate(struct list_head *head, long end)
240 {
241         struct file_region *rg, *trg;
242         long chg = 0;
243 
244         /* Locate the region we are either in or before. */
245         list_for_each_entry(rg, head, link)
246                 if (end <= rg->to)
247                         break;
248         if (&rg->link == head)
249                 return 0;
250 
251         /* If we are in the middle of a region then adjust it. */
252         if (end > rg->from) {
253                 chg = rg->to - end;
254                 rg->to = end;
255                 rg = list_entry(rg->link.next, typeof(*rg), link);
256         }
257 
258         /* Drop any remaining regions. */
259         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
260                 if (&rg->link == head)
261                         break;
262                 chg += rg->to - rg->from;
263                 list_del(&rg->link);
264                 kfree(rg);
265         }
266         return chg;
267 }
268 
269 static long region_count(struct list_head *head, long f, long t)
270 {
271         struct file_region *rg;
272         long chg = 0;
273 
274         /* Locate each segment we overlap with, and count that overlap. */
275         list_for_each_entry(rg, head, link) {
276                 long seg_from;
277                 long seg_to;
278 
279                 if (rg->to <= f)
280                         continue;
281                 if (rg->from >= t)
282                         break;
283 
284                 seg_from = max(rg->from, f);
285                 seg_to = min(rg->to, t);
286 
287                 chg += seg_to - seg_from;
288         }
289 
290         return chg;
291 }
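
/*
 * Editor's note: the sketch below is an illustration added to this annotated
 * listing; it is not part of mm/hugetlb.c.  The region helpers above are used
 * in two phases: region_chg() computes how many huge pages a range would newly
 * reserve (pre-allocating an empty entry when the range falls in a gap), and
 * region_add() commits the range once those pages have actually been set
 * aside.  vma_needs_reservation() and vma_commit_reservation() further down
 * follow exactly this pattern.
 */
static long __maybe_unused region_two_phase_sketch(struct list_head *regions,
                                                   long from, long to)
{
        long chg;

        chg = region_chg(regions, from, to);    /* phase 1: how much is new? */
        if (chg < 0)
                return chg;                     /* -ENOMEM */

        /* ... 'chg' huge pages are reserved in the pool/subpool here ... */

        region_add(regions, from, to);          /* phase 2: commit the range */
        return chg;
}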
292 
293 /*
294  * Convert the address within this vma to the page offset within
295  * the mapping, in pagecache page units; huge pages here.
296  */
297 static pgoff_t vma_hugecache_offset(struct hstate *h,
298                         struct vm_area_struct *vma, unsigned long address)
299 {
300         return ((address - vma->vm_start) >> huge_page_shift(h)) +
301                         (vma->vm_pgoff >> huge_page_order(h));
302 }
303 
304 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
305                                      unsigned long address)
306 {
307         return vma_hugecache_offset(hstate_vma(vma), vma, address);
308 }
309 
310 /*
311  * Return the size of the pages allocated when backing a VMA. In the majority
 312  * of cases this will be the same size as that used by the page table entries.
313  */
314 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
315 {
316         struct hstate *hstate;
317 
318         if (!is_vm_hugetlb_page(vma))
319                 return PAGE_SIZE;
320 
321         hstate = hstate_vma(vma);
322 
323         return 1UL << (hstate->order + PAGE_SHIFT);
324 }
325 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
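
/*
 * Editor's note (worked example added to this annotated listing, not part of
 * mm/hugetlb.c): with 4 KB base pages (PAGE_SHIFT == 12), a 2 MB hstate has
 * order 9, so vma_kernel_pagesize() returns 1UL << (9 + 12) == 2 MB; a 1 GB
 * hstate has order 18 and yields 1UL << (18 + 12) == 1 GB.
 */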
326 
327 /*
328  * Return the page size being used by the MMU to back a VMA. In the majority
329  * of cases, the page size used by the kernel matches the MMU size. On
330  * architectures where it differs, an architecture-specific version of this
331  * function is required.
332  */
333 #ifndef vma_mmu_pagesize
334 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
335 {
336         return vma_kernel_pagesize(vma);
337 }
338 #endif
339 
340 /*
341  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
342  * bits of the reservation map pointer, which are always clear due to
343  * alignment.
344  */
345 #define HPAGE_RESV_OWNER    (1UL << 0)
346 #define HPAGE_RESV_UNMAPPED (1UL << 1)
347 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
348 
349 /*
350  * These helpers are used to track how many pages are reserved for
351  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 352  * is guaranteed to have its future faults succeed.
353  *
354  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
355  * the reserve counters are updated with the hugetlb_lock held. It is safe
356  * to reset the VMA at fork() time as it is not in use yet and there is no
357  * chance of the global counters getting corrupted as a result of the values.
358  *
359  * The private mapping reservation is represented in a subtly different
360  * manner to a shared mapping.  A shared mapping has a region map associated
 361  * with the underlying file; this region map represents the backing file
 362  * pages which have ever had a reservation assigned, and it persists even
 363  * after the page is instantiated.  A private mapping has a region map
 364  * associated with the original mmap() which is attached to all VMAs that
 365  * reference it; this region map represents those offsets which have consumed
 366  * a reservation, i.e. where pages have been instantiated.
367  */
368 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
369 {
370         return (unsigned long)vma->vm_private_data;
371 }
372 
373 static void set_vma_private_data(struct vm_area_struct *vma,
374                                                         unsigned long value)
375 {
376         vma->vm_private_data = (void *)value;
377 }
378 
379 struct resv_map {
380         struct kref refs;
381         struct list_head regions;
382 };
383 
384 static struct resv_map *resv_map_alloc(void)
385 {
386         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
387         if (!resv_map)
388                 return NULL;
389 
390         kref_init(&resv_map->refs);
391         INIT_LIST_HEAD(&resv_map->regions);
392 
393         return resv_map;
394 }
395 
396 static void resv_map_release(struct kref *ref)
397 {
398         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
399 
400         /* Clear out any active regions before we release the map. */
401         region_truncate(&resv_map->regions, 0);
402         kfree(resv_map);
403 }
404 
405 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
406 {
407         VM_BUG_ON(!is_vm_hugetlb_page(vma));
408         if (!(vma->vm_flags & VM_MAYSHARE))
409                 return (struct resv_map *)(get_vma_private_data(vma) &
410                                                         ~HPAGE_RESV_MASK);
411         return NULL;
412 }
413 
414 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
415 {
416         VM_BUG_ON(!is_vm_hugetlb_page(vma));
417         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
418 
419         set_vma_private_data(vma, (get_vma_private_data(vma) &
420                                 HPAGE_RESV_MASK) | (unsigned long)map);
421 }
422 
423 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
424 {
425         VM_BUG_ON(!is_vm_hugetlb_page(vma));
426         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
427 
428         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
429 }
430 
431 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
432 {
433         VM_BUG_ON(!is_vm_hugetlb_page(vma));
434 
435         return (get_vma_private_data(vma) & flag) != 0;
436 }
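
/*
 * Editor's note: the sketch below is an illustration added to this annotated
 * listing; it is not part of mm/hugetlb.c.  Because a kmalloc()ed resv_map is
 * at least pointer-aligned, its low two bits are free to carry the
 * HPAGE_RESV_* flags, and the helpers above pack and unpack the pointer and
 * the flags in vma->vm_private_data.  Roughly how the mmap() caller of a
 * private hugetlb mapping would set this up:
 */
static int __maybe_unused private_resv_setup_sketch(struct vm_area_struct *vma)
{
        struct resv_map *resv = resv_map_alloc();

        if (!resv)
                return -ENOMEM;

        set_vma_resv_map(vma, resv);                    /* store the pointer */
        set_vma_resv_flags(vma, HPAGE_RESV_OWNER);      /* low bit: owner */

        return is_vma_resv_set(vma, HPAGE_RESV_OWNER) ? 0 : -EINVAL;
}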
437 
438 /* Decrement the reserved pages in the hugepage pool by one */
439 static void decrement_hugepage_resv_vma(struct hstate *h,
440                         struct vm_area_struct *vma)
441 {
442         if (vma->vm_flags & VM_NORESERVE)
443                 return;
444 
445         if (vma->vm_flags & VM_MAYSHARE) {
446                 /* Shared mappings always use reserves */
447                 h->resv_huge_pages--;
448         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
449                 /*
450                  * Only the process that called mmap() has reserves for
451                  * private mappings.
452                  */
453                 h->resv_huge_pages--;
454         }
455 }
456 
457 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
458 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
459 {
460         VM_BUG_ON(!is_vm_hugetlb_page(vma));
461         if (!(vma->vm_flags & VM_MAYSHARE))
462                 vma->vm_private_data = (void *)0;
463 }
464 
465 /* Returns true if the VMA has associated reserve pages */
466 static int vma_has_reserves(struct vm_area_struct *vma)
467 {
468         if (vma->vm_flags & VM_MAYSHARE)
469                 return 1;
470         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
471                 return 1;
472         return 0;
473 }
474 
475 static void copy_gigantic_page(struct page *dst, struct page *src)
476 {
477         int i;
478         struct hstate *h = page_hstate(src);
479         struct page *dst_base = dst;
480         struct page *src_base = src;
481 
482         for (i = 0; i < pages_per_huge_page(h); ) {
483                 cond_resched();
484                 copy_highpage(dst, src);
485 
486                 i++;
487                 dst = mem_map_next(dst, dst_base, i);
488                 src = mem_map_next(src, src_base, i);
489         }
490 }
491 
492 void copy_huge_page(struct page *dst, struct page *src)
493 {
494         int i;
495         struct hstate *h = page_hstate(src);
496 
497         if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
498                 copy_gigantic_page(dst, src);
499                 return;
500         }
501 
502         might_sleep();
503         for (i = 0; i < pages_per_huge_page(h); i++) {
504                 cond_resched();
505                 copy_highpage(dst + i, src + i);
506         }
507 }
508 
509 static void enqueue_huge_page(struct hstate *h, struct page *page)
510 {
511         int nid = page_to_nid(page);
512         list_move(&page->lru, &h->hugepage_freelists[nid]);
513         h->free_huge_pages++;
514         h->free_huge_pages_node[nid]++;
515 }
516 
517 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
518 {
519         struct page *page;
520 
521         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
522                 if (!is_migrate_isolate_page(page))
523                         break;
524         /*
 525          * if a non-isolated free hugepage is not found on the list,
526          * the allocation fails.
527          */
528         if (&h->hugepage_freelists[nid] == &page->lru)
529                 return NULL;
530         list_move(&page->lru, &h->hugepage_activelist);
531         set_page_refcounted(page);
532         h->free_huge_pages--;
533         h->free_huge_pages_node[nid]--;
534         return page;
535 }
536 
537 static struct page *dequeue_huge_page_vma(struct hstate *h,
538                                 struct vm_area_struct *vma,
539                                 unsigned long address, int avoid_reserve)
540 {
541         struct page *page = NULL;
542         struct mempolicy *mpol;
543         nodemask_t *nodemask;
544         struct zonelist *zonelist;
545         struct zone *zone;
546         struct zoneref *z;
547         unsigned int cpuset_mems_cookie;
548 
549 retry_cpuset:
550         cpuset_mems_cookie = get_mems_allowed();
551         zonelist = huge_zonelist(vma, address,
552                                         htlb_alloc_mask, &mpol, &nodemask);
553         /*
 554          * A child process with MAP_PRIVATE mappings created by its parent
 555          * has no page reserves. This check ensures that reservations are
 556          * not "stolen". The child may still get SIGKILLed.
557          */
558         if (!vma_has_reserves(vma) &&
559                         h->free_huge_pages - h->resv_huge_pages == 0)
560                 goto err;
561 
562         /* If reserves cannot be used, ensure enough pages are in the pool */
563         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
564                 goto err;
565 
566         for_each_zone_zonelist_nodemask(zone, z, zonelist,
567                                                 MAX_NR_ZONES - 1, nodemask) {
568                 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
569                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
570                         if (page) {
571                                 if (!avoid_reserve)
572                                         decrement_hugepage_resv_vma(h, vma);
573                                 break;
574                         }
575                 }
576         }
577 
578         mpol_cond_put(mpol);
579         if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
580                 goto retry_cpuset;
581         return page;
582 
583 err:
584         mpol_cond_put(mpol);
585         return NULL;
586 }
587 
588 static void update_and_free_page(struct hstate *h, struct page *page)
589 {
590         int i;
591 
592         VM_BUG_ON(h->order >= MAX_ORDER);
593 
594         h->nr_huge_pages--;
595         h->nr_huge_pages_node[page_to_nid(page)]--;
596         for (i = 0; i < pages_per_huge_page(h); i++) {
597                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
598                                 1 << PG_referenced | 1 << PG_dirty |
599                                 1 << PG_active | 1 << PG_reserved |
600                                 1 << PG_private | 1 << PG_writeback);
601         }
602         VM_BUG_ON(hugetlb_cgroup_from_page(page));
603         set_compound_page_dtor(page, NULL);
604         set_page_refcounted(page);
605         arch_release_hugepage(page);
606         __free_pages(page, huge_page_order(h));
607 }
608 
609 struct hstate *size_to_hstate(unsigned long size)
610 {
611         struct hstate *h;
612 
613         for_each_hstate(h) {
614                 if (huge_page_size(h) == size)
615                         return h;
616         }
617         return NULL;
618 }
619 
620 static void free_huge_page(struct page *page)
621 {
622         /*
623          * Can't pass hstate in here because it is called from the
624          * compound page destructor.
625          */
626         struct hstate *h = page_hstate(page);
627         int nid = page_to_nid(page);
628         struct hugepage_subpool *spool =
629                 (struct hugepage_subpool *)page_private(page);
630 
631         set_page_private(page, 0);
632         page->mapping = NULL;
633         BUG_ON(page_count(page));
634         BUG_ON(page_mapcount(page));
635 
636         spin_lock(&hugetlb_lock);
637         hugetlb_cgroup_uncharge_page(hstate_index(h),
638                                      pages_per_huge_page(h), page);
639         if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
640                 /* remove the page from active list */
641                 list_del(&page->lru);
642                 update_and_free_page(h, page);
643                 h->surplus_huge_pages--;
644                 h->surplus_huge_pages_node[nid]--;
645         } else {
646                 arch_clear_hugepage_flags(page);
647                 enqueue_huge_page(h, page);
648         }
649         spin_unlock(&hugetlb_lock);
650         hugepage_subpool_put_pages(spool, 1);
651 }
652 
653 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
654 {
655         INIT_LIST_HEAD(&page->lru);
656         set_compound_page_dtor(page, free_huge_page);
657         spin_lock(&hugetlb_lock);
658         set_hugetlb_cgroup(page, NULL);
659         h->nr_huge_pages++;
660         h->nr_huge_pages_node[nid]++;
661         spin_unlock(&hugetlb_lock);
662         put_page(page); /* free it into the hugepage allocator */
663 }
664 
665 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
666 {
667         int i;
668         int nr_pages = 1 << order;
669         struct page *p = page + 1;
670 
671         /* we rely on prep_new_huge_page to set the destructor */
672         set_compound_order(page, order);
673         __SetPageHead(page);
674         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
675                 __SetPageTail(p);
676                 set_page_count(p, 0);
677                 p->first_page = page;
678         }
679 }
680 
681 /*
682  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
683  * transparent huge pages.  See the PageTransHuge() documentation for more
684  * details.
685  */
686 int PageHuge(struct page *page)
687 {
688         compound_page_dtor *dtor;
689 
690         if (!PageCompound(page))
691                 return 0;
692 
693         page = compound_head(page);
694         dtor = get_compound_page_dtor(page);
695 
696         return dtor == free_huge_page;
697 }
698 EXPORT_SYMBOL_GPL(PageHuge);
699 
700 /*
 701  * PageHeadHuge() only returns true for a hugetlbfs head page, not for
702  * normal or transparent huge pages.
703  */
704 int PageHeadHuge(struct page *page_head)
705 {
706         compound_page_dtor *dtor;
707 
708         if (!PageHead(page_head))
709                 return 0;
710 
711         dtor = get_compound_page_dtor(page_head);
712 
713         return dtor == free_huge_page;
714 }
715 EXPORT_SYMBOL_GPL(PageHeadHuge);
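
/*
 * Editor's note: the sketch below is an illustration added to this annotated
 * listing; it is not part of mm/hugetlb.c.  PageHuge() may be handed any
 * sub-page of a compound page and resolves the head itself, whereas
 * PageHeadHuge() must be given the head page; both identify hugetlbfs pages
 * by their free_huge_page destructor.
 */
static int __maybe_unused is_hugetlbfs_page_sketch(struct page *page)
{
        return PageHuge(page) && PageHeadHuge(compound_head(page));
}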
716 
717 pgoff_t __basepage_index(struct page *page)
718 {
719         struct page *page_head = compound_head(page);
720         pgoff_t index = page_index(page_head);
721         unsigned long compound_idx;
722 
723         if (!PageHuge(page_head))
724                 return page_index(page);
725 
726         if (compound_order(page_head) >= MAX_ORDER)
727                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
728         else
729                 compound_idx = page - page_head;
730 
731         return (index << compound_order(page_head)) + compound_idx;
732 }
733 
734 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
735 {
736         struct page *page;
737 
738         if (h->order >= MAX_ORDER)
739                 return NULL;
740 
741         page = alloc_pages_exact_node(nid,
742                 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
743                                                 __GFP_REPEAT|__GFP_NOWARN,
744                 huge_page_order(h));
745         if (page) {
746                 if (arch_prepare_hugepage(page)) {
747                         __free_pages(page, huge_page_order(h));
748                         return NULL;
749                 }
750                 prep_new_huge_page(h, page, nid);
751         }
752 
753         return page;
754 }
755 
756 /*
757  * common helper functions for hstate_next_node_to_{alloc|free}.
758  * We may have allocated or freed a huge page based on a different
759  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
760  * be outside of *nodes_allowed.  Ensure that we use an allowed
761  * node for alloc or free.
762  */
763 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
764 {
765         nid = next_node(nid, *nodes_allowed);
766         if (nid == MAX_NUMNODES)
767                 nid = first_node(*nodes_allowed);
768         VM_BUG_ON(nid >= MAX_NUMNODES);
769 
770         return nid;
771 }
772 
773 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
774 {
775         if (!node_isset(nid, *nodes_allowed))
776                 nid = next_node_allowed(nid, nodes_allowed);
777         return nid;
778 }
779 
780 /*
781  * returns the previously saved node ["this node"] from which to
782  * allocate a persistent huge page for the pool and advance the
783  * next node from which to allocate, handling wrap at end of node
784  * mask.
785  */
786 static int hstate_next_node_to_alloc(struct hstate *h,
787                                         nodemask_t *nodes_allowed)
788 {
789         int nid;
790 
791         VM_BUG_ON(!nodes_allowed);
792 
793         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
794         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
795 
796         return nid;
797 }
798 
799 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
800 {
801         struct page *page;
802         int start_nid;
803         int next_nid;
804         int ret = 0;
805 
806         start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
807         next_nid = start_nid;
808 
809         do {
810                 page = alloc_fresh_huge_page_node(h, next_nid);
811                 if (page) {
812                         ret = 1;
813                         break;
814                 }
815                 next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
816         } while (next_nid != start_nid);
817 
818         if (ret)
819                 count_vm_event(HTLB_BUDDY_PGALLOC);
820         else
821                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
822 
823         return ret;
824 }
825 
826 /*
827  * helper for free_pool_huge_page() - return the previously saved
828  * node ["this node"] from which to free a huge page.  Advance the
829  * next node id whether or not we find a free huge page to free so
830  * that the next attempt to free addresses the next node.
831  */
832 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
833 {
834         int nid;
835 
836         VM_BUG_ON(!nodes_allowed);
837 
838         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
839         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
840 
841         return nid;
842 }
843 
844 /*
 845  * Free a huge page from the pool, starting at the next node to free.
846  * Attempt to keep persistent huge pages more or less
847  * balanced over allowed nodes.
848  * Called with hugetlb_lock locked.
849  */
850 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
851                                                          bool acct_surplus)
852 {
853         int start_nid;
854         int next_nid;
855         int ret = 0;
856 
857         start_nid = hstate_next_node_to_free(h, nodes_allowed);
858         next_nid = start_nid;
859 
860         do {
861                 /*
862                  * If we're returning unused surplus pages, only examine
863                  * nodes with surplus pages.
864                  */
865                 if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
866                     !list_empty(&h->hugepage_freelists[next_nid])) {
867                         struct page *page =
868                                 list_entry(h->hugepage_freelists[next_nid].next,
869                                           struct page, lru);
870                         list_del(&page->lru);
871                         h->free_huge_pages--;
872                         h->free_huge_pages_node[next_nid]--;
873                         if (acct_surplus) {
874                                 h->surplus_huge_pages--;
875                                 h->surplus_huge_pages_node[next_nid]--;
876                         }
877                         update_and_free_page(h, page);
878                         ret = 1;
879                         break;
880                 }
881                 next_nid = hstate_next_node_to_free(h, nodes_allowed);
882         } while (next_nid != start_nid);
883 
884         return ret;
885 }
886 
887 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
888 {
889         struct page *page;
890         unsigned int r_nid;
891 
892         if (h->order >= MAX_ORDER)
893                 return NULL;
894 
895         /*
896          * Assume we will successfully allocate the surplus page to
897          * prevent racing processes from causing the surplus to exceed
 898          * overcommit.
899          *
900          * This however introduces a different race, where a process B
901          * tries to grow the static hugepage pool while alloc_pages() is
902          * called by process A. B will only examine the per-node
903          * counters in determining if surplus huge pages can be
904          * converted to normal huge pages in adjust_pool_surplus(). A
905          * won't be able to increment the per-node counter, until the
906          * lock is dropped by B, but B doesn't drop hugetlb_lock until
907          * no more huge pages can be converted from surplus to normal
908          * state (and doesn't try to convert again). Thus, we have a
909          * case where a surplus huge page exists, the pool is grown, and
910          * the surplus huge page still exists after, even though it
911          * should just have been converted to a normal huge page. This
912          * does not leak memory, though, as the hugepage will be freed
913          * once it is out of use. It also does not allow the counters to
914          * go out of whack in adjust_pool_surplus() as we don't modify
915          * the node values until we've gotten the hugepage and only the
916          * per-node value is checked there.
917          */
918         spin_lock(&hugetlb_lock);
919         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
920                 spin_unlock(&hugetlb_lock);
921                 return NULL;
922         } else {
923                 h->nr_huge_pages++;
924                 h->surplus_huge_pages++;
925         }
926         spin_unlock(&hugetlb_lock);
927 
928         if (nid == NUMA_NO_NODE)
929                 page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
930                                    __GFP_REPEAT|__GFP_NOWARN,
931                                    huge_page_order(h));
932         else
933                 page = alloc_pages_exact_node(nid,
934                         htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
935                         __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
936 
937         if (page && arch_prepare_hugepage(page)) {
938                 __free_pages(page, huge_page_order(h));
939                 page = NULL;
940         }
941 
942         spin_lock(&hugetlb_lock);
943         if (page) {
944                 INIT_LIST_HEAD(&page->lru);
945                 r_nid = page_to_nid(page);
946                 set_compound_page_dtor(page, free_huge_page);
947                 set_hugetlb_cgroup(page, NULL);
948                 /*
949                  * We incremented the global counters already
950                  */
951                 h->nr_huge_pages_node[r_nid]++;
952                 h->surplus_huge_pages_node[r_nid]++;
953                 __count_vm_event(HTLB_BUDDY_PGALLOC);
954         } else {
955                 h->nr_huge_pages--;
956                 h->surplus_huge_pages--;
957                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
958         }
959         spin_unlock(&hugetlb_lock);
960 
961         return page;
962 }
963 
964 /*
965  * This allocation function is useful in the context where vma is irrelevant.
 966  * E.g. soft-offlining uses this function because it only cares about the
 967  * physical address of the error page.
968  */
969 struct page *alloc_huge_page_node(struct hstate *h, int nid)
970 {
971         struct page *page;
972 
973         spin_lock(&hugetlb_lock);
974         page = dequeue_huge_page_node(h, nid);
975         spin_unlock(&hugetlb_lock);
976 
977         if (!page)
978                 page = alloc_buddy_huge_page(h, nid);
979 
980         return page;
981 }
982 
983 /*
984  * Increase the hugetlb pool such that it can accommodate a reservation
985  * of size 'delta'.
986  */
987 static int gather_surplus_pages(struct hstate *h, int delta)
988 {
989         struct list_head surplus_list;
990         struct page *page, *tmp;
991         int ret, i;
992         int needed, allocated;
993         bool alloc_ok = true;
994 
995         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
996         if (needed <= 0) {
997                 h->resv_huge_pages += delta;
998                 return 0;
999         }
1000 
1001         allocated = 0;
1002         INIT_LIST_HEAD(&surplus_list);
1003 
1004         ret = -ENOMEM;
1005 retry:
1006         spin_unlock(&hugetlb_lock);
1007         for (i = 0; i < needed; i++) {
1008                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1009                 if (!page) {
1010                         alloc_ok = false;
1011                         break;
1012                 }
1013                 list_add(&page->lru, &surplus_list);
1014         }
1015         allocated += i;
1016 
1017         /*
1018          * After retaking hugetlb_lock, we need to recalculate 'needed'
1019          * because either resv_huge_pages or free_huge_pages may have changed.
1020          */
1021         spin_lock(&hugetlb_lock);
1022         needed = (h->resv_huge_pages + delta) -
1023                         (h->free_huge_pages + allocated);
1024         if (needed > 0) {
1025                 if (alloc_ok)
1026                         goto retry;
1027                 /*
1028                  * We were not able to allocate enough pages to
1029                  * satisfy the entire reservation so we free what
1030                  * we've allocated so far.
1031                  */
1032                 goto free;
1033         }
1034         /*
1035          * The surplus_list now contains _at_least_ the number of extra pages
1036          * needed to accommodate the reservation.  Add the appropriate number
1037          * of pages to the hugetlb pool and free the extras back to the buddy
1038          * allocator.  Commit the entire reservation here to prevent another
1039          * process from stealing the pages as they are added to the pool but
1040          * before they are reserved.
1041          */
1042         needed += allocated;
1043         h->resv_huge_pages += delta;
1044         ret = 0;
1045 
1046         /* Free the needed pages to the hugetlb pool */
1047         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1048                 if ((--needed) < 0)
1049                         break;
1050                 /*
1051                  * This page is now managed by the hugetlb allocator and has
1052                  * no users -- drop the buddy allocator's reference.
1053                  */
1054                 put_page_testzero(page);
1055                 VM_BUG_ON(page_count(page));
1056                 enqueue_huge_page(h, page);
1057         }
1058 free:
1059         spin_unlock(&hugetlb_lock);
1060 
1061         /* Free unnecessary surplus pages to the buddy allocator */
1062         if (!list_empty(&surplus_list)) {
1063                 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1064                         put_page(page);
1065                 }
1066         }
1067         spin_lock(&hugetlb_lock);
1068 
1069         return ret;
1070 }
1071 
1072 /*
1073  * This routine has two main purposes:
1074  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1075  *    in unused_resv_pages.  This corresponds to the prior adjustments made
1076  *    to the associated reservation map.
1077  * 2) Free any unused surplus pages that may have been allocated to satisfy
1078  *    the reservation.  As many as unused_resv_pages may be freed.
1079  *
1080  * Called with hugetlb_lock held.  However, the lock could be dropped (and
1081  * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
1082  * we must make sure nobody else can claim pages we are in the process of
1083  * freeing.  Do this by ensuring resv_huge_pages is always greater than the
1084  * number of huge pages we plan to free when dropping the lock.
1085  */
1086 static void return_unused_surplus_pages(struct hstate *h,
1087                                         unsigned long unused_resv_pages)
1088 {
1089         unsigned long nr_pages;
1090 
1091         /* Cannot return gigantic pages currently */
1092         if (h->order >= MAX_ORDER)
1093                 goto out;
1094 
1095         /*
1096          * Part (or even all) of the reservation could have been backed
1097          * by pre-allocated pages. Only free surplus pages.
1098          */
1099         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1100 
1101         /*
1102          * We want to release as many surplus pages as possible, spread
1103          * evenly across all nodes with memory. Iterate across these nodes
1104          * until we can no longer free unreserved surplus pages. This occurs
1105          * when the nodes with surplus pages have no free pages.
1106          * free_pool_huge_page() will balance the freed pages across the
1107          * on-line nodes with memory and will handle the hstate accounting.
1108          *
1109          * Note that we decrement resv_huge_pages as we free the pages.  If
1110          * we drop the lock, resv_huge_pages will still be sufficiently large
1111          * to cover subsequent pages we may free.
1112          */
1113         while (nr_pages--) {
1114                 h->resv_huge_pages--;
1115                 unused_resv_pages--;
1116                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1117                         goto out;
1118                 cond_resched_lock(&hugetlb_lock);
1119         }
1120 
1121 out:
1122         /* Fully uncommit the reservation */
1123         h->resv_huge_pages -= unused_resv_pages;
1124 }
1125 
1126 /*
1127  * Determine if the huge page at addr within the vma has an associated
1128  * reservation.  Where it does not, we will need to logically increase the
1129  * reservation and actually increase subpool usage before an allocation
1130  * can occur.  Where any new reservation would be required the
1131  * reservation change is prepared, but not committed.  Once the page
1132  * has been allocated from the subpool and instantiated the change should
1133  * be committed via vma_commit_reservation.  No action is required on
1134  * failure.
1135  */
1136 static long vma_needs_reservation(struct hstate *h,
1137                         struct vm_area_struct *vma, unsigned long addr)
1138 {
1139         struct address_space *mapping = vma->vm_file->f_mapping;
1140         struct inode *inode = mapping->host;
1141 
1142         if (vma->vm_flags & VM_MAYSHARE) {
1143                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1144                 return region_chg(&inode->i_mapping->private_list,
1145                                                         idx, idx + 1);
1146 
1147         } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1148                 return 1;
1149 
1150         } else  {
1151                 long err;
1152                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1153                 struct resv_map *reservations = vma_resv_map(vma);
1154 
1155                 err = region_chg(&reservations->regions, idx, idx + 1);
1156                 if (err < 0)
1157                         return err;
1158                 return 0;
1159         }
1160 }
1161 static void vma_commit_reservation(struct hstate *h,
1162                         struct vm_area_struct *vma, unsigned long addr)
1163 {
1164         struct address_space *mapping = vma->vm_file->f_mapping;
1165         struct inode *inode = mapping->host;
1166 
1167         if (vma->vm_flags & VM_MAYSHARE) {
1168                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1169                 region_add(&inode->i_mapping->private_list, idx, idx + 1);
1170 
1171         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1172                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1173                 struct resv_map *reservations = vma_resv_map(vma);
1174 
1175                 /* Mark this page used in the map. */
1176                 region_add(&reservations->regions, idx, idx + 1);
1177         }
1178 }
1179 
1180 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1181                                     unsigned long addr, int avoid_reserve)
1182 {
1183         struct hugepage_subpool *spool = subpool_vma(vma);
1184         struct hstate *h = hstate_vma(vma);
1185         struct page *page;
1186         long chg;
1187         int ret, idx;
1188         struct hugetlb_cgroup *h_cg;
1189 
1190         idx = hstate_index(h);
1191         /*
1192          * Processes that did not create the mapping will have no
1193          * reserves and will not have accounted against the subpool
1194          * limit. Check that the subpool limit can be met before
1195          * satisfying the allocation. MAP_NORESERVE mappings may also
1196          * need pages and a subpool limit allocation if no reserve
1197          * mapping overlaps.
1198          */
1199         chg = vma_needs_reservation(h, vma, addr);
1200         if (chg < 0)
1201                 return ERR_PTR(-ENOMEM);
1202         if (chg)
1203                 if (hugepage_subpool_get_pages(spool, chg))
1204                         return ERR_PTR(-ENOSPC);
1205 
1206         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1207         if (ret) {
1208                 hugepage_subpool_put_pages(spool, chg);
1209                 return ERR_PTR(-ENOSPC);
1210         }
1211         spin_lock(&hugetlb_lock);
1212         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1213         if (page) {
1214                 /* update page cgroup details */
1215                 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
1216                                              h_cg, page);
1217                 spin_unlock(&hugetlb_lock);
1218         } else {
1219                 spin_unlock(&hugetlb_lock);
1220                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1221                 if (!page) {
1222                         hugetlb_cgroup_uncharge_cgroup(idx,
1223                                                        pages_per_huge_page(h),
1224                                                        h_cg);
1225                         hugepage_subpool_put_pages(spool, chg);
1226                         return ERR_PTR(-ENOSPC);
1227                 }
1228                 spin_lock(&hugetlb_lock);
1229                 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
1230                                              h_cg, page);
1231                 list_move(&page->lru, &h->hugepage_activelist);
1232                 spin_unlock(&hugetlb_lock);
1233         }
1234 
1235         set_page_private(page, (unsigned long)spool);
1236 
1237         vma_commit_reservation(h, vma, addr);
1238         return page;
1239 }
1240 
1241 int __weak alloc_bootmem_huge_page(struct hstate *h)
1242 {
1243         struct huge_bootmem_page *m;
1244         int nr_nodes = nodes_weight(node_states[N_MEMORY]);
1245 
1246         while (nr_nodes) {
1247                 void *addr;
1248 
1249                 addr = __alloc_bootmem_node_nopanic(
1250                                 NODE_DATA(hstate_next_node_to_alloc(h,
1251                                                 &node_states[N_MEMORY])),
1252                                 huge_page_size(h), huge_page_size(h), 0);
1253 
1254                 if (addr) {
1255                         /*
1256                          * Use the beginning of the huge page to store the
1257                          * huge_bootmem_page struct (until gather_bootmem
1258                          * puts them into the mem_map).
1259                          */
1260                         m = addr;
1261                         goto found;
1262                 }
1263                 nr_nodes--;
1264         }
1265         return 0;
1266 
1267 found:
1268         BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1269         /* Put them into a private list first because mem_map is not up yet */
1270         list_add(&m->list, &huge_boot_pages);
1271         m->hstate = h;
1272         return 1;
1273 }
1274 
1275 static void prep_compound_huge_page(struct page *page, int order)
1276 {
1277         if (unlikely(order > (MAX_ORDER - 1)))
1278                 prep_compound_gigantic_page(page, order);
1279         else
1280                 prep_compound_page(page, order);
1281 }
1282 
1283 /* Put bootmem huge pages into the standard lists after mem_map is up */
1284 static void __init gather_bootmem_prealloc(void)
1285 {
1286         struct huge_bootmem_page *m;
1287 
1288         list_for_each_entry(m, &huge_boot_pages, list) {
1289                 struct hstate *h = m->hstate;
1290                 struct page *page;
1291 
1292 #ifdef CONFIG_HIGHMEM
1293                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
1294                 free_bootmem_late((unsigned long)m,
1295                                   sizeof(struct huge_bootmem_page));
1296 #else
1297                 page = virt_to_page(m);
1298 #endif
1299                 __ClearPageReserved(page);
1300                 WARN_ON(page_count(page) != 1);
1301                 prep_compound_huge_page(page, h->order);
1302                 prep_new_huge_page(h, page, page_to_nid(page));
1303                 /*
1304                  * If we had gigantic hugepages allocated at boot time, we need
1305                  * to restore the 'stolen' pages to totalram_pages in order to
1306                  * fix confusing memory reports from free(1) and other
1307                  * side effects, like CommitLimit going negative.
1308                  */
1309                 if (h->order > (MAX_ORDER - 1))
1310                         totalram_pages += 1 << h->order;
1311         }
1312 }
1313 
1314 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1315 {
1316         unsigned long i;
1317 
1318         for (i = 0; i < h->max_huge_pages; ++i) {
1319                 if (h->order >= MAX_ORDER) {
1320                         if (!alloc_bootmem_huge_page(h))
1321                                 break;
1322                 } else if (!alloc_fresh_huge_page(h,
1323                                          &node_states[N_MEMORY]))
1324                         break;
1325         }
1326         h->max_huge_pages = i;
1327 }
1328 
1329 static void __init hugetlb_init_hstates(void)
1330 {
1331         struct hstate *h;
1332 
1333         for_each_hstate(h) {
1334                 /* oversize hugepages were init'ed in early boot */
1335                 if (h->order < MAX_ORDER)
1336                         hugetlb_hstate_alloc_pages(h);
1337         }
1338 }
1339 
1340 static char * __init memfmt(char *buf, unsigned long n)
1341 {
1342         if (n >= (1UL << 30))
1343                 sprintf(buf, "%lu GB", n >> 30);
1344         else if (n >= (1UL << 20))
1345                 sprintf(buf, "%lu MB", n >> 20);
1346         else
1347                 sprintf(buf, "%lu KB", n >> 10);
1348         return buf;
1349 }
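
/*
 * Editor's note: the sketch below is an illustration added to this annotated
 * listing; it is not part of mm/hugetlb.c.  memfmt() picks the largest unit
 * that fits and truncates downwards, so the common huge page sizes print as
 * "2 MB" and "1 GB" in report_hugepages() below.
 */
static void __init __maybe_unused memfmt_usage_sketch(void)
{
        char buf[32];

        pr_info("%s\n", memfmt(buf, 2UL << 20));        /* prints "2 MB" */
        pr_info("%s\n", memfmt(buf, 1UL << 30));        /* prints "1 GB" */
}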
1350 
1351 static void __init report_hugepages(void)
1352 {
1353         struct hstate *h;
1354 
1355         for_each_hstate(h) {
1356                 char buf[32];
1357                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
1358                         memfmt(buf, huge_page_size(h)),
1359                         h->free_huge_pages);
1360         }
1361 }
1362 
1363 #ifdef CONFIG_HIGHMEM
1364 static void try_to_free_low(struct hstate *h, unsigned long count,
1365                                                 nodemask_t *nodes_allowed)
1366 {
1367         int i;
1368 
1369         if (h->order >= MAX_ORDER)
1370                 return;
1371 
1372         for_each_node_mask(i, *nodes_allowed) {
1373                 struct page *page, *next;
1374                 struct list_head *freel = &h->hugepage_freelists[i];
1375                 list_for_each_entry_safe(page, next, freel, lru) {
1376                         if (count >= h->nr_huge_pages)
1377                                 return;
1378                         if (PageHighMem(page))
1379                                 continue;
1380                         list_del(&page->lru);
1381                         update_and_free_page(h, page);
1382                         h->free_huge_pages--;
1383                         h->free_huge_pages_node[page_to_nid(page)]--;
1384                 }
1385         }
1386 }
1387 #else
1388 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1389                                                 nodemask_t *nodes_allowed)
1390 {
1391 }
1392 #endif
1393 
1394 /*
1395  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1396  * balanced by operating on them in a round-robin fashion.
1397  * Returns 1 if an adjustment was made.
1398  */
1399 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1400                                 int delta)
1401 {
1402         int start_nid, next_nid;
1403         int ret = 0;
1404 
1405         VM_BUG_ON(delta != -1 && delta != 1);
1406 
1407         if (delta < 0)
1408                 start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1409         else
1410                 start_nid = hstate_next_node_to_free(h, nodes_allowed);
1411         next_nid = start_nid;
1412 
1413         do {
1414                 int nid = next_nid;
1415                 if (delta < 0)  {
1416                         /*
1417                          * To shrink on this node, there must be a surplus page
1418                          */
1419                         if (!h->surplus_huge_pages_node[nid]) {
1420                                 next_nid = hstate_next_node_to_alloc(h,
1421                                                                 nodes_allowed);
1422                                 continue;
1423                         }
1424                 }
1425                 if (delta > 0) {
1426                         /*
1427                          * Surplus cannot exceed the total number of pages
1428                          */
1429                         if (h->surplus_huge_pages_node[nid] >=
1430                                                 h->nr_huge_pages_node[nid]) {
1431                                 next_nid = hstate_next_node_to_free(h,
1432                                                                 nodes_allowed);
1433                                 continue;
1434                         }
1435                 }
1436 
1437                 h->surplus_huge_pages += delta;
1438                 h->surplus_huge_pages_node[nid] += delta;
1439                 ret = 1;
1440                 break;
1441         } while (next_nid != start_nid);
1442 
1443         return ret;
1444 }
1445 
1446 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1447 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1448                                                 nodemask_t *nodes_allowed)
1449 {
1450         unsigned long min_count, ret;
1451 
1452         if (h->order >= MAX_ORDER)
1453                 return h->max_huge_pages;
1454 
1455         /*
1456          * Increase the pool size
1457          * First take pages out of surplus state.  Then make up the
1458          * remaining difference by allocating fresh huge pages.
1459          *
1460          * We might race with alloc_buddy_huge_page() here and be unable
1461          * to convert a surplus huge page to a normal huge page. That is
1462          * not critical, though, it just means the overall size of the
1463          * pool might be one hugepage larger than it needs to be, but
1464          * within all the constraints specified by the sysctls.
1465          */
1466         spin_lock(&hugetlb_lock);
1467         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1468                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1469                         break;
1470         }
1471 
1472         while (count > persistent_huge_pages(h)) {
1473                 /*
1474                  * If this allocation races such that we no longer need the
1475                  * page, free_huge_page will handle it by freeing the page
1476                  * and reducing the surplus.
1477                  */
1478                 spin_unlock(&hugetlb_lock);
1479                 ret = alloc_fresh_huge_page(h, nodes_allowed);
1480                 spin_lock(&hugetlb_lock);
1481                 if (!ret)
1482                         goto out;
1483 
1484                 /* Bail for signals. Probably ctrl-c from user */
1485                 if (signal_pending(current))
1486                         goto out;
1487         }
1488 
1489         /*
1490          * Decrease the pool size
1491          * First return free pages to the buddy allocator (being careful
1492          * to keep enough around to satisfy reservations).  Then place
1493          * pages into surplus state as needed so the pool will shrink
1494          * to the desired size as pages become free.
1495          *
1496          * By placing pages into the surplus state independent of the
1497          * overcommit value, we are allowing the surplus pool size to
1498          * exceed overcommit. There are few sane options here. Since
1499          * alloc_buddy_huge_page() is checking the global counter,
1500          * though, we'll note that we're not allowed to exceed surplus
1501          * and won't grow the pool anywhere else. Not until one of the
 1502          * sysctls is changed, or the surplus pages go out of use.
1503          */
1504         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1505         min_count = max(count, min_count);
1506         try_to_free_low(h, min_count, nodes_allowed);
1507         while (min_count < persistent_huge_pages(h)) {
1508                 if (!free_pool_huge_page(h, nodes_allowed, 0))
1509                         break;
1510                 cond_resched_lock(&hugetlb_lock);
1511         }
1512         while (count < persistent_huge_pages(h)) {
1513                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1514                         break;
1515         }
1516 out:
1517         ret = persistent_huge_pages(h);
1518         spin_unlock(&hugetlb_lock);
1519         return ret;
1520 }
1521 
1522 #define HSTATE_ATTR_RO(_name) \
1523         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1524 
1525 #define HSTATE_ATTR(_name) \
1526         static struct kobj_attribute _name##_attr = \
1527                 __ATTR(_name, 0644, _name##_show, _name##_store)
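/*
 * For illustration, HSTATE_ATTR(nr_hugepages) below expands to
 *
 *     static struct kobj_attribute nr_hugepages_attr =
 *             __ATTR(nr_hugepages, 0644, nr_hugepages_show, nr_hugepages_store);
 *
 * i.e. a read-write sysfs attribute wired to the _show/_store pair of the
 * same name.
 */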
1528 
1529 static struct kobject *hugepages_kobj;
1530 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1531 
1532 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1533 
1534 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1535 {
1536         int i;
1537 
1538         for (i = 0; i < HUGE_MAX_HSTATE; i++)
1539                 if (hstate_kobjs[i] == kobj) {
1540                         if (nidp)
1541                                 *nidp = NUMA_NO_NODE;
1542                         return &hstates[i];
1543                 }
1544 
1545         return kobj_to_node_hstate(kobj, nidp);
1546 }
1547 
1548 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1549                                         struct kobj_attribute *attr, char *buf)
1550 {
1551         struct hstate *h;
1552         unsigned long nr_huge_pages;
1553         int nid;
1554 
1555         h = kobj_to_hstate(kobj, &nid);
1556         if (nid == NUMA_NO_NODE)
1557                 nr_huge_pages = h->nr_huge_pages;
1558         else
1559                 nr_huge_pages = h->nr_huge_pages_node[nid];
1560 
1561         return sprintf(buf, "%lu\n", nr_huge_pages);
1562 }
1563 
1564 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1565                         struct kobject *kobj, struct kobj_attribute *attr,
1566                         const char *buf, size_t len)
1567 {
1568         int err;
1569         int nid;
1570         unsigned long count;
1571         struct hstate *h;
1572         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1573 
1574         err = strict_strtoul(buf, 10, &count);
1575         if (err)
1576                 goto out;
1577 
1578         h = kobj_to_hstate(kobj, &nid);
1579         if (h->order >= MAX_ORDER) {
1580                 err = -EINVAL;
1581                 goto out;
1582         }
1583 
1584         if (nid == NUMA_NO_NODE) {
1585                 /*
1586                  * global hstate attribute
1587                  */
1588                 if (!(obey_mempolicy &&
1589                                 init_nodemask_of_mempolicy(nodes_allowed))) {
1590                         NODEMASK_FREE(nodes_allowed);
1591                         nodes_allowed = &node_states[N_MEMORY];
1592                 }
1593         } else if (nodes_allowed) {
1594                 /*
1595                  * per node hstate attribute: adjust count to global,
1596                  * but restrict alloc/free to the specified node.
1597                  */
1598                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1599                 init_nodemask_of_node(nodes_allowed, nid);
1600         } else
1601                 nodes_allowed = &node_states[N_MEMORY];
1602 
1603         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1604 
1605         if (nodes_allowed != &node_states[N_MEMORY])
1606                 NODEMASK_FREE(nodes_allowed);
1607 
1608         return len;
1609 out:
1610         NODEMASK_FREE(nodes_allowed);
1611         return err;
1612 }
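/*
 * Example usage (a sketch; assumes sysfs is mounted at /sys and a 2MB
 * hstate is present):
 *
 *   # global hstate attribute: adjust the pool across all allowed nodes
 *   echo 128 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 *   # per node hstate attribute: the count is folded into the global
 *   # total, but alloc/free is restricted to node 1
 *   echo 64 > /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages
 */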
1613 
1614 static ssize_t nr_hugepages_show(struct kobject *kobj,
1615                                        struct kobj_attribute *attr, char *buf)
1616 {
1617         return nr_hugepages_show_common(kobj, attr, buf);
1618 }
1619 
1620 static ssize_t nr_hugepages_store(struct kobject *kobj,
1621                struct kobj_attribute *attr, const char *buf, size_t len)
1622 {
1623         return nr_hugepages_store_common(false, kobj, attr, buf, len);
1624 }
1625 HSTATE_ATTR(nr_hugepages);
1626 
1627 #ifdef CONFIG_NUMA
1628 
1629 /*
1630  * hstate attribute for optionally mempolicy-based constraint on persistent
1631  * huge page alloc/free.
1632  */
1633 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1634                                        struct kobj_attribute *attr, char *buf)
1635 {
1636         return nr_hugepages_show_common(kobj, attr, buf);
1637 }
1638 
1639 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1640                struct kobj_attribute *attr, const char *buf, size_t len)
1641 {
1642         return nr_hugepages_store_common(true, kobj, attr, buf, len);
1643 }
1644 HSTATE_ATTR(nr_hugepages_mempolicy);
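/*
 * Example (a sketch, assuming numactl is available): when written under a
 * NUMA memory policy, the adjustment is constrained to the policy's nodes:
 *
 *   numactl -m 0,1 sh -c \
 *     'echo 32 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'
 */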
1645 #endif
1646 
1647 
1648 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1649                                         struct kobj_attribute *attr, char *buf)
1650 {
1651         struct hstate *h = kobj_to_hstate(kobj, NULL);
1652         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1653 }
1654 
1655 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1656                 struct kobj_attribute *attr, const char *buf, size_t count)
1657 {
1658         int err;
1659         unsigned long input;
1660         struct hstate *h = kobj_to_hstate(kobj, NULL);
1661 
1662         if (h->order >= MAX_ORDER)
1663                 return -EINVAL;
1664 
1665         err = strict_strtoul(buf, 10, &input);
1666         if (err)
1667                 return err;
1668 
1669         spin_lock(&hugetlb_lock);
1670         h->nr_overcommit_huge_pages = input;
1671         spin_unlock(&hugetlb_lock);
1672 
1673         return count;
1674 }
1675 HSTATE_ATTR(nr_overcommit_hugepages);
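/*
 * Example (sketch): allow up to 32 additional 2MB surplus pages to be
 * allocated from the buddy allocator on demand:
 *
 *   echo 32 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 */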
1676 
1677 static ssize_t free_hugepages_show(struct kobject *kobj,
1678                                         struct kobj_attribute *attr, char *buf)
1679 {
1680         struct hstate *h;
1681         unsigned long free_huge_pages;
1682         int nid;
1683 
1684         h = kobj_to_hstate(kobj, &nid);
1685         if (nid == NUMA_NO_NODE)
1686                 free_huge_pages = h->free_huge_pages;
1687         else
1688                 free_huge_pages = h->free_huge_pages_node[nid];
1689 
1690         return sprintf(buf, "%lu\n", free_huge_pages);
1691 }
1692 HSTATE_ATTR_RO(free_hugepages);
1693 
1694 static ssize_t resv_hugepages_show(struct kobject *kobj,
1695                                         struct kobj_attribute *attr, char *buf)
1696 {
1697         struct hstate *h = kobj_to_hstate(kobj, NULL);
1698         return sprintf(buf, "%lu\n", h->resv_huge_pages);
1699 }
1700 HSTATE_ATTR_RO(resv_hugepages);
1701 
1702 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1703                                         struct kobj_attribute *attr, char *buf)
1704 {
1705         struct hstate *h;
1706         unsigned long surplus_huge_pages;
1707         int nid;
1708 
1709         h = kobj_to_hstate(kobj, &nid);
1710         if (nid == NUMA_NO_NODE)
1711                 surplus_huge_pages = h->surplus_huge_pages;
1712         else
1713                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
1714 
1715         return sprintf(buf, "%lu\n", surplus_huge_pages);
1716 }
1717 HSTATE_ATTR_RO(surplus_hugepages);
1718 
1719 static struct attribute *hstate_attrs[] = {
1720         &nr_hugepages_attr.attr,
1721         &nr_overcommit_hugepages_attr.attr,
1722         &free_hugepages_attr.attr,
1723         &resv_hugepages_attr.attr,
1724         &surplus_hugepages_attr.attr,
1725 #ifdef CONFIG_NUMA
1726         &nr_hugepages_mempolicy_attr.attr,
1727 #endif
1728         NULL,
1729 };
1730 
1731 static struct attribute_group hstate_attr_group = {
1732         .attrs = hstate_attrs,
1733 };
1734 
1735 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1736                                     struct kobject **hstate_kobjs,
1737                                     struct attribute_group *hstate_attr_group)
1738 {
1739         int retval;
1740         int hi = hstate_index(h);
1741 
1742         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1743         if (!hstate_kobjs[hi])
1744                 return -ENOMEM;
1745 
1746         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1747         if (retval)
1748                 kobject_put(hstate_kobjs[hi]);
1749 
1750         return retval;
1751 }
1752 
1753 static void __init hugetlb_sysfs_init(void)
1754 {
1755         struct hstate *h;
1756         int err;
1757 
1758         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1759         if (!hugepages_kobj)
1760                 return;
1761 
1762         for_each_hstate(h) {
1763                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1764                                          hstate_kobjs, &hstate_attr_group);
1765                 if (err)
1766                         pr_err("Hugetlb: Unable to add hstate %s", h->name);
1767         }
1768 }
1769 
1770 #ifdef CONFIG_NUMA
1771 
1772 /*
1773  * node_hstate/s - associate per node hstate attributes, via their kobjects,
1774  * with node devices in node_devices[] using a parallel array.  The array
1775  * index of a node device or _hstate == node id.
1776  * This is here to avoid any static dependency of the node device driver, in
1777  * the base kernel, on the hugetlb module.
1778  */
1779 struct node_hstate {
1780         struct kobject          *hugepages_kobj;
1781         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
1782 };
1783 struct node_hstate node_hstates[MAX_NUMNODES];
1784 
1785 /*
1786  * A subset of global hstate attributes for node devices
1787  */
1788 static struct attribute *per_node_hstate_attrs[] = {
1789         &nr_hugepages_attr.attr,
1790         &free_hugepages_attr.attr,
1791         &surplus_hugepages_attr.attr,
1792         NULL,
1793 };
1794 
1795 static struct attribute_group per_node_hstate_attr_group = {
1796         .attrs = per_node_hstate_attrs,
1797 };
1798 
1799 /*
1800  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
1801  * Returns node id via non-NULL nidp.
1802  */
1803 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1804 {
1805         int nid;
1806 
1807         for (nid = 0; nid < nr_node_ids; nid++) {
1808                 struct node_hstate *nhs = &node_hstates[nid];
1809                 int i;
1810                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1811                         if (nhs->hstate_kobjs[i] == kobj) {
1812                                 if (nidp)
1813                                         *nidp = nid;
1814                                 return &hstates[i];
1815                         }
1816         }
1817 
1818         BUG();
1819         return NULL;
1820 }
1821 
1822 /*
1823  * Unregister hstate attributes from a single node device.
1824  * No-op if no hstate attributes attached.
1825  */
1826 static void hugetlb_unregister_node(struct node *node)
1827 {
1828         struct hstate *h;
1829         struct node_hstate *nhs = &node_hstates[node->dev.id];
1830 
1831         if (!nhs->hugepages_kobj)
1832                 return;         /* no hstate attributes */
1833 
1834         for_each_hstate(h) {
1835                 int idx = hstate_index(h);
1836                 if (nhs->hstate_kobjs[idx]) {
1837                         kobject_put(nhs->hstate_kobjs[idx]);
1838                         nhs->hstate_kobjs[idx] = NULL;
1839                 }
1840         }
1841 
1842         kobject_put(nhs->hugepages_kobj);
1843         nhs->hugepages_kobj = NULL;
1844 }
1845 
1846 /*
1847  * hugetlb module exit:  unregister hstate attributes from node devices
1848  * that have them.
1849  */
1850 static void hugetlb_unregister_all_nodes(void)
1851 {
1852         int nid;
1853 
1854         /*
1855          * disable node device registrations.
1856          */
1857         register_hugetlbfs_with_node(NULL, NULL);
1858 
1859         /*
1860          * remove hstate attributes from any nodes that have them.
1861          */
1862         for (nid = 0; nid < nr_node_ids; nid++)
1863                 hugetlb_unregister_node(node_devices[nid]);
1864 }
1865 
1866 /*
1867  * Register hstate attributes for a single node device.
1868  * No-op if attributes already registered.
1869  */
1870 static void hugetlb_register_node(struct node *node)
1871 {
1872         struct hstate *h;
1873         struct node_hstate *nhs = &node_hstates[node->dev.id];
1874         int err;
1875 
1876         if (nhs->hugepages_kobj)
1877                 return;         /* already allocated */
1878 
1879         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1880                                                         &node->dev.kobj);
1881         if (!nhs->hugepages_kobj)
1882                 return;
1883 
1884         for_each_hstate(h) {
1885                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1886                                                 nhs->hstate_kobjs,
1887                                                 &per_node_hstate_attr_group);
1888                 if (err) {
1889                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
1890                                 h->name, node->dev.id);
1891                         hugetlb_unregister_node(node);
1892                         break;
1893                 }
1894         }
1895 }
1896 
1897 /*
1898  * hugetlb init time:  register hstate attributes for all registered node
1899  * devices of nodes that have memory.  All on-line nodes should have
1900  * registered their associated device by this time.
1901  */
1902 static void hugetlb_register_all_nodes(void)
1903 {
1904         int nid;
1905 
1906         for_each_node_state(nid, N_MEMORY) {
1907                 struct node *node = node_devices[nid];
1908                 if (node->dev.id == nid)
1909                         hugetlb_register_node(node);
1910         }
1911 
1912         /*
1913          * Let the node device driver know we're here so it can
1914          * [un]register hstate attributes on node hotplug.
1915          */
1916         register_hugetlbfs_with_node(hugetlb_register_node,
1917                                      hugetlb_unregister_node);
1918 }
1919 #else   /* !CONFIG_NUMA */
1920 
1921 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1922 {
1923         BUG();
1924         if (nidp)
1925                 *nidp = -1;
1926         return NULL;
1927 }
1928 
1929 static void hugetlb_unregister_all_nodes(void) { }
1930 
1931 static void hugetlb_register_all_nodes(void) { }
1932 
1933 #endif
1934 
1935 static void __exit hugetlb_exit(void)
1936 {
1937         struct hstate *h;
1938 
1939         hugetlb_unregister_all_nodes();
1940 
1941         for_each_hstate(h) {
1942                 kobject_put(hstate_kobjs[hstate_index(h)]);
1943         }
1944 
1945         kobject_put(hugepages_kobj);
1946 }
1947 module_exit(hugetlb_exit);
1948 
1949 static int __init hugetlb_init(void)
1950 {
 1951         /* Some platforms decide whether they support huge pages at boot
 1952          * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
 1953          * there is no such support.
1954          */
1955         if (HPAGE_SHIFT == 0)
1956                 return 0;
1957 
1958         if (!size_to_hstate(default_hstate_size)) {
1959                 default_hstate_size = HPAGE_SIZE;
1960                 if (!size_to_hstate(default_hstate_size))
1961                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1962         }
1963         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
1964         if (default_hstate_max_huge_pages)
1965                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1966 
1967         hugetlb_init_hstates();
1968         gather_bootmem_prealloc();
1969         report_hugepages();
1970 
1971         hugetlb_sysfs_init();
1972         hugetlb_register_all_nodes();
1973         hugetlb_cgroup_file_init();
1974 
1975         return 0;
1976 }
1977 module_init(hugetlb_init);
1978 
1979 /* Should be called on processing a hugepagesz=... option */
1980 void __init hugetlb_add_hstate(unsigned order)
1981 {
1982         struct hstate *h;
1983         unsigned long i;
1984 
1985         if (size_to_hstate(PAGE_SIZE << order)) {
1986                 pr_warning("hugepagesz= specified twice, ignoring\n");
1987                 return;
1988         }
1989         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
1990         BUG_ON(order == 0);
1991         h = &hstates[hugetlb_max_hstate++];
1992         h->order = order;
1993         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1994         h->nr_huge_pages = 0;
1995         h->free_huge_pages = 0;
1996         for (i = 0; i < MAX_NUMNODES; ++i)
1997                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1998         INIT_LIST_HEAD(&h->hugepage_activelist);
1999         h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2000         h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2001         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2002                                         huge_page_size(h)/1024);
2003 
2004         parsed_hstate = h;
2005 }
2006 
2007 static int __init hugetlb_nrpages_setup(char *s)
2008 {
2009         unsigned long *mhp;
2010         static unsigned long *last_mhp;
2011 
2012         /*
2013          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2014          * so this hugepages= parameter goes to the "default hstate".
2015          */
2016         if (!hugetlb_max_hstate)
2017                 mhp = &default_hstate_max_huge_pages;
2018         else
2019                 mhp = &parsed_hstate->max_huge_pages;
2020 
2021         if (mhp == last_mhp) {
2022                 pr_warning("hugepages= specified twice without "
2023                            "interleaving hugepagesz=, ignoring\n");
2024                 return 1;
2025         }
2026 
2027         if (sscanf(s, "%lu", mhp) <= 0)
2028                 *mhp = 0;
2029 
2030         /*
2031          * Global state is always initialized later in hugetlb_init.
2032          * But we need to allocate >= MAX_ORDER hstates here early to still
2033          * use the bootmem allocator.
2034          */
2035         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2036                 hugetlb_hstate_alloc_pages(parsed_hstate);
2037 
2038         last_mhp = mhp;
2039 
2040         return 1;
2041 }
2042 __setup("hugepages=", hugetlb_nrpages_setup);
2043 
2044 static int __init hugetlb_default_setup(char *s)
2045 {
2046         default_hstate_size = memparse(s, &s);
2047         return 1;
2048 }
2049 __setup("default_hugepagesz=", hugetlb_default_setup);
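/*
 * Example kernel command line combining these options (sizes are
 * architecture-dependent; 2M and 1G are typical for x86_64):
 *
 *   default_hugepagesz=2M hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4
 *
 * Each hugepages= applies to the most recently parsed hugepagesz=; a
 * hugepages= with no preceding hugepagesz= applies to the default hstate.
 */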
2050 
2051 static unsigned int cpuset_mems_nr(unsigned int *array)
2052 {
2053         int node;
2054         unsigned int nr = 0;
2055 
2056         for_each_node_mask(node, cpuset_current_mems_allowed)
2057                 nr += array[node];
2058 
2059         return nr;
2060 }
2061 
2062 #ifdef CONFIG_SYSCTL
2063 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2064                          struct ctl_table *table, int write,
2065                          void __user *buffer, size_t *length, loff_t *ppos)
2066 {
2067         struct hstate *h = &default_hstate;
2068         unsigned long tmp;
2069         int ret;
2070 
2071         tmp = h->max_huge_pages;
2072 
2073         if (write && h->order >= MAX_ORDER)
2074                 return -EINVAL;
2075 
2076         table->data = &tmp;
2077         table->maxlen = sizeof(unsigned long);
2078         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2079         if (ret)
2080                 goto out;
2081 
2082         if (write) {
2083                 NODEMASK_ALLOC(nodemask_t, nodes_allowed,
2084                                                 GFP_KERNEL | __GFP_NORETRY);
2085                 if (!(obey_mempolicy &&
2086                                init_nodemask_of_mempolicy(nodes_allowed))) {
2087                         NODEMASK_FREE(nodes_allowed);
2088                         nodes_allowed = &node_states[N_MEMORY];
2089                 }
2090                 h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
2091 
2092                 if (nodes_allowed != &node_states[N_MEMORY])
2093                         NODEMASK_FREE(nodes_allowed);
2094         }
2095 out:
2096         return ret;
2097 }
2098 
2099 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2100                           void __user *buffer, size_t *length, loff_t *ppos)
2101 {
2102 
2103         return hugetlb_sysctl_handler_common(false, table, write,
2104                                                         buffer, length, ppos);
2105 }
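/*
 * Example (sketch): this handler is typically wired to the vm.nr_hugepages
 * sysctl, so the default-hstate pool can also be resized via procfs:
 *
 *   sysctl vm.nr_hugepages=256
 *   echo 256 > /proc/sys/vm/nr_hugepages
 */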
2106 
2107 #ifdef CONFIG_NUMA
2108 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2109                           void __user *buffer, size_t *length, loff_t *ppos)
2110 {
2111         return hugetlb_sysctl_handler_common(true, table, write,
2112                                                         buffer, length, ppos);
2113 }
2114 #endif /* CONFIG_NUMA */
2115 
2116 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
2117                         void __user *buffer,
2118                         size_t *length, loff_t *ppos)
2119 {
2120         proc_dointvec(table, write, buffer, length, ppos);
2121         if (hugepages_treat_as_movable)
2122                 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
2123         else
2124                 htlb_alloc_mask = GFP_HIGHUSER;
2125         return 0;
2126 }
2127 
2128 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2129                         void __user *buffer,
2130                         size_t *length, loff_t *ppos)
2131 {
2132         struct hstate *h = &default_hstate;
2133         unsigned long tmp;
2134         int ret;
2135 
2136         tmp = h->nr_overcommit_huge_pages;
2137 
2138         if (write && h->order >= MAX_ORDER)
2139                 return -EINVAL;
2140 
2141         table->data = &tmp;
2142         table->maxlen = sizeof(unsigned long);
2143         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2144         if (ret)
2145                 goto out;
2146 
2147         if (write) {
2148                 spin_lock(&hugetlb_lock);
2149                 h->nr_overcommit_huge_pages = tmp;
2150                 spin_unlock(&hugetlb_lock);
2151         }
2152 out:
2153         return ret;
2154 }
2155 
2156 #endif /* CONFIG_SYSCTL */
2157 
2158 void hugetlb_report_meminfo(struct seq_file *m)
2159 {
2160         struct hstate *h = &default_hstate;
2161         seq_printf(m,
2162                         "HugePages_Total:   %5lu\n"
2163                         "HugePages_Free:    %5lu\n"
2164                         "HugePages_Rsvd:    %5lu\n"
2165                         "HugePages_Surp:    %5lu\n"
2166                         "Hugepagesize:   %8lu kB\n",
2167                         h->nr_huge_pages,
2168                         h->free_huge_pages,
2169                         h->resv_huge_pages,
2170                         h->surplus_huge_pages,
2171                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2172 }
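/*
 * Example /proc/meminfo fragment produced by the above (values are
 * illustrative, for a 2MB default hstate):
 *
 *   HugePages_Total:     128
 *   HugePages_Free:      120
 *   HugePages_Rsvd:        8
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 */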
2173 
2174 int hugetlb_report_node_meminfo(int nid, char *buf)
2175 {
2176         struct hstate *h = &default_hstate;
2177         return sprintf(buf,
2178                 "Node %d HugePages_Total: %5u\n"
2179                 "Node %d HugePages_Free:  %5u\n"
2180                 "Node %d HugePages_Surp:  %5u\n",
2181                 nid, h->nr_huge_pages_node[nid],
2182                 nid, h->free_huge_pages_node[nid],
2183                 nid, h->surplus_huge_pages_node[nid]);
2184 }
2185 
2186 void hugetlb_show_meminfo(void)
2187 {
2188         struct hstate *h;
2189         int nid;
2190 
2191         for_each_node_state(nid, N_MEMORY)
2192                 for_each_hstate(h)
2193                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2194                                 nid,
2195                                 h->nr_huge_pages_node[nid],
2196                                 h->free_huge_pages_node[nid],
2197                                 h->surplus_huge_pages_node[nid],
2198                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2199 }
2200 
 2201 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2202 unsigned long hugetlb_total_pages(void)
2203 {
2204         struct hstate *h;
2205         unsigned long nr_total_pages = 0;
2206 
2207         for_each_hstate(h)
2208                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2209         return nr_total_pages;
2210 }
2211 
2212 static int hugetlb_acct_memory(struct hstate *h, long delta)
2213 {
2214         int ret = -ENOMEM;
2215 
2216         spin_lock(&hugetlb_lock);
2217         /*
 2218          * When cpusets are configured, they break the strict hugetlb page
 2219          * reservation as the accounting is done on a global variable. Such
 2220          * a reservation is completely rubbish in the presence of cpusets
 2221          * because the reservation is not checked against page availability
 2222          * for the current cpuset. An application can still be OOM'ed by the
 2223          * kernel for lack of a free hugetlb page in the cpuset the task is in.
 2224          * Attempting to enforce strict accounting with cpusets is almost
 2225          * impossible (or too ugly) because cpusets are so fluid that tasks
 2226          * or memory nodes can be dynamically moved between cpusets.
2227          *
 2228          * The change of semantics for shared hugetlb mappings with cpusets is
 2229          * undesirable. However, in order to preserve some of the semantics,
 2230          * we fall back to checking against the current free page availability
 2231          * as a best-effort attempt, hopefully minimizing the impact of the
 2232          * semantic change that cpusets introduce.
2233          */
2234         if (delta > 0) {
2235                 if (gather_surplus_pages(h, delta) < 0)
2236                         goto out;
2237 
2238                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2239                         return_unused_surplus_pages(h, delta);
2240                         goto out;
2241                 }
2242         }
2243 
2244         ret = 0;
2245         if (delta < 0)
2246                 return_unused_surplus_pages(h, (unsigned long) -delta);
2247 
2248 out:
2249         spin_unlock(&hugetlb_lock);
2250         return ret;
2251 }
2252 
2253 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2254 {
2255         struct resv_map *reservations = vma_resv_map(vma);
2256 
2257         /*
 2258          * This new VMA should share its sibling's reservation map if present.
2259          * The VMA will only ever have a valid reservation map pointer where
2260          * it is being copied for another still existing VMA.  As that VMA
2261          * has a reference to the reservation map it cannot disappear until
2262          * after this open call completes.  It is therefore safe to take a
2263          * new reference here without additional locking.
2264          */
2265         if (reservations)
2266                 kref_get(&reservations->refs);
2267 }
2268 
2269 static void resv_map_put(struct vm_area_struct *vma)
2270 {
2271         struct resv_map *reservations = vma_resv_map(vma);
2272 
2273         if (!reservations)
2274                 return;
2275         kref_put(&reservations->refs, resv_map_release);
2276 }
2277 
2278 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2279 {
2280         struct hstate *h = hstate_vma(vma);
2281         struct resv_map *reservations = vma_resv_map(vma);
2282         struct hugepage_subpool *spool = subpool_vma(vma);
2283         unsigned long reserve;
2284         unsigned long start;
2285         unsigned long end;
2286 
2287         if (reservations) {
2288                 start = vma_hugecache_offset(h, vma, vma->vm_start);
2289                 end = vma_hugecache_offset(h, vma, vma->vm_end);
2290 
2291                 reserve = (end - start) -
2292                         region_count(&reservations->regions, start, end);
2293 
2294                 resv_map_put(vma);
2295 
2296                 if (reserve) {
2297                         hugetlb_acct_memory(h, -reserve);
2298                         hugepage_subpool_put_pages(spool, reserve);
2299                 }
2300         }
2301 }
2302 
2303 /*
2304  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2305  * handle_mm_fault() to try to instantiate regular-sized pages in the
 2306  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2307  * this far.
2308  */
2309 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2310 {
2311         BUG();
2312         return 0;
2313 }
2314 
2315 const struct vm_operations_struct hugetlb_vm_ops = {
2316         .fault = hugetlb_vm_op_fault,
2317         .open = hugetlb_vm_op_open,
2318         .close = hugetlb_vm_op_close,
2319 };
2320 
2321 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2322                                 int writable)
2323 {
2324         pte_t entry;
2325 
2326         if (writable) {
2327                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2328                                          vma->vm_page_prot)));
2329         } else {
2330                 entry = huge_pte_wrprotect(mk_huge_pte(page,
2331                                            vma->vm_page_prot));
2332         }
2333         entry = pte_mkyoung(entry);
2334         entry = pte_mkhuge(entry);
2335         entry = arch_make_huge_pte(entry, vma, page, writable);
2336 
2337         return entry;
2338 }
2339 
2340 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2341                                    unsigned long address, pte_t *ptep)
2342 {
2343         pte_t entry;
2344 
2345         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
2346         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2347                 update_mmu_cache(vma, address, ptep);
2348 }
2349 
2350 static int is_hugetlb_entry_migration(pte_t pte)
2351 {
2352         swp_entry_t swp;
2353 
2354         if (huge_pte_none(pte) || pte_present(pte))
2355                 return 0;
2356         swp = pte_to_swp_entry(pte);
2357         if (non_swap_entry(swp) && is_migration_entry(swp))
2358                 return 1;
2359         else
2360                 return 0;
2361 }
2362 
2363 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2364 {
2365         swp_entry_t swp;
2366 
2367         if (huge_pte_none(pte) || pte_present(pte))
2368                 return 0;
2369         swp = pte_to_swp_entry(pte);
2370         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2371                 return 1;
2372         else
2373                 return 0;
2374 }
2375 
2376 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2377                             struct vm_area_struct *vma)
2378 {
2379         pte_t *src_pte, *dst_pte, entry;
2380         struct page *ptepage;
2381         unsigned long addr;
2382         int cow;
2383         struct hstate *h = hstate_vma(vma);
2384         unsigned long sz = huge_page_size(h);
2385 
2386         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2387 
2388         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2389                 src_pte = huge_pte_offset(src, addr);
2390                 if (!src_pte)
2391                         continue;
2392                 dst_pte = huge_pte_alloc(dst, addr, sz);
2393                 if (!dst_pte)
2394                         goto nomem;
2395 
 2396                 /* If the pagetables are shared, don't copy or take references */
2397                 if (dst_pte == src_pte)
2398                         continue;
2399 
2400                 spin_lock(&dst->page_table_lock);
2401                 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2402                 entry = huge_ptep_get(src_pte);
2403                 if (huge_pte_none(entry)) { /* skip none entry */
2404                         ;
2405                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
2406                                     is_hugetlb_entry_hwpoisoned(entry))) {
2407                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
2408 
2409                         if (is_write_migration_entry(swp_entry) && cow) {
2410                                 /*
2411                                  * COW mappings require pages in both
2412                                  * parent and child to be set to read.
2413                                  */
2414                                 make_migration_entry_read(&swp_entry);
2415                                 entry = swp_entry_to_pte(swp_entry);
2416                                 set_huge_pte_at(src, addr, src_pte, entry);
2417                         }
2418                         set_huge_pte_at(dst, addr, dst_pte, entry);
2419                 } else {
2420                         if (cow)
2421                                 huge_ptep_set_wrprotect(src, addr, src_pte);
2422                         entry = huge_ptep_get(src_pte);
2423                         ptepage = pte_page(entry);
2424                         get_page(ptepage);
2425                         page_dup_rmap(ptepage);
2426                         set_huge_pte_at(dst, addr, dst_pte, entry);
2427                 }
2428                 spin_unlock(&src->page_table_lock);
2429                 spin_unlock(&dst->page_table_lock);
2430         }
2431         return 0;
2432 
2433 nomem:
2434         return -ENOMEM;
2435 }
2436 
2437 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2438                             unsigned long start, unsigned long end,
2439                             struct page *ref_page)
2440 {
2441         int force_flush = 0;
2442         struct mm_struct *mm = vma->vm_mm;
2443         unsigned long address;
2444         pte_t *ptep;
2445         pte_t pte;
2446         struct page *page;
2447         struct hstate *h = hstate_vma(vma);
2448         unsigned long sz = huge_page_size(h);
2449         const unsigned long mmun_start = start; /* For mmu_notifiers */
2450         const unsigned long mmun_end   = end;   /* For mmu_notifiers */
2451 
2452         WARN_ON(!is_vm_hugetlb_page(vma));
2453         BUG_ON(start & ~huge_page_mask(h));
2454         BUG_ON(end & ~huge_page_mask(h));
2455 
2456         tlb_start_vma(tlb, vma);
2457         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2458 again:
2459         spin_lock(&mm->page_table_lock);
2460         for (address = start; address < end; address += sz) {
2461                 ptep = huge_pte_offset(mm, address);
2462                 if (!ptep)
2463                         continue;
2464 
2465                 if (huge_pmd_unshare(mm, &address, ptep))
2466                         continue;
2467 
2468                 pte = huge_ptep_get(ptep);
2469                 if (huge_pte_none(pte))
2470                         continue;
2471 
2472                 /*
2473                  * Migrating hugepage or HWPoisoned hugepage is already
2474                  * unmapped and its refcount is dropped, so just clear pte here.
2475                  */
2476                 if (unlikely(!pte_present(pte))) {
2477                         huge_pte_clear(mm, address, ptep);
2478                         continue;
2479                 }
2480 
2481                 page = pte_page(pte);
2482                 /*
2483                  * If a reference page is supplied, it is because a specific
2484                  * page is being unmapped, not a range. Ensure the page we
2485                  * are about to unmap is the actual page of interest.
2486                  */
2487                 if (ref_page) {
2488                         if (page != ref_page)
2489                                 continue;
2490 
2491                         /*
2492                          * Mark the VMA as having unmapped its page so that
2493                          * future faults in this VMA will fail rather than
2494                          * looking like data was lost
2495                          */
2496                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2497                 }
2498 
2499                 pte = huge_ptep_get_and_clear(mm, address, ptep);
2500                 tlb_remove_tlb_entry(tlb, ptep, address);
2501                 if (huge_pte_dirty(pte))
2502                         set_page_dirty(page);
2503 
2504                 page_remove_rmap(page);
2505                 force_flush = !__tlb_remove_page(tlb, page);
2506                 if (force_flush)
2507                         break;
2508                 /* Bail out after unmapping reference page if supplied */
2509                 if (ref_page)
2510                         break;
2511         }
2512         spin_unlock(&mm->page_table_lock);
2513         /*
 2514          * mmu_gather ran out of room to batch pages, so we break out of
 2515          * the PTE lock to avoid doing the potentially expensive TLB invalidate
 2516          * and page-free while holding it.
2517          */
2518         if (force_flush) {
2519                 force_flush = 0;
2520                 tlb_flush_mmu(tlb);
2521                 if (address < end && !ref_page)
2522                         goto again;
2523         }
2524         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2525         tlb_end_vma(tlb, vma);
2526 }
2527 
2528 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
2529                           struct vm_area_struct *vma, unsigned long start,
2530                           unsigned long end, struct page *ref_page)
2531 {
2532         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
2533 
2534         /*
2535          * Clear this flag so that x86's huge_pmd_share page_table_shareable
2536          * test will fail on a vma being torn down, and not grab a page table
2537          * on its way out.  We're lucky that the flag has such an appropriate
2538          * name, and can in fact be safely cleared here. We could clear it
2539          * before the __unmap_hugepage_range above, but all that's necessary
2540          * is to clear it before releasing the i_mmap_mutex. This works
2541          * because in the context this is called, the VMA is about to be
2542          * destroyed and the i_mmap_mutex is held.
2543          */
2544         vma->vm_flags &= ~VM_MAYSHARE;
2545 }
2546 
2547 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2548                           unsigned long end, struct page *ref_page)
2549 {
2550         struct mm_struct *mm;
2551         struct mmu_gather tlb;
2552 
2553         mm = vma->vm_mm;
2554 
2555         tlb_gather_mmu(&tlb, mm, start, end);
2556         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2557         tlb_finish_mmu(&tlb, start, end);
2558 }
2559 
2560 /*
2561  * This is called when the original mapper is failing to COW a MAP_PRIVATE
 2562  * mapping it owns the reserve page for. The intention is to unmap the page
2563  * from other VMAs and let the children be SIGKILLed if they are faulting the
2564  * same region.
2565  */
2566 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2567                                 struct page *page, unsigned long address)
2568 {
2569         struct hstate *h = hstate_vma(vma);
2570         struct vm_area_struct *iter_vma;
2571         struct address_space *mapping;
2572         pgoff_t pgoff;
2573 
2574         /*
2575          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2576          * from page cache lookup which is in HPAGE_SIZE units.
2577          */
2578         address = address & huge_page_mask(h);
2579         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2580                         vma->vm_pgoff;
2581         mapping = file_inode(vma->vm_file)->i_mapping;
2582 
2583         /*
2584          * Take the mapping lock for the duration of the table walk. As
2585          * this mapping should be shared between all the VMAs,
2586          * __unmap_hugepage_range() is called as the lock is already held
2587          */
2588         mutex_lock(&mapping->i_mmap_mutex);
2589         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
2590                 /* Do not unmap the current VMA */
2591                 if (iter_vma == vma)
2592                         continue;
2593 
2594                 /*
2595                  * Shared VMAs have their own reserves and do not affect
2596                  * MAP_PRIVATE accounting but it is possible that a shared
2597                  * VMA is using the same page so check and skip such VMAs.
2598                  */
2599                 if (iter_vma->vm_flags & VM_MAYSHARE)
2600                         continue;
2601 
2602                 /*
2603                  * Unmap the page from other VMAs without their own reserves.
2604                  * They get marked to be SIGKILLed if they fault in these
2605                  * areas. This is because a future no-page fault on this VMA
2606                  * could insert a zeroed page instead of the data existing
2607                  * from the time of fork. This would look like data corruption
2608                  */
2609                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2610                         unmap_hugepage_range(iter_vma, address,
2611                                              address + huge_page_size(h), page);
2612         }
2613         mutex_unlock(&mapping->i_mmap_mutex);
2614 
2615         return 1;
2616 }
2617 
2618 /*
2619  * Hugetlb_cow() should be called with page lock of the original hugepage held.
2620  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2621  * cannot race with other handlers or page migration.
2622  * Keep the pte_same checks anyway to make transition from the mutex easier.
2623  */
2624 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2625                         unsigned long address, pte_t *ptep, pte_t pte,
2626                         struct page *pagecache_page)
2627 {
2628         struct hstate *h = hstate_vma(vma);
2629         struct page *old_page, *new_page;
2630         int avoidcopy;
2631         int outside_reserve = 0;
2632         unsigned long mmun_start;       /* For mmu_notifiers */
2633         unsigned long mmun_end;         /* For mmu_notifiers */
2634 
2635         old_page = pte_page(pte);
2636 
2637 retry_avoidcopy:
2638         /* If no-one else is actually using this page, avoid the copy
2639          * and just make the page writable */
2640         avoidcopy = (page_mapcount(old_page) == 1);
2641         if (avoidcopy) {
2642                 if (PageAnon(old_page))
2643                         page_move_anon_rmap(old_page, vma, address);
2644                 set_huge_ptep_writable(vma, address, ptep);
2645                 return 0;
2646         }
2647 
2648         /*
2649          * If the process that created a MAP_PRIVATE mapping is about to
2650          * perform a COW due to a shared page count, attempt to satisfy
2651          * the allocation without using the existing reserves. The pagecache
2652          * page is used to determine if the reserve at this address was
2653          * consumed or not. If reserves were used, a partial faulted mapping
2654          * at the time of fork() could consume its reserves on COW instead
2655          * of the full address range.
2656          */
2657         if (!(vma->vm_flags & VM_MAYSHARE) &&
2658                         is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2659                         old_page != pagecache_page)
2660                 outside_reserve = 1;
2661 
2662         page_cache_get(old_page);
2663 
2664         /* Drop page_table_lock as buddy allocator may be called */
2665         spin_unlock(&mm->page_table_lock);
2666         new_page = alloc_huge_page(vma, address, outside_reserve);
2667 
2668         if (IS_ERR(new_page)) {
2669                 long err = PTR_ERR(new_page);
2670                 page_cache_release(old_page);
2671 
2672                 /*
2673                  * If a process owning a MAP_PRIVATE mapping fails to COW,
2674                  * it is due to references held by a child and an insufficient
2675                  * huge page pool. To guarantee the original mappers
2676                  * reliability, unmap the page from child processes. The child
2677                  * may get SIGKILLed if it later faults.
2678                  */
2679                 if (outside_reserve) {
2680                         BUG_ON(huge_pte_none(pte));
2681                         if (unmap_ref_private(mm, vma, old_page, address)) {
2682                                 BUG_ON(huge_pte_none(pte));
2683                                 spin_lock(&mm->page_table_lock);
2684                                 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2685                                 if (likely(pte_same(huge_ptep_get(ptep), pte)))
2686                                         goto retry_avoidcopy;
2687                                 /*
 2688                                  * A race occurred while re-acquiring the
 2689                                  * page_table_lock, and our job is done.
2690                                  */
2691                                 return 0;
2692                         }
2693                         WARN_ON_ONCE(1);
2694                 }
2695 
2696                 /* Caller expects lock to be held */
2697                 spin_lock(&mm->page_table_lock);
2698                 if (err == -ENOMEM)
2699                         return VM_FAULT_OOM;
2700                 else
2701                         return VM_FAULT_SIGBUS;
2702         }
2703 
2704         /*
 2705          * When the original hugepage is a shared one, it does not have
2706          * anon_vma prepared.
2707          */
2708         if (unlikely(anon_vma_prepare(vma))) {
2709                 page_cache_release(new_page);
2710                 page_cache_release(old_page);
2711                 /* Caller expects lock to be held */
2712                 spin_lock(&mm->page_table_lock);
2713                 return VM_FAULT_OOM;
2714         }
2715 
2716         copy_user_huge_page(new_page, old_page, address, vma,
2717                             pages_per_huge_page(h));
2718         __SetPageUptodate(new_page);
2719 
2720         mmun_start = address & huge_page_mask(h);
2721         mmun_end = mmun_start + huge_page_size(h);
2722         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2723         /*
2724          * Retake the page_table_lock to check for racing updates
2725          * before the page tables are altered
2726          */
2727         spin_lock(&mm->page_table_lock);
2728         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2729         if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2730                 /* Break COW */
2731                 huge_ptep_clear_flush(vma, address, ptep);
2732                 set_huge_pte_at(mm, address, ptep,
2733                                 make_huge_pte(vma, new_page, 1));
2734                 page_remove_rmap(old_page);
2735                 hugepage_add_new_anon_rmap(new_page, vma, address);
2736                 /* Make the old page be freed below */
2737                 new_page = old_page;
2738         }
2739         spin_unlock(&mm->page_table_lock);
2740         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2741         /* Caller expects lock to be held */
2742         spin_lock(&mm->page_table_lock);
2743         page_cache_release(new_page);
2744         page_cache_release(old_page);
2745         return 0;
2746 }
2747 
2748 /* Return the pagecache page at a given address within a VMA */
2749 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2750                         struct vm_area_struct *vma, unsigned long address)
2751 {
2752         struct address_space *mapping;
2753         pgoff_t idx;
2754 
2755         mapping = vma->vm_file->f_mapping;
2756         idx = vma_hugecache_offset(h, vma, address);
2757 
2758         return find_lock_page(mapping, idx);
2759 }
2760 
2761 /*
 2762  * Return whether there is a pagecache page to back the given address within the VMA.
 2763  * The caller, follow_hugetlb_page(), holds the page_table_lock, so we cannot lock_page().
2764  */
2765 static bool hugetlbfs_pagecache_present(struct hstate *h,
2766                         struct vm_area_struct *vma, unsigned long address)
2767 {
2768         struct address_space *mapping;
2769         pgoff_t idx;
2770         struct page *page;
2771 
2772         mapping = vma->vm_file->f_mapping;
2773         idx = vma_hugecache_offset(h, vma, address);
2774 
2775         page = find_get_page(mapping, idx);
2776         if (page)
2777                 put_page(page);
2778         return page != NULL;
2779 }
2780 
2781 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2782                         unsigned long address, pte_t *ptep, unsigned int flags)
2783 {
2784         struct hstate *h = hstate_vma(vma);
2785         int ret = VM_FAULT_SIGBUS;
2786         int anon_rmap = 0;
2787         pgoff_t idx;
2788         unsigned long size;
2789         struct page *page;
2790         struct address_space *mapping;
2791         pte_t new_pte;
2792 
2793         /*
2794          * Currently, we are forced to kill the process in the event the
2795          * original mapper has unmapped pages from the child due to a failed
2796          * COW. Warn that such a situation has occurred as it may not be obvious
2797          */
2798         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2799                 pr_warning("PID %d killed due to inadequate hugepage pool\n",
2800                            current->pid);
2801                 return ret;
2802         }
2803 
2804         mapping = vma->vm_file->f_mapping;
2805         idx = vma_hugecache_offset(h, vma, address);
2806 
2807         /*
2808          * Use page lock to guard against racing truncation
2809          * before we get page_table_lock.
2810          */
2811 retry:
2812         page = find_lock_page(mapping, idx);
2813         if (!page) {
2814                 size = i_size_read(mapping->host) >> huge_page_shift(h);
2815                 if (idx >= size)
2816                         goto out;
2817                 page = alloc_huge_page(vma, address, 0);
2818                 if (IS_ERR(page)) {
2819                         ret = PTR_ERR(page);
2820                         if (ret == -ENOMEM)
2821                                 ret = VM_FAULT_OOM;
2822                         else
2823                                 ret = VM_FAULT_SIGBUS;
2824                         goto out;
2825                 }
2826                 clear_huge_page(page, address, pages_per_huge_page(h));
2827                 __SetPageUptodate(page);
2828 
2829                 if (vma->vm_flags & VM_MAYSHARE) {
2830                         int err;
2831                         struct inode *inode = mapping->host;
2832 
2833                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2834                         if (err) {
2835                                 put_page(page);
2836                                 if (err == -EEXIST)
2837                                         goto retry;
2838                                 goto out;
2839                         }
2840 
2841                         spin_lock(&inode->i_lock);
2842                         inode->i_blocks += blocks_per_huge_page(h);
2843                         spin_unlock(&inode->i_lock);
2844                 } else {
2845                         lock_page(page);
2846                         if (unlikely(anon_vma_prepare(vma))) {
2847                                 ret = VM_FAULT_OOM;
2848                                 goto backout_unlocked;
2849                         }
2850                         anon_rmap = 1;
2851                 }
2852         } else {
2853                 /*
 2854                  * If a memory error occurs between mmap() and fault, some processes
 2855                  * don't have a hwpoisoned swap entry for the errored virtual address.
 2856                  * So we need to block the hugepage fault with a PG_hwpoison bit check.
2857                  */
2858                 if (unlikely(PageHWPoison(page))) {
2859                         ret = VM_FAULT_HWPOISON |
2860                                 VM_FAULT_SET_HINDEX(hstate_index(h));
2861                         goto backout_unlocked;
2862                 }
2863         }
2864 
2865         /*
2866          * If we are going to COW a private mapping later, we examine the
2867          * pending reservations for this page now. This will ensure that
2868          * any allocations necessary to record that reservation occur outside
2869          * the spinlock.
2870          */
2871         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2872                 if (vma_needs_reservation(h, vma, address) < 0) {
2873                         ret = VM_FAULT_OOM;
2874                         goto backout_unlocked;
2875                 }
2876 
2877         spin_lock(&mm->page_table_lock);
2878         size = i_size_read(mapping->host) >> huge_page_shift(h);
2879         if (idx >= size)
2880                 goto backout;
2881 
2882         ret = 0;
2883         if (!huge_pte_none(huge_ptep_get(ptep)))
2884                 goto backout;
2885 
2886         if (anon_rmap)
2887                 hugepage_add_new_anon_rmap(page, vma, address);
2888         else
2889                 page_dup_rmap(page);
2890         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2891                                 && (vma->vm_flags & VM_SHARED)));
2892         set_huge_pte_at(mm, address, ptep, new_pte);
2893 
2894         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2895                 /* Optimization, do the COW without a second fault */
2896                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2897         }
2898 
2899         spin_unlock(&mm->page_table_lock);
2900         unlock_page(page);
2901 out:
2902         return ret;
2903 
2904 backout:
2905         spin_unlock(&mm->page_table_lock);
2906 backout_unlocked:
2907         unlock_page(page);
2908         put_page(page);
2909         goto out;
2910 }
2911 
2912 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2913                         unsigned long address, unsigned int flags)
2914 {
2915         pte_t *ptep;
2916         pte_t entry;
2917         int ret;
2918         struct page *page = NULL;
2919         struct page *pagecache_page = NULL;
2920         static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2921         struct hstate *h = hstate_vma(vma);
2922 
2923         address &= huge_page_mask(h);
2924 
2925         ptep = huge_pte_offset(mm, address);
2926         if (ptep) {
2927                 entry = huge_ptep_get(ptep);
2928                 if (unlikely(is_hugetlb_entry_migration(entry))) {
2929                         migration_entry_wait_huge(mm, ptep);
2930                         return 0;
2931                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2932                         return VM_FAULT_HWPOISON_LARGE |
2933                                 VM_FAULT_SET_HINDEX(hstate_index(h));
2934         }
2935 
2936         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2937         if (!ptep)
2938                 return VM_FAULT_OOM;
2939 
2940         /*
2941          * Serialize hugepage allocation and instantiation, so that we don't
2942          * get spurious allocation failures if two CPUs race to instantiate
2943          * the same page in the page cache.
2944          */
2945         mutex_lock(&hugetlb_instantiation_mutex);
2946         entry = huge_ptep_get(ptep);
2947         if (huge_pte_none(entry)) {
2948                 ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2949                 goto out_mutex;
2950         }
2951 
2952         ret = 0;
2953 
2954         /*
2955          * If we are going to COW the mapping later, we examine the pending
2956          * reservations for this page now. This will ensure that any
2957          * allocations necessary to record that reservation occur outside the
2958          * spinlock. For private mappings, we also lookup the pagecache
2959          * page now as it is used to determine if a reservation has been
2960          * consumed.
2961          */
2962         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
2963                 if (vma_needs_reservation(h, vma, address) < 0) {
2964                         ret = VM_FAULT_OOM;
2965                         goto out_mutex;
2966                 }
2967 
2968                 if (!(vma->vm_flags & VM_MAYSHARE))
2969                         pagecache_page = hugetlbfs_pagecache_page(h,
2970                                                                 vma, address);
2971         }
2972 
2973         /*
2974          * hugetlb_cow() requires page locks of pte_page(entry) and
2975          * pagecache_page, so here we need to take the former one
2976          * when page != pagecache_page or when there is no pagecache_page.
2977          * Note that the locking order is always pagecache_page -> page,
2978          * so there is no risk of deadlock.
2979          */
2980         page = pte_page(entry);
2981         get_page(page);
2982         if (page != pagecache_page)
2983                 lock_page(page);
2984 
2985         spin_lock(&mm->page_table_lock);
2986         /* Check for a racing update before calling hugetlb_cow */
2987         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2988                 goto out_page_table_lock;
2989 
2990 
2991         if (flags & FAULT_FLAG_WRITE) {
2992                 if (!huge_pte_write(entry)) {
2993                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
2994                                                         pagecache_page);
2995                         goto out_page_table_lock;
2996                 }
2997                 entry = huge_pte_mkdirty(entry);
2998         }
2999         entry = pte_mkyoung(entry);
3000         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3001                                                 flags & FAULT_FLAG_WRITE))
3002                 update_mmu_cache(vma, address, ptep);
3003 
3004 out_page_table_lock:
3005         spin_unlock(&mm->page_table_lock);
3006 
3007         if (pagecache_page) {
3008                 unlock_page(pagecache_page);
3009                 put_page(pagecache_page);
3010         }
3011         if (page != pagecache_page)
3012                 unlock_page(page);
3013         put_page(page);
3014 
3015 out_mutex:
3016         mutex_unlock(&hugetlb_instantiation_mutex);
3017 
3018         return ret;
3019 }
3020 
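     /*
      * Caller context, as a hedged sketch: in kernels of this vintage the
      * generic fault path in mm/memory.c diverts hugetlb VMAs to
      * hugetlb_fault() before touching the normal page tables, roughly:
      *
      *	if (unlikely(is_vm_hugetlb_page(vma)))
      *		return hugetlb_fault(mm, vma, address, flags);
      *
      * so the code above runs instead of, not in addition to, the base-page
      * fault handling for such VMAs.
      */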
3021 /* Can be overridden by architectures */
3022 __attribute__((weak)) struct page *
3023 follow_huge_pud(struct mm_struct *mm, unsigned long address,
3024                pud_t *pud, int write)
3025 {
3026         BUG();
3027         return NULL;
3028 }
3029 
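     /*
      * The BUG() above only triggers if an architecture advertises pud-sized
      * huge pages without overriding this weak symbol.  As a hedged sketch
      * (modelled loosely on the x86 version of this era; details differ per
      * architecture), an override typically looks like:
      *
      *	struct page *follow_huge_pud(struct mm_struct *mm,
      *				     unsigned long address,
      *				     pud_t *pud, int write)
      *	{
      *		struct page *page = pte_page(*(pte_t *)pud);
      *
      *		if (page)
      *			page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
      *		return page;
      *	}
      */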
3030 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3031                          struct page **pages, struct vm_area_struct **vmas,
3032                          unsigned long *position, unsigned long *nr_pages,
3033                          long i, unsigned int flags)
3034 {
3035         unsigned long pfn_offset;
3036         unsigned long vaddr = *position;
3037         unsigned long remainder = *nr_pages;
3038         struct hstate *h = hstate_vma(vma);
3039 
3040         spin_lock(&mm->page_table_lock);
3041         while (vaddr < vma->vm_end && remainder) {
3042                 pte_t *pte;
3043                 int absent;
3044                 struct page *page;
3045 
3046                 /*
3047                  * Some archs (sparc64, sh*) have multiple pte_ts for
3048                  * each hugepage.  We have to make sure we get the
3049                  * first one, so the page indexing below works.
3050                  */
3051                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3052                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3053 
3054                 /*
3055                  * When coredumping, it suits get_dump_page if we just return
3056                  * an error where there's an empty slot with no huge pagecache
3057                  * to back it.  This way, we avoid allocating a hugepage, and
3058                  * the sparse dumpfile avoids allocating disk blocks, but its
3059                  * huge holes still show up with zeroes where they need to be.
3060                  */
3061                 if (absent && (flags & FOLL_DUMP) &&
3062                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3063                         remainder = 0;
3064                         break;
3065                 }
3066 
3067                 /*
3068                  * We need to call hugetlb_fault for both hugepages under
3069                  * migration (in which case hugetlb_fault waits for the
3070                  * migration) and hwpoisoned hugepages (in which case we need
3071                  * to prevent the caller from accessing them).  We use
3072                  * is_swap_pte here instead of is_hugetlb_entry_migration and
3073                  * is_hugetlb_entry_hwpoisoned because it simply covers both
3074                  * cases, and because we can't follow correct pages directly
3075                  * from any kind of swap entry.
3076                  */
3077                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3078                     ((flags & FOLL_WRITE) &&
3079                       !huge_pte_write(huge_ptep_get(pte)))) {
3080                         int ret;
3081 
3082                         spin_unlock(&mm->page_table_lock);
3083                         ret = hugetlb_fault(mm, vma, vaddr,
3084                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3085                         spin_lock(&mm->page_table_lock);
3086                         if (!(ret & VM_FAULT_ERROR))
3087                                 continue;
3088 
3089                         remainder = 0;
3090                         break;
3091                 }
3092 
3093                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3094                 page = pte_page(huge_ptep_get(pte));
3095 same_page:
3096                 if (pages) {
3097                         pages[i] = mem_map_offset(page, pfn_offset);
3098                         get_page(pages[i]);
3099                 }
3100 
3101                 if (vmas)
3102                         vmas[i] = vma;
3103 
3104                 vaddr += PAGE_SIZE;
3105                 ++pfn_offset;
3106                 --remainder;
3107                 ++i;
3108                 if (vaddr < vma->vm_end && remainder &&
3109                                 pfn_offset < pages_per_huge_page(h)) {
3110                         /*
3111                          * We use pfn_offset to avoid touching the pageframes
3112                          * of this compound page.
3113                          */
3114                         goto same_page;
3115                 }
3116         }
3117         spin_unlock(&mm->page_table_lock);
3118         *nr_pages = remainder;
3119         *position = vaddr;
3120 
3121         return i ? i : -EFAULT;
3122 }
3123 
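     /*
      * Caller context, as a hedged sketch: __get_user_pages() in mm/memory.c
      * of this era short-circuits hugetlb VMAs through the routine above
      * instead of walking individual base-page ptes, roughly:
      *
      *	if (is_vm_hugetlb_page(vma)) {
      *		i = follow_hugetlb_page(mm, vma, pages, vmas,
      *					&start, &nr_pages, i, gup_flags);
      *		continue;
      *	}
      *
      * which is why *position and *nr_pages are updated in place here.
      */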
3124 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3125                 unsigned long address, unsigned long end, pgprot_t newprot)
3126 {
3127         struct mm_struct *mm = vma->vm_mm;
3128         unsigned long start = address;
3129         pte_t *ptep;
3130         pte_t pte;
3131         struct hstate *h = hstate_vma(vma);
3132         unsigned long pages = 0;
3133 
3134         BUG_ON(address >= end);
3135         flush_cache_range(vma, address, end);
3136 
3137         mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
3138         spin_lock(&mm->page_table_lock);
3139         for (; address < end; address += huge_page_size(h)) {
3140                 ptep = huge_pte_offset(mm, address);
3141                 if (!ptep)
3142                         continue;
3143                 if (huge_pmd_unshare(mm, &address, ptep)) {
3144                         pages++;
3145                         continue;
3146                 }
3147                 if (!huge_pte_none(huge_ptep_get(ptep))) {
3148                         pte = huge_ptep_get_and_clear(mm, address, ptep);
3149                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3150                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
3151                         set_huge_pte_at(mm, address, ptep, pte);
3152                         pages++;
3153                 }
3154         }
3155         spin_unlock(&mm->page_table_lock);
3156         /*
3157          * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
3158          * may have cleared our pud entry and done put_page on the page table:
3159          * once we release i_mmap_mutex, another task can do the final put_page
3160          * and that page table be reused and filled with junk.
3161          */
3162         flush_tlb_range(vma, start, end);
3163         mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
3164 
3165         return pages << h->order;
3166 }
3167 
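     /*
      * Note that the value returned above is in base pages: "pages" counts
      * huge ptes that were changed or unshared and is shifted by h->order so
      * the caller can account it like any other mapping.  A hedged sketch of
      * the dispatch in mm/mprotect.c's change_protection() for this era:
      *
      *	if (is_vm_hugetlb_page(vma))
      *		pages = hugetlb_change_protection(vma, start, end, newprot);
      */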
3168 int hugetlb_reserve_pages(struct inode *inode,
3169                                         long from, long to,
3170                                         struct vm_area_struct *vma,
3171                                         vm_flags_t vm_flags)
3172 {
3173         long ret, chg;
3174         struct hstate *h = hstate_inode(inode);
3175         struct hugepage_subpool *spool = subpool_inode(inode);
3176 
3177         /*
3178          * Only apply the hugepage reservation if asked.  At fault time, a
3179          * VM_NORESERVE mapping will attempt to allocate a page without
3180          * using reserves.
3181          */
3182         if (vm_flags & VM_NORESERVE)
3183                 return 0;
3184 
3185         /*
3186          * Shared mappings base their reservation on the number of pages that
3187          * are already allocated on behalf of the file. Private mappings need
3188          * to reserve the full area even if read-only, as mprotect() may be
3189          * called to make the mapping read-write.  Assume !vma is a shm mapping.
3190          */
3191         if (!vma || vma->vm_flags & VM_MAYSHARE)
3192                 chg = region_chg(&inode->i_mapping->private_list, from, to);
3193         else {
3194                 struct resv_map *resv_map = resv_map_alloc();
3195                 if (!resv_map)
3196                         return -ENOMEM;
3197 
3198                 chg = to - from;
3199 
3200                 set_vma_resv_map(vma, resv_map);
3201                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3202         }
3203 
3204         if (chg < 0) {
3205                 ret = chg;
3206                 goto out_err;
3207         }
3208 
3209         /* There must be enough pages in the subpool for the mapping */
3210         if (hugepage_subpool_get_pages(spool, chg)) {
3211                 ret = -ENOSPC;
3212                 goto out_err;
3213         }
3214 
3215         /*
3216          * Check that enough hugepages are available for the reservation.
3217          * Hand the pages back to the subpool if there are not.
3218          */
3219         ret = hugetlb_acct_memory(h, chg);
3220         if (ret < 0) {
3221                 hugepage_subpool_put_pages(spool, chg);
3222                 goto out_err;
3223         }
3224 
3225         /*
3226          * Account for the reservations made. Shared mappings record regions
3227          * that have reservations as they are shared by multiple VMAs.
3228          * When the last VMA disappears, the region map says how much
3229          * the reservation was and the page cache tells how much of
3230          * the reservation was consumed. Private mappings are per-VMA and
3231          * only the consumed reservations are tracked. When the VMA
3232          * disappears, the original reservation is the VMA size and the
3233          * consumed reservations are stored in the map. Hence, nothing
3234          * else has to be done for private mappings here.
3235          */
3236         if (!vma || vma->vm_flags & VM_MAYSHARE)
3237                 region_add(&inode->i_mapping->private_list, from, to);
3238         return 0;
3239 out_err:
3240         if (vma)
3241                 resv_map_put(vma);
3242         return ret;
3243 }
3244 
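     /*
      * Usage sketch (hedged, modelled on fs/hugetlbfs/inode.c of this era):
      * hugetlbfs_file_mmap() converts the mapping range to huge-page indices
      * before asking for the reservation, roughly:
      *
      *	if (hugetlb_reserve_pages(inode,
      *				  vma->vm_pgoff >> huge_page_order(h),
      *				  len >> huge_page_shift(h),
      *				  vma, vma->vm_flags))
      *		goto out;
      *
      * so "from" and "to" here are always in units of huge pages, not bytes.
      */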
3245 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3246 {
3247         struct hstate *h = hstate_inode(inode);
3248         long chg = region_truncate(&inode->i_mapping->private_list, offset);
3249         struct hugepage_subpool *spool = subpool_inode(inode);
3250 
3251         spin_lock(&inode->i_lock);
3252         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3253         spin_unlock(&inode->i_lock);
3254 
3255         hugepage_subpool_put_pages(spool, (chg - freed));
3256         hugetlb_acct_memory(h, -(chg - freed));
3257 }
3258 
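     /*
      * Accounting example (illustrative numbers only, as the code above
      * reads): if a shared mapping had 8 huge pages reserved beyond "offset"
      * but only 3 were ever faulted in and removed by the truncate, then
      * chg = 8 and freed = 3, so the 5 never-consumed reservations are
      * returned to the subpool and the global pool here; the 3 freed pages
      * were already accounted back when free_huge_page() ran on them.
      */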
3259 #ifdef CONFIG_MEMORY_FAILURE
3260 
3261 /* Must be called with hugetlb_lock held */
3262 static int is_hugepage_on_freelist(struct page *hpage)
3263 {
3264         struct page *page;
3265         struct page *tmp;
3266         struct hstate *h = page_hstate(hpage);
3267         int nid = page_to_nid(hpage);
3268 
3269         list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
3270                 if (page == hpage)
3271                         return 1;
3272         return 0;
3273 }
3274 
3275 /*
3276  * This function is called from memory failure code.
3277  * Assume the caller holds the page lock of the head page.
3278  */
3279 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3280 {
3281         struct hstate *h = page_hstate(hpage);
3282         int nid = page_to_nid(hpage);
3283         int ret = -EBUSY;
3284 
3285         spin_lock(&hugetlb_lock);
3286         if (is_hugepage_on_freelist(hpage)) {
3287                 /*
3288                  * Hwpoisoned hugepage isn't linked to activelist or freelist,
3289                  * but dangling hpage->lru can trigger list-debug warnings
3290                  * (this happens when we call unpoison_memory() on it),
3291                  * so let it point to itself with list_del_init().
3292                  */
3293                 list_del_init(&hpage->lru);
3294                 set_page_refcounted(hpage);
3295                 h->free_huge_pages--;
3296                 h->free_huge_pages_node[nid]--;
3297                 ret = 0;
3298         }
3299         spin_unlock(&hugetlb_lock);
3300         return ret;
3301 }
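     /*
      * Caller sketch (hedged): the memory-failure code in mm/memory-failure.c
      * calls dequeue_hwpoisoned_huge_page(hpage) when a poisoned hugepage may
      * still be sitting on a free list, roughly:
      *
      *	if (!dequeue_hwpoisoned_huge_page(hpage))
      *		goto done;	(free hugepage successfully isolated)
      *
      * A -EBUSY return means the hugepage is already in use and must be
      * handled through the normal fault/rmap paths instead.
      */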
3302 #endif
3303 
