TOMOYO Linux Cross Reference
Linux/mm/page_alloc.c


  1 /*
  2  *  linux/mm/page_alloc.c
  3  *
  4  *  Manages the free list; the system allocates free pages here.
  5  *  Note that kmalloc() lives in slab.c
  6  *
  7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  8  *  Swap reorganised 29.12.95, Stephen Tweedie
  9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 15  */
 16 
 17 #include <linux/stddef.h>
 18 #include <linux/mm.h>
 19 #include <linux/swap.h>
 20 #include <linux/interrupt.h>
 21 #include <linux/pagemap.h>
 22 #include <linux/jiffies.h>
 23 #include <linux/bootmem.h>
 24 #include <linux/memblock.h>
 25 #include <linux/compiler.h>
 26 #include <linux/kernel.h>
 27 #include <linux/kmemcheck.h>
 28 #include <linux/kasan.h>
 29 #include <linux/module.h>
 30 #include <linux/suspend.h>
 31 #include <linux/pagevec.h>
 32 #include <linux/blkdev.h>
 33 #include <linux/slab.h>
 34 #include <linux/ratelimit.h>
 35 #include <linux/oom.h>
 36 #include <linux/notifier.h>
 37 #include <linux/topology.h>
 38 #include <linux/sysctl.h>
 39 #include <linux/cpu.h>
 40 #include <linux/cpuset.h>
 41 #include <linux/memory_hotplug.h>
 42 #include <linux/nodemask.h>
 43 #include <linux/vmalloc.h>
 44 #include <linux/vmstat.h>
 45 #include <linux/mempolicy.h>
 46 #include <linux/memremap.h>
 47 #include <linux/stop_machine.h>
 48 #include <linux/sort.h>
 49 #include <linux/pfn.h>
 50 #include <linux/backing-dev.h>
 51 #include <linux/fault-inject.h>
 52 #include <linux/page-isolation.h>
 53 #include <linux/page_ext.h>
 54 #include <linux/debugobjects.h>
 55 #include <linux/kmemleak.h>
 56 #include <linux/compaction.h>
 57 #include <trace/events/kmem.h>
 58 #include <trace/events/oom.h>
 59 #include <linux/prefetch.h>
 60 #include <linux/mm_inline.h>
 61 #include <linux/migrate.h>
 62 #include <linux/hugetlb.h>
 63 #include <linux/sched/rt.h>
 64 #include <linux/sched/mm.h>
 65 #include <linux/page_owner.h>
 66 #include <linux/kthread.h>
 67 #include <linux/memcontrol.h>
 68 #include <linux/ftrace.h>
 69 #include <linux/nmi.h>
 70 
 71 #include <asm/sections.h>
 72 #include <asm/tlbflush.h>
 73 #include <asm/div64.h>
 74 #include "internal.h"
 75 
 76 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 77 static DEFINE_MUTEX(pcp_batch_high_lock);
 78 #define MIN_PERCPU_PAGELIST_FRACTION    (8)
 79 
 80 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
 81 DEFINE_PER_CPU(int, numa_node);
 82 EXPORT_PER_CPU_SYMBOL(numa_node);
 83 #endif
 84 
 85 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
 86 /*
 87  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 88  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 89  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 90  * defined in <linux/topology.h>.
 91  */
 92 DEFINE_PER_CPU(int, _numa_mem_);                /* Kernel "local memory" node */
 93 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
 94 int _node_numa_mem_[MAX_NUMNODES];
 95 #endif
 96 
 97 /* work_structs for global per-cpu drains */
 98 DEFINE_MUTEX(pcpu_drain_mutex);
 99 DEFINE_PER_CPU(struct work_struct, pcpu_drain);
100 
101 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
102 volatile unsigned long latent_entropy __latent_entropy;
103 EXPORT_SYMBOL(latent_entropy);
104 #endif
105 
106 /*
107  * Array of node states.
108  */
109 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
110         [N_POSSIBLE] = NODE_MASK_ALL,
111         [N_ONLINE] = { { [0] = 1UL } },
112 #ifndef CONFIG_NUMA
113         [N_NORMAL_MEMORY] = { { [0] = 1UL } },
114 #ifdef CONFIG_HIGHMEM
115         [N_HIGH_MEMORY] = { { [0] = 1UL } },
116 #endif
117 #ifdef CONFIG_MOVABLE_NODE
118         [N_MEMORY] = { { [0] = 1UL } },
119 #endif
120         [N_CPU] = { { [0] = 1UL } },
121 #endif  /* NUMA */
122 };
123 EXPORT_SYMBOL(node_states);
124 
125 /* Protect totalram_pages and zone->managed_pages */
126 static DEFINE_SPINLOCK(managed_page_count_lock);
127 
128 unsigned long totalram_pages __read_mostly;
129 unsigned long totalreserve_pages __read_mostly;
130 unsigned long totalcma_pages __read_mostly;
131 
132 int percpu_pagelist_fraction;
133 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
134 
135 /*
136  * A cached value of the page's pageblock's migratetype, used when the page is
137  * put on a pcplist. Used to avoid the pageblock migratetype lookup when
138  * freeing from pcplists in most cases, at the cost of possibly becoming stale.
139  * Also the migratetype set in the page does not necessarily match the pcplist
140  * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
141  * other index - this ensures that it will be put on the correct CMA freelist.
142  */
143 static inline int get_pcppage_migratetype(struct page *page)
144 {
145         return page->index;
146 }
147 
148 static inline void set_pcppage_migratetype(struct page *page, int migratetype)
149 {
150         page->index = migratetype;
151 }
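/*
 * Illustrative sketch (not part of this file): the intended pairing of the
 * two helpers above.  The pageblock migratetype is cached when a page is
 * placed on a pcplist and read back when the page later leaves it, so the
 * free path avoids a fresh pageblock lookup:
 *
 *	set_pcppage_migratetype(page, get_pfnblock_migratetype(page, pfn));
 *	list_add(&page->lru, &pcp->lists[migratetype]);
 *	...
 *	mt = get_pcppage_migratetype(page);
 *	__free_one_page(page, page_to_pfn(page), zone, 0, mt);
 */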
152 
153 #ifdef CONFIG_PM_SLEEP
154 /*
155  * The following functions are used by the suspend/hibernate code to temporarily
156  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
157  * while devices are suspended.  To avoid races with the suspend/hibernate code,
158  * they should always be called with pm_mutex held (gfp_allowed_mask also should
159  * only be modified with pm_mutex held, unless the suspend/hibernate code is
160  * guaranteed not to run in parallel with that modification).
161  */
162 
163 static gfp_t saved_gfp_mask;
164 
165 void pm_restore_gfp_mask(void)
166 {
167         WARN_ON(!mutex_is_locked(&pm_mutex));
168         if (saved_gfp_mask) {
169                 gfp_allowed_mask = saved_gfp_mask;
170                 saved_gfp_mask = 0;
171         }
172 }
173 
174 void pm_restrict_gfp_mask(void)
175 {
176         WARN_ON(!mutex_is_locked(&pm_mutex));
177         WARN_ON(saved_gfp_mask);
178         saved_gfp_mask = gfp_allowed_mask;
179         gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
180 }
181 
182 bool pm_suspended_storage(void)
183 {
184         if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
185                 return false;
186         return true;
187 }
188 #endif /* CONFIG_PM_SLEEP */
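/*
 * Illustrative sketch (not part of this file, assumed caller): the
 * suspend/hibernate path is expected to bracket its no-I/O window with the
 * helpers above while holding pm_mutex, roughly:
 *
 *	mutex_lock(&pm_mutex);
 *	pm_restrict_gfp_mask();		(clears __GFP_IO and __GFP_FS)
 *	... suspend devices, write or read the hibernation image ...
 *	pm_restore_gfp_mask();		(restores the saved mask)
 *	mutex_unlock(&pm_mutex);
 */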
189 
190 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
191 unsigned int pageblock_order __read_mostly;
192 #endif
193 
194 static void __free_pages_ok(struct page *page, unsigned int order);
195 
196 /*
197  * results with 256, 32 in the lowmem_reserve sysctl:
198  *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
199  *      1G machine -> (16M dma, 784M normal, 224M high)
200  *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
201  *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
202  *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
203  *
204  * TBD: should special case ZONE_DMA32 machines here - in those we normally
205  * don't need any ZONE_NORMAL reservation
206  */
207 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
208 #ifdef CONFIG_ZONE_DMA
209          256,
210 #endif
211 #ifdef CONFIG_ZONE_DMA32
212          256,
213 #endif
214 #ifdef CONFIG_HIGHMEM
215          32,
216 #endif
217          32,
218 };
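/*
 * Worked example of the ratios above (a sketch of the arithmetic done in
 * setup_per_zone_lowmem_reserve()): with 784M of ZONE_NORMAL and a DMA
 * ratio of 256, an allocation allowed to use ZONE_NORMAL must leave about
 * 784M / 256 ~= 3M of ZONE_DMA free; with 224M of HIGHMEM and a NORMAL
 * ratio of 32, a HIGHMEM-capable allocation leaves 224M / 32 = 7M of
 * ZONE_NORMAL untouched.
 */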
219 
220 EXPORT_SYMBOL(totalram_pages);
221 
222 static char * const zone_names[MAX_NR_ZONES] = {
223 #ifdef CONFIG_ZONE_DMA
224          "DMA",
225 #endif
226 #ifdef CONFIG_ZONE_DMA32
227          "DMA32",
228 #endif
229          "Normal",
230 #ifdef CONFIG_HIGHMEM
231          "HighMem",
232 #endif
233          "Movable",
234 #ifdef CONFIG_ZONE_DEVICE
235          "Device",
236 #endif
237 };
238 
239 char * const migratetype_names[MIGRATE_TYPES] = {
240         "Unmovable",
241         "Movable",
242         "Reclaimable",
243         "HighAtomic",
244 #ifdef CONFIG_CMA
245         "CMA",
246 #endif
247 #ifdef CONFIG_MEMORY_ISOLATION
248         "Isolate",
249 #endif
250 };
251 
252 compound_page_dtor * const compound_page_dtors[] = {
253         NULL,
254         free_compound_page,
255 #ifdef CONFIG_HUGETLB_PAGE
256         free_huge_page,
257 #endif
258 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
259         free_transhuge_page,
260 #endif
261 };
262 
263 int min_free_kbytes = 1024;
264 int user_min_free_kbytes = -1;
265 int watermark_scale_factor = 10;
266 
267 static unsigned long __meminitdata nr_kernel_pages;
268 static unsigned long __meminitdata nr_all_pages;
269 static unsigned long __meminitdata dma_reserve;
270 
271 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
272 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
273 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
274 static unsigned long __initdata required_kernelcore;
275 static unsigned long __initdata required_movablecore;
276 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
277 static bool mirrored_kernelcore;
278 
279 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
280 int movable_zone;
281 EXPORT_SYMBOL(movable_zone);
282 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
283 
284 #if MAX_NUMNODES > 1
285 int nr_node_ids __read_mostly = MAX_NUMNODES;
286 int nr_online_nodes __read_mostly = 1;
287 EXPORT_SYMBOL(nr_node_ids);
288 EXPORT_SYMBOL(nr_online_nodes);
289 #endif
290 
291 int page_group_by_mobility_disabled __read_mostly;
292 
293 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
294 static inline void reset_deferred_meminit(pg_data_t *pgdat)
295 {
296         unsigned long max_initialise;
297         unsigned long reserved_lowmem;
298 
299         /*
 300          * Initialise at least 2G of a node, but also take into account
 301          * that two large system hashes can take up 1GB for 0.25TB/node.
302          */
303         max_initialise = max(2UL << (30 - PAGE_SHIFT),
304                 (pgdat->node_spanned_pages >> 8));
305 
306         /*
 307          * Compensate for all the memblock reservations (e.g. crash kernel)
308          * from the initial estimation to make sure we will initialize enough
309          * memory to boot.
310          */
311         reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
312                         pgdat->node_start_pfn + max_initialise);
313         max_initialise += reserved_lowmem;
314 
315         pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
316         pgdat->first_deferred_pfn = ULONG_MAX;
317 }
318 
319 /* Returns true if the struct page for the pfn is uninitialised */
320 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
321 {
322         int nid = early_pfn_to_nid(pfn);
323 
324         if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
325                 return true;
326 
327         return false;
328 }
329 
330 /*
331  * Returns false when the remaining initialisation should be deferred until
332  * later in the boot cycle when it can be parallelised.
333  */
334 static inline bool update_defer_init(pg_data_t *pgdat,
335                                 unsigned long pfn, unsigned long zone_end,
336                                 unsigned long *nr_initialised)
337 {
 338         /* Always populate low zones for address-constrained allocations */
339         if (zone_end < pgdat_end_pfn(pgdat))
340                 return true;
341         (*nr_initialised)++;
342         if ((*nr_initialised > pgdat->static_init_size) &&
343             (pfn & (PAGES_PER_SECTION - 1)) == 0) {
344                 pgdat->first_deferred_pfn = pfn;
345                 return false;
346         }
347 
348         return true;
349 }
350 #else
351 static inline void reset_deferred_meminit(pg_data_t *pgdat)
352 {
353 }
354 
355 static inline bool early_page_uninitialised(unsigned long pfn)
356 {
357         return false;
358 }
359 
360 static inline bool update_defer_init(pg_data_t *pgdat,
361                                 unsigned long pfn, unsigned long zone_end,
362                                 unsigned long *nr_initialised)
363 {
364         return true;
365 }
366 #endif
367 
368 /* Return a pointer to the bitmap storing bits affecting a block of pages */
369 static inline unsigned long *get_pageblock_bitmap(struct page *page,
370                                                         unsigned long pfn)
371 {
372 #ifdef CONFIG_SPARSEMEM
373         return __pfn_to_section(pfn)->pageblock_flags;
374 #else
375         return page_zone(page)->pageblock_flags;
376 #endif /* CONFIG_SPARSEMEM */
377 }
378 
379 static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
380 {
381 #ifdef CONFIG_SPARSEMEM
382         pfn &= (PAGES_PER_SECTION-1);
383         return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
384 #else
385         pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
386         return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
387 #endif /* CONFIG_SPARSEMEM */
388 }
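/*
 * Worked example (assuming a SPARSEMEM build with PAGES_PER_SECTION ==
 * 1 << 15 and pageblock_order == 9, as is common on x86_64): for
 * pfn 0x12345 the section-relative pfn is 0x2345 (9029), the pageblock
 * index is 9029 >> 9 == 17, and with NR_PAGEBLOCK_BITS == 4 the returned
 * bit index is 17 * 4 == 68, i.e. bit 4 of word 1 of the section bitmap.
 */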
389 
390 /**
391  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
392  * @page: The page within the block of interest
393  * @pfn: The target page frame number
394  * @end_bitidx: The last bit of interest to retrieve
395  * @mask: mask of bits that the caller is interested in
396  *
397  * Return: pageblock_bits flags
398  */
399 static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
400                                         unsigned long pfn,
401                                         unsigned long end_bitidx,
402                                         unsigned long mask)
403 {
404         unsigned long *bitmap;
405         unsigned long bitidx, word_bitidx;
406         unsigned long word;
407 
408         bitmap = get_pageblock_bitmap(page, pfn);
409         bitidx = pfn_to_bitidx(page, pfn);
410         word_bitidx = bitidx / BITS_PER_LONG;
411         bitidx &= (BITS_PER_LONG-1);
412 
413         word = bitmap[word_bitidx];
414         bitidx += end_bitidx;
415         return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
416 }
417 
418 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
419                                         unsigned long end_bitidx,
420                                         unsigned long mask)
421 {
422         return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
423 }
424 
425 static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
426 {
427         return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
428 }
429 
430 /**
431  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
432  * @page: The page within the block of interest
433  * @flags: The flags to set
434  * @pfn: The target page frame number
435  * @end_bitidx: The last bit of interest
436  * @mask: mask of bits that the caller is interested in
437  */
438 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
439                                         unsigned long pfn,
440                                         unsigned long end_bitidx,
441                                         unsigned long mask)
442 {
443         unsigned long *bitmap;
444         unsigned long bitidx, word_bitidx;
445         unsigned long old_word, word;
446 
447         BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
448 
449         bitmap = get_pageblock_bitmap(page, pfn);
450         bitidx = pfn_to_bitidx(page, pfn);
451         word_bitidx = bitidx / BITS_PER_LONG;
452         bitidx &= (BITS_PER_LONG-1);
453 
454         VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
455 
456         bitidx += end_bitidx;
457         mask <<= (BITS_PER_LONG - bitidx - 1);
458         flags <<= (BITS_PER_LONG - bitidx - 1);
459 
460         word = READ_ONCE(bitmap[word_bitidx]);
461         for (;;) {
462                 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
463                 if (word == old_word)
464                         break;
465                 word = old_word;
466         }
467 }
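/*
 * The loop above is the standard lock-free read-modify-write idiom: retry
 * the cmpxchg() with the freshly observed word whenever another updater
 * raced with us.  A minimal sketch of the same pattern for setting a
 * single bit (illustrative only, not used in this file):
 *
 *	word = READ_ONCE(*addr);
 *	for (;;) {
 *		old = cmpxchg(addr, word, word | BIT(nr));
 *		if (old == word)
 *			break;
 *		word = old;
 *	}
 */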
468 
469 void set_pageblock_migratetype(struct page *page, int migratetype)
470 {
471         if (unlikely(page_group_by_mobility_disabled &&
472                      migratetype < MIGRATE_PCPTYPES))
473                 migratetype = MIGRATE_UNMOVABLE;
474 
475         set_pageblock_flags_group(page, (unsigned long)migratetype,
476                                         PB_migrate, PB_migrate_end);
477 }
478 
479 #ifdef CONFIG_DEBUG_VM
480 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
481 {
482         int ret = 0;
483         unsigned seq;
484         unsigned long pfn = page_to_pfn(page);
485         unsigned long sp, start_pfn;
486 
487         do {
488                 seq = zone_span_seqbegin(zone);
489                 start_pfn = zone->zone_start_pfn;
490                 sp = zone->spanned_pages;
491                 if (!zone_spans_pfn(zone, pfn))
492                         ret = 1;
493         } while (zone_span_seqretry(zone, seq));
494 
495         if (ret)
496                 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
497                         pfn, zone_to_nid(zone), zone->name,
498                         start_pfn, start_pfn + sp);
499 
500         return ret;
501 }
502 
503 static int page_is_consistent(struct zone *zone, struct page *page)
504 {
505         if (!pfn_valid_within(page_to_pfn(page)))
506                 return 0;
507         if (zone != page_zone(page))
508                 return 0;
509 
510         return 1;
511 }
512 /*
513  * Temporary debugging check for pages not lying within a given zone.
514  */
515 static int bad_range(struct zone *zone, struct page *page)
516 {
517         if (page_outside_zone_boundaries(zone, page))
518                 return 1;
519         if (!page_is_consistent(zone, page))
520                 return 1;
521 
522         return 0;
523 }
524 #else
525 static inline int bad_range(struct zone *zone, struct page *page)
526 {
527         return 0;
528 }
529 #endif
530 
531 static void bad_page(struct page *page, const char *reason,
532                 unsigned long bad_flags)
533 {
534         static unsigned long resume;
535         static unsigned long nr_shown;
536         static unsigned long nr_unshown;
537 
538         /*
539          * Allow a burst of 60 reports, then keep quiet for that minute;
540          * or allow a steady drip of one report per second.
541          */
542         if (nr_shown == 60) {
543                 if (time_before(jiffies, resume)) {
544                         nr_unshown++;
545                         goto out;
546                 }
547                 if (nr_unshown) {
548                         pr_alert(
549                               "BUG: Bad page state: %lu messages suppressed\n",
550                                 nr_unshown);
551                         nr_unshown = 0;
552                 }
553                 nr_shown = 0;
554         }
555         if (nr_shown++ == 0)
556                 resume = jiffies + 60 * HZ;
557 
558         pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
559                 current->comm, page_to_pfn(page));
560         __dump_page(page, reason);
561         bad_flags &= page->flags;
562         if (bad_flags)
563                 pr_alert("bad because of flags: %#lx(%pGp)\n",
564                                                 bad_flags, &bad_flags);
565         dump_page_owner(page);
566 
567         print_modules();
568         dump_stack();
569 out:
570         /* Leave bad fields for debug, except PageBuddy could make trouble */
571         page_mapcount_reset(page); /* remove PageBuddy */
572         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
573 }
574 
575 /*
576  * Higher-order pages are called "compound pages".  They are structured thusly:
577  *
 578  * The first PAGE_SIZE page is called the "head page" and has PG_head set.
579  *
580  * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 581  * in bit 0 of page->compound_head. The remaining bits point to the head page.
582  *
583  * The first tail page's ->compound_dtor holds the offset in array of compound
584  * page destructors. See compound_page_dtors.
585  *
586  * The first tail page's ->compound_order holds the order of allocation.
587  * This usage means that zero-order pages may not be compound.
588  */
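/*
 * Illustrative sketch (not part of this file) of the encoding described
 * above: a tail page stores the head page pointer with bit 0 set in
 * ->compound_head, so the generic helpers behave roughly like
 *
 *	PageTail(page)      : page->compound_head & 1
 *	compound_head(page) : PageTail(page) ?
 *				(struct page *)(page->compound_head - 1) : page
 *
 * (see the real set_compound_head()/compound_head() helpers for the exact
 * definitions).
 */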
589 
590 void free_compound_page(struct page *page)
591 {
592         __free_pages_ok(page, compound_order(page));
593 }
594 
595 void prep_compound_page(struct page *page, unsigned int order)
596 {
597         int i;
598         int nr_pages = 1 << order;
599 
600         set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
601         set_compound_order(page, order);
602         __SetPageHead(page);
603         for (i = 1; i < nr_pages; i++) {
604                 struct page *p = page + i;
605                 set_page_count(p, 0);
606                 p->mapping = TAIL_MAPPING;
607                 set_compound_head(p, page);
608         }
609         atomic_set(compound_mapcount_ptr(page), -1);
610 }
611 
612 #ifdef CONFIG_DEBUG_PAGEALLOC
613 unsigned int _debug_guardpage_minorder;
614 bool _debug_pagealloc_enabled __read_mostly
615                         = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
616 EXPORT_SYMBOL(_debug_pagealloc_enabled);
617 bool _debug_guardpage_enabled __read_mostly;
618 
619 static int __init early_debug_pagealloc(char *buf)
620 {
621         if (!buf)
622                 return -EINVAL;
623         return kstrtobool(buf, &_debug_pagealloc_enabled);
624 }
625 early_param("debug_pagealloc", early_debug_pagealloc);
626 
627 static bool need_debug_guardpage(void)
628 {
629         /* If we don't use debug_pagealloc, we don't need guard page */
630         if (!debug_pagealloc_enabled())
631                 return false;
632 
633         if (!debug_guardpage_minorder())
634                 return false;
635 
636         return true;
637 }
638 
639 static void init_debug_guardpage(void)
640 {
641         if (!debug_pagealloc_enabled())
642                 return;
643 
644         if (!debug_guardpage_minorder())
645                 return;
646 
647         _debug_guardpage_enabled = true;
648 }
649 
650 struct page_ext_operations debug_guardpage_ops = {
651         .need = need_debug_guardpage,
652         .init = init_debug_guardpage,
653 };
654 
655 static int __init debug_guardpage_minorder_setup(char *buf)
656 {
657         unsigned long res;
658 
659         if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
660                 pr_err("Bad debug_guardpage_minorder value\n");
661                 return 0;
662         }
663         _debug_guardpage_minorder = res;
664         pr_info("Setting debug_guardpage_minorder to %lu\n", res);
665         return 0;
666 }
667 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
668 
669 static inline bool set_page_guard(struct zone *zone, struct page *page,
670                                 unsigned int order, int migratetype)
671 {
672         struct page_ext *page_ext;
673 
674         if (!debug_guardpage_enabled())
675                 return false;
676 
677         if (order >= debug_guardpage_minorder())
678                 return false;
679 
680         page_ext = lookup_page_ext(page);
681         if (unlikely(!page_ext))
682                 return false;
683 
684         __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
685 
686         INIT_LIST_HEAD(&page->lru);
687         set_page_private(page, order);
688         /* Guard pages are not available for any usage */
689         __mod_zone_freepage_state(zone, -(1 << order), migratetype);
690 
691         return true;
692 }
693 
694 static inline void clear_page_guard(struct zone *zone, struct page *page,
695                                 unsigned int order, int migratetype)
696 {
697         struct page_ext *page_ext;
698 
699         if (!debug_guardpage_enabled())
700                 return;
701 
702         page_ext = lookup_page_ext(page);
703         if (unlikely(!page_ext))
704                 return;
705 
706         __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
707 
708         set_page_private(page, 0);
709         if (!is_migrate_isolate(migratetype))
710                 __mod_zone_freepage_state(zone, (1 << order), migratetype);
711 }
712 #else
713 struct page_ext_operations debug_guardpage_ops;
714 static inline bool set_page_guard(struct zone *zone, struct page *page,
715                         unsigned int order, int migratetype) { return false; }
716 static inline void clear_page_guard(struct zone *zone, struct page *page,
717                                 unsigned int order, int migratetype) {}
718 #endif
719 
720 static inline void set_page_order(struct page *page, unsigned int order)
721 {
722         set_page_private(page, order);
723         __SetPageBuddy(page);
724 }
725 
726 static inline void rmv_page_order(struct page *page)
727 {
728         __ClearPageBuddy(page);
729         set_page_private(page, 0);
730 }
731 
732 /*
 733  * This function checks whether a page is free && is the buddy, i.e.
 734  * we can coalesce a page and its buddy if
735  * (a) the buddy is not in a hole (check before calling!) &&
736  * (b) the buddy is in the buddy system &&
737  * (c) a page and its buddy have the same order &&
738  * (d) a page and its buddy are in the same zone.
739  *
740  * For recording whether a page is in the buddy system, we set ->_mapcount
741  * PAGE_BUDDY_MAPCOUNT_VALUE.
742  * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
743  * serialized by zone->lock.
744  *
745  * For recording page's order, we use page_private(page).
746  */
747 static inline int page_is_buddy(struct page *page, struct page *buddy,
748                                                         unsigned int order)
749 {
750         if (page_is_guard(buddy) && page_order(buddy) == order) {
751                 if (page_zone_id(page) != page_zone_id(buddy))
752                         return 0;
753 
754                 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
755 
756                 return 1;
757         }
758 
759         if (PageBuddy(buddy) && page_order(buddy) == order) {
760                 /*
761                  * zone check is done late to avoid uselessly
762                  * calculating zone/node ids for pages that could
763                  * never merge.
764                  */
765                 if (page_zone_id(page) != page_zone_id(buddy))
766                         return 0;
767 
768                 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
769 
770                 return 1;
771         }
772         return 0;
773 }
774 
775 /*
776  * Freeing function for a buddy system allocator.
777  *
778  * The concept of a buddy system is to maintain direct-mapped table
779  * (containing bit values) for memory blocks of various "orders".
780  * The bottom level table contains the map for the smallest allocatable
781  * units of memory (here, pages), and each level above it describes
782  * pairs of units from the levels below, hence, "buddies".
783  * At a high level, all that happens here is marking the table entry
784  * at the bottom level available, and propagating the changes upward
785  * as necessary, plus some accounting needed to play nicely with other
786  * parts of the VM system.
 787  * At each level, we keep a list of pages, which are heads of contiguous
 788  * free pages of length (1 << order) and marked with _mapcount
789  * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
790  * field.
791  * So when we are allocating or freeing one, we can derive the state of the
792  * other.  That is, if we allocate a small block, and both were
793  * free, the remainder of the region must be split into blocks.
794  * If a block is freed, and its buddy is also free, then this
795  * triggers coalescing into a block of larger size.
796  *
797  * -- nyc
798  */
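/*
 * Worked example of the buddy arithmetic used below (a sketch of what
 * __find_buddy_pfn() computes): the buddy of a block is found by flipping
 * the order bit of its pfn, and the merged block starts at the lower of
 * the two pfns:
 *
 *	buddy_pfn    = pfn ^ (1 << order);	e.g. pfn 12, order 2 -> 8
 *	combined_pfn = buddy_pfn & pfn;		8 & 12 == 8
 *
 * so freeing the order-2 block at pfn 12 while its buddy at pfn 8 is free
 * produces one order-3 block starting at pfn 8.
 */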
799 
800 static inline void __free_one_page(struct page *page,
801                 unsigned long pfn,
802                 struct zone *zone, unsigned int order,
803                 int migratetype)
804 {
805         unsigned long combined_pfn;
806         unsigned long uninitialized_var(buddy_pfn);
807         struct page *buddy;
808         unsigned int max_order;
809 
810         max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
811 
812         VM_BUG_ON(!zone_is_initialized(zone));
813         VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
814 
815         VM_BUG_ON(migratetype == -1);
816         if (likely(!is_migrate_isolate(migratetype)))
817                 __mod_zone_freepage_state(zone, 1 << order, migratetype);
818 
819         VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
820         VM_BUG_ON_PAGE(bad_range(zone, page), page);
821 
822 continue_merging:
823         while (order < max_order - 1) {
824                 buddy_pfn = __find_buddy_pfn(pfn, order);
825                 buddy = page + (buddy_pfn - pfn);
826 
827                 if (!pfn_valid_within(buddy_pfn))
828                         goto done_merging;
829                 if (!page_is_buddy(page, buddy, order))
830                         goto done_merging;
831                 /*
832                  * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
833                  * merge with it and move up one order.
834                  */
835                 if (page_is_guard(buddy)) {
836                         clear_page_guard(zone, buddy, order, migratetype);
837                 } else {
838                         list_del(&buddy->lru);
839                         zone->free_area[order].nr_free--;
840                         rmv_page_order(buddy);
841                 }
842                 combined_pfn = buddy_pfn & pfn;
843                 page = page + (combined_pfn - pfn);
844                 pfn = combined_pfn;
845                 order++;
846         }
847         if (max_order < MAX_ORDER) {
848                 /* If we are here, it means order is >= pageblock_order.
849                  * We want to prevent merge between freepages on isolate
850                  * pageblock and normal pageblock. Without this, pageblock
851                  * isolation could cause incorrect freepage or CMA accounting.
852                  *
853                  * We don't want to hit this code for the more frequent
854                  * low-order merging.
855                  */
856                 if (unlikely(has_isolate_pageblock(zone))) {
857                         int buddy_mt;
858 
859                         buddy_pfn = __find_buddy_pfn(pfn, order);
860                         buddy = page + (buddy_pfn - pfn);
861                         buddy_mt = get_pageblock_migratetype(buddy);
862 
863                         if (migratetype != buddy_mt
864                                         && (is_migrate_isolate(migratetype) ||
865                                                 is_migrate_isolate(buddy_mt)))
866                                 goto done_merging;
867                 }
868                 max_order++;
869                 goto continue_merging;
870         }
871 
872 done_merging:
873         set_page_order(page, order);
874 
875         /*
876          * If this is not the largest possible page, check if the buddy
877          * of the next-highest order is free. If it is, it's possible
878          * that pages are being freed that will coalesce soon. In case,
879          * that is happening, add the free page to the tail of the list
880          * so it's less likely to be used soon and more likely to be merged
881          * as a higher order page
882          */
883         if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
884                 struct page *higher_page, *higher_buddy;
885                 combined_pfn = buddy_pfn & pfn;
886                 higher_page = page + (combined_pfn - pfn);
887                 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
888                 higher_buddy = higher_page + (buddy_pfn - combined_pfn);
889                 if (pfn_valid_within(buddy_pfn) &&
890                     page_is_buddy(higher_page, higher_buddy, order + 1)) {
891                         list_add_tail(&page->lru,
892                                 &zone->free_area[order].free_list[migratetype]);
893                         goto out;
894                 }
895         }
896 
897         list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
898 out:
899         zone->free_area[order].nr_free++;
900 }
901 
902 /*
903  * A bad page could be due to a number of fields. Instead of multiple branches,
 904  * try to check multiple fields with one check. The caller must do a detailed
905  * check if necessary.
906  */
907 static inline bool page_expected_state(struct page *page,
908                                         unsigned long check_flags)
909 {
910         if (unlikely(atomic_read(&page->_mapcount) != -1))
911                 return false;
912 
913         if (unlikely((unsigned long)page->mapping |
914                         page_ref_count(page) |
915 #ifdef CONFIG_MEMCG
916                         (unsigned long)page->mem_cgroup |
917 #endif
918                         (page->flags & check_flags)))
919                 return false;
920 
921         return true;
922 }
923 
924 static void free_pages_check_bad(struct page *page)
925 {
926         const char *bad_reason;
927         unsigned long bad_flags;
928 
929         bad_reason = NULL;
930         bad_flags = 0;
931 
932         if (unlikely(atomic_read(&page->_mapcount) != -1))
933                 bad_reason = "nonzero mapcount";
934         if (unlikely(page->mapping != NULL))
935                 bad_reason = "non-NULL mapping";
936         if (unlikely(page_ref_count(page) != 0))
937                 bad_reason = "nonzero _refcount";
938         if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
939                 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
940                 bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
941         }
942 #ifdef CONFIG_MEMCG
943         if (unlikely(page->mem_cgroup))
944                 bad_reason = "page still charged to cgroup";
945 #endif
946         bad_page(page, bad_reason, bad_flags);
947 }
948 
949 static inline int free_pages_check(struct page *page)
950 {
951         if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
952                 return 0;
953 
954         /* Something has gone sideways, find it */
955         free_pages_check_bad(page);
956         return 1;
957 }
958 
959 static int free_tail_pages_check(struct page *head_page, struct page *page)
960 {
961         int ret = 1;
962 
963         /*
 964          * We rely on page->lru.next never having bit 0 set, unless the page
965          * is PageTail(). Let's make sure that's true even for poisoned ->lru.
966          */
967         BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
968 
969         if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
970                 ret = 0;
971                 goto out;
972         }
973         switch (page - head_page) {
974         case 1:
975                 /* the first tail page: ->mapping is compound_mapcount() */
976                 if (unlikely(compound_mapcount(page))) {
977                         bad_page(page, "nonzero compound_mapcount", 0);
978                         goto out;
979                 }
980                 break;
981         case 2:
982                 /*
983                  * the second tail page: ->mapping is
984                  * page_deferred_list().next -- ignore value.
985                  */
986                 break;
987         default:
988                 if (page->mapping != TAIL_MAPPING) {
989                         bad_page(page, "corrupted mapping in tail page", 0);
990                         goto out;
991                 }
992                 break;
993         }
994         if (unlikely(!PageTail(page))) {
995                 bad_page(page, "PageTail not set", 0);
996                 goto out;
997         }
998         if (unlikely(compound_head(page) != head_page)) {
999                 bad_page(page, "compound_head not consistent", 0);
1000                 goto out;
1001         }
1002         ret = 0;
1003 out:
1004         page->mapping = NULL;
1005         clear_compound_head(page);
1006         return ret;
1007 }
1008 
1009 static __always_inline bool free_pages_prepare(struct page *page,
1010                                         unsigned int order, bool check_free)
1011 {
1012         int bad = 0;
1013 
1014         VM_BUG_ON_PAGE(PageTail(page), page);
1015 
1016         trace_mm_page_free(page, order);
1017         kmemcheck_free_shadow(page, order);
1018 
1019         /*
1020          * Check tail pages before head page information is cleared to
1021          * avoid checking PageCompound for order-0 pages.
1022          */
1023         if (unlikely(order)) {
1024                 bool compound = PageCompound(page);
1025                 int i;
1026 
1027                 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1028 
1029                 if (compound)
1030                         ClearPageDoubleMap(page);
1031                 for (i = 1; i < (1 << order); i++) {
1032                         if (compound)
1033                                 bad += free_tail_pages_check(page, page + i);
1034                         if (unlikely(free_pages_check(page + i))) {
1035                                 bad++;
1036                                 continue;
1037                         }
1038                         (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1039                 }
1040         }
1041         if (PageMappingFlags(page))
1042                 page->mapping = NULL;
1043         if (memcg_kmem_enabled() && PageKmemcg(page))
1044                 memcg_kmem_uncharge(page, order);
1045         if (check_free)
1046                 bad += free_pages_check(page);
1047         if (bad)
1048                 return false;
1049 
1050         page_cpupid_reset_last(page);
1051         page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1052         reset_page_owner(page, order);
1053 
1054         if (!PageHighMem(page)) {
1055                 debug_check_no_locks_freed(page_address(page),
1056                                            PAGE_SIZE << order);
1057                 debug_check_no_obj_freed(page_address(page),
1058                                            PAGE_SIZE << order);
1059         }
1060         arch_free_page(page, order);
1061         kernel_poison_pages(page, 1 << order, 0);
1062         kernel_map_pages(page, 1 << order, 0);
1063         kasan_free_pages(page, order);
1064 
1065         return true;
1066 }
1067 
1068 #ifdef CONFIG_DEBUG_VM
1069 static inline bool free_pcp_prepare(struct page *page)
1070 {
1071         return free_pages_prepare(page, 0, true);
1072 }
1073 
1074 static inline bool bulkfree_pcp_prepare(struct page *page)
1075 {
1076         return false;
1077 }
1078 #else
1079 static bool free_pcp_prepare(struct page *page)
1080 {
1081         return free_pages_prepare(page, 0, false);
1082 }
1083 
1084 static bool bulkfree_pcp_prepare(struct page *page)
1085 {
1086         return free_pages_check(page);
1087 }
1088 #endif /* CONFIG_DEBUG_VM */
1089 
1090 /*
1091  * Frees a number of pages from the PCP lists
1092  * Assumes all pages on list are in same zone, and of same order.
1093  * count is the number of pages to free.
1094  *
1095  * If the zone was previously in an "all pages pinned" state then look to
1096  * see if this freeing clears that state.
1097  *
1098  * And clear the zone's pages_scanned counter, to hold off the "all pages are
1099  * pinned" detection logic.
1100  */
1101 static void free_pcppages_bulk(struct zone *zone, int count,
1102                                         struct per_cpu_pages *pcp)
1103 {
1104         int migratetype = 0;
1105         int batch_free = 0;
1106         bool isolated_pageblocks;
1107 
1108         spin_lock(&zone->lock);
1109         isolated_pageblocks = has_isolate_pageblock(zone);
1110 
1111         while (count) {
1112                 struct page *page;
1113                 struct list_head *list;
1114 
1115                 /*
1116                  * Remove pages from lists in a round-robin fashion. A
1117                  * batch_free count is maintained that is incremented when an
1118                  * empty list is encountered.  This is so more pages are freed
1119                  * off fuller lists instead of spinning excessively around empty
1120                  * lists
1121                  */
1122                 do {
1123                         batch_free++;
1124                         if (++migratetype == MIGRATE_PCPTYPES)
1125                                 migratetype = 0;
1126                         list = &pcp->lists[migratetype];
1127                 } while (list_empty(list));
1128 
1129                 /* This is the only non-empty list. Free them all. */
1130                 if (batch_free == MIGRATE_PCPTYPES)
1131                         batch_free = count;
1132 
1133                 do {
1134                         int mt; /* migratetype of the to-be-freed page */
1135 
1136                         page = list_last_entry(list, struct page, lru);
 1137                         /* must delete, as __free_one_page manipulates the list */
1138                         list_del(&page->lru);
1139 
1140                         mt = get_pcppage_migratetype(page);
1141                         /* MIGRATE_ISOLATE page should not go to pcplists */
1142                         VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1143                         /* Pageblock could have been isolated meanwhile */
1144                         if (unlikely(isolated_pageblocks))
1145                                 mt = get_pageblock_migratetype(page);
1146 
1147                         if (bulkfree_pcp_prepare(page))
1148                                 continue;
1149 
1150                         __free_one_page(page, page_to_pfn(page), zone, 0, mt);
1151                         trace_mm_page_pcpu_drain(page, 0, mt);
1152                 } while (--count && --batch_free && !list_empty(list));
1153         }
1154         spin_unlock(&zone->lock);
1155 }
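/*
 * Illustrative sketch (not part of this file, assumed caller): the typical
 * users of free_pcppages_bulk() are the per-cpu drain path, which flushes
 * an entire pcplist, and the order-0 free fast path once a pcplist grows
 * past its high watermark, roughly:
 *
 *	if (pcp->count >= pcp->high)
 *		free_pcppages_bulk(zone, READ_ONCE(pcp->batch), pcp);
 */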
1156 
1157 static void free_one_page(struct zone *zone,
1158                                 struct page *page, unsigned long pfn,
1159                                 unsigned int order,
1160                                 int migratetype)
1161 {
1162         spin_lock(&zone->lock);
1163         if (unlikely(has_isolate_pageblock(zone) ||
1164                 is_migrate_isolate(migratetype))) {
1165                 migratetype = get_pfnblock_migratetype(page, pfn);
1166         }
1167         __free_one_page(page, pfn, zone, order, migratetype);
1168         spin_unlock(&zone->lock);
1169 }
1170 
1171 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1172                                 unsigned long zone, int nid)
1173 {
1174         set_page_links(page, zone, nid, pfn);
1175         init_page_count(page);
1176         page_mapcount_reset(page);
1177         page_cpupid_reset_last(page);
1178 
1179         INIT_LIST_HEAD(&page->lru);
1180 #ifdef WANT_PAGE_VIRTUAL
1181         /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1182         if (!is_highmem_idx(zone))
1183                 set_page_address(page, __va(pfn << PAGE_SHIFT));
1184 #endif
1185 }
1186 
1187 static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
1188                                         int nid)
1189 {
1190         return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
1191 }
1192 
1193 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1194 static void init_reserved_page(unsigned long pfn)
1195 {
1196         pg_data_t *pgdat;
1197         int nid, zid;
1198 
1199         if (!early_page_uninitialised(pfn))
1200                 return;
1201 
1202         nid = early_pfn_to_nid(pfn);
1203         pgdat = NODE_DATA(nid);
1204 
1205         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1206                 struct zone *zone = &pgdat->node_zones[zid];
1207 
1208                 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1209                         break;
1210         }
1211         __init_single_pfn(pfn, zid, nid);
1212 }
1213 #else
1214 static inline void init_reserved_page(unsigned long pfn)
1215 {
1216 }
1217 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1218 
1219 /*
1220  * Initialised pages do not have PageReserved set. This function is
1221  * called for each range allocated by the bootmem allocator and
1222  * marks the pages PageReserved. The remaining valid pages are later
1223  * sent to the buddy page allocator.
1224  */
1225 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1226 {
1227         unsigned long start_pfn = PFN_DOWN(start);
1228         unsigned long end_pfn = PFN_UP(end);
1229 
1230         for (; start_pfn < end_pfn; start_pfn++) {
1231                 if (pfn_valid(start_pfn)) {
1232                         struct page *page = pfn_to_page(start_pfn);
1233 
1234                         init_reserved_page(start_pfn);
1235 
1236                         /* Avoid false-positive PageTail() */
1237                         INIT_LIST_HEAD(&page->lru);
1238 
1239                         SetPageReserved(page);
1240                 }
1241         }
1242 }
1243 
1244 static void __free_pages_ok(struct page *page, unsigned int order)
1245 {
1246         unsigned long flags;
1247         int migratetype;
1248         unsigned long pfn = page_to_pfn(page);
1249 
1250         if (!free_pages_prepare(page, order, true))
1251                 return;
1252 
1253         migratetype = get_pfnblock_migratetype(page, pfn);
1254         local_irq_save(flags);
1255         __count_vm_events(PGFREE, 1 << order);
1256         free_one_page(page_zone(page), page, pfn, order, migratetype);
1257         local_irq_restore(flags);
1258 }
1259 
1260 static void __init __free_pages_boot_core(struct page *page, unsigned int order)
1261 {
1262         unsigned int nr_pages = 1 << order;
1263         struct page *p = page;
1264         unsigned int loop;
1265 
1266         prefetchw(p);
1267         for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1268                 prefetchw(p + 1);
1269                 __ClearPageReserved(p);
1270                 set_page_count(p, 0);
1271         }
1272         __ClearPageReserved(p);
1273         set_page_count(p, 0);
1274 
1275         page_zone(page)->managed_pages += nr_pages;
1276         set_page_refcounted(page);
1277         __free_pages(page, order);
1278 }
1279 
1280 #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1281         defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
1282 
1283 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1284 
1285 int __meminit early_pfn_to_nid(unsigned long pfn)
1286 {
1287         static DEFINE_SPINLOCK(early_pfn_lock);
1288         int nid;
1289 
1290         spin_lock(&early_pfn_lock);
1291         nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1292         if (nid < 0)
1293                 nid = first_online_node;
1294         spin_unlock(&early_pfn_lock);
1295 
1296         return nid;
1297 }
1298 #endif
1299 
1300 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
1301 static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1302                                         struct mminit_pfnnid_cache *state)
1303 {
1304         int nid;
1305 
1306         nid = __early_pfn_to_nid(pfn, state);
1307         if (nid >= 0 && nid != node)
1308                 return false;
1309         return true;
1310 }
1311 
1312 /* Only safe to use early in boot when initialisation is single-threaded */
1313 static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1314 {
1315         return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1316 }
1317 
1318 #else
1319 
1320 static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1321 {
1322         return true;
1323 }
1324 static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1325                                         struct mminit_pfnnid_cache *state)
1326 {
1327         return true;
1328 }
1329 #endif
1330 
1331 
1332 void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
1333                                                         unsigned int order)
1334 {
1335         if (early_page_uninitialised(pfn))
1336                 return;
1337         return __free_pages_boot_core(page, order);
1338 }
1339 
1340 /*
1341  * Check that the whole (or subset of) a pageblock given by the interval of
1342  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 1343  * with the migration or free compaction scanner. The scanners then need to
 1344  * use only the pfn_valid_within() check for arches that allow holes within
1345  * pageblocks.
1346  *
1347  * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1348  *
1349  * It's possible on some configurations to have a setup like node0 node1 node0
 1350  * i.e. it's possible that all pages within a zone's range of pages do not
1351  * belong to a single zone. We assume that a border between node0 and node1
1352  * can occur within a single pageblock, but not a node0 node1 node0
1353  * interleaving within a single pageblock. It is therefore sufficient to check
1354  * the first and last page of a pageblock and avoid checking each individual
1355  * page in a pageblock.
1356  */
1357 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1358                                      unsigned long end_pfn, struct zone *zone)
1359 {
1360         struct page *start_page;
1361         struct page *end_page;
1362 
1363         /* end_pfn is one past the range we are checking */
1364         end_pfn--;
1365 
1366         if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1367                 return NULL;
1368 
1369         start_page = pfn_to_page(start_pfn);
1370 
1371         if (page_zone(start_page) != zone)
1372                 return NULL;
1373 
1374         end_page = pfn_to_page(end_pfn);
1375 
 1376         /* This gives shorter code than deriving page_zone(end_page) */
1377         if (page_zone_id(start_page) != page_zone_id(end_page))
1378                 return NULL;
1379 
1380         return start_page;
1381 }
1382 
1383 void set_zone_contiguous(struct zone *zone)
1384 {
1385         unsigned long block_start_pfn = zone->zone_start_pfn;
1386         unsigned long block_end_pfn;
1387 
1388         block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1389         for (; block_start_pfn < zone_end_pfn(zone);
1390                         block_start_pfn = block_end_pfn,
1391                          block_end_pfn += pageblock_nr_pages) {
1392 
1393                 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1394 
1395                 if (!__pageblock_pfn_to_page(block_start_pfn,
1396                                              block_end_pfn, zone))
1397                         return;
1398         }
1399 
1400         /* We confirm that there is no hole */
1401         zone->contiguous = true;
1402 }
1403 
1404 void clear_zone_contiguous(struct zone *zone)
1405 {
1406         zone->contiguous = false;
1407 }
1408 
1409 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1410 static void __init deferred_free_range(struct page *page,
1411                                         unsigned long pfn, int nr_pages)
1412 {
1413         int i;
1414 
1415         if (!page)
1416                 return;
1417 
1418         /* Free a large naturally-aligned chunk if possible */
1419         if (nr_pages == pageblock_nr_pages &&
1420             (pfn & (pageblock_nr_pages - 1)) == 0) {
1421                 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1422                 __free_pages_boot_core(page, pageblock_order);
1423                 return;
1424         }
1425 
1426         for (i = 0; i < nr_pages; i++, page++, pfn++) {
1427                 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1428                         set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1429                 __free_pages_boot_core(page, 0);
1430         }
1431 }
1432 
1433 /* Completion tracking for deferred_init_memmap() threads */
1434 static atomic_t pgdat_init_n_undone __initdata;
1435 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1436 
1437 static inline void __init pgdat_init_report_one_done(void)
1438 {
1439         if (atomic_dec_and_test(&pgdat_init_n_undone))
1440                 complete(&pgdat_init_all_done_comp);
1441 }
1442 
1443 /* Initialise remaining memory on a node */
1444 static int __init deferred_init_memmap(void *data)
1445 {
1446         pg_data_t *pgdat = data;
1447         int nid = pgdat->node_id;
1448         struct mminit_pfnnid_cache nid_init_state = { };
1449         unsigned long start = jiffies;
1450         unsigned long nr_pages = 0;
1451         unsigned long walk_start, walk_end;
1452         int i, zid;
1453         struct zone *zone;
1454         unsigned long first_init_pfn = pgdat->first_deferred_pfn;
1455         const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1456 
1457         if (first_init_pfn == ULONG_MAX) {
1458                 pgdat_init_report_one_done();
1459                 return 0;
1460         }
1461 
1462         /* Bind memory initialisation thread to a local node if possible */
1463         if (!cpumask_empty(cpumask))
1464                 set_cpus_allowed_ptr(current, cpumask);
1465 
1466         /* Sanity check boundaries */
1467         BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1468         BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1469         pgdat->first_deferred_pfn = ULONG_MAX;
1470 
1471         /* Only the highest zone is deferred so find it */
1472         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1473                 zone = pgdat->node_zones + zid;
1474                 if (first_init_pfn < zone_end_pfn(zone))
1475                         break;
1476         }
1477 
1478         for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1479                 unsigned long pfn, end_pfn;
1480                 struct page *page = NULL;
1481                 struct page *free_base_page = NULL;
1482                 unsigned long free_base_pfn = 0;
1483                 int nr_to_free = 0;
1484 
1485                 end_pfn = min(walk_end, zone_end_pfn(zone));
1486                 pfn = first_init_pfn;
1487                 if (pfn < walk_start)
1488                         pfn = walk_start;
1489                 if (pfn < zone->zone_start_pfn)
1490                         pfn = zone->zone_start_pfn;
1491 
1492                 for (; pfn < end_pfn; pfn++) {
1493                         if (!pfn_valid_within(pfn))
1494                                 goto free_range;
1495 
1496                         /*
1497                          * Ensure pfn_valid is checked every
1498                          * pageblock_nr_pages for memory holes
1499                          */
1500                         if ((pfn & (pageblock_nr_pages - 1)) == 0) {
1501                                 if (!pfn_valid(pfn)) {
1502                                         page = NULL;
1503                                         goto free_range;
1504                                 }
1505                         }
1506 
1507                         if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1508                                 page = NULL;
1509                                 goto free_range;
1510                         }
1511 
1512                         /* Minimise pfn page lookups and scheduler checks */
1513                         if (page && (pfn & (pageblock_nr_pages - 1)) != 0) {
1514                                 page++;
1515                         } else {
1516                                 nr_pages += nr_to_free;
1517                                 deferred_free_range(free_base_page,
1518                                                 free_base_pfn, nr_to_free);
1519                                 free_base_page = NULL;
1520                                 free_base_pfn = nr_to_free = 0;
1521 
1522                                 page = pfn_to_page(pfn);
1523                                 cond_resched();
1524                         }
1525 
1526                         if (page->flags) {
1527                                 VM_BUG_ON(page_zone(page) != zone);
1528                                 goto free_range;
1529                         }
1530 
1531                         __init_single_page(page, pfn, zid, nid);
1532                         if (!free_base_page) {
1533                                 free_base_page = page;
1534                                 free_base_pfn = pfn;
1535                                 nr_to_free = 0;
1536                         }
1537                         nr_to_free++;
1538 
1539                         /* Where possible, batch up pages for a single free */
1540                         continue;
1541 free_range:
1542                         /* Free the current block of pages to allocator */
1543                         nr_pages += nr_to_free;
1544                         deferred_free_range(free_base_page, free_base_pfn,
1545                                                                 nr_to_free);
1546                         free_base_page = NULL;
1547                         free_base_pfn = nr_to_free = 0;
1548                 }
1549                 /* Free the last block of pages to allocator */
1550                 nr_pages += nr_to_free;
1551                 deferred_free_range(free_base_page, free_base_pfn, nr_to_free);
1552 
1553                 first_init_pfn = max(end_pfn, first_init_pfn);
1554         }
1555 
1556         /* Sanity check that the next zone really is unpopulated */
1557         WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1558 
1559         pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
1560                                         jiffies_to_msecs(jiffies - start));
1561 
1562         pgdat_init_report_one_done();
1563         return 0;
1564 }
1565 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1566 
1567 void __init page_alloc_init_late(void)
1568 {
1569         struct zone *zone;
1570 
1571 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1572         int nid;
1573 
1574         /* There will be num_node_state(N_MEMORY) threads */
1575         atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
1576         for_each_node_state(nid, N_MEMORY) {
1577                 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1578         }
1579 
1580         /* Block until all are initialised */
1581         wait_for_completion(&pgdat_init_all_done_comp);
1582 
1583         /* Reinit limits that are based on free pages after the kernel is up */
1584         files_maxfiles_init();
1585 #endif
1586 #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
1587         /* Discard memblock private memory */
1588         memblock_discard();
1589 #endif
1590 
1591         for_each_populated_zone(zone)
1592                 set_zone_contiguous(zone);
1593 }
1594 
1595 #ifdef CONFIG_CMA
1596 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
1597 void __init init_cma_reserved_pageblock(struct page *page)
1598 {
1599         unsigned i = pageblock_nr_pages;
1600         struct page *p = page;
1601 
1602         do {
1603                 __ClearPageReserved(p);
1604                 set_page_count(p, 0);
1605         } while (++p, --i);
1606 
1607         set_pageblock_migratetype(page, MIGRATE_CMA);
1608 
1609         if (pageblock_order >= MAX_ORDER) {
1610                 i = pageblock_nr_pages;
1611                 p = page;
1612                 do {
1613                         set_page_refcounted(p);
1614                         __free_pages(p, MAX_ORDER - 1);
1615                         p += MAX_ORDER_NR_PAGES;
1616                 } while (i -= MAX_ORDER_NR_PAGES);
1617         } else {
1618                 set_page_refcounted(page);
1619                 __free_pages(page, pageblock_order);
1620         }
1621 
1622         adjust_managed_page_count(page, pageblock_nr_pages);
1623 }
1624 #endif
1625 
1626 /*
1627  * The order of subdivision here is critical for the IO subsystem.
1628  * Please do not alter this order without good reasons and regression
1629  * testing. Specifically, as large blocks of memory are subdivided,
1630  * the order in which smaller blocks are delivered depends on the order
1631  * they're subdivided in this function. This is the primary factor
1632  * influencing the order in which pages are delivered to the IO
1633  * subsystem according to empirical testing, and this is also justified
1634  * by considering the behavior of a buddy system containing a single
1635  * large block of memory acted on by a series of small allocations.
1636  * This behavior is a critical factor in sglist merging's success.
1637  *
1638  * -- nyc
1639  */
1640 static inline void expand(struct zone *zone, struct page *page,
1641         int low, int high, struct free_area *area,
1642         int migratetype)
1643 {
1644         unsigned long size = 1 << high;
1645 
1646         while (high > low) {
1647                 area--;
1648                 high--;
1649                 size >>= 1;
1650                 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1651 
1652                 /*
1653                  * Mark as guard pages (or page) so that they can be
1654                  * merged back into the allocator when their buddy is freed.
1655                  * The corresponding page table entries are not touched;
1656                  * the pages remain not present in the virtual address space.
1657                  */
1658                 if (set_page_guard(zone, &page[size], high, migratetype))
1659                         continue;
1660 
1661                 list_add(&page[size].lru, &area->free_list[migratetype]);
1662                 area->nr_free++;
1663                 set_page_order(&page[size], high);
1664         }
1665 }
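/*
 * For example, a request of order 0 satisfied from an order-3 block: with
 * low == 0 and high == 3, the loop above peels off the upper halves one
 * order at a time -- page[4] goes to the order-2 free list, page[2] to
 * order-1, page[1] to order-0 -- and page[0] is the order-0 page that
 * __rmqueue_smallest() hands back to the caller.
 */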
1666 
1667 static void check_new_page_bad(struct page *page)
1668 {
1669         const char *bad_reason = NULL;
1670         unsigned long bad_flags = 0;
1671 
1672         if (unlikely(atomic_read(&page->_mapcount) != -1))
1673                 bad_reason = "nonzero mapcount";
1674         if (unlikely(page->mapping != NULL))
1675                 bad_reason = "non-NULL mapping";
1676         if (unlikely(page_ref_count(page) != 0))
1677                 bad_reason = "nonzero _count";
1678         if (unlikely(page->flags & __PG_HWPOISON)) {
1679                 bad_reason = "HWPoisoned (hardware-corrupted)";
1680                 bad_flags = __PG_HWPOISON;
1681                 /* Don't complain about hwpoisoned pages */
1682                 page_mapcount_reset(page); /* remove PageBuddy */
1683                 return;
1684         }
1685         if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1686                 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1687                 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1688         }
1689 #ifdef CONFIG_MEMCG
1690         if (unlikely(page->mem_cgroup))
1691                 bad_reason = "page still charged to cgroup";
1692 #endif
1693         bad_page(page, bad_reason, bad_flags);
1694 }
1695 
1696 /*
1697  * This page is about to be returned from the page allocator
1698  */
1699 static inline int check_new_page(struct page *page)
1700 {
1701         if (likely(page_expected_state(page,
1702                                 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1703                 return 0;
1704 
1705         check_new_page_bad(page);
1706         return 1;
1707 }
1708 
1709 static inline bool free_pages_prezeroed(void)
1710 {
1711         return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
1712                 page_poisoning_enabled();
1713 }
1714 
1715 #ifdef CONFIG_DEBUG_VM
1716 static bool check_pcp_refill(struct page *page)
1717 {
1718         return false;
1719 }
1720 
1721 static bool check_new_pcp(struct page *page)
1722 {
1723         return check_new_page(page);
1724 }
1725 #else
1726 static bool check_pcp_refill(struct page *page)
1727 {
1728         return check_new_page(page);
1729 }
1730 static bool check_new_pcp(struct page *page)
1731 {
1732         return false;
1733 }
1734 #endif /* CONFIG_DEBUG_VM */
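/*
 * Note on the CONFIG_DEBUG_VM split above: with DEBUG_VM enabled, order-0
 * pages are re-checked when they are allocated from the pcp lists
 * (check_new_pcp), which catches corruption that occurred while they sat on
 * the list; the cheaper production configuration checks them only once,
 * when they are moved onto the pcp lists (check_pcp_refill).
 */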
1735 
1736 static bool check_new_pages(struct page *page, unsigned int order)
1737 {
1738         int i;
1739         for (i = 0; i < (1 << order); i++) {
1740                 struct page *p = page + i;
1741 
1742                 if (unlikely(check_new_page(p)))
1743                         return true;
1744         }
1745 
1746         return false;
1747 }
1748 
1749 inline void post_alloc_hook(struct page *page, unsigned int order,
1750                                 gfp_t gfp_flags)
1751 {
1752         set_page_private(page, 0);
1753         set_page_refcounted(page);
1754 
1755         arch_alloc_page(page, order);
1756         kernel_map_pages(page, 1 << order, 1);
1757         kernel_poison_pages(page, 1 << order, 1);
1758         kasan_alloc_pages(page, order);
1759         set_page_owner(page, order, gfp_flags);
1760 }
1761 
1762 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1763                                                         unsigned int alloc_flags)
1764 {
1765         int i;
1766 
1767         post_alloc_hook(page, order, gfp_flags);
1768 
1769         if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
1770                 for (i = 0; i < (1 << order); i++)
1771                         clear_highpage(page + i);
1772 
1773         if (order && (gfp_flags & __GFP_COMP))
1774                 prep_compound_page(page, order);
1775 
1776         /*
1777          * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1778          * allocate the page. The expectation is that the caller is taking
1779          * steps that will free more memory. The caller should avoid the page
1780          * being used for !PFMEMALLOC purposes.
1781          */
1782         if (alloc_flags & ALLOC_NO_WATERMARKS)
1783                 set_page_pfmemalloc(page);
1784         else
1785                 clear_page_pfmemalloc(page);
1786 }
1787 
1788 /*
1789  * Go through the free lists for the given migratetype and remove
1790  * the smallest available page from the freelists
1791  */
1792 static inline
1793 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1794                                                 int migratetype)
1795 {
1796         unsigned int current_order;
1797         struct free_area *area;
1798         struct page *page;
1799 
1800         /* Find a page of the appropriate size in the preferred list */
1801         for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1802                 area = &(zone->free_area[current_order]);
1803                 page = list_first_entry_or_null(&area->free_list[migratetype],
1804                                                         struct page, lru);
1805                 if (!page)
1806                         continue;
1807                 list_del(&page->lru);
1808                 rmv_page_order(page);
1809                 area->nr_free--;
1810                 expand(zone, page, order, current_order, area, migratetype);
1811                 set_pcppage_migratetype(page, migratetype);
1812                 return page;
1813         }
1814 
1815         return NULL;
1816 }
1817 
1818 
1819 /*
1820  * This array describes the order in which the free lists are fallen back to
1821  * when the free lists for the desired migratetype are depleted
1822  */
1823 static int fallbacks[MIGRATE_TYPES][4] = {
1824         [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
1825         [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
1826         [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
1827 #ifdef CONFIG_CMA
1828         [MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
1829 #endif
1830 #ifdef CONFIG_MEMORY_ISOLATION
1831         [MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
1832 #endif
1833 };
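/*
 * For example, a MIGRATE_UNMOVABLE request whose own free list is empty is
 * tried against MIGRATE_RECLAIMABLE first, then MIGRATE_MOVABLE; the trailing
 * MIGRATE_TYPES entry is the sentinel that terminates the walk in
 * find_suitable_fallback().
 */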
1834 
1835 #ifdef CONFIG_CMA
1836 static struct page *__rmqueue_cma_fallback(struct zone *zone,
1837                                         unsigned int order)
1838 {
1839         return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1840 }
1841 #else
1842 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1843                                         unsigned int order) { return NULL; }
1844 #endif
1845 
1846 /*
1847  * Move the free pages in a range to the free lists of the requested type.
1848  * Note that start_page and end_page are not aligned on a pageblock
1849  * boundary. If alignment is required, use move_freepages_block()
1850  */
1851 static int move_freepages(struct zone *zone,
1852                           struct page *start_page, struct page *end_page,
1853                           int migratetype, int *num_movable)
1854 {
1855         struct page *page;
1856         unsigned int order;
1857         int pages_moved = 0;
1858 
1859 #ifndef CONFIG_HOLES_IN_ZONE
1860         /*
1861          * page_zone is not safe to call in this context when
1862          * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1863          * anyway as we check zone boundaries in move_freepages_block().
1864          * Remove at a later date when no bug reports exist related to
1865          * grouping pages by mobility
1866          */
1867         VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
1868 #endif
1869 
1870         if (num_movable)
1871                 *num_movable = 0;
1872 
1873         for (page = start_page; page <= end_page;) {
1874                 if (!pfn_valid_within(page_to_pfn(page))) {
1875                         page++;
1876                         continue;
1877                 }
1878 
1879                 /* Make sure we are not inadvertently changing nodes */
1880                 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1881 
1882                 if (!PageBuddy(page)) {
1883                         /*
1884                          * We assume that pages that could be isolated for
1885                          * migration are movable. But we don't actually try
1886                          * isolating, as that would be expensive.
1887                          */
1888                         if (num_movable &&
1889                                         (PageLRU(page) || __PageMovable(page)))
1890                                 (*num_movable)++;
1891 
1892                         page++;
1893                         continue;
1894                 }
1895 
1896                 order = page_order(page);
1897                 list_move(&page->lru,
1898                           &zone->free_area[order].free_list[migratetype]);
1899                 page += 1 << order;
1900                 pages_moved += 1 << order;
1901         }
1902 
1903         return pages_moved;
1904 }
1905 
1906 int move_freepages_block(struct zone *zone, struct page *page,
1907                                 int migratetype, int *num_movable)
1908 {
1909         unsigned long start_pfn, end_pfn;
1910         struct page *start_page, *end_page;
1911 
1912         start_pfn = page_to_pfn(page);
1913         start_pfn = start_pfn & ~(pageblock_nr_pages-1);
1914         start_page = pfn_to_page(start_pfn);
1915         end_page = start_page + pageblock_nr_pages - 1;
1916         end_pfn = start_pfn + pageblock_nr_pages - 1;
1917 
1918         /* Do not cross zone boundaries */
1919         if (!zone_spans_pfn(zone, start_pfn))
1920                 start_page = page;
1921         if (!zone_spans_pfn(zone, end_pfn))
1922                 return 0;
1923 
1924         return move_freepages(zone, start_page, end_page, migratetype,
1925                                                                 num_movable);
1926 }
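/*
 * Example of the alignment arithmetic above, assuming pageblock_nr_pages ==
 * 512: for a page at pfn 0x12345, start_pfn = 0x12345 & ~0x1ff = 0x12200 and
 * end_pfn = 0x123ff, i.e. the enclosing 512-page pageblock. If either end
 * falls outside the zone, the move is clamped or skipped as above.
 */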
1927 
1928 static void change_pageblock_range(struct page *pageblock_page,
1929                                         int start_order, int migratetype)
1930 {
1931         int nr_pageblocks = 1 << (start_order - pageblock_order);
1932 
1933         while (nr_pageblocks--) {
1934                 set_pageblock_migratetype(pageblock_page, migratetype);
1935                 pageblock_page += pageblock_nr_pages;
1936         }
1937 }
1938 
1939 /*
1940  * When we are falling back to another migratetype during allocation, try to
1941  * steal extra free pages from the same pageblocks to satisfy further
1942  * allocations, instead of polluting multiple pageblocks.
1943  *
1944  * If we are stealing a relatively large buddy page, it is likely there will
1945  * be more free pages in the pageblock, so try to steal them all. For
1946  * reclaimable and unmovable allocations, we steal regardless of page size,
1947  * as fragmentation caused by those allocations polluting movable pageblocks
1948  * is worse than movable allocations stealing from unmovable and reclaimable
1949  * pageblocks.
1950  */
1951 static bool can_steal_fallback(unsigned int order, int start_mt)
1952 {
1953         /*
1954          * Leaving this order check here is intentional, even though the
1955          * next check is a more relaxed order check. The reason is that
1956          * we can steal the whole pageblock if this condition is met,
1957          * but the check below doesn't guarantee it and is just a
1958          * heuristic, so it could be changed at any time.
1959          */
1960         if (order >= pageblock_order)
1961                 return true;
1962 
1963         if (order >= pageblock_order / 2 ||
1964                 start_mt == MIGRATE_RECLAIMABLE ||
1965                 start_mt == MIGRATE_UNMOVABLE ||
1966                 page_group_by_mobility_disabled)
1967                 return true;
1968 
1969         return false;
1970 }
1971 
1972 /*
1973  * This function implements actual steal behaviour. If order is large enough,
1974  * we can steal whole pageblock. If not, we first move freepages in this
1975  * pageblock to our migratetype and determine how many already-allocated pages
1976  * are there in the pageblock with a compatible migratetype. If at least half
1977  * of pages are free or compatible, we can change migratetype of the pageblock
1978  * itself, so pages freed in the future will be put on the correct free list.
1979  */
1980 static void steal_suitable_fallback(struct zone *zone, struct page *page,
1981                                         int start_type, bool whole_block)
1982 {
1983         unsigned int current_order = page_order(page);
1984         struct free_area *area;
1985         int free_pages, movable_pages, alike_pages;
1986         int old_block_type;
1987 
1988         old_block_type = get_pageblock_migratetype(page);
1989 
1990         /*
1991          * This can happen due to races and we want to prevent broken
1992          * highatomic accounting.
1993          */
1994         if (is_migrate_highatomic(old_block_type))
1995                 goto single_page;
1996 
1997         /* Take ownership for orders >= pageblock_order */
1998         if (current_order >= pageblock_order) {
1999                 change_pageblock_range(page, current_order, start_type);
2000                 goto single_page;
2001         }
2002 
2003         /* We are not allowed to try stealing from the whole block */
2004         if (!whole_block)
2005                 goto single_page;
2006 
2007         free_pages = move_freepages_block(zone, page, start_type,
2008                                                 &movable_pages);
2009         /*
2010          * Determine how many pages are compatible with our allocation.
2011          * For movable allocation, it's the number of movable pages which
2012          * we just obtained. For other types it's a bit more tricky.
2013          */
2014         if (start_type == MIGRATE_MOVABLE) {
2015                 alike_pages = movable_pages;
2016         } else {
2017                 /*
2018                  * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2019                  * to MOVABLE pageblock, consider all non-movable pages as
2020                  * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2021                  * vice versa, be conservative since we can't distinguish the
2022                  * exact migratetype of non-movable pages.
2023                  */
2024                 if (old_block_type == MIGRATE_MOVABLE)
2025                         alike_pages = pageblock_nr_pages
2026                                                 - (free_pages + movable_pages);
2027                 else
2028                         alike_pages = 0;
2029         }
2030 
2031         /* moving whole block can fail due to zone boundary conditions */
2032         if (!free_pages)
2033                 goto single_page;
2034 
2035         /*
2036          * If a sufficient number of pages in the block are either free or of
2037          * comparable migratability as our allocation, claim the whole block.
2038          */
2039         if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2040                         page_group_by_mobility_disabled)
2041                 set_pageblock_migratetype(page, start_type);
2042 
2043         return;
2044 
2045 single_page:
2046         area = &zone->free_area[current_order];
2047         list_move(&page->lru, &area->free_list[start_type]);
2048 }
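/*
 * Example of the claim threshold above, assuming pageblock_order == 9:
 * 1 << (pageblock_order - 1) == 256, so the pageblock's migratetype is only
 * rewritten when at least half of its 512 pages are free or "alike"; smaller
 * hauls just move the free pages without retyping the block.
 */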
2049 
2050 /*
2051  * Check whether there is a suitable fallback freepage with requested order.
2052  * If only_stealable is true, this function returns fallback_mt only if
2053  * we can steal other freepages all together. This would help to reduce
2054  * fragmentation due to mixed migratetype pages in one pageblock.
2055  */
2056 int find_suitable_fallback(struct free_area *area, unsigned int order,
2057                         int migratetype, bool only_stealable, bool *can_steal)
2058 {
2059         int i;
2060         int fallback_mt;
2061 
2062         if (area->nr_free == 0)
2063                 return -1;
2064 
2065         *can_steal = false;
2066         for (i = 0;; i++) {
2067                 fallback_mt = fallbacks[migratetype][i];
2068                 if (fallback_mt == MIGRATE_TYPES)
2069                         break;
2070 
2071                 if (list_empty(&area->free_list[fallback_mt]))
2072                         continue;
2073 
2074                 if (can_steal_fallback(order, migratetype))
2075                         *can_steal = true;
2076 
2077                 if (!only_stealable)
2078                         return fallback_mt;
2079 
2080                 if (*can_steal)
2081                         return fallback_mt;
2082         }
2083 
2084         return -1;
2085 }
2086 
2087 /*
2088  * Reserve a pageblock for exclusive use of high-order atomic allocations if
2089  * there are no empty page blocks that contain a page with a suitable order
2090  */
2091 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2092                                 unsigned int alloc_order)
2093 {
2094         int mt;
2095         unsigned long max_managed, flags;
2096 
2097         /*
2098          * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2099          * Check is race-prone but harmless.
2100          */
2101         max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
2102         if (zone->nr_reserved_highatomic >= max_managed)
2103                 return;
2104 
2105         spin_lock_irqsave(&zone->lock, flags);
2106 
2107         /* Recheck the nr_reserved_highatomic limit under the lock */
2108         if (zone->nr_reserved_highatomic >= max_managed)
2109                 goto out_unlock;
2110 
2111         /* Yoink! */
2112         mt = get_pageblock_migratetype(page);
2113         if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2114             && !is_migrate_cma(mt)) {
2115                 zone->nr_reserved_highatomic += pageblock_nr_pages;
2116                 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2117                 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2118         }
2119 
2120 out_unlock:
2121         spin_unlock_irqrestore(&zone->lock, flags);
2122 }
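/*
 * Example of the max_managed cap above: for a zone with 1,000,000 managed
 * pages and pageblock_nr_pages == 512, max_managed = 10,000 + 512 = 10,512
 * pages, i.e. roughly 20 pageblocks may be held in the highatomic reserve.
 */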
2123 
2124 /*
2125  * Used when an allocation is about to fail under memory pressure. This
2126  * potentially hurts the reliability of high-order allocations when under
2127  * intense memory pressure but failed atomic allocations should be easier
2128  * to recover from than an OOM.
2129  *
2130  * If @force is true, try to unreserve a pageblock even though highatomic
2131  * pageblock is exhausted.
2132  */
2133 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2134                                                 bool force)
2135 {
2136         struct zonelist *zonelist = ac->zonelist;
2137         unsigned long flags;
2138         struct zoneref *z;
2139         struct zone *zone;
2140         struct page *page;
2141         int order;
2142         bool ret;
2143 
2144         for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2145                                                                 ac->nodemask) {
2146                 /*
2147                  * Preserve at least one pageblock unless memory pressure
2148                  * is really high.
2149                  */
2150                 if (!force && zone->nr_reserved_highatomic <=
2151                                         pageblock_nr_pages)
2152                         continue;
2153 
2154                 spin_lock_irqsave(&zone->lock, flags);
2155                 for (order = 0; order < MAX_ORDER; order++) {
2156                         struct free_area *area = &(zone->free_area[order]);
2157 
2158                         page = list_first_entry_or_null(
2159                                         &area->free_list[MIGRATE_HIGHATOMIC],
2160                                         struct page, lru);
2161                         if (!page)
2162                                 continue;
2163 
2164                         /*
2165                          * In page freeing path, migratetype change is racy so
2166                          * we can encounter several free pages in a pageblock
2167                          * in this loop although we changed the pageblock type
2168                          * from highatomic to ac->migratetype. So we should
2169                          * adjust the count once.
2170                          */
2171                         if (is_migrate_highatomic_page(page)) {
2172                                 /*
2173                                  * It should never happen but changes to
2174                                  * locking could inadvertently allow a per-cpu
2175                                  * drain to add pages to MIGRATE_HIGHATOMIC
2176                                  * while unreserving so be safe and watch for
2177                                  * underflows.
2178                                  */
2179                                 zone->nr_reserved_highatomic -= min(
2180                                                 pageblock_nr_pages,
2181                                                 zone->nr_reserved_highatomic);
2182                         }
2183 
2184                         /*
2185                          * Convert to ac->migratetype and avoid the normal
2186                          * pageblock stealing heuristics. Minimally, the caller
2187                          * is doing the work and needs the pages. More
2188                          * importantly, if the block was always converted to
2189                          * MIGRATE_UNMOVABLE or another type then the number
2190                          * of pageblocks that cannot be completely freed
2191                          * may increase.
2192                          */
2193                         set_pageblock_migratetype(page, ac->migratetype);
2194                         ret = move_freepages_block(zone, page, ac->migratetype,
2195                                                                         NULL);
2196                         if (ret) {
2197                                 spin_unlock_irqrestore(&zone->lock, flags);
2198                                 return ret;
2199                         }
2200                 }
2201                 spin_unlock_irqrestore(&zone->lock, flags);
2202         }
2203 
2204         return false;
2205 }
2206 
2207 /*
2208  * Try finding a free buddy page on the fallback list and put it on the free
2209  * list of requested migratetype, possibly along with other pages from the same
2210  * block, depending on fragmentation avoidance heuristics. Returns true if
2211  * fallback was found so that __rmqueue_smallest() can grab it.
2212  */
2213 static inline bool
2214 __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
2215 {
2216         struct free_area *area;
2217         unsigned int current_order;
2218         struct page *page;
2219         int fallback_mt;
2220         bool can_steal;
2221 
2222         /* Find the largest possible block of pages in the other list */
2223         for (current_order = MAX_ORDER-1;
2224                                 current_order >= order && current_order <= MAX_ORDER-1;
2225                                 --current_order) {
2226                 area = &(zone->free_area[current_order]);
2227                 fallback_mt = find_suitable_fallback(area, current_order,
2228                                 start_migratetype, false, &can_steal);
2229                 if (fallback_mt == -1)
2230                         continue;
2231 
2232                 page = list_first_entry(&area->free_list[fallback_mt],
2233                                                 struct page, lru);
2234 
2235                 steal_suitable_fallback(zone, page, start_migratetype,
2236                                                                 can_steal);
2237 
2238                 trace_mm_page_alloc_extfrag(page, order, current_order,
2239                         start_migratetype, fallback_mt);
2240 
2241                 return true;
2242         }
2243 
2244         return false;
2245 }
2246 
2247 /*
2248  * Do the hard work of removing an element from the buddy allocator.
2249  * Call me with the zone->lock already held.
2250  */
2251 static struct page *__rmqueue(struct zone *zone, unsigned int order,
2252                                 int migratetype)
2253 {
2254         struct page *page;
2255 
2256 retry:
2257         page = __rmqueue_smallest(zone, order, migratetype);
2258         if (unlikely(!page)) {
2259                 if (migratetype == MIGRATE_MOVABLE)
2260                         page = __rmqueue_cma_fallback(zone, order);
2261 
2262                 if (!page && __rmqueue_fallback(zone, order, migratetype))
2263                         goto retry;
2264         }
2265 
2266         trace_mm_page_alloc_zone_locked(page, order, migratetype);
2267         return page;
2268 }
2269 
2270 /*
2271  * Obtain a specified number of elements from the buddy allocator, all under
2272  * a single hold of the lock, for efficiency.  Add them to the supplied list.
2273  * Returns the number of new pages which were placed at *list.
2274  */
2275 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2276                         unsigned long count, struct list_head *list,
2277                         int migratetype, bool cold)
2278 {
2279         int i, alloced = 0;
2280 
2281         spin_lock(&zone->lock);
2282         for (i = 0; i < count; ++i) {
2283                 struct page *page = __rmqueue(zone, order, migratetype);
2284                 if (unlikely(page == NULL))
2285                         break;
2286 
2287                 if (unlikely(check_pcp_refill(page)))
2288                         continue;
2289 
2290                 /*
2291                  * Split buddy pages returned by expand() are received here
2292                  * in physical page order. The page is added to the caller's
2293                  * list and the list head then moves forward. From the caller's
2294                  * perspective, the linked list is ordered by page number in
2295                  * some conditions. This is useful for IO devices that can
2296                  * merge IO requests if the physical pages are ordered
2297                  * properly.
2298                  */
2299                 if (likely(!cold))
2300                         list_add(&page->lru, list);
2301                 else
2302                         list_add_tail(&page->lru, list);
2303                 list = &page->lru;
2304                 alloced++;
2305                 if (is_migrate_cma(get_pcppage_migratetype(page)))
2306                         __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2307                                               -(1 << order));
2308         }
2309 
2310         /*
2311          * i pages were removed from the buddy list even if some leak due
2312          * to check_pcp_refill failing so adjust NR_FREE_PAGES based
2313          * on i. Do not confuse with 'alloced' which is the number of
2314          * pages added to the pcp list.
2315          */
2316         __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2317         spin_unlock(&zone->lock);
2318         return alloced;
2319 }
2320 
2321 #ifdef CONFIG_NUMA
2322 /*
2323  * Called from the vmstat counter updater to drain pagesets of this
2324  * currently executing processor on remote nodes after they have
2325  * expired.
2326  *
2327  * Note that this function must be called with the thread pinned to
2328  * a single processor.
2329  */
2330 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2331 {
2332         unsigned long flags;
2333         int to_drain, batch;
2334 
2335         local_irq_save(flags);
2336         batch = READ_ONCE(pcp->batch);
2337         to_drain = min(pcp->count, batch);
2338         if (to_drain > 0) {
2339                 free_pcppages_bulk(zone, to_drain, pcp);
2340                 pcp->count -= to_drain;
2341         }
2342         local_irq_restore(flags);
2343 }
2344 #endif
2345 
2346 /*
2347  * Drain pcplists of the indicated processor and zone.
2348  *
2349  * The processor must either be the current processor and the
2350  * thread pinned to the current processor or a processor that
2351  * is not online.
2352  */
2353 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2354 {
2355         unsigned long flags;
2356         struct per_cpu_pageset *pset;
2357         struct per_cpu_pages *pcp;
2358 
2359         local_irq_save(flags);
2360         pset = per_cpu_ptr(zone->pageset, cpu);
2361 
2362         pcp = &pset->pcp;
2363         if (pcp->count) {
2364                 free_pcppages_bulk(zone, pcp->count, pcp);
2365                 pcp->count = 0;
2366         }
2367         local_irq_restore(flags);
2368 }
2369 
2370 /*
2371  * Drain pcplists of all zones on the indicated processor.
2372  *
2373  * The processor must either be the current processor and the
2374  * thread pinned to the current processor or a processor that
2375  * is not online.
2376  */
2377 static void drain_pages(unsigned int cpu)
2378 {
2379         struct zone *zone;
2380 
2381         for_each_populated_zone(zone) {
2382                 drain_pages_zone(cpu, zone);
2383         }
2384 }
2385 
2386 /*
2387  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2388  *
2389  * The CPU has to be pinned. When zone parameter is non-NULL, spill just
2390  * the single zone's pages.
2391  */
2392 void drain_local_pages(struct zone *zone)
2393 {
2394         int cpu = smp_processor_id();
2395 
2396         if (zone)
2397                 drain_pages_zone(cpu, zone);
2398         else
2399                 drain_pages(cpu);
2400 }
2401 
2402 static void drain_local_pages_wq(struct work_struct *work)
2403 {
2404         /*
2405          * drain_all_pages doesn't use proper cpu hotplug protection so
2406          * we can race with cpu offline when the WQ can move this from
2407          * a cpu pinned worker to an unbound one. We can operate on a different
2408          * cpu which is all right, but we also have to make sure not to move to
2409          * a different one.
2410          */
2411         preempt_disable();
2412         drain_local_pages(NULL);
2413         preempt_enable();
2414 }
2415 
2416 /*
2417  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2418  *
2419  * When zone parameter is non-NULL, spill just the single zone's pages.
2420  *
2421  * Note that this can be extremely slow as the draining happens in a workqueue.
2422  */
2423 void drain_all_pages(struct zone *zone)
2424 {
2425         int cpu;
2426 
2427         /*
2428          * Allocate in the BSS so we won't require allocation in
2429          * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2430          */
2431         static cpumask_t cpus_with_pcps;
2432 
2433         /*
2434          * Make sure nobody triggers this path before mm_percpu_wq is fully
2435          * initialized.
2436          */
2437         if (WARN_ON_ONCE(!mm_percpu_wq))
2438                 return;
2439 
2440         /* Workqueues cannot recurse */
2441         if (current->flags & PF_WQ_WORKER)
2442                 return;
2443 
2444         /*
2445          * Do not drain if one is already in progress unless it's specific to
2446          * a zone. Such callers are primarily CMA and memory hotplug and need
2447          * the drain to be complete when the call returns.
2448          */
2449         if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2450                 if (!zone)
2451                         return;
2452                 mutex_lock(&pcpu_drain_mutex);
2453         }
2454 
2455         /*
2456          * We don't care about racing with CPU hotplug events,
2457          * as the offline notification will cause the notified
2458          * cpu to drain that CPU's pcps, and on_each_cpu_mask
2459          * disables preemption as part of its processing
2460          */
2461         for_each_online_cpu(cpu) {
2462                 struct per_cpu_pageset *pcp;
2463                 struct zone *z;
2464                 bool has_pcps = false;
2465 
2466                 if (zone) {
2467                         pcp = per_cpu_ptr(zone->pageset, cpu);
2468                         if (pcp->pcp.count)
2469                                 has_pcps = true;
2470                 } else {
2471                         for_each_populated_zone(z) {
2472                                 pcp = per_cpu_ptr(z->pageset, cpu);
2473                                 if (pcp->pcp.count) {
2474                                         has_pcps = true;
2475                                         break;
2476                                 }
2477                         }
2478                 }
2479 
2480                 if (has_pcps)
2481                         cpumask_set_cpu(cpu, &cpus_with_pcps);
2482                 else
2483                         cpumask_clear_cpu(cpu, &cpus_with_pcps);
2484         }
2485 
2486         for_each_cpu(cpu, &cpus_with_pcps) {
2487                 struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
2488                 INIT_WORK(work, drain_local_pages_wq);
2489                 queue_work_on(cpu, mm_percpu_wq, work);
2490         }
2491         for_each_cpu(cpu, &cpus_with_pcps)
2492                 flush_work(per_cpu_ptr(&pcpu_drain, cpu));
2493 
2494         mutex_unlock(&pcpu_drain_mutex);
2495 }
2496 
2497 #ifdef CONFIG_HIBERNATION
2498 
2499 /*
2500  * Touch the watchdog for every WD_PAGE_COUNT pages.
2501  */
2502 #define WD_PAGE_COUNT   (128*1024)
2503 
2504 void mark_free_pages(struct zone *zone)
2505 {
2506         unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
2507         unsigned long flags;
2508         unsigned int order, t;
2509         struct page *page;
2510 
2511         if (zone_is_empty(zone))
2512                 return;
2513 
2514         spin_lock_irqsave(&zone->lock, flags);
2515 
2516         max_zone_pfn = zone_end_pfn(zone);
2517         for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2518                 if (pfn_valid(pfn)) {
2519                         page = pfn_to_page(pfn);
2520 
2521                         if (!--page_count) {
2522                                 touch_nmi_watchdog();
2523                                 page_count = WD_PAGE_COUNT;
2524                         }
2525 
2526                         if (page_zone(page) != zone)
2527                                 continue;
2528 
2529                         if (!swsusp_page_is_forbidden(page))
2530                                 swsusp_unset_page_free(page);
2531                 }
2532 
2533         for_each_migratetype_order(order, t) {
2534                 list_for_each_entry(page,
2535                                 &zone->free_area[order].free_list[t], lru) {
2536                         unsigned long i;
2537 
2538                         pfn = page_to_pfn(page);
2539                         for (i = 0; i < (1UL << order); i++) {
2540                                 if (!--page_count) {
2541                                         touch_nmi_watchdog();
2542                                         page_count = WD_PAGE_COUNT;
2543                                 }
2544                                 swsusp_set_page_free(pfn_to_page(pfn + i));
2545                         }
2546                 }
2547         }
2548         spin_unlock_irqrestore(&zone->lock, flags);
2549 }
2550 #endif /* CONFIG_HIBERNATION */
2551 
2552 /*
2553  * Free a 0-order page
2554  * cold == true ? free a cold page : free a hot page
2555  */
2556 void free_hot_cold_page(struct page *page, bool cold)
2557 {
2558         struct zone *zone = page_zone(page);
2559         struct per_cpu_pages *pcp;
2560         unsigned long flags;
2561         unsigned long pfn = page_to_pfn(page);
2562         int migratetype;
2563 
2564         if (!free_pcp_prepare(page))
2565                 return;
2566 
2567         migratetype = get_pfnblock_migratetype(page, pfn);
2568         set_pcppage_migratetype(page, migratetype);
2569         local_irq_save(flags);
2570         __count_vm_event(PGFREE);
2571 
2572         /*
2573          * We only track unmovable, reclaimable and movable on pcp lists.
2574          * Free ISOLATE pages back to the allocator because they are being
2575          * offlined but treat HIGHATOMIC as movable pages so we can get those
2576          * areas back if necessary. Otherwise, we may have to free
2577          * excessively into the page allocator
2578          */
2579         if (migratetype >= MIGRATE_PCPTYPES) {
2580                 if (unlikely(is_migrate_isolate(migratetype))) {
2581                         free_one_page(zone, page, pfn, 0, migratetype);
2582                         goto out;
2583                 }
2584                 migratetype = MIGRATE_MOVABLE;
2585         }
2586 
2587         pcp = &this_cpu_ptr(zone->pageset)->pcp;
2588         if (!cold)
2589                 list_add(&page->lru, &pcp->lists[migratetype]);
2590         else
2591                 list_add_tail(&page->lru, &pcp->lists[migratetype]);
2592         pcp->count++;
2593         if (pcp->count >= pcp->high) {
2594                 unsigned long batch = READ_ONCE(pcp->batch);
2595                 free_pcppages_bulk(zone, batch, pcp);
2596                 pcp->count -= batch;
2597         }
2598 
2599 out:
2600         local_irq_restore(flags);
2601 }
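/*
 * Example of the pcp trimming above, assuming the common autotuned values of
 * pcp->high == 186 and pcp->batch == 31 (these vary with zone size): the
 * 186th freed page triggers free_pcppages_bulk() to return 31 pages to the
 * buddy lists, leaving 155 on the per-cpu list.
 */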
2602 
2603 /*
2604  * Free a list of 0-order pages
2605  */
2606 void free_hot_cold_page_list(struct list_head *list, bool cold)
2607 {
2608         struct page *page, *next;
2609 
2610         list_for_each_entry_safe(page, next, list, lru) {
2611                 trace_mm_page_free_batched(page, cold);
2612                 free_hot_cold_page(page, cold);
2613         }
2614 }
2615 
2616 /*
2617  * split_page takes a non-compound higher-order page, and splits it into
2618  * n (1<<order) sub-pages: page[0..n-1]
2619  * Each sub-page must be freed individually.
2620  *
2621  * Note: this is probably too low level an operation for use in drivers.
2622  * Please consult with lkml before using this in your driver.
2623  */
2624 void split_page(struct page *page, unsigned int order)
2625 {
2626         int i;
2627 
2628         VM_BUG_ON_PAGE(PageCompound(page), page);
2629         VM_BUG_ON_PAGE(!page_count(page), page);
2630 
2631 #ifdef CONFIG_KMEMCHECK
2632         /*
2633          * Split shadow pages too, because free(page[0]) would
2634          * otherwise free the whole shadow.
2635          */
2636         if (kmemcheck_page_is_tracked(page))
2637                 split_page(virt_to_page(page[0].shadow), order);
2638 #endif
2639 
2640         for (i = 1; i < (1 << order); i++)
2641                 set_page_refcounted(page + i);
2642         split_page_owner(page, order);
2643 }
2644 EXPORT_SYMBOL_GPL(split_page);
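/*
 * Usage sketch: a caller that needs four independently freeable pages that
 * are physically contiguous might do
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	if (page)
 *		split_page(page, 2);
 *
 * and later release each of page + 0 .. page + 3 with __free_page().
 */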
2645 
2646 int __isolate_free_page(struct page *page, unsigned int order)
2647 {
2648         unsigned long watermark;
2649         struct zone *zone;
2650         int mt;
2651 
2652         BUG_ON(!PageBuddy(page));
2653 
2654         zone = page_zone(page);
2655         mt = get_pageblock_migratetype(page);
2656 
2657         if (!is_migrate_isolate(mt)) {
2658                 /*
2659                  * Obey watermarks as if the page was being allocated. We can
2660                  * emulate a high-order watermark check with a raised order-0
2661                  * watermark, because we already know our high-order page
2662                  * exists.
2663                  */
2664                 watermark = min_wmark_pages(zone) + (1UL << order);
2665                 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2666                         return 0;
2667 
2668                 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2669         }
2670 
2671         /* Remove page from free list */
2672         list_del(&page->lru);
2673         zone->free_area[order].nr_free--;
2674         rmv_page_order(page);
2675 
2676         /*
2677          * Set the pageblock if the isolated page is at least half of a
2678          * pageblock
2679          */
2680         if (order >= pageblock_order - 1) {
2681                 struct page *endpage = page + (1 << order) - 1;
2682                 for (; page < endpage; page += pageblock_nr_pages) {
2683                         int mt = get_pageblock_migratetype(page);
2684                         if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
2685                             && !is_migrate_highatomic(mt))
2686                                 set_pageblock_migratetype(page,
2687                                                           MIGRATE_MOVABLE);
2688                 }
2689         }
2690 
2691 
2692         return 1UL << order;
2693 }
2694 
2695 /*
2696  * Update NUMA hit/miss statistics
2697  *
2698  * Must be called with interrupts disabled.
2699  */
2700 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
2701 {
2702 #ifdef CONFIG_NUMA
2703         enum zone_stat_item local_stat = NUMA_LOCAL;
2704 
2705         if (z->node != numa_node_id())
2706                 local_stat = NUMA_OTHER;
2707 
2708         if (z->node == preferred_zone->node)
2709                 __inc_zone_state(z, NUMA_HIT);
2710         else {
2711                 __inc_zone_state(z, NUMA_MISS);
2712                 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
2713         }
2714         __inc_zone_state(z, local_stat);
2715 #endif
2716 }
2717 
2718 /* Remove page from the per-cpu list, caller must protect the list */
2719 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
2720                         bool cold, struct per_cpu_pages *pcp,
2721                         struct list_head *list)
2722 {
2723         struct page *page;
2724 
2725         do {
2726                 if (list_empty(list)) {
2727                         pcp->count += rmqueue_bulk(zone, 0,
2728                                         pcp->batch, list,
2729                                         migratetype, cold);
2730                         if (unlikely(list_empty(list)))
2731                                 return NULL;
2732                 }
2733 
2734                 if (cold)
2735                         page = list_last_entry(list, struct page, lru);
2736                 else
2737                         page = list_first_entry(list, struct page, lru);
2738 
2739                 list_del(&page->lru);
2740                 pcp->count--;
2741         } while (check_new_pcp(page));
2742 
2743         return page;
2744 }
2745 
2746 /* Lock and remove page from the per-cpu list */
2747 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2748                         struct zone *zone, unsigned int order,
2749                         gfp_t gfp_flags, int migratetype)
2750 {
2751         struct per_cpu_pages *pcp;
2752         struct list_head *list;
2753         bool cold = ((gfp_flags & __GFP_COLD) != 0);
2754         struct page *page;
2755         unsigned long flags;
2756 
2757         local_irq_save(flags);
2758         pcp = &this_cpu_ptr(zone->pageset)->pcp;
2759         list = &pcp->lists[migratetype];
2760         page = __rmqueue_pcplist(zone,  migratetype, cold, pcp, list);
2761         if (page) {
2762                 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2763                 zone_statistics(preferred_zone, zone);
2764         }
2765         local_irq_restore(flags);
2766         return page;
2767 }
2768 
2769 /*
2770  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
2771  */
2772 static inline
2773 struct page *rmqueue(struct zone *preferred_zone,
2774                         struct zone *zone, unsigned int order,
2775                         gfp_t gfp_flags, unsigned int alloc_flags,
2776                         int migratetype)
2777 {
2778         unsigned long flags;
2779         struct page *page;
2780 
2781         if (likely(order == 0)) {
2782                 page = rmqueue_pcplist(preferred_zone, zone, order,
2783                                 gfp_flags, migratetype);
2784                 goto out;
2785         }
2786 
2787         /*
2788          * We most definitely don't want callers attempting to
2789          * allocate greater than order-1 page units with __GFP_NOFAIL.
2790          */
2791         WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
2792         spin_lock_irqsave(&zone->lock, flags);
2793 
2794         do {
2795                 page = NULL;
2796                 if (alloc_flags & ALLOC_HARDER) {
2797                         page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2798                         if (page)
2799                                 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2800                 }
2801                 if (!page)
2802                         page = __rmqueue(zone, order, migratetype);
2803         } while (page && check_new_pages(page, order));
2804         spin_unlock(&zone->lock);
2805         if (!page)
2806                 goto failed;
2807         __mod_zone_freepage_state(zone, -(1 << order),
2808                                   get_pcppage_migratetype(page));
2809 
2810         __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2811         zone_statistics(preferred_zone, zone);
2812         local_irq_restore(flags);
2813 
2814 out:
2815         VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
2816         return page;
2817 
2818 failed:
2819         local_irq_restore(flags);
2820         return NULL;
2821 }
2822 
2823 #ifdef CONFIG_FAIL_PAGE_ALLOC
2824 
2825 static struct {
2826         struct fault_attr attr;
2827 
2828         bool ignore_gfp_highmem;
2829         bool ignore_gfp_reclaim;
2830         u32 min_order;
2831 } fail_page_alloc = {
2832         .attr = FAULT_ATTR_INITIALIZER,
2833         .ignore_gfp_reclaim = true,
2834         .ignore_gfp_highmem = true,
2835         .min_order = 1,
2836 };
2837 
2838 static int __init setup_fail_page_alloc(char *str)
2839 {
2840         return setup_fault_attr(&fail_page_alloc.attr, str);
2841 }
2842 __setup("fail_page_alloc=", setup_fail_page_alloc);
2843 
2844 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2845 {
2846         if (order < fail_page_alloc.min_order)
2847                 return false;
2848         if (gfp_mask & __GFP_NOFAIL)
2849                 return false;
2850         if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
2851                 return false;
2852         if (fail_page_alloc.ignore_gfp_reclaim &&
2853                         (gfp_mask & __GFP_DIRECT_RECLAIM))
2854                 return false;
2855 
2856         return should_fail(&fail_page_alloc.attr, 1 << order);
2857 }
2858 
2859 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2860 
2861 static int __init fail_page_alloc_debugfs(void)
2862 {
2863         umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
2864         struct dentry *dir;
2865 
2866         dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2867                                         &fail_page_alloc.attr);
2868         if (IS_ERR(dir))
2869                 return PTR_ERR(dir);
2870 
2871         if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
2872                                 &fail_page_alloc.ignore_gfp_reclaim))
2873                 goto fail;
2874         if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2875                                 &fail_page_alloc.ignore_gfp_highmem))
2876                 goto fail;
2877         if (!debugfs_create_u32("min-order", mode, dir,
2878                                 &fail_page_alloc.min_order))
2879                 goto fail;
2880 
2881         return 0;
2882 fail:
2883         debugfs_remove_recursive(dir);
2884 
2885         return -ENOMEM;
2886 }
2887 
2888 late_initcall(fail_page_alloc_debugfs);
2889 
2890 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
2891 
2892 #else /* CONFIG_FAIL_PAGE_ALLOC */
2893 
2894 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2895 {
2896         return false;
2897 }
2898 
2899 #endif /* CONFIG_FAIL_PAGE_ALLOC */
2900 
2901 /*
2902  * Return true if free base pages are above 'mark'. For high-order checks it
2903  * will return true if the order-0 watermark is reached and there is at least
2904  * one free page of a suitable size. Checking now avoids taking the zone lock
2905  * to check in the allocation paths if no pages are free.
2906  */
2907 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2908                          int classzone_idx, unsigned int alloc_flags,
2909                          long free_pages)
2910 {
2911         long min = mark;
2912         int o;
2913         const bool alloc_harder = (alloc_flags & ALLOC_HARDER);
2914 
2915         /* free_pages may go negative - that's OK */
2916         free_pages -= (1 << order) - 1;
2917 
2918         if (alloc_flags & ALLOC_HIGH)
2919                 min -= min / 2;
2920 
2921         /*
2922          * If the caller does not have rights to ALLOC_HARDER then subtract
2923          * the high-atomic reserves. This will over-estimate the size of the
2924          * atomic reserve but it avoids a search.
2925          */
2926         if (likely(!alloc_harder))
2927                 free_pages -= z->nr_reserved_highatomic;
2928         else
2929                 min -= min / 4;
2930 
2931 #ifdef CONFIG_CMA
2932         /* If allocation can't use CMA areas don't use free CMA pages */
2933         if (!(alloc_flags & ALLOC_CMA))
2934                 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
2935 #endif
2936 
2937         /*
2938          * Check watermarks for an order-0 allocation request. If these
2939          * are not met, then a high-order request also cannot go ahead
2940          * even if a suitable page happened to be free.
2941          */
2942         if (free_pages <= min + z->lowmem_reserve[classzone_idx])
2943                 return false;
2944 
2945         /* If this is an order-0 request then the watermark is fine */
2946         if (!order)
2947                 return true;
2948 
2949         /* For a high-order request, check at least one suitable page is free */
2950         for (o = order; o < MAX_ORDER; o++) {
2951                 struct free_area *area = &z->free_area[o];
2952                 int mt;
2953 
2954                 if (!area->nr_free)
2955                         continue;
2956 
2957                 if (alloc_harder)
2958                         return true;
2959 
2960                 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
2961                         if (!list_empty(&area->free_list[mt]))
2962                                 return true;
2963                 }
2964 
2965 #ifdef CONFIG_CMA
2966                 if ((alloc_flags & ALLOC_CMA) &&
2967                     !list_empty(&area->free_list[MIGRATE_CMA])) {
2968                         return true;
2969                 }
2970 #endif
2971         }
2972         return false;
2973 }
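/*
 * A minimal userspace sketch of the watermark arithmetic in
 * __zone_watermark_ok() above, limited to the order-0 comparison (the
 * per-order free_area scan is omitted) and using made-up SKETCH_ALLOC_*
 * values in place of the real ALLOC_* flags:
 */

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_ALLOC_HIGH	0x1	/* hypothetical stand-in for ALLOC_HIGH */
#define SKETCH_ALLOC_HARDER	0x2	/* hypothetical stand-in for ALLOC_HARDER */

static bool sketch_watermark_ok(long free_pages, unsigned int order, long min,
				long lowmem_reserve, long highatomic_reserve,
				unsigned int alloc_flags)
{
	/* discount the pages the request itself would consume, minus one */
	free_pages -= (1L << order) - 1;

	/* high-priority callers get to use a lower watermark */
	if (alloc_flags & SKETCH_ALLOC_HIGH)
		min -= min / 2;
	/* everyone else must leave the high-atomic reserve untouched */
	if (!(alloc_flags & SKETCH_ALLOC_HARDER))
		free_pages -= highatomic_reserve;
	else
		min -= min / 4;

	return free_pages > min + lowmem_reserve;
}

int main(void)
{
	/* order-3 request against a zone with 170 free pages and min watermark 128 */
	printf("GFP_KERNEL-like: %d\n",
	       sketch_watermark_ok(170, 3, 128, 32, 16, 0));
	printf("GFP_ATOMIC-like: %d\n",
	       sketch_watermark_ok(170, 3, 128, 32, 16,
				   SKETCH_ALLOC_HIGH | SKETCH_ALLOC_HARDER));
	return 0;
}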
2974 
2975 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2976                       int classzone_idx, unsigned int alloc_flags)
2977 {
2978         return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2979                                         zone_page_state(z, NR_FREE_PAGES));
2980 }
2981 
2982 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
2983                 unsigned long mark, int classzone_idx, unsigned int alloc_flags)
2984 {
2985         long free_pages = zone_page_state(z, NR_FREE_PAGES);
2986         long cma_pages = 0;
2987 
2988 #ifdef CONFIG_CMA
2989         /* If allocation can't use CMA areas don't use free CMA pages */
2990         if (!(alloc_flags & ALLOC_CMA))
2991                 cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
2992 #endif
2993 
2994         /*
2995          * Fast check for order-0 only. If this fails then the reserves
2996          * need to be calculated. There is a corner case where the check
2997          * passes but only the high-order atomic reserves are free. If
2998          * the caller is !atomic then it'll uselessly search the free
2999          * list. That corner case is then slower but it is harmless.
3000          */
3001         if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
3002                 return true;
3003 
3004         return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3005                                         free_pages);
3006 }
3007 
3008 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3009                         unsigned long mark, int classzone_idx)
3010 {
3011         long free_pages = zone_page_state(z, NR_FREE_PAGES);
3012 
3013         if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3014                 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3015 
3016         return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
3017                                                                 free_pages);
3018 }
3019 
3020 #ifdef CONFIG_NUMA
3021 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3022 {
3023         return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3024                                 RECLAIM_DISTANCE;
3025 }
3026 #else   /* CONFIG_NUMA */
3027 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3028 {
3029         return true;
3030 }
3031 #endif  /* CONFIG_NUMA */
3032 
3033 /*
3034  * get_page_from_freelist goes through the zonelist trying to allocate
3035  * a page.
3036  */
3037 static struct page *
3038 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3039                                                 const struct alloc_context *ac)
3040 {
3041         struct zoneref *z = ac->preferred_zoneref;
3042         struct zone *zone;
3043         struct pglist_data *last_pgdat_dirty_limit = NULL;
3044 
3045         /*
3046          * Scan the zonelist, looking for a zone with enough free pages.
3047          * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
3048          */
3049         for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3050                                                                 ac->nodemask) {
3051                 struct page *page;
3052                 unsigned long mark;
3053 
3054                 if (cpusets_enabled() &&
3055                         (alloc_flags & ALLOC_CPUSET) &&
3056                         !__cpuset_zone_allowed(zone, gfp_mask))
3057                                 continue;
3058                 /*
3059                  * When allocating a page cache page for writing, we
3060                  * want to get it from a node that is within its dirty
3061                  * limit, such that no single node holds more than its
3062                  * proportional share of globally allowed dirty pages.
3063                  * The dirty limits take into account the node's
3064                  * lowmem reserves and high watermark so that kswapd
3065                  * should be able to balance it without having to
3066                  * write pages from its LRU list.
3067                  *
3068                  * XXX: For now, allow allocations to potentially
3069                  * exceed the per-node dirty limit in the slowpath
3070                  * (spread_dirty_pages unset) before going into reclaim,
3071                  * which is important when on a NUMA setup the allowed
3072                  * nodes are together not big enough to reach the
3073                  * global limit.  The proper fix for these situations
3074                  * will require awareness of nodes in the
3075                  * dirty-throttling and the flusher threads.
3076                  */
3077                 if (ac->spread_dirty_pages) {
3078                         if (last_pgdat_dirty_limit == zone->zone_pgdat)
3079                                 continue;
3080 
3081                         if (!node_dirty_ok(zone->zone_pgdat)) {
3082                                 last_pgdat_dirty_limit = zone->zone_pgdat;
3083                                 continue;
3084                         }
3085                 }
3086 
3087                 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
3088                 if (!zone_watermark_fast(zone, order, mark,
3089                                        ac_classzone_idx(ac), alloc_flags)) {
3090                         int ret;
3091 
3092                         /* Checked here to keep the fast path fast */
3093                         BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3094                         if (alloc_flags & ALLOC_NO_WATERMARKS)
3095                                 goto try_this_zone;
3096 
3097                         if (node_reclaim_mode == 0 ||
3098                             !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3099                                 continue;
3100 
3101                         ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3102                         switch (ret) {
3103                         case NODE_RECLAIM_NOSCAN:
3104                                 /* did not scan */
3105                                 continue;
3106                         case NODE_RECLAIM_FULL:
3107                                 /* scanned but unreclaimable */
3108                                 continue;
3109                         default:
3110                                 /* did we reclaim enough */
3111                                 if (zone_watermark_ok(zone, order, mark,
3112                                                 ac_classzone_idx(ac), alloc_flags))
3113                                         goto try_this_zone;
3114 
3115                                 continue;
3116                         }
3117                 }
3118 
3119 try_this_zone:
3120                 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3121                                 gfp_mask, alloc_flags, ac->migratetype);
3122                 if (page) {
3123                         prep_new_page(page, order, gfp_mask, alloc_flags);
3124 
3125                         /*
3126                          * If this is a high-order atomic allocation then check
3127                          * if the pageblock should be reserved for the future
3128                          */
3129                         if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3130                                 reserve_highatomic_pageblock(page, zone, order);
3131 
3132                         return page;
3133                 }
3134         }
3135 
3136         return NULL;
3137 }
3138 
3139 /*
3140  * Large machines with many possible nodes should not always dump per-node
3141  * meminfo in irq context.
3142  */
3143 static inline bool should_suppress_show_mem(void)
3144 {
3145         bool ret = false;
3146 
3147 #if NODES_SHIFT > 8
3148         ret = in_interrupt();
3149 #endif
3150         return ret;
3151 }
3152 
3153 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3154 {
3155         unsigned int filter = SHOW_MEM_FILTER_NODES;
3156         static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
3157 
3158         if (should_suppress_show_mem() || !__ratelimit(&show_mem_rs))
3159                 return;
3160 
3161         /*
3162          * This documents exceptions given to allocations in certain
3163          * contexts that are allowed to allocate outside current's set
3164          * of allowed nodes.
3165          */
3166         if (!(gfp_mask & __GFP_NOMEMALLOC))
3167                 if (test_thread_flag(TIF_MEMDIE) ||
3168                     (current->flags & (PF_MEMALLOC | PF_EXITING)))
3169                         filter &= ~SHOW_MEM_FILTER_NODES;
3170         if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3171                 filter &= ~SHOW_MEM_FILTER_NODES;
3172 
3173         show_mem(filter, nodemask);
3174 }
3175 
3176 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3177 {
3178         struct va_format vaf;
3179         va_list args;
3180         static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
3181                                       DEFAULT_RATELIMIT_BURST);
3182 
3183         if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
3184                 return;
3185 
3186         pr_warn("%s: ", current->comm);
3187 
3188         va_start(args, fmt);
3189         vaf.fmt = fmt;
3190         vaf.va = &args;
3191         pr_cont("%pV", &vaf);
3192         va_end(args);
3193 
3194         pr_cont(", mode:%#x(%pGg), nodemask=", gfp_mask, &gfp_mask);
3195         if (nodemask)
3196                 pr_cont("%*pbl\n", nodemask_pr_args(nodemask));
3197         else
3198                 pr_cont("(null)\n");
3199 
3200         cpuset_print_current_mems_allowed();
3201 
3202         dump_stack();
3203         warn_alloc_show_mem(gfp_mask, nodemask);
3204 }
3205 
3206 static inline struct page *
3207 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3208                               unsigned int alloc_flags,
3209                               const struct alloc_context *ac)
3210 {
3211         struct page *page;
3212 
3213         page = get_page_from_freelist(gfp_mask, order,
3214                         alloc_flags|ALLOC_CPUSET, ac);
3215         /*
3216          * fallback to ignore cpuset restriction if our nodes
3217          * are depleted
3218          */
3219         if (!page)
3220                 page = get_page_from_freelist(gfp_mask, order,
3221                                 alloc_flags, ac);
3222 
3223         return page;
3224 }
3225 
3226 static inline struct page *
3227 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3228         const struct alloc_context *ac, unsigned long *did_some_progress)
3229 {
3230         struct oom_control oc = {
3231                 .zonelist = ac->zonelist,
3232                 .nodemask = ac->nodemask,
3233                 .memcg = NULL,
3234                 .gfp_mask = gfp_mask,
3235                 .order = order,
3236         };
3237         struct page *page;
3238 
3239         *did_some_progress = 0;
3240 
3241         /*
3242          * Acquire the oom lock.  If that fails, somebody else is
3243          * making progress for us.
3244          */
3245         if (!mutex_trylock(&oom_lock)) {
3246                 *did_some_progress = 1;
3247                 schedule_timeout_uninterruptible(1);
3248                 return NULL;
3249         }
3250 
3251         /*
3252          * Go through the zonelist yet one more time, keep very high watermark
3253          * here, this is only to catch a parallel oom killing, we must fail if
3254          * we're still under heavy pressure.
3255          */
3256         page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
3257                                         ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
3258         if (page)
3259                 goto out;
3260 
3261         /* Coredumps can quickly deplete all memory reserves */
3262         if (current->flags & PF_DUMPCORE)
3263                 goto out;
3264         /* The OOM killer will not help higher order allocs */
3265         if (order > PAGE_ALLOC_COSTLY_ORDER)
3266                 goto out;
3267         /* The OOM killer does not needlessly kill tasks for lowmem */
3268         if (ac->high_zoneidx < ZONE_NORMAL)
3269                 goto out;
3270         if (pm_suspended_storage())
3271                 goto out;
3272         /*
3273          * XXX: GFP_NOFS allocations should rather fail than rely on
3274          * other requests to make forward progress.
3275          * We are in an unfortunate situation where out_of_memory cannot
3276          * do much for this context but let's try it to at least get
3277          * access to memory reserves if the current task is killed (see
3278          * out_of_memory). Once filesystems are ready to handle allocation
3279          * failures more gracefully we should just bail out here.
3280          */
3281 
3282         /* The OOM killer may not free memory on a specific node */
3283         if (gfp_mask & __GFP_THISNODE)
3284                 goto out;
3285 
3286         /* Exhausted what can be done so it's blamo time */
3287         if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
3288                 *did_some_progress = 1;
3289 
3290                 /*
3291                  * Help non-failing allocations by giving them access to memory
3292                  * reserves
3293                  */
3294                 if (gfp_mask & __GFP_NOFAIL)
3295                         page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3296                                         ALLOC_NO_WATERMARKS, ac);
3297         }
3298 out:
3299         mutex_unlock(&oom_lock);
3300         return page;
3301 }
3302 
3303 /*
3304  * Maximum number of compaction retries with progress before the OOM
3305  * killer is considered the only way to move forward.
3306  */
3307 #define MAX_COMPACT_RETRIES 16
3308 
3309 #ifdef CONFIG_COMPACTION
3310 /* Try memory compaction for high-order allocations before reclaim */
3311 static struct page *
3312 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3313                 unsigned int alloc_flags, const struct alloc_context *ac,
3314                 enum compact_priority prio, enum compact_result *compact_result)
3315 {
3316         struct page *page;
3317         unsigned int noreclaim_flag;
3318 
3319         if (!order)
3320                 return NULL;
3321 
3322         noreclaim_flag = memalloc_noreclaim_save();
3323         *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3324                                                                         prio);
3325         memalloc_noreclaim_restore(noreclaim_flag);
3326 
3327         if (*compact_result <= COMPACT_INACTIVE)
3328                 return NULL;
3329 
3330         /*
3331          * At least in one zone compaction wasn't deferred or skipped, so let's
3332          * count a compaction stall
3333          */
3334         count_vm_event(COMPACTSTALL);
3335 
3336         page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3337 
3338         if (page) {
3339                 struct zone *zone = page_zone(page);
3340 
3341                 zone->compact_blockskip_flush = false;
3342                 compaction_defer_reset(zone, order, true);
3343                 count_vm_event(COMPACTSUCCESS);
3344                 return page;
3345         }
3346 
3347         /*
3348          * It's bad if a compaction run occurs and fails. The most likely reason
3349          * is that pages exist, but not enough to satisfy watermarks.
3350          */
3351         count_vm_event(COMPACTFAIL);
3352 
3353         cond_resched();
3354 
3355         return NULL;
3356 }
3357 
3358 static inline bool
3359 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3360                      enum compact_result compact_result,
3361                      enum compact_priority *compact_priority,
3362                      int *compaction_retries)
3363 {
3364         int max_retries = MAX_COMPACT_RETRIES;
3365         int min_priority;
3366         bool ret = false;
3367         int retries = *compaction_retries;
3368         enum compact_priority priority = *compact_priority;
3369 
3370         if (!order)
3371                 return false;
3372 
3373         if (compaction_made_progress(compact_result))
3374                 (*compaction_retries)++;
3375 
3376         /*
3377          * compaction considers all the zones as desperately out of memory,
3378          * so it doesn't really make much sense to retry except when the
3379          * failure could be caused by insufficient priority
3380          */
3381         if (compaction_failed(compact_result))
3382                 goto check_priority;
3383 
3384         /*
3385          * make sure the compaction wasn't deferred and didn't bail out early
3386          * due to lock contention before we declare that we should give up.
3387          * But do not retry if the given zonelist is not suitable for
3388          * compaction.
3389          */
3390         if (compaction_withdrawn(compact_result)) {
3391                 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3392                 goto out;
3393         }
3394 
3395         /*
3396          * !costly requests are much more important than __GFP_REPEAT
3397          * costly ones because they are de facto nofail and invoke the OOM
3398          * killer to move on, while costly ones can fail and users are ready
3399          * to cope with that. 1/4 of the retries is rather arbitrary but we
3400          * would need much more detailed feedback from compaction to
3401          * make a better decision.
3402          */
3403         if (order > PAGE_ALLOC_COSTLY_ORDER)
3404                 max_retries /= 4;
3405         if (*compaction_retries <= max_retries) {
3406                 ret = true;
3407                 goto out;
3408         }
3409 
3410         /*
3411          * Make sure there are attempts at the highest priority if we exhausted
3412          * all retries or failed at the lower priorities.
3413          */
3414 check_priority:
3415         min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3416                         MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
3417 
3418         if (*compact_priority > min_priority) {
3419                 (*compact_priority)--;
3420                 *compaction_retries = 0;
3421                 ret = true;
3422         }
3423 out:
3424         trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3425         return ret;
3426 }
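/*
 * Worked example of the retry budget above (illustrative numbers only): a
 * costly request such as an order-9 THP fault gets at most
 * MAX_COMPACT_RETRIES / 4 = 4 retries at a given compaction priority; once
 * those are used up, the priority is raised one step towards
 * MIN_COMPACT_COSTLY_PRIORITY and the retry counter starts again from zero,
 * whereas a !costly request keeps the full budget of 16 retries per priority.
 */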
3427 #else
3428 static inline struct page *
3429 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3430                 unsigned int alloc_flags, const struct alloc_context *ac,
3431                 enum compact_priority prio, enum compact_result *compact_result)
3432 {
3433         *compact_result = COMPACT_SKIPPED;
3434         return NULL;
3435 }
3436 
3437 static inline bool
3438 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3439                      enum compact_result compact_result,
3440                      enum compact_priority *compact_priority,
3441                      int *compaction_retries)
3442 {
3443         struct zone *zone;
3444         struct zoneref *z;
3445 
3446         if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3447                 return false;
3448 
3449         /*
3450          * There are setups with compaction disabled which would prefer to loop
3451          * inside the allocator rather than hit the oom killer prematurely.
3452          * Let's give them some hope and keep retrying while the order-0
3453          * watermarks are OK.
3454          */
3455         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3456                                         ac->nodemask) {
3457                 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3458                                         ac_classzone_idx(ac), alloc_flags))
3459                         return true;
3460         }
3461         return false;
3462 }
3463 #endif /* CONFIG_COMPACTION */
3464 
3465 /* Perform direct synchronous page reclaim */
3466 static int
3467 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3468                                         const struct alloc_context *ac)
3469 {
3470         struct reclaim_state reclaim_state;
3471         int progress;
3472         unsigned int noreclaim_flag;
3473 
3474         cond_resched();
3475 
3476         /* We now go into synchronous reclaim */
3477         cpuset_memory_pressure_bump();
3478         noreclaim_flag = memalloc_noreclaim_save();
3479         lockdep_set_current_reclaim_state(gfp_mask);
3480         reclaim_state.reclaimed_slab = 0;
3481         current->reclaim_state = &reclaim_state;
3482 
3483         progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3484                                                                 ac->nodemask);
3485 
3486         current->reclaim_state = NULL;
3487         lockdep_clear_current_reclaim_state();
3488         memalloc_noreclaim_restore(noreclaim_flag);
3489 
3490         cond_resched();
3491 
3492         return progress;
3493 }
3494 
3495 /* The really slow allocator path where we enter direct reclaim */
3496 static inline struct page *
3497 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3498                 unsigned int alloc_flags, const struct alloc_context *ac,
3499                 unsigned long *did_some_progress)
3500 {
3501         struct page *page = NULL;
3502         bool drained = false;
3503 
3504         *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
3505         if (unlikely(!(*did_some_progress)))
3506                 return NULL;
3507 
3508 retry:
3509         page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3510 
3511         /*
3512          * If an allocation failed after direct reclaim, it could be because
3513          * pages are pinned on the per-cpu lists or in high alloc reserves.
3514          * Shrink them and try again.
3515          */
3516         if (!page && !drained) {
3517                 unreserve_highatomic_pageblock(ac, false);
3518                 drain_all_pages(NULL);
3519                 drained = true;
3520                 goto retry;
3521         }
3522 
3523         return page;
3524 }
3525 
3526 static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
3527 {
3528         struct zoneref *z;
3529         struct zone *zone;
3530         pg_data_t *last_pgdat = NULL;
3531 
3532         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3533                                         ac->high_zoneidx, ac->nodemask) {
3534                 if (last_pgdat != zone->zone_pgdat)
3535                         wakeup_kswapd(zone, order, ac->high_zoneidx);
3536                 last_pgdat = zone->zone_pgdat;
3537         }
3538 }
3539 
3540 static inline unsigned int
3541 gfp_to_alloc_flags(gfp_t gfp_mask)
3542 {
3543         unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
3544 
3545         /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
3546         BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
3547 
3548         /*
3549          * The caller may dip into page reserves a bit more if the caller
3550          * cannot run direct reclaim, or if the caller has realtime scheduling
3551          * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
3552          * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
3553          */
3554         alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
3555 
3556         if (gfp_mask & __GFP_ATOMIC) {
3557                 /*
3558                  * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
3559                  * if it can't schedule.
3560                  */
3561                 if (!(gfp_mask & __GFP_NOMEMALLOC))
3562                         alloc_flags |= ALLOC_HARDER;
3563                 /*
3564                  * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
3565                  * comment for __cpuset_node_allowed().
3566                  */
3567                 alloc_flags &= ~ALLOC_CPUSET;
3568         } else if (unlikely(rt_task(current)) && !in_interrupt())
3569                 alloc_flags |= ALLOC_HARDER;
3570 
3571 #ifdef CONFIG_CMA
3572         if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3573                 alloc_flags |= ALLOC_CMA;
3574 #endif
3575         return alloc_flags;
3576 }
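/*
 * For illustration, assuming the usual GFP_* composites of this kernel
 * generation: a GFP_KERNEL request leaves the function with
 * ALLOC_WMARK_MIN | ALLOC_CPUSET; a GFP_ATOMIC request
 * (__GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM) additionally gets
 * ALLOC_HIGH and ALLOC_HARDER while ALLOC_CPUSET is dropped; and a realtime
 * task allocating outside interrupt context gets ALLOC_HARDER even without
 * __GFP_ATOMIC. Movable allocations pick up ALLOC_CMA on CONFIG_CMA kernels.
 */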
3577 
3578 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3579 {
3580         if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3581                 return false;
3582 
3583         if (gfp_mask & __GFP_MEMALLOC)
3584                 return true;
3585         if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3586                 return true;
3587         if (!in_interrupt() &&
3588                         ((current->flags & PF_MEMALLOC) ||
3589                          unlikely(test_thread_flag(TIF_MEMDIE))))
3590                 return true;
3591 
3592         return false;
3593 }
3594 
3595 /*
3596  * Checks whether it makes sense to retry the reclaim to make forward progress
3597  * for the given allocation request.
3598  *
3599  * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
3600  * without success, or when we couldn't even meet the watermark if we
3601  * reclaimed all remaining pages on the LRU lists.
3602  *
3603  * Returns true if a retry is viable or false to enter the oom path.
3604  */
3605 static inline bool
3606 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3607                      struct alloc_context *ac, int alloc_flags,
3608                      bool did_some_progress, int *no_progress_loops)
3609 {
3610         struct zone *zone;
3611         struct zoneref *z;
3612 
3613         /*
3614          * Costly allocations might have made progress but this doesn't mean
3615          * their order will become available due to high fragmentation, so
3616          * always increment the no-progress counter for them.
3617          */
3618         if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3619                 *no_progress_loops = 0;
3620         else
3621                 (*no_progress_loops)++;
3622 
3623         /*
3624          * Make sure we converge to OOM if we cannot make any progress
3625          * several times in a row.
3626          */
3627         if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
3628                 /* Before OOM, exhaust highatomic_reserve */
3629                 return unreserve_highatomic_pageblock(ac, true);
3630         }
3631 
3632         /*
3633          * Keep reclaiming pages while there is a chance this will lead
3634          * somewhere.  If none of the target zones can satisfy our allocation
3635          * request even if all reclaimable pages are considered then we are
3636          * screwed and have to go OOM.
3637          */
3638         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3639                                         ac->nodemask) {
3640                 unsigned long available;
3641                 unsigned long reclaimable;
3642                 unsigned long min_wmark = min_wmark_pages(zone);
3643                 bool wmark;
3644 
3645                 available = reclaimable = zone_reclaimable_pages(zone);
3646                 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
3647 
3648                 /*
3649                  * Would the allocation succeed if we reclaimed all
3650                  * reclaimable pages?
3651                  */
3652                 wmark = __zone_watermark_ok(zone, order, min_wmark,
3653                                 ac_classzone_idx(ac), alloc_flags, available);
3654                 trace_reclaim_retry_zone(z, order, reclaimable,
3655                                 available, min_wmark, *no_progress_loops, wmark);
3656                 if (wmark) {
3657                         /*
3658                          * If we didn't make any progress and have a lot of
3659                          * dirty + writeback pages then we should wait for
3660                          * an IO to complete to slow down the reclaim and
3661                          * prevent a premature OOM.
3662                          */
3663                         if (!did_some_progress) {
3664                                 unsigned long write_pending;
3665 
3666                                 write_pending = zone_page_state_snapshot(zone,
3667                                                         NR_ZONE_WRITE_PENDING);
3668 
3669                                 if (2 * write_pending > reclaimable) {
3670                                         congestion_wait(BLK_RW_ASYNC, HZ/10);
3671                                         return true;
3672                                 }
3673                         }
3674 
3675                         /*
3676                          * Memory allocation/reclaim might be called from a WQ
3677                          * context and the current implementation of the WQ
3678                          * concurrency control doesn't recognize that
3679                          * a particular WQ is congested if the worker thread is
3680                          * looping without ever sleeping. Therefore we have to
3681                          * do a short sleep here rather than calling
3682                          * cond_resched().
3683                          */
3684                         if (current->flags & PF_WQ_WORKER)
3685                                 schedule_timeout_uninterruptible(1);
3686                         else
3687                                 cond_resched();
3688 
3689                         return true;
3690                 }
3691         }
3692 
3693         return false;
3694 }
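/*
 * A numeric illustration of the throttling above: with 10,000 reclaimable
 * pages of which 6,000 are dirty or under writeback, 2 * 6,000 > 10,000, so
 * an allocation that made no reclaim progress sleeps in congestion_wait()
 * for up to HZ/10 jiffies (about 100 ms) instead of immediately retrying
 * reclaim.
 */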
3695 
3696 static inline struct page *
3697 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3698                                                 struct alloc_context *ac)
3699 {
3700         bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3701         const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
3702         struct page *page = NULL;
3703         unsigned int alloc_flags;
3704         unsigned long did_some_progress;
3705         enum compact_priority compact_priority;
3706         enum compact_result compact_result;
3707         int compaction_retries;
3708         int no_progress_loops;
3709         unsigned long alloc_start = jiffies;
3710         unsigned int stall_timeout = 10 * HZ;
3711         unsigned int cpuset_mems_cookie;
3712 
3713         /*
3714          * In the slowpath, we sanity check order to avoid ever trying to
3715          * reclaim >= MAX_ORDER areas which will never succeed. Callers may
3716          * be using allocators in order of preference for an area that is
3717          * too large.
3718          */
3719         if (order >= MAX_ORDER) {
3720                 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
3721                 return NULL;
3722         }
3723 
3724         /*
3725          * We also sanity check to catch abuse of atomic reserves being used by
3726          * callers that are not in atomic context.
3727          */
3728         if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
3729                                 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3730                 gfp_mask &= ~__GFP_ATOMIC;
3731 
3732 retry_cpuset:
3733         compaction_retries = 0;
3734         no_progress_loops = 0;
3735         compact_priority = DEF_COMPACT_PRIORITY;
3736         cpuset_mems_cookie = read_mems_allowed_begin();
3737 
3738         /*
3739          * The fast path uses conservative alloc_flags to succeed only until
3740          * kswapd needs to be woken up, and to avoid the cost of setting up
3741          * alloc_flags precisely. So we do that now.
3742          */
3743         alloc_flags = gfp_to_alloc_flags(gfp_mask);
3744 
3745         /*
3746          * We need to recalculate the starting point for the zonelist iterator
3747          * because we might have used different nodemask in the fast path, or
3748          * there was a cpuset modification and we are retrying - otherwise we
3749          * could end up iterating over non-eligible zones endlessly.
3750          */
3751         ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3752                                         ac->high_zoneidx, ac->nodemask);
3753         if (!ac->preferred_zoneref->zone)
3754                 goto nopage;
3755 
3756         if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3757                 wake_all_kswapds(order, ac);
3758 
3759         /*
3760          * The adjusted alloc_flags might result in immediate success, so try
3761          * that first
3762          */
3763         page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3764         if (page)
3765                 goto got_pg;
3766 
3767         /*
3768          * For costly allocations, try direct compaction first, as it's likely
3769          * that we have enough base pages and don't need to reclaim. For non-
3770          * movable high-order allocations, do that as well, as compaction will
3771          * try to prevent permanent fragmentation by migrating from blocks of the
3772          * same migratetype.
3773          * Don't try this for allocations that are allowed to ignore
3774          * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
3775          */
3776         if (can_direct_reclaim &&
3777                         (costly_order ||
3778                            (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
3779                         && !gfp_pfmemalloc_allowed(gfp_mask)) {
3780                 page = __alloc_pages_direct_compact(gfp_mask, order,
3781                                                 alloc_flags, ac,
3782                                                 INIT_COMPACT_PRIORITY,
3783                                                 &compact_result);
3784                 if (page)
3785                         goto got_pg;
3786 
3787                 /*
3788                  * Checks for costly allocations with __GFP_NORETRY, which
3789                  * includes THP page fault allocations
3790                  */
3791                 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
3792                         /*
3793                          * If compaction is deferred for high-order allocations,
3794                          * it is because sync compaction recently failed. If
3795                          * this is the case and the caller requested a THP
3796                          * allocation, we do not want to heavily disrupt the
3797                          * system, so we fail the allocation instead of entering
3798                          * direct reclaim.
3799                          */
3800                         if (compact_result == COMPACT_DEFERRED)
3801                                 goto nopage;
3802 
3803                         /*
3804                          * Looks like reclaim/compaction is worth trying, but
3805                          * sync compaction could be very expensive, so keep
3806                          * using async compaction.
3807                          */
3808                         compact_priority = INIT_COMPACT_PRIORITY;
3809                 }
3810         }
3811 
3812 retry:
3813         /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
3814         if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3815                 wake_all_kswapds(order, ac);
3816 
3817         if (gfp_pfmemalloc_allowed(gfp_mask))
3818                 alloc_flags = ALLOC_NO_WATERMARKS;
3819 
3820         /*
3821          * Reset the zonelist iterators if memory policies can be ignored.
3822          * These allocations are high priority and system rather than user
3823          * oriented.
3824          */
3825         if (!(alloc_flags & ALLOC_CPUSET) || (alloc_flags & ALLOC_NO_WATERMARKS)) {
3826                 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
3827                 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3828                                         ac->high_zoneidx, ac->nodemask);
3829         }
3830 
3831         /* Attempt with potentially adjusted zonelist and alloc_flags */
3832         page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3833         if (page)
3834                 goto got_pg;
3835 
3836         /* Caller is not willing to reclaim, we can't balance anything */
3837         if (!can_direct_reclaim)
3838                 goto nopage;
3839 
3840         /* Make sure we know about allocations which stall for too long */
3841         if (time_after(jiffies, alloc_start + stall_timeout)) {
3842                 warn_alloc(gfp_mask & ~__GFP_NOWARN, ac->nodemask,
3843                         "page allocation stalls for %ums, order:%u",
3844                         jiffies_to_msecs(jiffies-alloc_start), order);
3845                 stall_timeout += 10 * HZ;
3846         }
3847 
3848         /* Avoid recursion of direct reclaim */
3849         if (current->flags & PF_MEMALLOC)
3850                 goto nopage;
3851 
3852         /* Try direct reclaim and then allocating */
3853         page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
3854                                                         &did_some_progress);
3855         if (page)
3856                 goto got_pg;
3857 
3858         /* Try direct compaction and then allocating */
3859         page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
3860                                         compact_priority, &compact_result);
3861         if (page)
3862                 goto got_pg;
3863 
3864         /* Do not loop if specifically requested */
3865         if (gfp_mask & __GFP_NORETRY)
3866                 goto nopage;
3867 
3868         /*
3869          * Do not retry costly high order allocations unless they are
3870          * __GFP_REPEAT
3871          */
3872         if (costly_order && !(gfp_mask & __GFP_REPEAT))
3873                 goto nopage;
3874 
3875         if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
3876                                  did_some_progress > 0, &no_progress_loops))
3877                 goto retry;
3878 
3879         /*
3880          * It doesn't make any sense to retry compaction if the order-0
3881          * reclaim is not able to make any progress because the current
3882          * implementation of compaction depends on a sufficient amount
3883          * of free memory (see __compaction_suitable).
3884          */
3885         if (did_some_progress > 0 &&
3886                         should_compact_retry(ac, order, alloc_flags,
3887                                 compact_result, &compact_priority,
3888                                 &compaction_retries))
3889                 goto retry;
3890 
3891         /*
3892          * It's possible we raced with cpuset update so the OOM would be
3893          * premature (see below the nopage: label for full explanation).
3894          */
3895         if (read_mems_allowed_retry(cpuset_mems_cookie))
3896                 goto retry_cpuset;
3897 
3898         /* Reclaim has failed us, start killing things */
3899         page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3900         if (page)
3901                 goto got_pg;
3902 
3903         /* Avoid allocations with no watermarks from looping endlessly */
3904         if (test_thread_flag(TIF_MEMDIE) &&
3905             (alloc_flags == ALLOC_NO_WATERMARKS ||
3906              (gfp_mask & __GFP_NOMEMALLOC)))
3907                 goto nopage;
3908 
3909         /* Retry as long as the OOM killer is making progress */
3910         if (did_some_progress) {
3911                 no_progress_loops = 0;
3912                 goto retry;
3913         }
3914 
3915 nopage:
3916         /*
3917          * When updating a task's mems_allowed or mempolicy nodemask, it is
3918          * possible to race with parallel threads in such a way that our
3919          * allocation can fail while the mask is being updated. If we are about
3920          * to fail, check if the cpuset changed during allocation and if so,
3921          * retry.
3922          */
3923         if (read_mems_allowed_retry(cpuset_mems_cookie))
3924                 goto retry_cpuset;
3925 
3926         /*
3927          * Make sure that a __GFP_NOFAIL request doesn't leak out and make sure
3928          * we always retry.
3929          */
3930         if (gfp_mask & __GFP_NOFAIL) {
3931                 /*
3932                  * All existing users of __GFP_NOFAIL are blockable, so warn
3933                  * about any new users that actually require GFP_NOWAIT
3934                  */
3935                 if (WARN_ON_ONCE(!can_direct_reclaim))
3936                         goto fail;
3937 
3938                 /*
3939                  * A PF_MEMALLOC request from this context is rather bizarre
3940                  * because we cannot reclaim anything and can only loop waiting
3941                  * for somebody to do the work for us
3942                  */
3943                 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
3944 
3945                 /*
3946                  * Non-failing costly orders are a hard requirement which we
3947                  * are not well prepared for, so let's warn about these users
3948                  * so that we can identify them and convert them to something
3949                  * else.
3950                  */
3951                 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
3952 
3953                 /*
3954                  * Help non-failing allocations by giving them access to memory
3955                  * reserves but do not use ALLOC_NO_WATERMARKS because this
3956                  * could deplete whole memory reserves which would just make
3957                  * the situation worse
3958                  */
3959                 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
3960                 if (page)
3961                         goto got_pg;
3962 
3963                 cond_resched();
3964                 goto retry;
3965         }
3966 fail:
3967         warn_alloc(gfp_mask, ac->nodemask,
3968                         "page allocation failure: order:%u", order);
3969 got_pg:
3970         return page;
3971 }
3972 
3973 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
3974                 struct zonelist *zonelist, nodemask_t *nodemask,
3975                 struct alloc_context *ac, gfp_t *alloc_mask,
3976                 unsigned int *alloc_flags)
3977 {
3978         ac->high_zoneidx = gfp_zone(gfp_mask);
3979         ac->zonelist = zonelist;
3980         ac->nodemask = nodemask;
3981         ac->migratetype = gfpflags_to_migratetype(gfp_mask);
3982 
3983         if (cpusets_enabled()) {
3984                 *alloc_mask |= __GFP_HARDWALL;
3985                 if (!ac->nodemask)
3986                         ac->nodemask = &cpuset_current_mems_allowed;
3987                 else
3988                         *alloc_flags |= ALLOC_CPUSET;
3989         }
3990 
3991         lockdep_trace_alloc(gfp_mask);
3992 
3993         might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
3994 
3995         if (should_fail_alloc_page(gfp_mask, order))
3996                 return false;
3997 
3998         if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
3999                 *alloc_flags |= ALLOC_CMA;
4000 
4001         return true;
4002 }
4003 
4004 /* Determine whether to spread dirty pages and what the first usable zone is */
4005 static inline void finalise_ac(gfp_t gfp_mask,
4006                 unsigned int order, struct alloc_context *ac)
4007 {
4008         /* Dirty zone balancing only done in the fast path */
4009         ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4010 
4011         /*
4012          * The preferred zone is used for statistics but crucially it is
4013          * also used as the starting point for the zonelist iterator. It
4014          * may get reset for allocations that ignore memory policies.
4015          */
4016         ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4017                                         ac->high_zoneidx, ac->nodemask);
4018 }
4019 
4020 /*
4021  * This is the 'heart' of the zoned buddy allocator.
4022  */
4023 struct page *
4024 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
4025                         struct zonelist *zonelist, nodemask_t *nodemask)
4026 {
4027         struct page *page;
4028         unsigned int alloc_flags = ALLOC_WMARK_LOW;
4029         gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
4030         struct alloc_context ac = { };
4031 
4032         gfp_mask &= gfp_allowed_mask;
4033         if (!prepare_alloc_pages(gfp_mask, order, zonelist, nodemask, &ac, &alloc_mask, &alloc_flags))
4034                 return NULL;
4035 
4036         finalise_ac(gfp_mask, order, &ac);
4037 
4038         /* First allocation attempt */
4039         page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
4040         if (likely(page))
4041                 goto out;
4042 
4043         /*
4044          * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4045          * and GFP_NOIO, which have to be inherited for all allocation requests
4046          * from a particular context which has been marked by
4047          * memalloc_no{fs,io}_{save,restore}.
4048          */
4049         alloc_mask = current_gfp_context(gfp_mask);
4050         ac.spread_dirty_pages = false;
4051 
4052         /*
4053          * Restore the original nodemask if it was potentially replaced with
4054          * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4055          */
4056         if (unlikely(ac.nodemask != nodemask))
4057                 ac.nodemask = nodemask;
4058 
4059         page = __alloc_pages_slowpath(alloc_mask, order, &ac);
4060 
4061 out:
4062         if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
4063             unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
4064                 __free_pages(page, order);
4065                 page = NULL;
4066         }
4067 
4068         if (kmemcheck_enabled && page)
4069                 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
4070 
4071         trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
4072 
4073         return page;
4074 }
4075 EXPORT_SYMBOL(__alloc_pages_nodemask);
4076 
4077 /*
4078  * Common helper functions.
4079  */
4080 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
4081 {
4082         struct page *page;
4083 
4084         /*
4085          * __get_free_pages() returns a 32-bit address, which cannot represent
4086          * a highmem page
4087          */
4088         VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
4089 
4090         page = alloc_pages(gfp_mask, order);
4091         if (!page)
4092                 return 0;
4093         return (unsigned long) page_address(page);
4094 }
4095 EXPORT_SYMBOL(__get_free_pages);
4096 
4097 unsigned long get_zeroed_page(gfp_t gfp_mask)
4098 {
4099         return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
4100 }
4101 EXPORT_SYMBOL(get_zeroed_page);
4102 
4103 void __free_pages(struct page *page, unsigned int order)
4104 {
4105         if (put_page_testzero(page)) {
4106                 if (order == 0)
4107                         free_hot_cold_page(page, false);
4108                 else
4109                         __free_pages_ok(page, order);
4110         }
4111 }
4112 
4113 EXPORT_SYMBOL(__free_pages);
4114 
4115 void free_pages(unsigned long addr, unsigned int order)
4116 {
4117         if (addr != 0) {
4118                 VM_BUG_ON(!virt_addr_valid((void *)addr));
4119                 __free_pages(virt_to_page((void *)addr), order);
4120         }
4121 }
4122 
4123 EXPORT_SYMBOL(free_pages);
4124 
4125 /*
4126  * Page Fragment:
4127  *  An arbitrary-length arbitrary-offset area of memory which resides
4128  *  within a 0 or higher order page.  Multiple fragments within that page
4129  *  are individually refcounted, in the page's reference counter.
4130  *
4131  * The page_frag functions below provide a simple allocation framework for
4132  * page fragments.  This is used by the network stack and network device
4133  * drivers to provide a backing region of memory for use as either an
4134  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4135  */
4136 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4137                                              gfp_t gfp_mask)
4138 {
4139         struct page *page = NULL;
4140         gfp_t gfp = gfp_mask;
4141 
4142 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4143         gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4144                     __GFP_NOMEMALLOC;
4145         page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4146                                 PAGE_FRAG_CACHE_MAX_ORDER);
4147         nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4148 #endif
4149         if (unlikely(!page))
4150                 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4151 
4152         nc->va = page ? page_address(page) : NULL;
4153 
4154         return page;
4155 }
4156 
4157 void __page_frag_cache_drain(struct page *page, unsigned int count)
4158 {
4159         VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4160 
4161         if (page_ref_sub_and_test(page, count)) {
4162                 unsigned int order = compound_order(page);
4163 
4164                 if (order == 0)
4165                         free_hot_cold_page(page, false);
4166                 else
4167                         __free_pages_ok(page, order);
4168         }
4169 }
4170 EXPORT_SYMBOL(__page_frag_cache_drain);
4171 
4172 void *page_frag_alloc(struct page_frag_cache *nc,
4173                       unsigned int fragsz, gfp_t gfp_mask)
4174 {
4175         unsigned int size = PAGE_SIZE;
4176         struct page *page;
4177         int offset;
4178 
4179         if (unlikely(!nc->va)) {
4180 refill:
4181                 page = __page_frag_cache_refill(nc, gfp_mask);
4182                 if (!page)
4183                         return NULL;
4184 
4185 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4186                 /* if size can vary use size else just use PAGE_SIZE */
4187                 size = nc->size;
4188 #endif
4189                 /* Even if we own the page, we do not use atomic_set().
4190                  * This would break get_page_unless_zero() users.
4191                  */
4192                 page_ref_add(page, size - 1);
4193 
4194                 /* reset page count bias and offset to start of new frag */
4195                 nc->pfmemalloc = page_is_pfmemalloc(page);
4196                 nc->pagecnt_bias = size;
4197                 nc->offset = size;
4198         }
4199 
4200         offset = nc->offset - fragsz;
4201         if (unlikely(offset < 0)) {
4202                 page = virt_to_page(nc->va);
4203 
4204                 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
4205                         goto refill;
4206 
4207 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4208                 /* if size can vary use size else just use PAGE_SIZE */
4209                 size = nc->size;
4210 #endif
4211                 /* OK, page count is 0, we can safely set it */
4212                 set_page_count(page, size);
4213 
4214                 /* reset page count bias and offset to start of new frag */
4215                 nc->pagecnt_bias = size;
4216                 offset = size - fragsz;
4217         }
4218 
4219         nc->pagecnt_bias--;
4220         nc->offset = offset;
4221 
4222         return nc->va + offset;
4223 }
4224 EXPORT_SYMBOL(page_frag_alloc);
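/*
 * A simplified userspace model of the bump-down carving that page_frag_alloc()
 * performs: fragments are handed out from the end of one backing buffer
 * towards its start, and the buffer is replaced once the remaining offset
 * cannot hold the request. The page refcount bias, pfmemalloc tracking and
 * the high-order refill path of the real code are deliberately left out, and
 * the names below are made up for the sketch:
 */

#include <stdio.h>
#include <stdlib.h>

struct frag_cache_sketch {
	char *va;		/* backing buffer, stands in for the cached page */
	size_t size;		/* buffer size, stands in for the refill size */
	size_t offset;		/* next carve point, counts down towards zero */
};

static void *frag_alloc_sketch(struct frag_cache_sketch *nc, size_t fragsz)
{
	if (!nc->va || nc->offset < fragsz) {
		/*
		 * "Refill": the real code keeps the old page alive through its
		 * refcount while earlier fragments still use it; this toy
		 * simply abandons the old buffer to stay short.
		 */
		nc->va = malloc(nc->size);
		if (!nc->va)
			return NULL;
		nc->offset = nc->size;
	}
	nc->offset -= fragsz;
	return nc->va + nc->offset;
}

int main(void)
{
	struct frag_cache_sketch nc = { .va = NULL, .size = 4096, .offset = 0 };
	void *a = frag_alloc_sketch(&nc, 256);	/* carved at offset 3840 */
	void *b = frag_alloc_sketch(&nc, 512);	/* carved at offset 3328 */

	printf("a=%p b=%p remaining=%zu\n", a, b, nc.offset);
	free(nc.va);
	return 0;
}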
4225 
4226 /*
4227  * Frees a page fragment allocated out of either a compound or order 0 page.
4228  */
4229 void page_frag_free(void *addr)
4230 {
4231         struct page *page = virt_to_head_page(addr);
4232 
4233         if (unlikely(put_page_testzero(page)))
4234                 __free_pages_ok(page, compound_order(page));
4235 }
4236 EXPORT_SYMBOL(page_frag_free);
4237 
4238 static void *make_alloc_exact(unsigned long addr, unsigned int order,
4239                 size_t size)
4240 {
4241         if (addr) {
4242                 unsigned long alloc_end = addr + (PAGE_SIZE << order);
4243                 unsigned long used = addr + PAGE_ALIGN(size);
4244 
4245                 split_page(virt_to_page((void *)addr), order);
4246                 while (used < alloc_end) {
4247                         free_page(used);
4248                         used += PAGE_SIZE;
4249                 }
4250         }
4251         return (void *)addr;
4252 }
4253 
4254 /**
4255  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
4256  * @size: the number of bytes to allocate
4257  * @gfp_mask: GFP flags for the allocation
4258  *
4259  * This function is similar to alloc_pages(), except that it allocates the
4260  * minimum number of pages to satisfy the request.  alloc_pages() can only
4261  * allocate memory in power-of-two pages.
4262  *
4263  * This function is also limited by MAX_ORDER.
4264  *
4265  * Memory allocated by this function must be released by free_pages_exact().
4266  */
4267 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4268 {
4269         unsigned int order = get_order(size);
4270         unsigned long addr;
4271 
4272         addr = __get_free_pages(gfp_mask, order);
4273         return make_alloc_exact(addr, order, size);
4274 }
4275 EXPORT_SYMBOL(alloc_pages_exact);
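/*
 * A small standalone illustration of the rounding above, assuming 4 KiB
 * pages: asking for five pages' worth of memory makes alloc_pages_exact()
 * allocate an order-3 block (eight pages), split it and free the three
 * unused tail pages. sketch_get_order() below is a plain-C stand-in for
 * get_order(), used only for the arithmetic:
 */

#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL		/* assuming 4 KiB pages for this example */

/* smallest order such that 2^order pages cover 'size' bytes */
static unsigned int sketch_get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((SKETCH_PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 5 * SKETCH_PAGE_SIZE;	/* caller wants exactly 5 pages */
	unsigned int order = sketch_get_order(size);
	unsigned long allocated = 1UL << order;
	unsigned long kept = (size + SKETCH_PAGE_SIZE - 1) / SKETCH_PAGE_SIZE;

	/* the order-3 block is split and the unused tail is freed back */
	printf("order=%u, allocated=%lu pages, kept=%lu, freed back=%lu\n",
	       order, allocated, kept, allocated - kept);
	return 0;
}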
4276 
4277 /**
4278  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4279  *                         pages on a node.
4280  * @nid: the preferred node ID where memory should be allocated
4281  * @size: the number of bytes to allocate
4282  * @gfp_mask: GFP flags for the allocation
4283  *
4284  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
4285  * back.
4286  */
4287 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
4288 {
4289         unsigned int order = get_order(size);
4290         struct page *p = alloc_pages_node(nid, gfp_mask, order);
4291         if (!p)
4292                 return NULL;
4293         return make_alloc_exact((unsigned long)page_address(p), order, size);
4294 }
4295 
4296 /**
4297  * free_pages_exact - release memory allocated via alloc_pages_exact()
4298  * @virt: the value returned by alloc_pages_exact.
4299  * @size: size of allocation, same value as passed to alloc_pages_exact().
4300  *
4301  * Release the memory allocated by a previous call to alloc_pages_exact.
4302  */
4303 void free_pages_exact(void *virt, size_t size)
4304 {
4305         unsigned long addr = (unsigned long)virt;
4306         unsigned long end = addr + PAGE_ALIGN(size);
4307 
4308         while (addr < end) {
4309                 free_page(addr);
4310                 addr += PAGE_SIZE;
4311         }
4312 }
4313 EXPORT_SYMBOL(free_pages_exact);
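
/*
 * Illustrative sketch of hypothetical usage: pairing alloc_pages_exact()
 * with free_pages_exact().  The 5-page size is an arbitrary example;
 * get_order() rounds it up to an order-3 (8 page) block and
 * make_alloc_exact() returns the 3 trailing pages to the buddy allocator
 * straight away.
 *
 *	size_t len = 5 * PAGE_SIZE;
 *	void *buf = alloc_pages_exact(len, GFP_KERNEL);
 *
 *	if (buf) {
 *		memset(buf, 0, len);
 *		free_pages_exact(buf, len);
 *	}
 */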
4314 
4315 /**
4316  * nr_free_zone_pages - count number of pages beyond high watermark
4317  * @offset: The zone index of the highest zone
4318  *
4319  * nr_free_zone_pages() counts the number of pages which are beyond the
4320  * high watermark within all zones at or below a given zone index.  For each
4321  * zone, the number of pages is calculated as:
4322  *
4323  *     nr_free_zone_pages = managed_pages - high_pages
4324  */
4325 static unsigned long nr_free_zone_pages(int offset)
4326 {
4327         struct zoneref *z;
4328         struct zone *zone;
4329 
4330         /* Just pick one node, since fallback list is circular */
4331         unsigned long sum = 0;
4332 
4333         struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
4334 
4335         for_each_zone_zonelist(zone, z, zonelist, offset) {
4336                 unsigned long size = zone->managed_pages;
4337                 unsigned long high = high_wmark_pages(zone);
4338                 if (size > high)
4339                         sum += size - high;
4340         }
4341 
4342         return sum;
4343 }
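
/*
 * Illustrative arithmetic with made-up numbers: if the zonelist holds a
 * DMA zone with 3840 managed pages and a high watermark of 128 pages,
 * plus a Normal zone with 258048 managed pages and a high watermark of
 * 3072 pages, then
 *
 *	(3840 - 128) + (258048 - 3072) = 258688
 *
 * pages are reported as being beyond the high watermarks.
 */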
4344 
4345 /**
4346  * nr_free_buffer_pages - count number of pages beyond high watermark
4347  *
4348  * nr_free_buffer_pages() counts the number of pages which are beyond the high
4349  * watermark within ZONE_DMA and ZONE_NORMAL.
4350  */
4351 unsigned long nr_free_buffer_pages(void)
4352 {
4353         return nr_free_zone_pages(gfp_zone(GFP_USER));
4354 }
4355 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
4356 
4357 /**
4358  * nr_free_pagecache_pages - count number of pages beyond high watermark
4359  *
4360  * nr_free_pagecache_pages() counts the number of pages which are beyond the
4361  * high watermark within all zones.
4362  */
4363 unsigned long nr_free_pagecache_pages(void)
4364 {
4365         return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
4366 }
4367 
4368 static inline void show_node(struct zone *zone)
4369 {
4370         if (IS_ENABLED(CONFIG_NUMA))
4371                 printk("Node %d ", zone_to_nid(zone));
4372 }
4373 
4374 long si_mem_available(void)
4375 {
4376         long available;
4377         unsigned long pagecache;
4378         unsigned long wmark_low = 0;
4379         unsigned long pages[NR_LRU_LISTS];
4380         struct zone *zone;
4381         int lru;
4382 
4383         for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
4384                 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
4385 
4386         for_each_zone(zone)
4387                 wmark_low += zone->watermark[WMARK_LOW];
4388 
4389         /*
4390          * Estimate the amount of memory available for userspace allocations,
4391          * without causing swapping.
4392          */
4393         available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
4394 
4395         /*
4396          * Not all the page cache can be freed, otherwise the system will
4397          * start swapping. Assume at least half of the page cache, or the
4398          * low watermark worth of cache, needs to stay.
4399          */
4400         pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
4401         pagecache -= min(pagecache / 2, wmark_low);
4402         available += pagecache;
4403 
4404         /*
4405          * Part of the reclaimable slab consists of items that are in use,
4406          * and cannot be freed. Cap this estimate at the low watermark.
4407          */
4408         available += global_page_state(NR_SLAB_RECLAIMABLE) -
4409                      min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
4410 
4411         if (available < 0)
4412                 available = 0;
4413         return available;
4414 }
4415 EXPORT_SYMBOL_GPL(si_mem_available);
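
/*
 * Illustrative sketch of hypothetical usage: si_mem_available() returns
 * its estimate in pages, so a caller wanting kilobytes (the unit used by
 * /proc/meminfo's MemAvailable field) would scale it by the page size:
 *
 *	long avail_kb = si_mem_available() << (PAGE_SHIFT - 10);
 */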
4416 
4417 void si_meminfo(struct sysinfo *val)
4418 {
4419         val->totalram = totalram_pages;
4420         val->sharedram = global_node_page_state(NR_SHMEM);
4421         val->freeram = global_page_state(NR_FREE_PAGES);
4422         val->bufferram = nr_blockdev_pages();
4423         val->totalhigh = totalhigh_pages;
4424         val->freehigh = nr_free_highpages();
4425         val->mem_unit = PAGE_SIZE;
4426 }
4427 
4428 EXPORT_SYMBOL(si_meminfo);
4429 
4430 #ifdef CONFIG_NUMA
4431 void si_meminfo_node(struct sysinfo *val, int nid)
4432 {
4433         int zone_type;          /* needs to be signed */
4434         unsigned long managed_pages = 0;
4435         unsigned long managed_highpages = 0;
4436         unsigned long free_highpages = 0;
4437         pg_data_t *pgdat = NODE_DATA(nid);
4438 
4439         for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
4440                 managed_pages += pgdat->node_zones[zone_type].managed_pages;
4441         val->totalram = managed_pages;
4442         val->sharedram = node_page_state(pgdat, NR_SHMEM);
4443         val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
4444 #ifdef CONFIG_HIGHMEM
4445         for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
4446                 struct zone *zone = &pgdat->node_zones[zone_type];
4447 
4448                 if (is_highmem(zone)) {
4449                         managed_highpages += zone->managed_pages;
4450                         free_highpages += zone_page_state(zone, NR_FREE_PAGES);
4451                 }
4452         }
4453         val->totalhigh = managed_highpages;
4454         val->freehigh = free_highpages;
4455 #else
4456         val->totalhigh = managed_highpages;
4457         val->freehigh = free_highpages;
4458 #endif
4459         val->mem_unit = PAGE_SIZE;
4460 }
4461 #endif
4462 
4463 /*
4464  * Determine whether the node should be displayed or not, depending on whether
4465  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
4466  */
4467 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
4468 {
4469         if (!(flags & SHOW_MEM_FILTER_NODES))
4470                 return false;
4471 
4472         /*
4473          * no node mask - aka implicit memory numa policy. Do not bother with
4474          * the synchronization - read_mems_allowed_begin - because we do not
4475          * have to be precise here.
4476          */
4477         if (!nodemask)
4478                 nodemask = &cpuset_current_mems_allowed;
4479 
4480         return !node_isset(nid, *nodemask);
4481 }
4482 
4483 #define K(x) ((x) << (PAGE_SHIFT-10))
4484 
4485 static void show_migration_types(unsigned char type)
4486 {
4487         static const char types[MIGRATE_TYPES] = {
4488                 [MIGRATE_UNMOVABLE]     = 'U',
4489                 [MIGRATE_MOVABLE]       = 'M',
4490                 [MIGRATE_RECLAIMABLE]   = 'E',
4491                 [MIGRATE_HIGHATOMIC]    = 'H',
4492 #ifdef CONFIG_CMA
4493                 [MIGRATE_CMA]           = 'C',
4494 #endif
4495 #ifdef CONFIG_MEMORY_ISOLATION
4496                 [MIGRATE_ISOLATE]       = 'I',
4497 #endif
4498         };
4499         char tmp[MIGRATE_TYPES + 1];
4500         char *p = tmp;
4501         int i;
4502 
4503         for (i = 0; i < MIGRATE_TYPES; i++) {
4504                 if (type & (1 << i))
4505                         *p++ = types[i];
4506         }
4507 
4508         *p = '\0';
4509         printk(KERN_CONT "(%s) ", tmp);
4510 }
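
/*
 * Illustrative example with a made-up bitmask: if an order's free lists
 * hold pages of the MIGRATE_UNMOVABLE and MIGRATE_MOVABLE types, the
 * caller passes
 *
 *	type = (1 << MIGRATE_UNMOVABLE) | (1 << MIGRATE_MOVABLE);
 *
 * and the printed line for that order is suffixed with "(UM) ".
 */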
4511 
4512 /*
4513  * Show free area list (used inside shift_scroll-lock stuff)
4514  * We also calculate the percentage fragmentation. We do this by counting the
4515  * memory on each free list with the exception of the first item on the list.
4516  *
4517  * Bits in @filter:
4518  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
4519  *   cpuset.
4520  */
4521 void show_free_areas(unsigned int filter, nodemask_t *nodemask)
4522 {
4523         unsigned long free_pcp = 0;
4524         int cpu;
4525         struct zone *zone;
4526         pg_data_t *pgdat;
4527 
4528         for_each_populated_zone(zone) {
4529                 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4530                         continue;
4531 
4532                 for_each_online_cpu(cpu)
4533                         free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4534         }
4535 
4536         printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
4537                 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
4538                 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
4539                 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
4540                 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
4541                 " free:%lu free_pcp:%lu free_cma:%lu\n",
4542                 global_node_page_state(NR_ACTIVE_ANON),
4543                 global_node_page_state(NR_INACTIVE_ANON),
4544                 global_node_page_state(NR_ISOLATED_ANON),
4545                 global_node_page_state(NR_ACTIVE_FILE),
4546                 global_node_page_state(NR_INACTIVE_FILE),
4547                 global_node_page_state(NR_ISOLATED_FILE),
4548                 global_node_page_state(NR_UNEVICTABLE),
4549                 global_node_page_state(NR_FILE_DIRTY),
4550                 global_node_page_state(NR_WRITEBACK),
4551                 global_node_page_state(NR_UNSTABLE_NFS),
4552                 global_page_state(NR_SLAB_RECLAIMABLE),
4553                 global_page_state(NR_SLAB_UNRECLAIMABLE),
4554                 global_node_page_state(NR_FILE_MAPPED),
4555                 global_node_page_state(NR_SHMEM),
4556                 global_page_state(NR_PAGETABLE),
4557                 global_page_state(NR_BOUNCE),
4558                 global_page_state(NR_FREE_PAGES),
4559                 free_pcp,
4560                 global_page_state(NR_FREE_CMA_PAGES));
4561 
4562         for_each_online_pgdat(pgdat) {
4563                 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
4564                         continue;
4565 
4566                 printk("Node %d"
4567                         " active_anon:%lukB"
4568                         " inactive_anon:%lukB"
4569                         " active_file:%lukB"
4570                         " inactive_file:%lukB"
4571                         " unevictable:%lukB"
4572                         " isolated(anon):%lukB"
4573                         " isolated(file):%lukB"
4574                         " mapped:%lukB"
4575                         " dirty:%lukB"
4576                         " writeback:%lukB"
4577                         " shmem:%lukB"
4578 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4579                         " shmem_thp: %lukB"
4580                         " shmem_pmdmapped: %lukB"
4581                         " anon_thp: %lukB"
4582 #endif
4583                         " writeback_tmp:%lukB"
4584                         " unstable:%lukB"
4585                         " all_unreclaimable? %s"
4586                         "\n",
4587                         pgdat->node_id,
4588                         K(node_page_state(pgdat, NR_ACTIVE_ANON)),
4589                         K(node_page_state(pgdat, NR_INACTIVE_ANON)),
4590                         K(node_page_state(pgdat, NR_ACTIVE_FILE)),
4591                         K(node_page_state(pgdat, NR_INACTIVE_FILE)),
4592                         K(node_page_state(pgdat, NR_UNEVICTABLE)),
4593                         K(node_page_state(pgdat, NR_ISOLATED_ANON)),
4594                         K(node_page_state(pgdat, NR_ISOLATED_FILE)),
4595                         K(node_page_state(pgdat, NR_FILE_MAPPED)),
4596                         K(node_page_state(pgdat, NR_FILE_DIRTY)),
4597                         K(node_page_state(pgdat, NR_WRITEBACK)),
4598                         K(node_page_state(pgdat, NR_SHMEM)),
4599 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4600                         K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
4601                         K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
4602                                         * HPAGE_PMD_NR),
4603                         K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
4604 #endif
4605                         K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
4606                         K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
4607                         pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
4608                                 "yes" : "no");
4609         }
4610 
4611         for_each_populated_zone(zone) {
4612                 int i;
4613 
4614                 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4615                         continue;
4616 
4617                 free_pcp = 0;
4618                 for_each_online_cpu(cpu)
4619                         free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4620 
4621                 show_node(zone);
4622                 printk(KERN_CONT
4623                         "%s"
4624                         " free:%lukB"
4625                         " min:%lukB"
4626                         " low:%lukB"
4627                         " high:%lukB"
4628                         " active_anon:%lukB"
4629                         " inactive_anon:%lukB"
4630                         " active_file:%lukB"
4631                         " inactive_file:%lukB"
4632                         " unevictable:%lukB"
4633                         " writepending:%lukB"
4634                         " present:%lukB"
4635                         " managed:%lukB"
4636                         " mlocked:%lukB"
4637                         " slab_reclaimable:%lukB"
4638                         " slab_unreclaimable:%lukB"
4639                         " kernel_stack:%lukB"
4640                         " pagetables:%lukB"
4641                         " bounce:%lukB"
4642                         " free_pcp:%lukB"
4643                         " local_pcp:%ukB"
4644                         " free_cma:%lukB"
4645                         "\n",
4646                         zone->name,
4647                         K(zone_page_state(zone, NR_FREE_PAGES)),
4648                         K(min_wmark_pages(zone)),
4649                         K(low_wmark_pages(zone)),
4650                         K(high_wmark_pages(zone)),
4651                         K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
4652                         K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
4653                         K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
4654                         K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
4655                         K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
4656                         K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
4657                         K(zone->present_pages),
4658                         K(zone->managed_pages),
4659                         K(zone_page_state(zone, NR_MLOCK)),
4660                         K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
4661                         K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
4662                         zone_page_state(zone, NR_KERNEL_STACK_KB),
4663                         K(zone_page_state(zone, NR_PAGETABLE)),
4664                         K(zone_page_state(zone, NR_BOUNCE)),
4665                         K(free_pcp),
4666                         K(this_cpu_read(zone->pageset->pcp.count)),
4667                         K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
4668                 printk("lowmem_reserve[]:");
4669                 for (i = 0; i < MAX_NR_ZONES; i++)
4670                         printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
4671                 printk(KERN_CONT "\n");
4672         }
4673 
4674         for_each_populated_zone(zone) {
4675                 unsigned int order;
4676                 unsigned long nr[MAX_ORDER], flags, total = 0;
4677                 unsigned char types[MAX_ORDER];
4678 
4679                 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4680                         continue;
4681                 show_node(zone);
4682                 printk(KERN_CONT "%s: ", zone->name);
4683 
4684                 spin_lock_irqsave(&zone->lock, flags);
4685                 for (order = 0; order < MAX_ORDER; order++) {
4686                         struct free_area *area = &zone->free_area[order];
4687                         int type;
4688 
4689                         nr[order] = area->nr_free;
4690                         total += nr[order] << order;
4691 
4692                         types[order] = 0;
4693                         for (type = 0; type < MIGRATE_TYPES; type++) {
4694                                 if (!list_empty(&area->free_list[type]))
4695                                         types[order] |= 1 << type;
4696                         }
4697                 }
4698                 spin_unlock_irqrestore(&zone->lock, flags);
4699                 for (order = 0; order < MAX_ORDER; order++) {
4700                         printk(KERN_CONT "%lu*%lukB ",
4701                                nr[order], K(1UL) << order);
4702                         if (nr[order])
4703                                 show_migration_types(types[order]);
4704                 }
4705                 printk(KERN_CONT "= %lukB\n", K(total));
4706         }
4707 
4708         hugetlb_show_meminfo();
4709 
4710         printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
4711 
4712         show_swap_cache_info();
4713 }
4714 
4715 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4716 {
4717         zoneref->zone = zone;
4718         zoneref->zone_idx = zone_idx(zone);
4719 }
4720 
4721 /*
4722  * Builds allocation fallback zone lists.
4723  *
4724  * Add all populated zones of a node to the zonelist.
4725  */
4726 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
4727                                 int nr_zones)
4728 {
4729         struct zone *zone;
4730         enum zone_type zone_type = MAX_NR_ZONES;
4731 
4732         do {
4733                 zone_type--;
4734                 zone = pgdat->node_zones + zone_type;
4735                 if (managed_zone(zone)) {
4736                         zoneref_set_zone(zone,
4737                                 &zonelist->_zonerefs[nr_zones++]);
4738                         check_highest_zone(zone_type);
4739                 }
4740         } while (zone_type);
4741 
4742         return nr_zones;
4743 }
4744 
4745 
4746 /*
4747  *  zonelist_order:
4748  *  0 = automatic detection of better ordering.
4749  *  1 = order by ([node] distance, -zonetype)
4750  *  2 = order by (-zonetype, [node] distance)
4751  *
4752  *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE create
4753  *  the same zonelist, so only NUMA systems can configure this parameter.
4754  */
4755 #define ZONELIST_ORDER_DEFAULT  0
4756 #define ZONELIST_ORDER_NODE     1
4757 #define ZONELIST_ORDER_ZONE     2
4758 
4759 /* zonelist order in the kernel.
4760  * set_zonelist_order() will set this to NODE or ZONE.
4761  */
4762 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
4763 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
4764 
4765 
4766 #ifdef CONFIG_NUMA
4767 /* The value the user specified, possibly changed by configuration */
4768 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4769 /* string for sysctl */
4770 #define NUMA_ZONELIST_ORDER_LEN 16
4771 char numa_zonelist_order[16] = "default";
4772 
4773 /*
4774  * Interface for configuring zonelist ordering.
4775  * Command line option "numa_zonelist_order"
4776  *      = "[dD]efault"  - default, automatic configuration.
4777  *      = "[nN]ode"     - order by node locality, then by zone within node
4778  *      = "[zZ]one"     - order by zone, then by locality within zone
4779  */
4780 
4781 static int __parse_numa_zonelist_order(char *s)
4782 {
4783         if (*s == 'd' || *s == 'D') {
4784                 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4785         } else if (*s == 'n' || *s == 'N') {
4786                 user_zonelist_order = ZONELIST_ORDER_NODE;
4787         } else if (*s == 'z' || *s == 'Z') {
4788                 user_zonelist_order = ZONELIST_ORDER_ZONE;
4789         } else {
4790                 pr_warn("Ignoring invalid numa_zonelist_order value:  %s\n", s);
4791                 return -EINVAL;
4792         }
4793         return 0;
4794 }
4795 
4796 static __init int setup_numa_zonelist_order(char *s)
4797 {
4798         int ret;
4799 
4800         if (!s)
4801                 return 0;
4802 
4803         ret = __parse_numa_zonelist_order(s);
4804         if (ret == 0)
4805                 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
4806 
4807         return ret;
4808 }
4809 early_param("numa_zonelist_order", setup_numa_zonelist_order);
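
/*
 * Illustrative usage (hypothetical configuration): the ordering can be
 * chosen at boot time with the command-line option
 *
 *	numa_zonelist_order=zone
 *
 * or changed at run time through the matching sysctl, e.g.
 *
 *	echo node > /proc/sys/vm/numa_zonelist_order
 */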
4810 
4811 /*
4812  * sysctl handler for numa_zonelist_order
4813  */
4814 int numa_zonelist_order_handler(struct ctl_table *table, int write,
4815                 void __user *buffer, size_t *length,
4816                 loff_t *ppos)
4817 {
4818         char saved_string[NUMA_ZONELIST_ORDER_LEN];
4819         int ret;
4820         static DEFINE_MUTEX(zl_order_mutex);
4821 
4822         mutex_lock(&zl_order_mutex);
4823         if (write) {
4824                 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
4825                         ret = -EINVAL;
4826                         goto out;
4827                 }
4828                 strcpy(saved_string, (char *)table->data);
4829         }
4830         ret = proc_dostring(table, write, buffer, length, ppos);
4831         if (ret)
4832                 goto out;
4833         if (write) {
4834                 int oldval = user_zonelist_order;
4835 
4836                 ret = __parse_numa_zonelist_order((char *)table->data);
4837                 if (ret) {
4838                         /*
4839                          * bogus value.  restore saved string
4840                          */
4841                         strncpy((char *)table->data, saved_string,
4842                                 NUMA_ZONELIST_ORDER_LEN);
4843                         user_zonelist_order = oldval;
4844                 } else if (oldval != user_zonelist_order) {
4845                         mutex_lock(&zonelists_mutex);
4846                         build_all_zonelists(NULL, NULL);
4847                         mutex_unlock(&zonelists_mutex);
4848                 }
4849         }
4850 out:
4851         mutex_unlock(&zl_order_mutex);
4852         return ret;
4853 }
4854 
4855 
4856 #define MAX_NODE_LOAD (nr_online_nodes)
4857 static int node_load[MAX_NUMNODES];
4858 
4859 /**
4860  * find_next_best_node - find the next node that should appear in a given node's fallback list
4861  * @node: node whose fallback list we're appending
4862  * @used_node_mask: nodemask_t of already used nodes
4863  *
4864  * We use a number of factors to determine which is the next node that should
4865  * appear on a given node's fallback list.  The node should not have appeared
4866  * already in @node's fallback list, and it should be the next closest node
4867  * according to the distance array (which contains arbitrary distance values
4868  * from each node to each node in the system), and should also prefer nodes
4869  * with no CPUs, since presumably they'll have very little allocation pressure
4870  * on them otherwise.
4871  * It returns -1 if no node is found.
4872  */
4873 static int find_next_best_node(int node, nodemask_t *used_node_mask)
4874 {
4875         int n, val;
4876         int min_val = INT_MAX;
4877         int best_node = NUMA_NO_NODE;
4878         const struct cpumask *tmp = cpumask_of_node(0);
4879 
4880         /* Use the local node if we haven't already */
4881         if (!node_isset(node, *used_node_mask)) {
4882                 node_set(node, *used_node_mask);
4883                 return node;
4884         }
4885 
4886         for_each_node_state(n, N_MEMORY) {
4887 
4888                 /* Don't want a node to appear more than once */
4889                 if (node_isset(n, *used_node_mask))
4890                         continue;
4891 
4892                 /* Use the distance array to find the distance */
4893                 val = node_distance(node, n);
4894 
4895                 /* Penalize nodes under us ("prefer the next node") */
4896                 val += (n < node);
4897 
4898                 /* Give preference to headless and unused nodes */
4899                 tmp = cpumask_of_node(n);
4900                 if (!cpumask_empty(tmp))
4901                         val += PENALTY_FOR_NODE_WITH_CPUS;
4902 
4903                 /* Slight preference for less loaded node */
4904                 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
4905                 val += node_load[n];
4906 
4907                 if (val < min_val) {
4908                         min_val = val;
4909                         best_node = n;
4910                 }
4911         }
4912 
4913         if (best_node >= 0)
4914                 node_set(best_node, *used_node_mask);
4915 
4916         return best_node;
4917 }
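
/*
 * Illustrative arithmetic with made-up numbers: building node 0's
 * fallback list on a two-node machine, a candidate node 1 with
 * node_distance(0, 1) == 20, CPUs present and node_load[1] == 0 is
 * scored as
 *
 *	val = 20 + (1 < 0) + PENALTY_FOR_NODE_WITH_CPUS;
 *	val = val * (MAX_NODE_LOAD * MAX_NUMNODES) + node_load[1];
 *
 * and whichever candidate ends up with the lowest val is appended next.
 */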
4918 
4919 
4920 /*
4921  * Build zonelists ordered by node and zones within node.
4922  * This results in maximum locality--normal zone overflows into local
4923  * DMA zone, if any--but risks exhausting DMA zone.
4924  */
4925 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
4926 {
4927         int j;
4928         struct zonelist *zonelist;
4929 
4930         zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
4931         for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
4932                 ;
4933         j = build_zonelists_node(NODE_DATA(node), zonelist, j);
4934         zonelist->_zonerefs[j].zone = NULL;
4935         zonelist->_zonerefs[j].zone_idx = 0;
4936 }
4937 
4938 /*
4939  * Build gfp_thisnode zonelists
4940  */
4941 static void build_thisnode_zonelists(pg_data_t *pgdat)
4942 {
4943         int j;
4944         struct zonelist *zonelist;
4945 
4946         zonelist = &pgdat->node_zonelists[ZONELIST_NOFALLBACK];
4947         j = build_zonelists_node(pgdat, zonelist, 0);
4948         zonelist->_zonerefs[j].zone = NULL;
4949         zonelist->_zonerefs[j].zone_idx = 0;
4950 }
4951 
4952 /*
4953  * Build zonelists ordered by zone and nodes within zones.
4954  * This results in conserving DMA zone[s] until all Normal memory is
4955  * exhausted, but results in overflowing to remote node while memory
4956  * may still exist in local DMA zone.
4957  */
4958 static int node_order[MAX_NUMNODES];
4959 
4960 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
4961 {
4962         int pos, j, node;
4963         int zone_type;          /* needs to be signed */
4964         struct zone *z;
4965         struct zonelist *zonelist;
4966 
4967         zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
4968         pos = 0;
4969         for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
4970                 for (j = 0; j < nr_nodes; j++) {
4971                         node = node_order[j];
4972                         z = &NODE_DATA(node)->node_zones[zone_type];
4973                         if (managed_zone(z)) {
4974                                 zoneref_set_zone(z,
4975                                         &zonelist->_zonerefs[pos++]);
4976                                 check_highest_zone(zone_type);
4977                         }
4978                 }
4979         }
4980         zonelist->_zonerefs[pos].zone = NULL;
4981         zonelist->_zonerefs[pos].zone_idx = 0;
4982 }
4983 
4984 #if defined(CONFIG_64BIT)
4985 /*
4986  * Devices that require DMA32/DMA are relatively rare and do not justify a
4987  * penalty to every machine in case the specialised case applies. Default
4988  * to Node-ordering on 64-bit NUMA machines
4989  */
4990 static int default_zonelist_order(void)
4991 {
4992         return ZONELIST_ORDER_NODE;
4993 }
4994 #else
4995 /*
4996  * On 32-bit, the Normal zone needs to be preserved for allocations accessible
4997  * by the kernel. If processes running on node 0 deplete the low memory zone
4998  * then reclaim will occur more frequently, increasing stalls and potentially
4999  * making it easier to OOM if a large percentage of the zone is under writeback or
5000  * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
5001  * Hence, default to zone ordering on 32-bit.
5002  */
5003 static int default_zonelist_order(void)
5004 {
5005         return ZONELIST_ORDER_ZONE;
5006 }
5007 #endif /* CONFIG_64BIT */
5008 
5009 static void set_zonelist_order(void)
5010 {
5011         if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
5012                 current_zonelist_order = default_zonelist_order();
5013         else
5014                 current_zonelist_order = user_zonelist_order;
5015 }
5016 
5017 static void build_zonelists(pg_data_t *pgdat)
5018 {
5019         int i, node, load;
5020         nodemask_t used_mask;
5021         int local_node, prev_node;
5022         struct zonelist *zonelist;
5023         unsigned int order = current_zonelist_order;
5024 
5025         /* initialize zonelists */
5026         for (i = 0; i < MAX_ZONELISTS; i++) {
5027                 zonelist = pgdat->node_zonelists + i;
5028                 zonelist->_zonerefs[0].zone = NULL;
5029                 zonelist->_zonerefs[0].zone_idx = 0;
5030         }
5031 
5032         /* NUMA-aware ordering of nodes */
5033         local_node = pgdat->node_id;
5034         load = nr_online_nodes;
5035         prev_node = local_node;
5036         nodes_clear(used_mask);
5037 
5038         memset(node_order, 0, sizeof(node_order));
5039         i = 0;
5040 
5041         while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5042                 /*
5043                  * We don't want to pressure a particular node.
5044                  * So we add a penalty to the first node in the same
5045                  * distance group to make it round-robin.
5046                  */
5047                 if (node_distance(local_node, node) !=
5048                     node_distance(local_node, prev_node))
5049                         node_load[node] = load;
5050 
5051                 prev_node = node;
5052                 load--;
5053                 if (order == ZONELIST_ORDER_NODE)
5054                         build_zonelists_in_node_order(pgdat, node);
5055                 else
5056                         node_order[i++] = node; /* remember order */
5057         }
5058 
5059         if (order == ZONELIST_ORDER_ZONE) {
5060                 /* calculate node order -- i.e., DMA last! */
5061                 build_zonelists_in_zone_order(pgdat, i);
5062         }
5063 
5064         build_thisnode_zonelists(pgdat);
5065 }
5066 
5067 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5068 /*
5069  * Return node id of node used for "local" allocations.
5070  * I.e., first node id of first zone in arg node's generic zonelist.
5071  * Used for initializing percpu 'numa_mem', which is used primarily
5072  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5073  */
5074 int local_memory_node(int node)
5075 {
5076         struct zoneref *z;
5077 
5078         z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5079                                    gfp_zone(GFP_KERNEL),
5080                                    NULL);
5081         return z->zone->node;
5082 }
5083 #endif
5084 
5085 static void setup_min_unmapped_ratio(void);
5086 static void setup_min_slab_ratio(void);
5087 #else   /* CONFIG_NUMA */
5088 
5089 static void set_zonelist_order(void)
5090 {
5091         current_zonelist_order = ZONELIST_ORDER_ZONE;
5092 }
5093 
5094 static void build_zonelists(pg_data_t *pgdat)
5095 {
5096         int node, local_node;
5097         enum zone_type j;
5098         struct zonelist *zonelist;
5099 
5100         local_node = pgdat->node_id;
5101 
5102         zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
5103         j = build_zonelists_node(pgdat, zonelist, 0);
5104 
5105         /*
5106          * Now we build the zonelist so that it contains the zones
5107          * of all the other nodes.
5108          * We don't want to pressure a particular node, so when
5109          * building the zones for node N, we make sure that the
5110          * zones coming right after the local ones are those from
5111          * node N+1 (modulo N)
5112          */
5113         for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5114                 if (!node_online(node))
5115                         continue;
5116                 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
5117         }
5118         for (node = 0; node < local_node; node++) {
5119                 if (!node_online(node))
5120                         continue;
5121                 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
5122         }
5123 
5124         zonelist->_zonerefs[j].zone = NULL;
5125         zonelist->_zonerefs[j].zone_idx = 0;
5126 }
5127 
5128 #endif  /* CONFIG_NUMA */
5129 
5130 /*
5131  * Boot pageset table. One per cpu which is going to be used for all
5132  * zones and all nodes. The parameters will be set in such a way
5133  * that an item put on a list will immediately be handed over to
5134  * the buddy list. This is safe since pageset manipulation is done
5135  * with interrupts disabled.
5136  *
5137  * The boot_pagesets must be kept even after bootup is complete for
5138  * unused processors and/or zones. They do play a role for bootstrapping
5139  * hotplugged processors.
5140  *
5141  * zoneinfo_show() and maybe other functions do
5142  * not check if the processor is online before following the pageset pointer.
5143  * Other parts of the kernel may not check if the zone is available.
5144  */
5145 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
5146 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
5147 static void setup_zone_pageset(struct zone *zone);
5148 
5149 /*
5150  * Global mutex to protect against size modification of zonelists
5151  * as well as to serialize pageset setup for the new populated zone.
5152  */
5153 DEFINE_MUTEX(zonelists_mutex);
5154 
5155 /* The return value is an int just so this can be used with stop_machine() */
5156 static int __build_all_zonelists(void *data)
5157 {
5158         int nid;
5159         int cpu;
5160         pg_data_t *self = data;
5161 
5162 #ifdef CONFIG_NUMA
5163         memset(node_load, 0, sizeof(node_load));
5164 #endif
5165 
5166         if (self && !node_online(self->node_id)) {
5167                 build_zonelists(self);
5168         }
5169 
5170         for_each_online_node(nid) {
5171                 pg_data_t *pgdat = NODE_DATA(nid);
5172 
5173                 build_zonelists(pgdat);
5174         }
5175 
5176         /*
5177          * Initialize the boot_pagesets that are going to be used
5178          * for bootstrapping processors. The real pagesets for
5179          * each zone will be allocated later when the per cpu
5180          * allocator is available.
5181          *
5182          * boot_pagesets are used also for bootstrapping offline
5183          * cpus if the system is already booted because the pagesets
5184          * are needed to initialize allocators on a specific cpu too.
5185          * F.e. the percpu allocator needs the page allocator which
5186          * needs the percpu allocator in order to allocate its pagesets
5187          * (a chicken-egg dilemma).
5188          */
5189         for_each_possible_cpu(cpu) {
5190                 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
5191 
5192 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5193                 /*
5194                  * We now know the "local memory node" for each node--
5195                  * i.e., the node of the first zone in the generic zonelist.
5196                  * Set up numa_mem percpu variable for on-line cpus.  During
5197                  * boot, only the boot cpu should be on-line;  we'll init the
5198                  * secondary cpus' numa_mem as they come on-line.  During
5199                  * node/memory hotplug, we'll fixup all on-line cpus.
5200                  */
5201                 if (cpu_online(cpu))
5202                         set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5203 #endif
5204         }
5205 
5206         return 0;
5207 }
5208 
5209 static noinline void __init
5210 build_all_zonelists_init(void)
5211 {
5212         __build_all_zonelists(NULL);
5213         mminit_verify_zonelist();
5214         cpuset_init_current_mems_allowed();
5215 }
5216 
5217 /*
5218  * Called with zonelists_mutex held always
5219  * unless system_state == SYSTEM_BOOTING.
5220  *
5221  * __ref due to (1) call of __meminit annotated setup_zone_pageset
5222  * [we're only called with non-NULL zone through __meminit paths] and
5223  * (2) call of __init annotated helper build_all_zonelists_init
5224  * [protected by SYSTEM_BOOTING].
5225  */
5226 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
5227 {
5228         set_zonelist_order();
5229 
5230         if (system_state == SYSTEM_BOOTING) {
5231                 build_all_zonelists_init();
5232         } else {
5233 #ifdef CONFIG_MEMORY_HOTPLUG
5234                 if (zone)
5235                         setup_zone_pageset(zone);
5236 #endif
5237                 /* we have to stop all cpus to guarantee there is no user
5238                    of zonelist */
5239                 stop_machine(__build_all_zonelists, pgdat, NULL);
5240                 /* cpuset refresh routine should be here */
5241         }
5242         vm_total_pages = nr_free_pagecache_pages();
5243         /*
5244          * Disable grouping by mobility if the number of pages in the
5245          * system is too low to allow the mechanism to work. It would be
5246          * more accurate, but expensive to check per-zone. This check is
5247          * made on memory-hotadd so a system can start with mobility
5248          * disabled and enable it later
5249          */
5250         if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5251                 page_group_by_mobility_disabled = 1;
5252         else
5253                 page_group_by_mobility_disabled = 0;
5254 
5255         pr_info("Built %i zonelists in %s order, mobility grouping %s.  Total pages: %ld\n",
5256                 nr_online_nodes,
5257                 zonelist_order_name[current_zonelist_order],
5258                 page_group_by_mobility_disabled ? "off" : "on",
5259                 vm_total_pages);
5260 #ifdef CONFIG_NUMA
5261         pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5262 #endif
5263 }
5264 
5265 /*
5266  * Initially all pages are reserved - free ones are freed
5267  * up by free_all_bootmem() once the early boot process is
5268  * done. Non-atomic initialization, single-pass.
5269  */
5270 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
5271                 unsigned long start_pfn, enum memmap_context context)
5272 {
5273         struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
5274         unsigned long end_pfn = start_pfn + size;
5275         pg_data_t *pgdat = NODE_DATA(nid);
5276         unsigned long pfn;
5277         unsigned long nr_initialised = 0;
5278 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5279         struct memblock_region *r = NULL, *tmp;
5280 #endif
5281 
5282         if (highest_memmap_pfn < end_pfn - 1)
5283                 highest_memmap_pfn = end_pfn - 1;
5284 
5285         /*
5286          * Honor reservation requested by the driver for this ZONE_DEVICE
5287          * memory
5288          */
5289         if (altmap && start_pfn == altmap->base_pfn)
5290                 start_pfn += altmap->reserve;
5291 
5292         for (pfn = start_pfn; pfn < end_pfn; pfn++) {
5293                 /*
5294                  * There can be holes in boot-time mem_map[]s handed to this
5295                  * function.  They do not exist on hotplugged memory.
5296                  */
5297                 if (context != MEMMAP_EARLY)
5298                         goto not_early;
5299 
5300                 if (!early_pfn_valid(pfn)) {
5301 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5302                         /*
5303                          * Skip to the pfn preceding the next valid one (or
5304                          * end_pfn), such that we hit a valid pfn (or end_pfn)
5305                          * on our next iteration of the loop.
5306                          */
5307                         pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
5308 #endif
5309                         continue;
5310                 }
5311                 if (!early_pfn_in_nid(pfn, nid))
5312                         continue;
5313                 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
5314                         break;
5315 
5316 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5317                 /*
5318                  * Check the memblock attribute given by firmware, which can
5319                  * affect the kernel memory layout.  If zone==ZONE_MOVABLE but
5320                  * memory is mirrored, this is an overlapping memmap init; skip it.
5321                  */
5322                 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5323                         if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
5324                                 for_each_memblock(memory, tmp)
5325                                         if (pfn < memblock_region_memory_end_pfn(tmp))
5326                                                 break;
5327                                 r = tmp;
5328                         }
5329                         if (pfn >= memblock_region_memory_base_pfn(r) &&
5330                             memblock_is_mirror(r)) {
5331                                 /* already initialized as NORMAL */
5332                                 pfn = memblock_region_memory_end_pfn(r);
5333                                 continue;
5334                         }
5335                 }
5336 #endif
5337 
5338 not_early:
5339                 /*
5340                  * Mark the block movable so that blocks are reserved for
5341                  * movable at startup. This will force kernel allocations
5342                  * to reserve their blocks rather than leaking throughout
5343                  * the address space during boot when many long-lived
5344                  * kernel allocations are made.
5345                  *
5346                  * The bitmap is created for the zone's valid pfn range, but the
5347                  * memmap can be created for invalid pages (for alignment).
5348                  * Check here so that set_pageblock_migratetype() is not called
5349                  * against a pfn outside the zone.
5350                  */
5351                 if (!(pfn & (pageblock_nr_pages - 1))) {
5352                         struct page *page = pfn_to_page(pfn);
5353 
5354                         __init_single_page(page, pfn, zone, nid);
5355                         set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5356                 } else {
5357                         __init_single_pfn(pfn, zone, nid);
5358                 }
5359         }
5360 }
5361 
5362 static void __meminit zone_init_free_lists(struct zone *zone)
5363 {
5364         unsigned int order, t;
5365         for_each_migratetype_order(order, t) {
5366                 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
5367                 zone->free_area[order].nr_free = 0;
5368         }
5369 }
5370 
5371 #ifndef __HAVE_ARCH_MEMMAP_INIT
5372 #define memmap_init(size, nid, zone, start_pfn) \
5373         memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
5374 #endif
5375 
5376 static int zone_batchsize(struct zone *zone)
5377 {
5378 #ifdef CONFIG_MMU
5379         int batch;
5380 
5381         /*
5382          * The per-cpu-pages pools are set to around 1000th of the
5383          * size of the zone.  But no more than 1/2 of a meg.
5384          *
5385          * OK, so we don't know how big the cache is.  So guess.
5386          */
5387         batch = zone->managed_pages / 1024;
5388         if (batch * PAGE_SIZE > 512 * 1024)
5389                 batch = (512 * 1024) / PAGE_SIZE;
5390         batch /= 4;             /* We effectively *= 4 below */
5391         if (batch < 1)
5392                 batch = 1;
5393 
5394         /*
5395          * Clamp the batch to a 2^n - 1 value. Having a power
5396          * of 2 value was found to be more likely to have
5397          * suboptimal cache aliasing properties in some cases.
5398          *
5399          * For example if 2 tasks are alternately allocating
5400          * batches of pages, one task can end up with a lot
5401          * of pages of one half of the possible page colors
5402          * and the other with pages of the other colors.
5403          */
5404         batch = rounddown_pow_of_two(batch + batch/2) - 1;
5405 
5406         return batch;
5407 
5408 #else
5409         /* The deferral and batching of frees should be suppressed under NOMMU
5410          * conditions.
5411          *
5412          * The problem is that NOMMU needs to be able to allocate large chunks
5413          * of contiguous memory as there's no hardware page translation to
5414          * assemble apparent contiguous memory from discontiguous pages.
5415          *
5416          * Queueing large contiguous runs of pages for batching, however,
5417          * causes the pages to actually be freed in smaller chunks.  As there
5418          * can be a significant delay between the individual batches being
5419          * recycled, this leads to the once large chunks of space being
5420          * fragmented and becoming unavailable for high-order allocations.
5421          */
5422         return 0;
5423 #endif
5424 }
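
/*
 * Illustrative arithmetic, assuming 4 KiB pages and a zone with 262144
 * managed pages (1 GiB):
 *
 *	batch = 262144 / 1024 = 256;		256 * 4096 bytes > 512 KiB,
 *	so batch = (512 * 1024) / 4096 = 128;	batch /= 4 gives 32;
 *	rounddown_pow_of_two(32 + 16) - 1 = 31
 *
 * leaving such a zone with a per-cpu batch of 31 pages.
 */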
5425 
5426 /*
5427  * pcp->high and pcp->batch values are related and dependent on one another:
5428  * ->batch must never be higher than ->high.
5429  * The following function updates them in a safe manner without read side
5430  * locking.
5431  *
5432  * Any new users of pcp->batch and pcp->high should ensure they can cope with
5433  * those fields changing asynchronously (according to the above rule).
5434  *
5435  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5436  * outside of boot time (or some other assurance that no concurrent updaters
5437  * exist).
5438  */
5439 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5440                 unsigned long batch)
5441 {
5442        /* start with a fail safe value for batch */
5443         pcp->batch = 1;
5444         smp_wmb();
5445 
5446        /* Update high, then batch, in order */
5447         pcp->high = high;
5448         smp_wmb();
5449 
5450         pcp->batch = batch;
5451 }
5452 
5453 /* a companion to pageset_set_high() */
5454 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
5455 {
5456         pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
5457 }
5458 
5459 static void pageset_init(struct per_cpu_pageset *p)
5460 {
5461         struct per_cpu_pages *pcp;
5462         int migratetype;
5463 
5464         memset(p, 0, sizeof(*p));
5465 
5466         pcp = &p->pcp;
5467         pcp->count = 0;
5468         for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
5469                 INIT_LIST_HEAD(&pcp->lists[migratetype]);
5470 }
5471 
5472 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
5473 {
5474         pageset_init(p);
5475         pageset_set_batch(p, batch);
5476 }
5477 
5478 /*
5479  * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
5480  * to the value high for the pageset p.
5481  */
5482 static void pageset_set_high(struct per_cpu_pageset *p,
5483                                 unsigned long high)
5484 {
5485         unsigned long batch = max(1UL, high / 4);
5486         if ((high / 4) > (PAGE_SHIFT * 8))
5487                 batch = PAGE_SHIFT * 8;
5488 
5489         pageset_update(&p->pcp, high, batch);
5490 }
5491 
5492 static void pageset_set_high_and_batch(struct zone *zone,
5493                                        struct per_cpu_pageset *pcp)
5494 {
5495         if (percpu_pagelist_fraction)
5496                 pageset_set_high(pcp,
5497                         (zone->managed_pages /
5498                                 percpu_pagelist_fraction));
5499         else
5500                 pageset_set_batch(pcp, zone_batchsize(zone));
5501 }
5502 
5503 static void __meminit zone_pageset_init(struct zone *zone, int cpu)
5504 {
5505         struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
5506 
5507         pageset_init(pcp);
5508         pageset_set_high_and_batch(zone, pcp);
5509 }
5510 
5511 static void __meminit setup_zone_pageset(struct zone *zone)
5512 {
5513         int cpu;
5514         zone->pageset = alloc_percpu(struct per_cpu_pageset);
5515         for_each_possible_cpu(cpu)
5516                 zone_pageset_init(zone, cpu);
5517 }
5518 
5519 /*
5520  * Allocate per cpu pagesets and initialize them.
5521  * Before this call only boot pagesets were available.
5522  */
5523 void __init setup_per_cpu_pageset(void)
5524 {
5525         struct pglist_data *pgdat;
5526         struct zone *zone;
5527 
5528         for_each_populated_zone(zone)
5529                 setup_zone_pageset(zone);
5530 
5531         for_each_online_pgdat(pgdat)
5532                 pgdat->per_cpu_nodestats =
5533                         alloc_percpu(struct per_cpu_nodestat);
5534 }
5535 
5536 static __meminit void zone_pcp_init(struct zone *zone)
5537 {
5538         /*
5539          * per cpu subsystem is not up at this point. The following code
5540          * relies on the ability of the linker to provide the
5541          * offset of a (static) per cpu variable into the per cpu area.
5542          */
5543         zone->pageset = &boot_pageset;
5544 
5545         if (populated_zone(zone))
5546                 printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
5547                         zone->name, zone->present_pages,
5548                                          zone_batchsize(zone));
5549 }
5550 
5551 int __meminit init_currently_empty_zone(struct zone *zone,
5552                                         unsigned long zone_start_pfn,
5553                                         unsigned long size)
5554 {
5555         struct pglist_data *pgdat = zone->zone_pgdat;
5556 
5557         pgdat->nr_zones = zone_idx(zone) + 1;
5558 
5559         zone->zone_start_pfn = zone_start_pfn;
5560 
5561         mminit_dprintk(MMINIT_TRACE, "memmap_init",
5562                         "Initialising map node %d zone %lu pfns %lu -> %lu\n",
5563                         pgdat->node_id,
5564                         (unsigned long)zone_idx(zone),
5565                         zone_start_pfn, (zone_start_pfn + size));
5566 
5567         zone_init_free_lists(zone);
5568         zone->initialized = 1;
5569 
5570         return 0;
5571 }
5572 
5573 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5574 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
5575 
5576 /*
5577  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
5578  */
5579 int __meminit __early_pfn_to_nid(unsigned long pfn,
5580                                         struct mminit_pfnnid_cache *state)
5581 {
5582         unsigned long start_pfn, end_pfn;
5583         int nid;
5584 
5585         if (state->last_start <= pfn && pfn < state->last_end)
5586                 return state->last_nid;
5587 
5588         nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
5589         if (nid != -1) {
5590                 state->last_start = start_pfn;
5591                 state->last_end = end_pfn;
5592                 state->last_nid = nid;
5593         }
5594 
5595         return nid;
5596 }
5597 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
5598 
5599 /**
5600  * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
5601  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
5602  * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
5603  *
5604  * If an architecture guarantees that all ranges registered contain no holes
5605  * and may be freed, this function may be used instead of calling
5606  * memblock_free_early_nid() manually.
5607  */
5608 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
5609 {
5610         unsigned long start_pfn, end_pfn;
5611         int i, this_nid;
5612 
5613         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
5614                 start_pfn = min(start_pfn, max_low_pfn);
5615                 end_pfn = min(end_pfn, max_low_pfn);
5616 
5617                 if (start_pfn < end_pfn)
5618                         memblock_free_early_nid(PFN_PHYS(start_pfn),
5619                                         (end_pfn - start_pfn) << PAGE_SHIFT,
5620                                         this_nid);
5621         }
5622 }
5623 
5624 /**
5625  * sparse_memory_present_with_active_regions - Call memory_present for each active range
5626  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
5627  *
5628  * If an architecture guarantees that all ranges registered contain no holes and may
5629  * be freed, this function may be used instead of calling memory_present() manually.
5630  */
5631 void __init sparse_memory_present_with_active_regions(int nid)
5632 {
5633         unsigned long start_pfn, end_pfn;
5634         int i, this_nid;
5635 
5636         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
5637                 memory_present(this_nid, start_pfn, end_pfn);
5638 }
5639 
5640 /**
5641  * get_pfn_range_for_nid - Return the start and end page frames for a node
5642  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
5643  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
5644  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
5645  *
5646  * It returns the start and end page frame of a node based on information
5647  * provided by memblock_set_node(). If called for a node
5648  * with no available memory, a warning is printed and the start and end
5649  * PFNs will be 0.
5650  */
5651 void __meminit get_pfn_range_for_nid(unsigned int nid,
5652                         unsigned long *start_pfn, unsigned long *end_pfn)
5653 {
5654         unsigned long this_start_pfn, this_end_pfn;
5655         int i;
5656 
5657         *start_pfn = -1UL;
5658         *end_pfn = 0;
5659 
5660         for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
5661                 *start_pfn = min(*start_pfn, this_start_pfn);
5662                 *end_pfn = max(*end_pfn, this_end_pfn);
5663         }
5664 
5665         if (*start_pfn == -1UL)
5666                 *start_pfn = 0;
5667 }
5668 
5669 /*
5670  * This finds a zone that can be used for ZONE_MOVABLE pages. The
5671  * assumption is made that zones within a node are ordered by monotonically
5672  * increasing memory addresses so that the "highest" populated zone is used
5673  */
5674 static void __init find_usable_zone_for_movable(void)
5675 {
5676         int zone_index;
5677         for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
5678                 if (zone_index == ZONE_MOVABLE)
5679                         continue;
5680 
5681                 if (arch_zone_highest_possible_pfn[zone_index] >
5682                                 arch_zone_lowest_possible_pfn[zone_index])
5683                         break;
5684         }
5685 
5686         VM_BUG_ON(zone_index == -1);
5687         movable_zone = zone_index;
5688 }
5689 
5690 /*
5691  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
5692  * because it is sized independently of the architecture. Unlike the other zones,
5693  * the starting point for ZONE_MOVABLE is not fixed. It may be different
5694  * in each node depending on the size of each node and how evenly kernelcore
5695  * is distributed. This helper function adjusts the zone ranges
5696  * provided by the architecture for a given node by using the end of the
5697  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
5698  * zones within a node are ordered by monotonically increasing memory addresses.
5699  */
5700 static void __meminit adjust_zone_range_for_zone_movable(int nid,
5701                                         unsigned long zone_type,
5702                                         unsigned long node_start_pfn,
5703                                         unsigned long node_end_pfn,
5704                                         unsigned long *zone_start_pfn,
5705                                         unsigned long *zone_end_pfn)
5706 {
5707         /* Only adjust if ZONE_MOVABLE is on this node */
5708         if (zone_movable_pfn[nid]) {
5709                 /* Size ZONE_MOVABLE */
5710                 if (zone_type == ZONE_MOVABLE) {
5711                         *zone_start_pfn = zone_movable_pfn[nid];
5712                         *zone_end_pfn = min(node_end_pfn,
5713                                 arch_zone_highest_possible_pfn[movable_zone]);
5714 
5715                 /* Adjust for ZONE_MOVABLE starting within this range */
5716                 } else if (!mirrored_kernelcore &&
5717                         *zone_start_pfn < zone_movable_pfn[nid] &&
5718                         *zone_end_pfn > zone_movable_pfn[nid]) {
5719                         *zone_end_pfn = zone_movable_pfn[nid];
5720 
5721                 /* Check if this whole range is within ZONE_MOVABLE */
5722                 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
5723                         *zone_start_pfn = *zone_end_pfn;
5724         }
5725 }
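/*
 * Worked example (illustrative PFNs, assuming mirrored_kernelcore is off):
 * suppose node 0 spans [0x00000, 0x80000) and zone_movable_pfn[0] = 0x60000.
 *  - For ZONE_MOVABLE itself the range becomes [0x60000, node/zone end).
 *  - A kernel zone spanning [0x40000, 0x80000) straddles the boundary, so
 *    its *zone_end_pfn is clipped down to 0x60000.
 *  - A kernel zone lying entirely at or above 0x60000 is emptied by setting
 *    *zone_start_pfn = *zone_end_pfn.
 */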
5726 
5727 /*
5728  * Return the number of pages a zone spans in a node, including holes
5729  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
5730  */
5731 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
5732                                         unsigned long zone_type,
5733                                         unsigned long node_start_pfn,
5734                                         unsigned long node_end_pfn,
5735                                         unsigned long *zone_start_pfn,
5736                                         unsigned long *zone_end_pfn,
5737                                         unsigned long *ignored)
5738 {
5739         /* When hotadding a new node from cpu_up(), the node should be empty */
5740         if (!node_start_pfn && !node_end_pfn)
5741                 return 0;
5742 
5743         /* Get the start and end of the zone */
5744         *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5745         *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
5746         adjust_zone_range_for_zone_movable(nid, zone_type,
5747                                 node_start_pfn, node_end_pfn,
5748                                 zone_start_pfn, zone_end_pfn);
5749 
5750         /* Check that this node has pages within the zone's required range */
5751         if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
5752                 return 0;
5753 
5754         /* Move the zone boundaries inside the node if necessary */
5755         *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
5756         *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
5757 
5758         /* Return the spanned pages */
5759         return *zone_end_pfn - *zone_start_pfn;
5760 }
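/*
 * Worked example (illustrative PFNs, assuming no ZONE_MOVABLE adjustment on
 * the node): with an architectural zone range of [0x1000, 0x100000) and a
 * node covering [0x80000, 0x200000), the boundaries are clipped to
 * [0x80000, 0x100000) and 0x80000 spanned pages are returned; a zone that
 * does not intersect the node at all returns 0.
 */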
5761 
5762 /*
5763  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
5764  * then all holes in the requested range will be accounted for.
5765  */
5766 unsigned long __meminit __absent_pages_in_range(int nid,
5767                                 unsigned long range_start_pfn,
5768                                 unsigned long range_end_pfn)
5769 {
5770         unsigned long nr_absent = range_end_pfn - range_start_pfn;
5771         unsigned long start_pfn, end_pfn;
5772         int i;
5773 
5774         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5775                 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5776                 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5777                 nr_absent -= end_pfn - start_pfn;
5778         }
5779         return nr_absent;
5780 }
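/*
 * Worked example (illustrative PFNs): for a request over [0x0, 0x10000)
 * where memblock reports a single region covering [0x1000, 0x9000),
 * nr_absent starts at 0x10000 pages and the 0x8000 intersecting pages are
 * subtracted, so 0x8000 pages are reported as being in holes.
 */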
5781 
5782 /**
5783  * absent_pages_in_range - Return number of page frames in holes within a range
5784  * @start_pfn: The start PFN to start searching for holes
5785  * @end_pfn: The end PFN to stop searching for holes
5786  *
5787  * It returns the number of page frames in memory holes within a range.
5788  */
5789 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5790                                                         unsigned long end_pfn)
5791 {
5792         return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5793 }
5794 
5795 /* Return the number of page frames in holes in a zone on a node */
5796 static unsigned long __meminit zone_absent_pages_in_node(int nid,
5797                                         unsigned long zone_type,
5798                                         unsigned long node_start_pfn,
5799                                         unsigned long node_end_pfn,
5800                                         unsigned long *ignored)
5801 {
5802         unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5803         unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
5804         unsigned long zone_start_pfn, zone_end_pfn;
5805         unsigned long nr_absent;
5806 
5807         /* When hotadding a new node from cpu_up(), the node should be empty */
5808         if (!node_start_pfn && !node_end_pfn)
5809                 return 0;
5810 
5811         zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5812         zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
5813 
5814         adjust_zone_range_for_zone_movable(nid, zone_type,
5815                         node_start_pfn, node_end_pfn,
5816                         &zone_start_pfn, &zone_end_pfn);
5817         nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5818 
5819         /*
5820          * ZONE_MOVABLE handling.
5821          * Treat pages that will end up in ZONE_MOVABLE but currently fall in
5822          * ZONE_NORMAL as absent pages, and vice versa.
5823          */
5824         if (mirrored_kernelcore && zone_movable_pfn[nid]) {
5825                 unsigned long start_pfn, end_pfn;
5826                 struct memblock_region *r;
5827 
5828                 for_each_memblock(memory, r) {
5829                         start_pfn = clamp(memblock_region_memory_base_pfn(r),
5830                                           zone_start_pfn, zone_end_pfn);
5831                         end_pfn = clamp(memblock_region_memory_end_pfn(r),
5832                                         zone_start_pfn, zone_end_pfn);
5833 
5834                         if (zone_type == ZONE_MOVABLE &&
5835                             memblock_is_mirror(r))
5836                                 nr_absent += end_pfn - start_pfn;
5837 
5838                         if (zone_type == ZONE_NORMAL &&
5839                             !memblock_is_mirror(r))
5840                                 nr_absent += end_pfn - start_pfn;
5841                 }
5842         }
5843 
5844         return nr_absent;
5845 }
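/*
 * Worked example for the mirrored-kernelcore path above (illustrative
 * sizes): a non-mirrored memblock region of 0x4000 pages inside the clipped
 * ZONE_NORMAL range will end up in ZONE_MOVABLE, so those pages are added to
 * nr_absent for ZONE_NORMAL; conversely, a mirrored region inside the
 * ZONE_MOVABLE range is counted as absent for ZONE_MOVABLE.
 */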
5846 
5847 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5848 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
5849                                         unsigned long zone_type,
5850                                         unsigned long node_start_pfn,
5851                                         unsigned long node_end_pfn,
5852                                         unsigned long *zone_start_pfn,
5853                                         unsigned long *zone_end_pfn,
5854                                         unsigned long *zones_size)
5855 {
5856         unsigned int zone;
5857 
5858         *zone_start_pfn = node_start_pfn;
5859         for (zone = 0; zone < zone_type; zone++)
5860                 *zone_start_pfn += zones_size[zone];
5861 
5862         *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
5863 
5864         return zones_size[zone_type];
5865 }
5866 
5867 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
5868                                                 unsigned long zone_type,
5869                                                 unsigned long node_start_pfn,
5870                                                 unsigned long node_end_pfn,
5871                                                 unsigned long *zholes_size)
5872 {
5873         if (!zholes_size)
5874                 return 0;
5875 
5876         return zholes_size[zone_type];
5877 }
5878 
5879 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5880 
5881 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
5882                                                 unsigned long node_start_pfn,
5883                                                 unsigned long node_end_pfn,
5884                                                 unsigned long *zones_size,
5885                                                 unsigned long *zholes_size)
5886 {
5887         unsigned long realtotalpages = 0, totalpages = 0;
5888         enum zone_type i;
5889 
5890         for (i = 0; i < MAX_NR_ZONES; i++) {
5891                 struct zone *zone = pgdat->node_zones + i;
5892                 unsigned long zone_start_pfn, zone_end_pfn;
5893                 unsigned long size, real_size;
5894 
5895                 size = zone_spanned_pages_in_node(pgdat->node_id, i,
5896                                                   node_start_pfn,
5897                                                   node_end_pfn,
5898                                                   &zone_start_pfn,
5899                                                   &zone_end_pfn,
5900                                                   zones_size);
5901                 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
5902                                                   node_start_pfn, node_end_pfn,
5903                                                   zholes_size);
5904                 if (size)
5905                         zone->zone_start_pfn = zone_start_pfn;
5906                 else
5907                         zone->zone_start_pfn = 0;
5908                 zone->spanned_pages = size;
5909                 zone->present_pages = real_size;
5910 
5911                 totalpages += size;
5912                 realtotalpages += real_size;
5913         }
5914 
5915         pgdat->node_spanned_pages = totalpages;
5916         pgdat->node_present_pages = realtotalpages;
5917         printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5918                                                         realtotalpages);
5919 }
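/*
 * Worked example (illustrative sizes): a node spanning 0x100000 PFNs with
 * 0x20000 PFNs of holes ends up with node_spanned_pages = 0x100000 and
 * node_present_pages = 0xe0000; the latter is what the debug printk above
 * reports as "totalpages".
 */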
5920 
5921 #ifndef CONFIG_SPARSEMEM
5922 /*
5923  * Calculate the size of the zone->pageblock_flags bitmap, rounded to an
5924  * unsigned long. Start by making sure zonesize is a multiple of
5925  * pageblock_nr_pages by rounding up. Then use NR_PAGEBLOCK_BITS worth of
5926  * bits per pageblock, round what is now in bits up to the nearest long
5927  * in bits, and return the result in bytes.
5928  */
5929 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
5930 {
5931         unsigned long usemapsize;
5932 
5933         zonesize += zone_start_pfn & (pageblock_nr_pages-1);
5934         usemapsize = roundup(zonesize, pageblock_nr_pages);
5935         usemapsize = usemapsize >> pageblock_order;
5936         usemapsize *= NR_PAGEBLOCK_BITS;
5937         usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
5938 
5939         return usemapsize / 8;
5940 }
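/*
 * Worked example (illustrative values, assuming pageblock_order = 9 so
 * pageblock_nr_pages = 512, NR_PAGEBLOCK_BITS = 4 and 64-bit longs): an
 * aligned zone of 1048576 pages holds 2048 pageblocks, i.e. 8192 bits of
 * flags; 8192 is already a multiple of 64, so usemap_size() returns
 * 8192 / 8 = 1024 bytes.
 */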
5941 
5942 static void __init setup_usemap(struct pglist_data *pgdat,
5943                                 struct zone *zone,
5944                                 unsigned long zone_start_pfn,
5945                                 unsigned long zonesize)
5946 {
5947         unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
5948         zone->pageblock_flags = NULL;
5949         if (usemapsize)
5950                 zone->pageblock_flags =
5951                         memblock_virt_alloc_node_nopanic(usemapsize,
5952                                                          pgdat->node_id);
5953 }
5954 #else
5955 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
5956                                 unsigned long zone_start_pfn, unsigned long zonesize) {}
5957 #endif /* CONFIG_SPARSEMEM */
5958 
5959 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
5960 
5961 /* Initialise pageblock_order, which determines the number of pages represented by NR_PAGEBLOCK_BITS */
5962 void __paginginit set_pageblock_order(void)
5963 {
5964         unsigned int order;
5965 
5966         /* Check that pageblock_order has not already been set up */
5967         if (pageblock_order)
5968                 return;
5969 
5970         if (HPAGE_SHIFT > PAGE_SHIFT)
5971                 order = HUGETLB_PAGE_ORDER;
5972         else
5973                 order = MAX_ORDER - 1;
5974 
5975         /*
5976          * Assume the largest contiguous order of interest is a huge page.
5977          * This value may be variable depending on boot parameters on IA64 and
5978          * powerpc.
5979          */
5980         pageblock_order = order;
5981 }
5982 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5983 
5984 /*
5985  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
5986  * is unused as pageblock_order is set at compile-time. See
5987  * include/linux/pageblock-flags.h for the values of pageblock_order based on
5988  * the kernel config.
5989  */
5990 void __paginginit set_pageblock_order(void)
5991 {
5992 }
5993 
5994 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5995 
5996 static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
5997                                                    unsigned long present_pages)
5998 {
5999         unsigned long pages = spanned_pages;
6000 
6001         /*
6002          * Provide a more accurate estimation if there are holes within
6003          * the zone and SPARSEMEM is in use. If there are holes within the
6004          * zone, each populated memory region may cost us one or two extra
6005          * memmap pages due to alignment because memmap pages for each
6006          * populated region may not be naturally aligned on a page boundary.
6007          * So the (present_pages >> 4) heuristic is a tradeoff for that.
6008          */
6009         if (spanned_pages > present_pages + (present_pages >> 4) &&
6010             IS_ENABLED(CONFIG_SPARSEMEM))
6011                 pages = present_pages;
6012 
6013         return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
6014 }
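/*
 * Worked example (illustrative values, assuming a 64-byte struct page and
 * 4 KiB pages): a zone with spanned_pages == present_pages == 0x40000 needs
 * 0x40000 * 64 bytes = 16 MiB of memmap, i.e. 4096 memmap pages. Only when
 * spanned_pages exceeds present_pages by more than 1/16 of present_pages and
 * SPARSEMEM is enabled is the estimate based on present_pages instead.
 */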
6015 
6016 /*
6017  * Set up the zone data structures:
6018  *   - mark all pages reserved
6019  *   - mark all memory queues empty
6020  *   - clear the memory bitmaps
6021  *
6022  * NOTE: pgdat should get zeroed by caller.
6023  */
6024 static void __paginginit free_area_init_core(struct pglist_data *pgdat)
6025 {
6026         enum zone_type j;
6027         int nid = pgdat->node_id;
6028         int ret;
6029 
6030         pgdat_resize_init(pgdat);
6031 #ifdef CONFIG_NUMA_BALANCING
6032         spin_lock_init(&pgdat->numabalancing_migrate_lock);
6033         pgdat->numabalancing_migrate_nr_pages = 0;
6034         pgdat->numabalancing_migrate_next_window = jiffies;
6035 #endif
6036 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6037         spin_lock_init(&pgdat->split_queue_lock);
6038         INIT_LIST_HEAD(&pgdat->split_queue);
6039         pgdat->split_queue_len = 0;
6040 #endif
6041         init_waitqueue_head(&pgdat->kswapd_wait);
6042         init_waitqueue_head(&pgdat->pfmemalloc_wait);
6043 #ifdef CONFIG_COMPACTION
6044         init_waitqueue_head(&pgdat->kcompactd_wait);
6045 #endif
6046         pgdat_page_ext_init(pgdat);
6047         spin_lock_init(&pgdat->lru_lock);
6048         lruvec_init(node_lruvec(pgdat));
6049 
6050         for (j = 0; j < MAX_NR_ZONES; j++) {
6051                 struct zone *zone = pgdat->node_zones + j;
6052                 unsigned long size, realsize, freesize, memmap_pages;
6053                 unsigned long zone_start_pfn = zone->zone_start_pfn;
6054 
6055                 size = zone->spanned_pages;
6056                 realsize = freesize = zone->present_pages;
6057 
6058                 /*
6059                  * Adjust freesize so that it accounts for how much memory
6060                  * is used by this zone for memmap. This affects the watermark
6061                  * and per-cpu initialisations
6062                  */
6063                 memmap_pages = calc_memmap_size(size, realsize);
6064                 if (!is_highmem_idx(j)) {
6065                         if (freesize >= memmap_pages) {
6066                                 freesize -= memmap_pages;
6067                                 if (memmap_pages)
6068                                         printk(KERN_DEBUG
6069                                                "  %s zone: %lu pages used for memmap\n",
6070                                                zone_names[j], memmap_pages);
6071                         } else
6072                                 pr_warn("  %s zone: %lu pages exceeds freesize %lu\n",
6073                                         zone_names[j], memmap_pages, freesize);
6074                 }
6075 
6076                 /* Account for reserved pages */
6077                 if (j == 0 && freesize > dma_reserve) {
6078                         freesize -= dma_reserve;
6079                         printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
6080                                         zone_names[0], dma_reserve);
6081                 }
6082 
6083                 if (!is_highmem_idx(j))
6084                         nr_kernel_pages += freesize;
6085                 /* Charge for highmem memmap if there are enough kernel pages */
6086                 else if (nr_kernel_pages > memmap_pages * 2)
6087                         nr_kernel_pages -= memmap_pages;
6088                 nr_all_pages += freesize;
6089 
6090                 /*
6091                  * Set an approximate value for lowmem here; it will be adjusted
6092                  * when the bootmem allocator frees pages into the buddy system.
6093                  * All highmem pages will be managed by the buddy system.
6094                  */
6095                 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
6096 #ifdef CONFIG_NUMA
6097                 zone->node = nid;
6098 #endif
6099                 zone->name = zone_names[j];
6100                 zone->zone_pgdat = pgdat;
6101                 spin_lock_init(&zone->lock);
6102                 zone_seqlock_init(zone);
6103                 zone_pcp_init(zone);
6104 
6105                 if (!size)
6106                         continue;
6107 
6108                 set_pageblock_order();
6109                 setup_usemap(pgdat, zone, zone_start_pfn, size);
6110                 ret = init_currently_empty_zone(zone, zone_start_pfn, size);
6111                 BUG_ON(ret);
6112                 memmap_init(size, nid, j, zone_start_pfn);
6113         }
6114 }
6115 
6116 static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
6117 {
6118         unsigned long __maybe_unused start = 0;
6119         unsigned long __maybe_unused offset = 0;
6120 
6121         /* Skip empty nodes */
6122         if (!pgdat->node_spanned_pages)
6123                 return;
6124 
6125 #ifdef CONFIG_FLAT_NODE_MEM_MAP
6126         start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
6127         offset = pgdat->node_start_pfn - start;
6128         /* ia64 gets its own node_mem_map, before this, without bootmem */
6129         if (!pgdat->node_mem_map) {
6130                 unsigned long size, end;
6131                 struct page *map;
6132 
6133                 /*
6134                  * The zone's endpoints aren't required to be MAX_ORDER
6135                  * aligned, but the node_mem_map endpoints must be, in order
6136                  * for the buddy allocator to function correctly.
6137                  */
6138                 end = pgdat_end_pfn(pgdat);
6139                 end = ALIGN(end, MAX_ORDER_NR_PAGES);
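                /*
                 * Worked example (illustrative values, assuming
                 * MAX_ORDER_NR_PAGES = 1024): for node_start_pfn = 0x12345,
                 * start above was rounded down to 0x12000 with offset = 0x345,
                 * and end here is rounded up to the next 1024-PFN boundary,
                 * so the map covers whole MAX_ORDER blocks.
                 */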
6140                 size = (end - start) * sizeof(struct page);
6141                 map = alloc_remap(pgdat->node_id, size);
6142                 if (!map)
6143                         map = memblock_virt_alloc_node_nopanic(size,
6144                                                                pgdat->node_id);
6145